diff --git a/integration-tests/.golangci.yml b/integration-tests/.golangci.yml new file mode 100644 index 0000000000..d22b26b826 --- /dev/null +++ b/integration-tests/.golangci.yml @@ -0,0 +1,78 @@ +run: + timeout: 15m +linters: + enable: + - exhaustive + - exportloopref + - revive + - goimports + - gosec + - misspell + - rowserrcheck + - errorlint +linters-settings: + exhaustive: + default-signifies-exhaustive: true + goimports: + local-prefixes: github.com/smartcontractkit/chainlink + golint: + min-confidence: 0.999 + gosec: + excludes: + - G101 + govet: + # report about shadowed variables + check-shadowing: true + revive: + confidence: 0.8 + rules: + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: if-return + - name: increment-decrement + # - name: var-naming // doesn't work with some generated names + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unused-parameter + - name: unreachable-code + - name: redefines-builtin-id + - name: waitgroup-by-value + - name: unconditional-recursion + - name: struct-tag + - name: string-format + - name: string-of-int + - name: range-val-address + - name: range-val-in-closure + - name: modifies-value-receiver + - name: modifies-parameter + - name: identical-branches + - name: get-return + # - name: flag-parameter // probably one we should work on doing better at in the future + # - name: early-return // probably one we should work on doing better at in the future + - name: defer + - name: constant-logical-expr + - name: confusing-naming + - name: confusing-results + - name: bool-literal-in-expr + - name: atomic +issues: + exclude-rules: + - text: "^G404: Use of weak random number generator" + linters: 
+ - gosec + - linters: + - govet + text: "declaration of \"err\" shadows" diff --git a/integration-tests/.tool-versions b/integration-tests/.tool-versions index 68b6d99419..47b73e9de1 100644 --- a/integration-tests/.tool-versions +++ b/integration-tests/.tool-versions @@ -1,4 +1,5 @@ -golang 1.21.1 +golang 1.21.4 k3d 5.4.6 kubectl 1.25.5 nodejs 18.13.0 +golangci-lint 1.55.2 diff --git a/integration-tests/LOG_POLLER.md b/integration-tests/LOG_POLLER.md new file mode 100644 index 0000000000..6e98fba552 --- /dev/null +++ b/integration-tests/LOG_POLLER.md @@ -0,0 +1,163 @@ +# How to run Log Poller's tests + +## Limitations +* currently they can only be run in Docker, not in Kubernetes +* when using `looped` runner it's not possible to directly control execution time +* WASP's `gun` implementation is imperfect in terms of generated load + +## Configuration +Due to unfinished migration to TOML config, tests use a mixed configuration approach: +* network, RPC endpoints, funding keys, etc need to be provided by env vars +* test-specific configuration can be provided by TOML file or via a `Config` struct (to which TOML is parsed anyway); additionally, some of it can be overridden by env vars (for ease of use in CI) +** smoke tests use the programmatic approach +** load test uses the TOML approach + +## Approximated test scenario +Different tests might have slightly modified scenarios, but generally they follow this pattern: +* start CL nodes +* setup OCR +* upload Automation Registry 2.1 +* deploy UpKeep Consumers +* deploy test contracts +* register filters for test contracts +* make sure all CL nodes have filters registered +* emit test logs +* wait for log poller to finalise last block in which logs were emitted +** block number is determined either by finality tag or fixed finality depth depending on network configuration +* wait for all CL nodes to have expected log count +* compare logs that are present in the EVM node with logs in CL nodes + +All of the checks use fluent 
waits. + +### Required env vars +* `CHAINLINK_IMAGE` +* `CHAINLINK_VERSION` +* `SELECTED_NETWORKS` + +### Env vars required for live testnet tests +* `EVM_WS_URL` -- RPC websocket +* `EVM_HTTP_URL` -- RPC HTTP +* `EVM_KEYS` -- private keys used for funding + +Since on live testnets we are using existing and canonical LINK contracts, funding keys need to contain enough LINK to pay for the test. There's an automated check that fails during setup if there's not enough LINK. Approximately `9 LINK` is required for each UpKeep contract the test uses to register a `LogTrigger`. The test contract emits 3 types of events and unless configured otherwise (programmatically!) all of them will be used, which means that due to Automation's limitation we need to register a separate `LogTrigger` for each event type for each contract. So if you want to test with 100 contracts, then you'd need to register 300 UpKeep contracts and thus your funding address needs to have at least 2700 LINK. + +### Programmatic config +There are two load generators available: +* `looped` -- it's a simple generator that just loops over all contracts and emits events at random intervals +* `wasp` -- based on WASP load testing tool, it's more sophisticated and allows controlling execution time + +#### Looped config +``` + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 2, # number of test contracts to deploy + EventsPerTx: 4, # number of events to emit in a single transaction + UseFinalityTag: false, # if set to true then Log Poller will use finality tag returned by chain, when determining last finalised block (won't work on a simulated network, it requires eth2) + }, + LoopedConfig: &logpoller.LoopedConfig{ + ContractConfig: logpoller.ContractConfig{ + ExecutionCount: 100, # number of times each contract will be called + }, + FuzzConfig: logpoller.FuzzConfig{ + MinEmitWaitTimeMs: 200, # minimum number of milliseconds to wait before emitting events 
+ MaxEmitWaitTimeMs: 500, # maximum number of milliseconds to wait before emitting events + }, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { # modify that function to emit only logs you want + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit +``` + +Remember that final number of events emitted will be `Contracts * EventsPerTx * ExecutionCount * len(eventsToEmit)`. And that the last number by default is equal to `3` (that's because we want to emit different event types, not just one). You can change that by overriding `EventsToEmit` field. + +#### WASP config +``` + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 2, + EventsPerTx: 4, + UseFinalityTag: false, + }, + Wasp: &logpoller.WaspConfig{ + Load: &logpoller.Load{ + RPS: 10, # requests per second + LPS: 0, # logs per second + RateLimitUnitDuration: models.MustNewDuration(5 * time.Minute), # for how long the load should be limited (ramp-up period) + Duration: models.MustNewDuration(5 * time.Minute), # how long to generate the load for + CallTimeout: models.MustNewDuration(5 * time.Minute), # how long to wait for a single call to finish + }, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit +``` + +Remember that you cannot specify both `RPS` and `LPS`. If you want to use `LPS` then omit `RPS` field. Also remember that depending on the events you decide to emit RPS might mean 1 request or might mean 3 requests (if you go with the default `EventsToEmit`). + +For other nuances do check [gun.go](integration-tests/universal/log_poller/gun.go). + +### TOML config +That config follows the same structure as programmatic config shown above. 
+ +Sample config: [config.toml](integration-tests/load/log_poller/config.toml) + +Use this snippet instead of creating the `Config` struct programmatically: +``` + cfg, err := lp_helpers.ReadConfig(lp_helpers.DefaultConfigFilename) + require.NoError(t, err) +``` + +And remember to add events you want to emit: +``` + eventsToEmit := []abi.Event{} + for _, event := range lp_helpers.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit +``` + +### Timeouts +Various checks inside the tests have hardcoded timeouts, which might not be suitable for your execution parameters, for example if you decided to emit 1M logs, then waiting for all of them to be indexed for `1m` might not be enough. Remember to adjust them accordingly. + +Sample snippet: +``` + gom.Eventually(func(g gomega.Gomega) { + logCountMatches, err := clNodesHaveExpectedLogCount(startBlock, endBlock, testEnv.EVMClient.GetChainID(), totalLogsEmitted, expectedFilters, l, coreLogger, testEnv.ClCluster) + if err != nil { + l.Warn().Err(err).Msg("Error checking if CL nodes have expected log count. Retrying...") + } + g.Expect(logCountMatches).To(gomega.BeTrue(), "Not all CL nodes have expected log count") + }, "1m", "30s").Should(gomega.Succeed()) # 1m is the timeout for all nodes to have expected log count +``` + +## Tests +* [Load](integration-tests/load/log_poller/log_poller_test.go) +* [Smoke](integration-tests/smoke/log_poller/log_poller_test.go) + +## Running tests +After setting all the environment variables you can run the test with: +``` +# run in the root folder of chainlink repo +go test -v -test.timeout=2700s -run TestLogPollerReplay integration-tests/smoke/log_poller_test.go +``` + +Remember to adjust test timeout accordingly to match expected duration. + + +## Github Actions +If all of that seems too complicated, use this [on-demand workflow](https://github.com/smartcontractkit/chainlink/actions/workflows/on-demand-log-poller.yml). 
+ +Execution time here is an approximation, so depending on network conditions it might be slightly longer or shorter. \ No newline at end of file diff --git a/integration-tests/Makefile b/integration-tests/Makefile index f26518c007..fb4bfa74f3 100644 --- a/integration-tests/Makefile +++ b/integration-tests/Makefile @@ -56,6 +56,12 @@ install_gotestfmt: go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest set -euo pipefail +lint: + golangci-lint --color=always run ./... --fix -v + +build: + @go build ./... && SELECTED_NETWORKS=SIMULATED go test -run=^# ./... + # Builds the test image # tag: the tag for the test image being built, example: tag=tate # base_tag: the tag for the base-test-image to use, example: base_tag=latest @@ -118,7 +124,7 @@ test_chaos_verbose: ## Run all smoke tests with verbose logging # Performance .PHONY: test_perf -test_perf: test_need_operator_assets ## Run core node performance tests. +test_perf: ## Run core node performance tests. TEST_LOG_LEVEL="disabled" \ SELECTED_NETWORKS="SIMULATED,SIMULATED_1,SIMULATED_2" \ go test -timeout 1h -count=1 -json $(args) ./performance 2>&1 | tee /tmp/gotest.log | gotestfmt diff --git a/integration-tests/actions/actions.go b/integration-tests/actions/actions.go index dcdca91cc7..02a2523477 100644 --- a/integration-tests/actions/actions.go +++ b/integration-tests/actions/actions.go @@ -2,23 +2,25 @@ package actions import ( + "crypto/ecdsa" "encoding/json" "fmt" "math/big" "strings" "testing" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" "github.com/google/uuid" - "github.com/pkg/errors" "github.com/rs/zerolog/log" "go.uber.org/zap/zapcore" - "github.com/smartcontractkit/chainlink-env/environment" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" + 
"github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/testreporters" "github.com/smartcontractkit/chainlink-testing-framework/utils" @@ -252,7 +254,6 @@ func GetMockserverInitializerDataForOTPE( func TeardownSuite( t *testing.T, env *environment.Environment, - logsFolderPath string, chainlinkNodes []*client.ChainlinkK8sClient, optionalTestReporter testreporters.TestReporter, // Optionally pass in a test reporter to log further metrics failingLogLevel zapcore.Level, // Examines logs after the test, and fails the test if any Chainlink logs are found at or above provided level @@ -260,7 +261,7 @@ func TeardownSuite( ) error { l := logging.GetTestLogger(t) if err := testreporters.WriteTeardownLogs(t, env, optionalTestReporter, failingLogLevel); err != nil { - return errors.Wrap(err, "Error dumping environment logs, leaving environment running for manual retrieval") + return fmt.Errorf("Error dumping environment logs, leaving environment running for manual retrieval, err: %w", err) } // Delete all jobs to stop depleting the funds err := DeleteAllJobs(chainlinkNodes) @@ -328,16 +329,16 @@ func DeleteAllJobs(chainlinkNodes []*client.ChainlinkK8sClient) error { } jobs, _, err := node.ReadJobs() if err != nil { - return errors.Wrap(err, "error reading jobs from chainlink node") + return fmt.Errorf("error reading jobs from chainlink node, err: %w", err) } for _, maps := range jobs.Data { if _, ok := maps["id"]; !ok { - return errors.Errorf("error reading job id from chainlink node's jobs %+v", jobs.Data) + return fmt.Errorf("error reading job id from chainlink node's jobs %+v", jobs.Data) } id := maps["id"].(string) _, err := node.DeleteJob(id) if err != nil { - return errors.Wrap(err, "error deleting job from chainlink node") + return fmt.Errorf("error deleting job from chainlink node, err: %w", err) } } } @@ -348,7 +349,7 @@ func 
DeleteAllJobs(chainlinkNodes []*client.ChainlinkK8sClient) error { // all from a remote, k8s style environment func ReturnFunds(chainlinkNodes []*client.ChainlinkK8sClient, blockchainClient blockchain.EVMClient) error { if blockchainClient == nil { - return errors.New("blockchain client is nil, unable to return funds from chainlink nodes") + return fmt.Errorf("blockchain client is nil, unable to return funds from chainlink nodes") } log.Info().Msg("Attempting to return Chainlink node funds to default network wallets") if blockchainClient.NetworkSimulated() { @@ -414,7 +415,7 @@ func UpgradeChainlinkNodeVersions( nodes ...*client.ChainlinkK8sClient, ) error { if newImage == "" && newVersion == "" { - return errors.New("unable to upgrade node version, found empty image and version, must provide either a new image or a new version") + return fmt.Errorf("unable to upgrade node version, found empty image and version, must provide either a new image or a new version") } for _, node := range nodes { if err := node.UpgradeVersion(testEnvironment, newImage, newVersion); err != nil { @@ -443,3 +444,17 @@ func DeployMockETHLinkFeed(cd contracts.ContractDeployer, answer *big.Int) (cont } return mockETHLINKFeed, err } + +// todo - move to CTF +func GenerateWallet() (common.Address, error) { + privateKey, err := crypto.GenerateKey() + if err != nil { + return common.Address{}, err + } + publicKey := privateKey.Public() + publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return common.Address{}, fmt.Errorf("cannot assert type: publicKey is not of type *ecdsa.PublicKey") + } + return crypto.PubkeyToAddress(*publicKeyECDSA), nil +} diff --git a/integration-tests/actions/actions_local.go b/integration-tests/actions/actions_local.go index b65bac43bb..d4913cabd8 100644 --- a/integration-tests/actions/actions_local.go +++ b/integration-tests/actions/actions_local.go @@ -2,7 +2,8 @@ package actions import ( - "github.com/pkg/errors" + "fmt" + 
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" ) @@ -13,10 +14,10 @@ func UpgradeChainlinkNodeVersionsLocal( nodes ...*test_env.ClNode, ) error { if newImage == "" && newVersion == "" { - return errors.New("unable to upgrade node version, found empty image and version, must provide either a new image or a new version") + return fmt.Errorf("unable to upgrade node version, found empty image and version, must provide either a new image or a new version") } for _, node := range nodes { - if err := node.UpgradeVersion(node.NodeConfig, newImage, newVersion); err != nil { + if err := node.UpgradeVersion(newImage, newVersion); err != nil { return err } } diff --git a/integration-tests/actions/automation_ocr_helpers.go b/integration-tests/actions/automation_ocr_helpers.go index 998b1ee89c..e1635902db 100644 --- a/integration-tests/actions/automation_ocr_helpers.go +++ b/integration-tests/actions/automation_ocr_helpers.go @@ -14,14 +14,15 @@ import ( "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v4" - "github.com/smartcontractkit/chainlink-testing-framework/blockchain" - "github.com/smartcontractkit/chainlink-testing-framework/logging" ocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" ocr3 "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3confighelper" "github.com/smartcontractkit/libocr/offchainreporting2plus/types" ocr2keepers20config "github.com/smartcontractkit/ocr2keepers/pkg/v2/config" ocr2keepers30config "github.com/smartcontractkit/ocr2keepers/pkg/v3/config" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/logging" + "github.com/smartcontractkit/chainlink/v2/core/services/job" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" "github.com/smartcontractkit/chainlink/v2/core/store/models" diff --git a/integration-tests/actions/automation_ocr_helpers_local.go 
b/integration-tests/actions/automation_ocr_helpers_local.go index ccc2eea99d..f541594c4d 100644 --- a/integration-tests/actions/automation_ocr_helpers_local.go +++ b/integration-tests/actions/automation_ocr_helpers_local.go @@ -8,7 +8,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/lib/pq" - "github.com/pkg/errors" "github.com/rs/zerolog" ocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" ocr3 "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3confighelper" @@ -187,7 +186,7 @@ func CreateOCRKeeperJobsLocal( } else if registryVersion == ethereum.RegistryVersion_2_0 { contractVersion = "v2.0" } else { - return errors.New("v2.0 and v2.1 are the only supported versions") + return fmt.Errorf("v2.0 and v2.1 are the only supported versions") } bootstrapSpec := &client.OCR2TaskJobSpec{ diff --git a/integration-tests/actions/ocr2_helpers.go b/integration-tests/actions/ocr2_helpers.go index aead74f2bd..02ce73e813 100644 --- a/integration-tests/actions/ocr2_helpers.go +++ b/integration-tests/actions/ocr2_helpers.go @@ -15,14 +15,15 @@ import ( "golang.org/x/sync/errgroup" "gopkg.in/guregu/null.v4" + "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median" + "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" "github.com/smartcontractkit/chainlink/v2/core/services/job" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" "github.com/smartcontractkit/chainlink/v2/core/store/models" - "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median" - "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" - "github.com/smartcontractkit/libocr/offchainreporting2plus/types" 
"github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" diff --git a/integration-tests/actions/ocr2_helpers_local.go b/integration-tests/actions/ocr2_helpers_local.go index b3fe6eb041..65e0a466be 100644 --- a/integration-tests/actions/ocr2_helpers_local.go +++ b/integration-tests/actions/ocr2_helpers_local.go @@ -12,6 +12,12 @@ import ( "github.com/google/uuid" "github.com/lib/pq" "github.com/rs/zerolog/log" + "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median" + "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "golang.org/x/sync/errgroup" + "gopkg.in/guregu/null.v4" + "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" @@ -19,11 +25,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/testhelpers" "github.com/smartcontractkit/chainlink/v2/core/store/models" - "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median" - "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" - "github.com/smartcontractkit/libocr/offchainreporting2plus/types" - "golang.org/x/sync/errgroup" - "gopkg.in/guregu/null.v4" ) func CreateOCRv2JobsLocal( diff --git a/integration-tests/actions/ocr2vrf_actions/ocr2vrf_config_helpers.go b/integration-tests/actions/ocr2vrf_actions/ocr2vrf_config_helpers.go index ce69396432..e424aaa11b 100644 --- a/integration-tests/actions/ocr2vrf_actions/ocr2vrf_config_helpers.go +++ b/integration-tests/actions/ocr2vrf_actions/ocr2vrf_config_helpers.go @@ -16,9 +16,6 @@ import ( "go.dedis.ch/kyber/v3/group/edwards25519" "gopkg.in/guregu/null.v4" - 
"github.com/smartcontractkit/chainlink-testing-framework/logging" - "github.com/smartcontractkit/chainlink/v2/core/services/job" - "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" "github.com/smartcontractkit/libocr/offchainreporting2plus/types" "github.com/smartcontractkit/ocr2vrf/altbn_128" @@ -26,6 +23,10 @@ import ( "github.com/smartcontractkit/ocr2vrf/ocr2vrf" ocr2vrftypes "github.com/smartcontractkit/ocr2vrf/types" + "github.com/smartcontractkit/chainlink-testing-framework/logging" + "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" + "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" ) diff --git a/integration-tests/actions/ocr2vrf_actions/ocr2vrf_steps.go b/integration-tests/actions/ocr2vrf_actions/ocr2vrf_steps.go index c123aaff6a..72d668076e 100644 --- a/integration-tests/actions/ocr2vrf_actions/ocr2vrf_steps.go +++ b/integration-tests/actions/ocr2vrf_actions/ocr2vrf_steps.go @@ -22,6 +22,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/actions/ocr2vrf_actions/ocr2vrf_constants" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func SetAndWaitForVRFBeaconProcessToFinish(t *testing.T, ocr2VRFPluginConfig *OCR2VRFPluginConfig, vrfBeacon contracts.VRFBeacon) { @@ -172,7 +173,7 @@ func FundVRFCoordinatorV3Subscription(t *testing.T, linkToken contracts.LinkToke require.NoError(t, err, "Error waiting for TXs to complete") } -func DeployOCR2VRFContracts(t *testing.T, contractDeployer contracts.ContractDeployer, chainClient blockchain.EVMClient, linkToken contracts.LinkToken, mockETHLinkFeed contracts.MockETHLINKFeed, 
beaconPeriodBlocksCount *big.Int, keyID string) (contracts.DKG, contracts.VRFCoordinatorV3, contracts.VRFBeacon, contracts.VRFBeaconConsumer) { +func DeployOCR2VRFContracts(t *testing.T, contractDeployer contracts.ContractDeployer, chainClient blockchain.EVMClient, linkToken contracts.LinkToken, beaconPeriodBlocksCount *big.Int, keyID string) (contracts.DKG, contracts.VRFCoordinatorV3, contracts.VRFBeacon, contracts.VRFBeaconConsumer) { dkg, err := contractDeployer.DeployDKG() require.NoError(t, err, "Error deploying DKG Contract") @@ -272,14 +273,14 @@ func RequestRandomnessFulfillmentAndWaitForFulfilment( } func getRequestId(t *testing.T, consumer contracts.VRFBeaconConsumer, receipt *types.Receipt, confirmationDelay *big.Int) *big.Int { - periodBlocks, err := consumer.IBeaconPeriodBlocks(nil) + periodBlocks, err := consumer.IBeaconPeriodBlocks(utils.TestContext(t)) require.NoError(t, err, "Error getting Beacon Period block count") blockNumber := receipt.BlockNumber periodOffset := new(big.Int).Mod(blockNumber, periodBlocks) nextBeaconOutputHeight := new(big.Int).Sub(new(big.Int).Add(blockNumber, periodBlocks), periodOffset) - requestID, err := consumer.GetRequestIdsBy(nil, nextBeaconOutputHeight, confirmationDelay) + requestID, err := consumer.GetRequestIdsBy(utils.TestContext(t), nextBeaconOutputHeight, confirmationDelay) require.NoError(t, err, "Error getting requestID from consumer contract") return requestID @@ -305,7 +306,6 @@ func SetupOCR2VRFUniverse( contractDeployer, chainClient, linkToken, - mockETHLinkFeed, ocr2vrf_constants.BeaconPeriodBlocksCount, ocr2vrf_constants.KeyID, ) diff --git a/integration-tests/actions/ocr_helpers.go b/integration-tests/actions/ocr_helpers.go index cfc8cfe589..4f713dcdd6 100644 --- a/integration-tests/actions/ocr_helpers.go +++ b/integration-tests/actions/ocr_helpers.go @@ -27,7 +27,6 @@ func DeployOCRContracts( numberOfContracts int, linkTokenContract contracts.LinkToken, contractDeployer contracts.ContractDeployer, - 
bootstrapNode *client.ChainlinkK8sClient, workerNodes []*client.ChainlinkK8sClient, client blockchain.EVMClient, ) ([]contracts.OffchainAggregator, error) { diff --git a/integration-tests/actions/ocr_helpers_local.go b/integration-tests/actions/ocr_helpers_local.go index 8bb4e83479..e6dd5ae77f 100644 --- a/integration-tests/actions/ocr_helpers_local.go +++ b/integration-tests/actions/ocr_helpers_local.go @@ -9,11 +9,11 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/google/uuid" - "github.com/pkg/errors" "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" - "golang.org/x/sync/errgroup" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" @@ -280,7 +280,7 @@ func TrackForwarderLocal( chainID := chainClient.GetChainID() _, _, err := node.TrackForwarder(chainID, authorizedForwarder) if err != nil { - return errors.Wrap(err, "failed to track forwarder") + return fmt.Errorf("failed to track forwarder, err: %w", err) } logger.Info().Str("NodeURL", node.Config.URL). Str("ForwarderAddress", authorizedForwarder.Hex()). 
@@ -305,7 +305,7 @@ func DeployOCRContractsForwarderFlowLocal( contracts.DefaultOffChainAggregatorOptions(), ) if err != nil { - return nil, errors.Wrap(err, "failed to deploy offchain aggregator") + return nil, fmt.Errorf("failed to deploy offchain aggregator, err: %w", err) } ocrInstances = append(ocrInstances, ocrInstance) err = client.WaitForEvents() @@ -329,7 +329,7 @@ func DeployOCRContractsForwarderFlowLocal( for _, ocrInstance := range ocrInstances { err := ocrInstance.SetPayees(transmitters, payees) if err != nil { - return nil, errors.Wrap(err, "failed to set OCR payees") + return nil, fmt.Errorf("failed to set OCR payees, err: %w", err) } if err := client.WaitForEvents(); err != nil { return nil, err @@ -348,7 +348,7 @@ func DeployOCRContractsForwarderFlowLocal( forwarderAddresses, ) if err != nil { - return nil, errors.Wrap(err, "failed to set on-chain config") + return nil, fmt.Errorf("failed to set on-chain config, err: %w", err) } if err = client.WaitForEvents(); err != nil { return nil, err diff --git a/integration-tests/actions/operator_forwarder_helpers.go b/integration-tests/actions/operator_forwarder_helpers.go index 37b50c4fa9..a1d7135416 100644 --- a/integration-tests/actions/operator_forwarder_helpers.go +++ b/integration-tests/actions/operator_forwarder_helpers.go @@ -1,7 +1,6 @@ package actions import ( - "context" "math/big" "testing" @@ -17,6 +16,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func DeployForwarderContracts( @@ -67,7 +67,7 @@ func AcceptAuthorizedReceiversOperator( err = chainClient.WaitForEvents() require.NoError(t, err, "Waiting for events in nodes shouldn't fail") - senders, err := forwarderInstance.GetAuthorizedSenders(context.Background()) + senders, err := forwarderInstance.GetAuthorizedSenders(utils.TestContext(t)) require.NoError(t, err, "Getting 
authorized senders shouldn't fail") var nodesAddrs []string for _, o := range nodeAddresses { @@ -75,20 +75,18 @@ func AcceptAuthorizedReceiversOperator( } require.Equal(t, nodesAddrs, senders, "Senders addresses should match node addresses") - owner, err := forwarderInstance.Owner(context.Background()) + owner, err := forwarderInstance.Owner(utils.TestContext(t)) require.NoError(t, err, "Getting authorized forwarder owner shouldn't fail") require.Equal(t, operator.Hex(), owner, "Forwarder owner should match operator") } func ProcessNewEvent( t *testing.T, - eventSub geth.Subscription, operatorCreated chan *operator_factory.OperatorFactoryOperatorCreated, authorizedForwarderCreated chan *operator_factory.OperatorFactoryAuthorizedForwarderCreated, event *types.Log, eventDetails *abi.Event, operatorFactoryInstance contracts.OperatorFactory, - contractABI *abi.ABI, chainClient blockchain.EVMClient, ) { l := logging.GetTestLogger(t) @@ -141,7 +139,7 @@ func SubscribeOperatorFactoryEvents( l := logging.GetTestLogger(t) contractABI, err := operator_factory.OperatorFactoryMetaData.GetAbi() require.NoError(t, err, "Getting contract abi for OperatorFactory shouldn't fail") - latestBlockNum, err := chainClient.LatestBlockNumber(context.Background()) + latestBlockNum, err := chainClient.LatestBlockNumber(utils.TestContext(t)) require.NoError(t, err, "Subscribing to contract event log for OperatorFactory instance shouldn't fail") query := geth.FilterQuery{ FromBlock: big.NewInt(0).SetUint64(latestBlockNum), @@ -149,7 +147,7 @@ func SubscribeOperatorFactoryEvents( } eventLogs := make(chan types.Log) - sub, err := chainClient.SubscribeFilterLogs(context.Background(), query, eventLogs) + sub, err := chainClient.SubscribeFilterLogs(utils.TestContext(t), query, eventLogs) require.NoError(t, err, "Subscribing to contract event log for OperatorFactory instance shouldn't fail") go func() { defer sub.Unsubscribe() @@ -160,14 +158,14 @@ func SubscribeOperatorFactoryEvents( 
l.Error().Err(err).Msg("Error while watching for new contract events. Retrying Subscription") sub.Unsubscribe() - sub, err = chainClient.SubscribeFilterLogs(context.Background(), query, eventLogs) + sub, err = chainClient.SubscribeFilterLogs(utils.TestContext(t), query, eventLogs) require.NoError(t, err, "Subscribing to contract event log for OperatorFactory instance shouldn't fail") case vLog := <-eventLogs: eventDetails, err := contractABI.EventByID(vLog.Topics[0]) require.NoError(t, err, "Getting event details for OperatorFactory instance shouldn't fail") go ProcessNewEvent( - t, sub, operatorCreated, authorizedForwarderCreated, &vLog, - eventDetails, operatorFactoryInstance, contractABI, chainClient, + t, operatorCreated, authorizedForwarderCreated, &vLog, + eventDetails, operatorFactoryInstance, chainClient, ) if eventDetails.Name == "AuthorizedForwarderCreated" || eventDetails.Name == "OperatorCreated" { remainingExpectedEvents-- diff --git a/integration-tests/actions/vrfv1/actions.go b/integration-tests/actions/vrfv1/actions.go index 68d3e584ce..f8d7190709 100644 --- a/integration-tests/actions/vrfv1/actions.go +++ b/integration-tests/actions/vrfv1/actions.go @@ -1,7 +1,8 @@ package vrfv1 import ( - "github.com/pkg/errors" + "fmt" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" "github.com/smartcontractkit/chainlink/integration-tests/contracts" ) @@ -21,15 +22,15 @@ type Contracts struct { func DeployVRFContracts(cd contracts.ContractDeployer, bc blockchain.EVMClient, lt contracts.LinkToken) (*Contracts, error) { bhs, err := cd.DeployBlockhashStore() if err != nil { - return nil, errors.Wrap(err, ErrDeployBHSV1) + return nil, fmt.Errorf("%s, err %w", ErrDeployBHSV1, err) } coordinator, err := cd.DeployVRFCoordinator(lt.Address(), bhs.Address()) if err != nil { - return nil, errors.Wrap(err, ErrDeployVRFCootrinatorV1) + return nil, fmt.Errorf("%s, err %w", ErrDeployVRFCootrinatorV1, err) } consumer, err := 
cd.DeployVRFConsumer(lt.Address(), coordinator.Address()) if err != nil { - return nil, errors.Wrap(err, ErrDeployVRFConsumerV1) + return nil, fmt.Errorf("%s, err %w", ErrDeployVRFConsumerV1, err) } if err := bc.WaitForEvents(); err != nil { return nil, err diff --git a/integration-tests/actions/vrfv2_actions/vrfv2_steps.go b/integration-tests/actions/vrfv2_actions/vrfv2_steps.go index 24ac217a33..a832d020b0 100644 --- a/integration-tests/actions/vrfv2_actions/vrfv2_steps.go +++ b/integration-tests/actions/vrfv2_actions/vrfv2_steps.go @@ -6,7 +6,6 @@ import ( "math/big" "github.com/google/uuid" - "github.com/pkg/errors" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" chainlinkutils "github.com/smartcontractkit/chainlink/v2/core/utils" @@ -43,15 +42,15 @@ func DeployVRFV2Contracts( ) (*VRFV2Contracts, error) { bhs, err := contractDeployer.DeployBlockhashStore() if err != nil { - return nil, errors.Wrap(err, ErrDeployBlockHashStore) + return nil, fmt.Errorf("%s, err %w", ErrDeployBlockHashStore, err) } coordinator, err := contractDeployer.DeployVRFCoordinatorV2(linkTokenContract.Address(), bhs.Address(), linkEthFeedContract.Address()) if err != nil { - return nil, errors.Wrap(err, ErrDeployCoordinator) + return nil, fmt.Errorf("%s, err %w", ErrDeployCoordinator, err) } loadTestConsumer, err := contractDeployer.DeployVRFv2LoadTestConsumer(coordinator.Address()) if err != nil { - return nil, errors.Wrap(err, ErrAdvancedConsumer) + return nil, fmt.Errorf("%s, err %w", ErrAdvancedConsumer, err) } err = chainClient.WaitForEvents() if err != nil { @@ -70,7 +69,7 @@ func CreateVRFV2Jobs( for _, chainlinkNode := range chainlinkNodes { vrfKey, err := chainlinkNode.MustCreateVRFKey() if err != nil { - return nil, errors.Wrap(err, ErrCreatingVRFv2Key) + return nil, fmt.Errorf("%s, err %w", ErrCreatingVRFv2Key, err) } pubKeyCompressed := vrfKey.Data.ID jobUUID := uuid.New() @@ -79,11 +78,11 @@ func CreateVRFV2Jobs( } ost, err := os.String() if err != nil { - 
return nil, errors.Wrap(err, ErrParseJob) + return nil, fmt.Errorf("%s, err %w", ErrParseJob, err) } nativeTokenPrimaryKeyAddress, err := chainlinkNode.PrimaryEthAddress() if err != nil { - return nil, errors.Wrap(err, ErrNodePrimaryKey) + return nil, fmt.Errorf("%s, err %w", ErrNodePrimaryKey, err) } job, err := chainlinkNode.MustCreateJob(&client.VRFV2JobSpec{ Name: fmt.Sprintf("vrf-%s", jobUUID), @@ -97,15 +96,15 @@ func CreateVRFV2Jobs( BatchFulfillmentEnabled: false, }) if err != nil { - return nil, errors.Wrap(err, ErrCreatingVRFv2Job) + return nil, fmt.Errorf("%s, err %w", ErrCreatingVRFv2Job, err) } provingKey, err := VRFV2RegisterProvingKey(vrfKey, nativeTokenPrimaryKeyAddress, coordinator) if err != nil { - return nil, errors.Wrap(err, ErrCreatingProvingKey) + return nil, fmt.Errorf("%s, err %w", ErrCreatingProvingKey, err) } keyHash, err := coordinator.HashOfKey(context.Background(), provingKey) if err != nil { - return nil, errors.Wrap(err, ErrCreatingProvingKeyHash) + return nil, fmt.Errorf("%s, err %w", ErrCreatingProvingKeyHash, err) } ji := VRFV2JobInfo{ Job: job, @@ -125,14 +124,14 @@ func VRFV2RegisterProvingKey( ) (VRFV2EncodedProvingKey, error) { provingKey, err := actions.EncodeOnChainVRFProvingKey(*vrfKey) if err != nil { - return VRFV2EncodedProvingKey{}, errors.Wrap(err, ErrEncodingProvingKey) + return VRFV2EncodedProvingKey{}, fmt.Errorf("%s, err %w", ErrEncodingProvingKey, err) } err = coordinator.RegisterProvingKey( oracleAddress, provingKey, ) if err != nil { - return VRFV2EncodedProvingKey{}, errors.Wrap(err, ErrRegisterProvingKey) + return VRFV2EncodedProvingKey{}, fmt.Errorf("%s, err %w", ErrRegisterProvingKey, err) } return provingKey, nil } @@ -140,11 +139,11 @@ func VRFV2RegisterProvingKey( func FundVRFCoordinatorV2Subscription(linkToken contracts.LinkToken, coordinator contracts.VRFCoordinatorV2, chainClient blockchain.EVMClient, subscriptionID uint64, linkFundingAmount *big.Int) error { encodedSubId, err := 
chainlinkutils.ABIEncode(`[{"type":"uint64"}]`, subscriptionID) if err != nil { - return errors.Wrap(err, ErrABIEncodingFunding) + return fmt.Errorf("%s, err %w", ErrABIEncodingFunding, err) } _, err = linkToken.TransferAndCall(coordinator.Address(), big.NewInt(0).Mul(linkFundingAmount, big.NewInt(1e18)), encodedSubId) if err != nil { - return errors.Wrap(err, ErrSendingLinkToken) + return fmt.Errorf("%s, err %w", ErrSendingLinkToken, err) } return chainClient.WaitForEvents() } diff --git a/integration-tests/actions/vrfv2plus/vrfv2plus_config/config.go b/integration-tests/actions/vrfv2plus/vrfv2plus_config/config.go index 7a1221eaf8..a47103a8a1 100644 --- a/integration-tests/actions/vrfv2plus/vrfv2plus_config/config.go +++ b/integration-tests/actions/vrfv2plus/vrfv2plus_config/config.go @@ -7,8 +7,8 @@ type VRFV2PlusConfig struct { IsNativePayment bool `envconfig:"IS_NATIVE_PAYMENT" default:"false"` // Whether to use native payment or LINK token LinkNativeFeedResponse int64 `envconfig:"LINK_NATIVE_FEED_RESPONSE" default:"1000000000000000000"` // Response of the LINK/ETH feed MinimumConfirmations uint16 `envconfig:"MINIMUM_CONFIRMATIONS" default:"3"` // Minimum number of confirmations for the VRF Coordinator - SubscriptionFundingAmountLink int64 `envconfig:"SUBSCRIPTION_FUNDING_AMOUNT_LINK" default:"10"` // Amount of LINK to fund the subscription with - SubscriptionFundingAmountNative int64 `envconfig:"SUBSCRIPTION_FUNDING_AMOUNT_NATIVE" default:"1"` // Amount of native currency to fund the subscription with + SubscriptionFundingAmountLink float64 `envconfig:"SUBSCRIPTION_FUNDING_AMOUNT_LINK" default:"5"` // Amount of LINK to fund the subscription with + SubscriptionFundingAmountNative float64 `envconfig:"SUBSCRIPTION_FUNDING_AMOUNT_NATIVE" default:"1"` // Amount of native currency to fund the subscription with NumberOfWords uint32 `envconfig:"NUMBER_OF_WORDS" default:"3"` // Number of words to request CallbackGasLimit uint32 `envconfig:"CALLBACK_GAS_LIMIT" 
default:"1000000"` // Gas limit for the callback MaxGasLimitCoordinatorConfig uint32 `envconfig:"MAX_GAS_LIMIT_COORDINATOR_CONFIG" default:"2500000"` // Max gas limit for the VRF Coordinator config @@ -18,9 +18,13 @@ type VRFV2PlusConfig struct { FulfillmentFlatFeeLinkPPM uint32 `envconfig:"FULFILLMENT_FLAT_FEE_LINK_PPM" default:"500"` // Flat fee in ppm for LINK for the VRF Coordinator config FulfillmentFlatFeeNativePPM uint32 `envconfig:"FULFILLMENT_FLAT_FEE_NATIVE_PPM" default:"500"` // Flat fee in ppm for native currency for the VRF Coordinator config + NumberOfSubToCreate int `envconfig:"NUMBER_OF_SUB_TO_CREATE" default:"1"` // Number of subscriptions to create + RandomnessRequestCountPerRequest uint16 `envconfig:"RANDOMNESS_REQUEST_COUNT_PER_REQUEST" default:"1"` // How many randomness requests to send per request RandomnessRequestCountPerRequestDeviation uint16 `envconfig:"RANDOMNESS_REQUEST_COUNT_PER_REQUEST_DEVIATION" default:"0"` // How many randomness requests to send per request + RandomWordsFulfilledEventTimeout time.Duration `envconfig:"RANDOM_WORDS_FULFILLED_EVENT_TIMEOUT" default:"2m"` // How long to wait for the RandomWordsFulfilled event to be emitted + //Wrapper Config WrapperGasOverhead uint32 `envconfig:"WRAPPER_GAS_OVERHEAD" default:"50000"` CoordinatorGasOverhead uint32 `envconfig:"COORDINATOR_GAS_OVERHEAD" default:"52000"` @@ -37,6 +41,7 @@ type VRFV2PlusConfig struct { UseExistingEnv bool `envconfig:"USE_EXISTING_ENV" default:"false"` // Whether to use an existing environment or create a new one CoordinatorAddress string `envconfig:"COORDINATOR_ADDRESS" default:""` // Coordinator address ConsumerAddress string `envconfig:"CONSUMER_ADDRESS" default:""` // Consumer address + LinkAddress string `envconfig:"LINK_ADDRESS" default:""` // Link address SubID string `envconfig:"SUB_ID" default:""` // Subscription ID KeyHash string `envconfig:"KEY_HASH" default:""` } diff --git a/integration-tests/actions/vrfv2plus/vrfv2plus_steps.go 
b/integration-tests/actions/vrfv2plus/vrfv2plus_steps.go index 3bfa5d4f41..28fb2635ff 100644 --- a/integration-tests/actions/vrfv2plus/vrfv2plus_steps.go +++ b/integration-tests/actions/vrfv2plus/vrfv2plus_steps.go @@ -7,13 +7,15 @@ import ( "sync" "time" + "github.com/smartcontractkit/chainlink-testing-framework/utils" + "github.com/smartcontractkit/chainlink/v2/core/assets" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrfv2plus_wrapper_load_test_consumer" "github.com/ethereum/go-ethereum/common" "github.com/google/uuid" - "github.com/pkg/errors" "github.com/rs/zerolog" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus/vrfv2plus_config" @@ -70,19 +72,19 @@ func DeployVRFV2_5Contracts( ) (*VRFV2_5Contracts, error) { bhs, err := contractDeployer.DeployBlockhashStore() if err != nil { - return nil, errors.Wrap(err, ErrDeployBlockHashStore) + return nil, fmt.Errorf("%s, err %w", ErrDeployBlockHashStore, err) } err = chainClient.WaitForEvents() if err != nil { - return nil, errors.Wrap(err, ErrWaitTXsComplete) + return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } coordinator, err := contractDeployer.DeployVRFCoordinatorV2_5(bhs.Address()) if err != nil { - return nil, errors.Wrap(err, ErrDeployCoordinator) + return nil, fmt.Errorf("%s, err %w", ErrDeployCoordinator, err) } err = chainClient.WaitForEvents() if err != nil { - return nil, errors.Wrap(err, ErrWaitTXsComplete) + return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } consumers, err := DeployVRFV2PlusConsumers(contractDeployer, coordinator, consumerContractsAmount) if err != nil { @@ -90,7 +92,7 @@ func DeployVRFV2_5Contracts( } err = chainClient.WaitForEvents() if err != nil { - return nil, errors.Wrap(err, ErrWaitTXsComplete) + return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } return 
&VRFV2_5Contracts{coordinator, bhs, consumers}, nil } @@ -106,11 +108,11 @@ func DeployVRFV2PlusDirectFundingContracts( vrfv2PlusWrapper, err := contractDeployer.DeployVRFV2PlusWrapper(linkTokenAddress, linkEthFeedAddress, coordinator.Address()) if err != nil { - return nil, errors.Wrap(err, ErrDeployWrapper) + return nil, fmt.Errorf("%s, err %w", ErrDeployWrapper, err) } err = chainClient.WaitForEvents() if err != nil { - return nil, errors.Wrap(err, ErrWaitTXsComplete) + return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } consumers, err := DeployVRFV2PlusWrapperConsumers(contractDeployer, linkTokenAddress, vrfv2PlusWrapper, consumerContractsAmount) @@ -119,7 +121,7 @@ func DeployVRFV2PlusDirectFundingContracts( } err = chainClient.WaitForEvents() if err != nil { - return nil, errors.Wrap(err, ErrWaitTXsComplete) + return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } return &VRFV2PlusWrapperContracts{vrfv2PlusWrapper, consumers}, nil } @@ -129,7 +131,7 @@ func DeployVRFV2PlusConsumers(contractDeployer contracts.ContractDeployer, coord for i := 1; i <= consumerContractsAmount; i++ { loadTestConsumer, err := contractDeployer.DeployVRFv2PlusLoadTestConsumer(coordinator.Address()) if err != nil { - return nil, errors.Wrap(err, ErrAdvancedConsumer) + return nil, fmt.Errorf("%s, err %w", ErrAdvancedConsumer, err) } consumers = append(consumers, loadTestConsumer) } @@ -141,7 +143,7 @@ func DeployVRFV2PlusWrapperConsumers(contractDeployer contracts.ContractDeployer for i := 1; i <= consumerContractsAmount; i++ { loadTestConsumer, err := contractDeployer.DeployVRFV2PlusWrapperLoadTestConsumer(linkTokenAddress, vrfV2PlusWrapper.Address()) if err != nil { - return nil, errors.Wrap(err, ErrAdvancedConsumer) + return nil, fmt.Errorf("%s, err %w", ErrAdvancedConsumer, err) } consumers = append(consumers, loadTestConsumer) } @@ -162,7 +164,7 @@ func CreateVRFV2PlusJob( } ost, err := os.String() if err != nil { - return nil, errors.Wrap(err, ErrParseJob) + 
return nil, fmt.Errorf("%s, err %w", ErrParseJob, err) } job, err := chainlinkNode.MustCreateJob(&client.VRFV2PlusJobSpec{ @@ -177,7 +179,7 @@ func CreateVRFV2PlusJob( BatchFulfillmentEnabled: false, }) if err != nil { - return nil, errors.Wrap(err, ErrCreatingVRFv2PlusJob) + return nil, fmt.Errorf("%s, err %w", ErrCreatingVRFv2PlusJob, err) } return job, nil @@ -190,14 +192,14 @@ func VRFV2_5RegisterProvingKey( ) (VRFV2PlusEncodedProvingKey, error) { provingKey, err := actions.EncodeOnChainVRFProvingKey(*vrfKey) if err != nil { - return VRFV2PlusEncodedProvingKey{}, errors.Wrap(err, ErrEncodingProvingKey) + return VRFV2PlusEncodedProvingKey{}, fmt.Errorf("%s, err %w", ErrEncodingProvingKey, err) } err = coordinator.RegisterProvingKey( oracleAddress, provingKey, ) if err != nil { - return VRFV2PlusEncodedProvingKey{}, errors.Wrap(err, ErrRegisterProvingKey) + return VRFV2PlusEncodedProvingKey{}, fmt.Errorf("%s, err %w", ErrRegisterProvingKey, err) } return provingKey, nil } @@ -209,26 +211,32 @@ func VRFV2PlusUpgradedVersionRegisterProvingKey( ) (VRFV2PlusEncodedProvingKey, error) { provingKey, err := actions.EncodeOnChainVRFProvingKey(*vrfKey) if err != nil { - return VRFV2PlusEncodedProvingKey{}, errors.Wrap(err, ErrEncodingProvingKey) + return VRFV2PlusEncodedProvingKey{}, fmt.Errorf("%s, err %w", ErrEncodingProvingKey, err) } err = coordinator.RegisterProvingKey( oracleAddress, provingKey, ) if err != nil { - return VRFV2PlusEncodedProvingKey{}, errors.Wrap(err, ErrRegisterProvingKey) + return VRFV2PlusEncodedProvingKey{}, fmt.Errorf("%s, err %w", ErrRegisterProvingKey, err) } return provingKey, nil } -func FundVRFCoordinatorV2_5Subscription(linkToken contracts.LinkToken, coordinator contracts.VRFCoordinatorV2_5, chainClient blockchain.EVMClient, subscriptionID *big.Int, linkFundingAmount *big.Int) error { +func FundVRFCoordinatorV2_5Subscription( + linkToken contracts.LinkToken, + coordinator contracts.VRFCoordinatorV2_5, + chainClient blockchain.EVMClient, + 
subscriptionID *big.Int, + linkFundingAmountJuels *big.Int, +) error { encodedSubId, err := chainlinkutils.ABIEncode(`[{"type":"uint256"}]`, subscriptionID) if err != nil { - return errors.Wrap(err, ErrABIEncodingFunding) + return fmt.Errorf("%s, err %w", ErrABIEncodingFunding, err) } - _, err = linkToken.TransferAndCall(coordinator.Address(), big.NewInt(0).Mul(linkFundingAmount, big.NewInt(1e18)), encodedSubId) + _, err = linkToken.TransferAndCall(coordinator.Address(), linkFundingAmountJuels, encodedSubId) if err != nil { - return errors.Wrap(err, ErrSendingLinkToken) + return fmt.Errorf("%s, err %w", ErrSendingLinkToken, err) } return chainClient.WaitForEvents() } @@ -236,18 +244,22 @@ func FundVRFCoordinatorV2_5Subscription(linkToken contracts.LinkToken, coordinat // SetupVRFV2_5Environment will create specified number of subscriptions and add the same conumer/s to each of them func SetupVRFV2_5Environment( env *test_env.CLClusterTestEnv, - vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig, + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig, linkToken contracts.LinkToken, mockNativeLINKFeed contracts.MockETHLINKFeed, + registerProvingKeyAgainstAddress string, numberOfConsumers int, numberOfSubToCreate int, + l zerolog.Logger, ) (*VRFV2_5Contracts, []*big.Int, *VRFV2PlusData, error) { - + l.Info().Msg("Starting VRFV2 Plus environment setup") + l.Info().Msg("Deploying VRFV2 Plus contracts") vrfv2_5Contracts, err := DeployVRFV2_5Contracts(env.ContractDeployer, env.EVMClient, numberOfConsumers) if err != nil { - return nil, nil, nil, errors.Wrap(err, ErrDeployVRFV2_5Contracts) + return nil, nil, nil, fmt.Errorf("%s, err %w", ErrDeployVRFV2_5Contracts, err) } + l.Info().Str("Coordinator", vrfv2_5Contracts.Coordinator.Address()).Msg("Setting Coordinator Config") err = vrfv2_5Contracts.Coordinator.SetConfig( vrfv2PlusConfig.MinimumConfirmations, vrfv2PlusConfig.MaxGasLimitCoordinatorConfig, @@ -260,64 +272,52 @@ func SetupVRFV2_5Environment( }, ) if err != nil { - 
return nil, nil, nil, errors.Wrap(err, ErrSetVRFCoordinatorConfig) + return nil, nil, nil, fmt.Errorf("%s, err %w", ErrSetVRFCoordinatorConfig, err) } + l.Info().Str("Coordinator", vrfv2_5Contracts.Coordinator.Address()).Msg("Setting Link and ETH/LINK feed") err = vrfv2_5Contracts.Coordinator.SetLINKAndLINKNativeFeed(linkToken.Address(), mockNativeLINKFeed.Address()) if err != nil { - return nil, nil, nil, errors.Wrap(err, ErrSetLinkNativeLinkFeed) + return nil, nil, nil, fmt.Errorf("%s, err %w", ErrSetLinkNativeLinkFeed, err) } err = env.EVMClient.WaitForEvents() if err != nil { - return nil, nil, nil, errors.Wrap(err, ErrWaitTXsComplete) - } - - subIDs, err := CreateSubsAndFund(env, vrfv2PlusConfig, linkToken, vrfv2_5Contracts, numberOfSubToCreate) - if err != nil { - return nil, nil, nil, err - } - - subToConsumersMap := map[*big.Int][]contracts.VRFv2PlusLoadTestConsumer{} - - //each subscription will have the same consumers - for _, subID := range subIDs { - subToConsumersMap[subID] = vrfv2_5Contracts.LoadTestConsumers - } - - err = AddConsumersToSubs( - subToConsumersMap, - vrfv2_5Contracts.Coordinator, - ) + return nil, nil, nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) + } + l.Info().Str("Coordinator", vrfv2_5Contracts.Coordinator.Address()).Int("Number of Subs to create", numberOfSubToCreate).Msg("Creating and funding subscriptions, adding consumers") + subIDs, err := CreateFundSubsAndAddConsumers( + env, + vrfv2PlusConfig, + linkToken, + vrfv2_5Contracts.Coordinator, vrfv2_5Contracts.LoadTestConsumers, numberOfSubToCreate) if err != nil { return nil, nil, nil, err } - - err = env.EVMClient.WaitForEvents() - if err != nil { - return nil, nil, nil, errors.Wrap(err, ErrWaitTXsComplete) - } - + l.Info().Str("Node URL", env.ClCluster.NodeAPIs()[0].URL()).Msg("Creating VRF Key on the Node") vrfKey, err := env.ClCluster.NodeAPIs()[0].MustCreateVRFKey() if err != nil { - return nil, nil, nil, errors.Wrap(err, ErrCreatingVRFv2PlusKey) + return nil, nil, 
nil, fmt.Errorf("%s, err %w", ErrCreatingVRFv2PlusKey, err) } pubKeyCompressed := vrfKey.Data.ID - nativeTokenPrimaryKeyAddress, err := env.ClCluster.NodeAPIs()[0].PrimaryEthAddress() + l.Info().Str("Coordinator", vrfv2_5Contracts.Coordinator.Address()).Msg("Registering Proving Key") + provingKey, err := VRFV2_5RegisterProvingKey(vrfKey, registerProvingKeyAgainstAddress, vrfv2_5Contracts.Coordinator) if err != nil { - return nil, nil, nil, errors.Wrap(err, ErrNodePrimaryKey) - } - provingKey, err := VRFV2_5RegisterProvingKey(vrfKey, nativeTokenPrimaryKeyAddress, vrfv2_5Contracts.Coordinator) - if err != nil { - return nil, nil, nil, errors.Wrap(err, ErrRegisteringProvingKey) + return nil, nil, nil, fmt.Errorf("%s, err %w", ErrRegisteringProvingKey, err) } keyHash, err := vrfv2_5Contracts.Coordinator.HashOfKey(context.Background(), provingKey) if err != nil { - return nil, nil, nil, errors.Wrap(err, ErrCreatingProvingKeyHash) + return nil, nil, nil, fmt.Errorf("%s, err %w", ErrCreatingProvingKeyHash, err) } chainID := env.EVMClient.GetChainID() + nativeTokenPrimaryKeyAddress, err := env.ClCluster.NodeAPIs()[0].PrimaryEthAddress() + if err != nil { + return nil, nil, nil, fmt.Errorf("%s, err %w", ErrNodePrimaryKey, err) + } + + l.Info().Msg("Creating VRFV2 Plus Job") job, err := CreateVRFV2PlusJob( env.ClCluster.NodeAPIs()[0], vrfv2_5Contracts.Coordinator.Address(), @@ -327,7 +327,7 @@ func SetupVRFV2_5Environment( vrfv2PlusConfig.MinimumConfirmations, ) if err != nil { - return nil, nil, nil, errors.Wrap(err, ErrCreateVRFV2PlusJobs) + return nil, nil, nil, fmt.Errorf("%s, err %w", ErrCreateVRFV2PlusJobs, err) } // this part is here because VRFv2 can work with only a specific key @@ -335,14 +335,15 @@ func SetupVRFV2_5Environment( // Key = '...' 
addr, err := env.ClCluster.Nodes[0].API.PrimaryEthAddress() if err != nil { - return nil, nil, nil, errors.Wrap(err, ErrGetPrimaryKey) + return nil, nil, nil, fmt.Errorf("%s, err %w", ErrGetPrimaryKey, err) } nodeConfig := node.NewConfig(env.ClCluster.Nodes[0].NodeConfig, node.WithVRFv2EVMEstimator(addr), ) + l.Info().Msg("Restarting Node with new sending key PriceMax configuration") err = env.ClCluster.Nodes[0].Restart(nodeConfig) if err != nil { - return nil, nil, nil, errors.Wrap(err, ErrRestartCLNode) + return nil, nil, nil, fmt.Errorf("%s, err %w", ErrRestartCLNode, err) } vrfv2PlusKeyData := VRFV2PlusKeyData{ @@ -358,25 +359,60 @@ func SetupVRFV2_5Environment( chainID, } + l.Info().Msg("VRFV2 Plus environment setup is finished") return vrfv2_5Contracts, subIDs, &data, nil } +func CreateFundSubsAndAddConsumers( + env *test_env.CLClusterTestEnv, + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig, + linkToken contracts.LinkToken, + coordinator contracts.VRFCoordinatorV2_5, + consumers []contracts.VRFv2PlusLoadTestConsumer, + numberOfSubToCreate int, +) ([]*big.Int, error) { + subIDs, err := CreateSubsAndFund(env, vrfv2PlusConfig, linkToken, coordinator, numberOfSubToCreate) + if err != nil { + return nil, err + } + subToConsumersMap := map[*big.Int][]contracts.VRFv2PlusLoadTestConsumer{} + + //each subscription will have the same consumers + for _, subID := range subIDs { + subToConsumersMap[subID] = consumers + } + + err = AddConsumersToSubs( + subToConsumersMap, + coordinator, + ) + if err != nil { + return nil, err + } + + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) + } + return subIDs, nil +} + func CreateSubsAndFund( env *test_env.CLClusterTestEnv, - vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig, + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig, linkToken contracts.LinkToken, - vrfv2_5Contracts *VRFV2_5Contracts, + coordinator contracts.VRFCoordinatorV2_5, subAmountToCreate int, 
) ([]*big.Int, error) { - subs, err := CreateSubs(env, vrfv2_5Contracts.Coordinator, subAmountToCreate) + subs, err := CreateSubs(env, coordinator, subAmountToCreate) if err != nil { return nil, err } err = env.EVMClient.WaitForEvents() if err != nil { - return nil, errors.Wrap(err, ErrWaitTXsComplete) + return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } - err = FundSubscriptions(env, vrfv2PlusConfig, linkToken, vrfv2_5Contracts.Coordinator, subs) + err = FundSubscriptions(env, vrfv2PlusConfig, linkToken, coordinator, subs) if err != nil { return nil, err } @@ -408,7 +444,7 @@ func AddConsumersToSubs( for _, consumer := range consumers { err := coordinator.AddConsumer(subID, consumer.Address()) if err != nil { - return errors.Wrap(err, ErrAddConsumerToSub) + return fmt.Errorf("%s, err %w", ErrAddConsumerToSub, err) } } } @@ -417,7 +453,7 @@ func AddConsumersToSubs( func SetupVRFV2PlusWrapperEnvironment( env *test_env.CLClusterTestEnv, - vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig, + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig, linkToken contracts.LinkToken, mockNativeLINKFeed contracts.MockETHLINKFeed, coordinator contracts.VRFCoordinatorV2_5, @@ -440,7 +476,7 @@ func SetupVRFV2PlusWrapperEnvironment( err = env.EVMClient.WaitForEvents() if err != nil { - return nil, nil, errors.Wrap(err, ErrWaitTXsComplete) + return nil, nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } err = wrapperContracts.VRFV2PlusWrapper.SetConfig( vrfv2PlusConfig.WrapperGasOverhead, @@ -459,7 +495,7 @@ func SetupVRFV2PlusWrapperEnvironment( err = env.EVMClient.WaitForEvents() if err != nil { - return nil, nil, errors.Wrap(err, ErrWaitTXsComplete) + return nil, nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } //fund sub @@ -470,7 +506,7 @@ func SetupVRFV2PlusWrapperEnvironment( err = env.EVMClient.WaitForEvents() if err != nil { - return nil, nil, errors.Wrap(err, ErrWaitTXsComplete) + return nil, nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } 
err = FundSubscriptions(env, vrfv2PlusConfig, linkToken, coordinator, []*big.Int{wrapperSubID}) @@ -488,7 +524,7 @@ func SetupVRFV2PlusWrapperEnvironment( } err = env.EVMClient.WaitForEvents() if err != nil { - return nil, nil, errors.Wrap(err, ErrWaitTXsComplete) + return nil, nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } //fund consumer with Eth @@ -498,36 +534,45 @@ func SetupVRFV2PlusWrapperEnvironment( } err = env.EVMClient.WaitForEvents() if err != nil { - return nil, nil, errors.Wrap(err, ErrWaitTXsComplete) + return nil, nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } return wrapperContracts, wrapperSubID, nil } func CreateSubAndFindSubID(env *test_env.CLClusterTestEnv, coordinator contracts.VRFCoordinatorV2_5) (*big.Int, error) { - err := coordinator.CreateSubscription() + tx, err := coordinator.CreateSubscription() if err != nil { - return nil, errors.Wrap(err, ErrCreateVRFSubscription) + return nil, fmt.Errorf("%s, err %w", ErrCreateVRFSubscription, err) + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } - sub, err := coordinator.WaitForSubscriptionCreatedEvent(time.Second * 10) + receipt, err := env.EVMClient.GetTxReceipt(tx.Hash()) if err != nil { - return nil, errors.Wrap(err, ErrFindSubID) + return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } - err = env.EVMClient.WaitForEvents() + //SubscriptionsCreated Log should be emitted with the subscription ID + subID := receipt.Logs[0].Topics[1].Big() + + //verify that the subscription was created + _, err = coordinator.FindSubscriptionID(subID) if err != nil { - return nil, errors.Wrap(err, ErrWaitTXsComplete) + return nil, fmt.Errorf("%s, err %w", ErrFindSubID, err) } - return sub.SubId, nil + + return subID, nil } func GetUpgradedCoordinatorTotalBalance(coordinator contracts.VRFCoordinatorV2PlusUpgradedVersion) (linkTotalBalance *big.Int, nativeTokenTotalBalance *big.Int, err error) { 
linkTotalBalance, err = coordinator.GetLinkTotalBalance(context.Background()) if err != nil { - return nil, nil, errors.Wrap(err, ErrLinkTotalBalance) + return nil, nil, fmt.Errorf("%s, err %w", ErrLinkTotalBalance, err) } nativeTokenTotalBalance, err = coordinator.GetNativeTokenTotalBalance(context.Background()) if err != nil { - return nil, nil, errors.Wrap(err, ErrNativeTokenBalance) + return nil, nil, fmt.Errorf("%s, err %w", ErrNativeTokenBalance, err) } return } @@ -535,37 +580,42 @@ func GetUpgradedCoordinatorTotalBalance(coordinator contracts.VRFCoordinatorV2Pl func GetCoordinatorTotalBalance(coordinator contracts.VRFCoordinatorV2_5) (linkTotalBalance *big.Int, nativeTokenTotalBalance *big.Int, err error) { linkTotalBalance, err = coordinator.GetLinkTotalBalance(context.Background()) if err != nil { - return nil, nil, errors.Wrap(err, ErrLinkTotalBalance) + return nil, nil, fmt.Errorf("%s, err %w", ErrLinkTotalBalance, err) } nativeTokenTotalBalance, err = coordinator.GetNativeTokenTotalBalance(context.Background()) if err != nil { - return nil, nil, errors.Wrap(err, ErrNativeTokenBalance) + return nil, nil, fmt.Errorf("%s, err %w", ErrNativeTokenBalance, err) } return } func FundSubscriptions( env *test_env.CLClusterTestEnv, - vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig, + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig, linkAddress contracts.LinkToken, coordinator contracts.VRFCoordinatorV2_5, subIDs []*big.Int, ) error { for _, subID := range subIDs { //Native Billing - err := coordinator.FundSubscriptionWithNative(subID, big.NewInt(0).Mul(big.NewInt(vrfv2PlusConfig.SubscriptionFundingAmountNative), big.NewInt(1e18))) + amountWei := utils.EtherToWei(big.NewFloat(vrfv2PlusConfig.SubscriptionFundingAmountNative)) + err := coordinator.FundSubscriptionWithNative( + subID, + amountWei, + ) if err != nil { - return errors.Wrap(err, ErrFundSubWithNativeToken) + return fmt.Errorf("%s, err %w", ErrFundSubWithNativeToken, err) } //Link Billing - err = 
FundVRFCoordinatorV2_5Subscription(linkAddress, coordinator, env.EVMClient, subID, big.NewInt(vrfv2PlusConfig.SubscriptionFundingAmountLink)) + amountJuels := utils.EtherToWei(big.NewFloat(vrfv2PlusConfig.SubscriptionFundingAmountLink)) + err = FundVRFCoordinatorV2_5Subscription(linkAddress, coordinator, env.EVMClient, subID, amountJuels) if err != nil { - return errors.Wrap(err, ErrFundSubWithLinkToken) + return fmt.Errorf("%s, err %w", ErrFundSubWithLinkToken, err) } } err := env.EVMClient.WaitForEvents() if err != nil { - return errors.Wrap(err, ErrWaitTXsComplete) + return fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err) } return nil } @@ -577,7 +627,8 @@ func RequestRandomnessAndWaitForFulfillment( subID *big.Int, isNativeBilling bool, randomnessRequestCountPerRequest uint16, - vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig, + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig, + randomWordsFulfilledEventTimeout time.Duration, l zerolog.Logger, ) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) { logRandRequest(consumer.Address(), coordinator.Address(), subID, isNativeBilling, vrfv2PlusConfig, l) @@ -591,10 +642,18 @@ func RequestRandomnessAndWaitForFulfillment( randomnessRequestCountPerRequest, ) if err != nil { - return nil, errors.Wrap(err, ErrRequestRandomness) + return nil, fmt.Errorf("%s, err %w", ErrRequestRandomness, err) } - return WaitForRequestAndFulfillmentEvents(consumer.Address(), coordinator, vrfv2PlusData, subID, isNativeBilling, l) + return WaitForRequestAndFulfillmentEvents( + consumer.Address(), + coordinator, + vrfv2PlusData, + subID, + isNativeBilling, + randomWordsFulfilledEventTimeout, + l, + ) } func RequestRandomnessAndWaitForFulfillmentUpgraded( @@ -603,7 +662,7 @@ func RequestRandomnessAndWaitForFulfillmentUpgraded( vrfv2PlusData *VRFV2PlusData, subID *big.Int, isNativeBilling bool, - vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig, + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig, l zerolog.Logger, ) 
(*vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled, error) { logRandRequest(consumer.Address(), coordinator.Address(), subID, isNativeBilling, vrfv2PlusConfig, l) @@ -617,7 +676,7 @@ func RequestRandomnessAndWaitForFulfillmentUpgraded( vrfv2PlusConfig.RandomnessRequestCountPerRequest, ) if err != nil { - return nil, errors.Wrap(err, ErrRequestRandomness) + return nil, fmt.Errorf("%s, err %w", ErrRequestRandomness, err) } randomWordsRequestedEvent, err := coordinator.WaitForRandomWordsRequestedEvent( @@ -627,7 +686,7 @@ func RequestRandomnessAndWaitForFulfillmentUpgraded( time.Minute*1, ) if err != nil { - return nil, errors.Wrap(err, ErrWaitRandomWordsRequestedEvent) + return nil, fmt.Errorf("%s, err %w", ErrWaitRandomWordsRequestedEvent, err) } LogRandomnessRequestedEventUpgraded(l, coordinator, randomWordsRequestedEvent) @@ -638,7 +697,7 @@ func RequestRandomnessAndWaitForFulfillmentUpgraded( time.Minute*2, ) if err != nil { - return nil, errors.Wrap(err, ErrWaitRandomWordsFulfilledEvent) + return nil, fmt.Errorf("%s, err %w", ErrWaitRandomWordsFulfilledEvent, err) } LogRandomWordsFulfilledEventUpgraded(l, coordinator, randomWordsFulfilledEvent) @@ -651,7 +710,8 @@ func DirectFundingRequestRandomnessAndWaitForFulfillment( vrfv2PlusData *VRFV2PlusData, subID *big.Int, isNativeBilling bool, - vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig, + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig, + randomWordsFulfilledEventTimeout time.Duration, l zerolog.Logger, ) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) { logRandRequest(consumer.Address(), coordinator.Address(), subID, isNativeBilling, vrfv2PlusConfig, l) @@ -663,7 +723,7 @@ func DirectFundingRequestRandomnessAndWaitForFulfillment( vrfv2PlusConfig.RandomnessRequestCountPerRequest, ) if err != nil { - return nil, errors.Wrap(err, ErrRequestRandomnessDirectFundingNativePayment) + return nil, fmt.Errorf("%s, err %w", 
ErrRequestRandomnessDirectFundingNativePayment, err) } } else { _, err := consumer.RequestRandomness( @@ -673,14 +733,22 @@ func DirectFundingRequestRandomnessAndWaitForFulfillment( vrfv2PlusConfig.RandomnessRequestCountPerRequest, ) if err != nil { - return nil, errors.Wrap(err, ErrRequestRandomnessDirectFundingLinkPayment) + return nil, fmt.Errorf("%s, err %w", ErrRequestRandomnessDirectFundingLinkPayment, err) } } wrapperAddress, err := consumer.GetWrapper(context.Background()) if err != nil { - return nil, errors.Wrap(err, "error getting wrapper address") + return nil, fmt.Errorf("error getting wrapper address, err: %w", err) } - return WaitForRequestAndFulfillmentEvents(wrapperAddress.String(), coordinator, vrfv2PlusData, subID, isNativeBilling, l) + return WaitForRequestAndFulfillmentEvents( + wrapperAddress.String(), + coordinator, + vrfv2PlusData, + subID, + isNativeBilling, + randomWordsFulfilledEventTimeout, + l, + ) } func WaitForRequestAndFulfillmentEvents( @@ -689,6 +757,7 @@ func WaitForRequestAndFulfillmentEvents( vrfv2PlusData *VRFV2PlusData, subID *big.Int, isNativeBilling bool, + randomWordsFulfilledEventTimeout time.Duration, l zerolog.Logger, ) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) { randomWordsRequestedEvent, err := coordinator.WaitForRandomWordsRequestedEvent( @@ -698,7 +767,7 @@ func WaitForRequestAndFulfillmentEvents( time.Minute*1, ) if err != nil { - return nil, errors.Wrap(err, ErrWaitRandomWordsRequestedEvent) + return nil, fmt.Errorf("%s, err %w", ErrWaitRandomWordsRequestedEvent, err) } LogRandomnessRequestedEvent(l, coordinator, randomWordsRequestedEvent, isNativeBilling) @@ -706,10 +775,10 @@ func WaitForRequestAndFulfillmentEvents( randomWordsFulfilledEvent, err := coordinator.WaitForRandomWordsFulfilledEvent( []*big.Int{subID}, []*big.Int{randomWordsRequestedEvent.RequestId}, - time.Minute*2, + randomWordsFulfilledEventTimeout, ) if err != nil { - return nil, errors.Wrap(err, 
ErrWaitRandomWordsFulfilledEvent) + return nil, fmt.Errorf("%s, err %w", ErrWaitRandomWordsFulfilledEvent, err) } LogRandomWordsFulfilledEvent(l, coordinator, randomWordsFulfilledEvent, isNativeBilling) @@ -734,7 +803,7 @@ func WaitForRequestCountEqualToFulfilmentCount(consumer contracts.VRFv2PlusLoadT fmt.Errorf("timeout waiting for rand request and fulfilments to be equal AFTER performance test was executed. Request Count: %d, Fulfilment Count: %d", metrics.RequestCount.Uint64(), metrics.FulfilmentCount.Uint64()) case <-ticker.C: - go getLoadTestMetrics(consumer, metricsChannel, metricsErrorChannel) + go retreiveLoadTestMetrics(consumer, metricsChannel, metricsErrorChannel) case metrics = <-metricsChannel: if metrics.RequestCount.Cmp(metrics.FulfilmentCount) == 0 { ticker.Stop() @@ -749,7 +818,42 @@ func WaitForRequestCountEqualToFulfilmentCount(consumer contracts.VRFv2PlusLoadT } } -func getLoadTestMetrics( +func ReturnFundsForFulfilledRequests(client blockchain.EVMClient, coordinator contracts.VRFCoordinatorV2_5, l zerolog.Logger) error { + linkTotalBalance, err := coordinator.GetLinkTotalBalance(context.Background()) + if err != nil { + return fmt.Errorf("Error getting LINK total balance, err: %w", err) + } + defaultWallet := client.GetDefaultWallet().Address() + l.Info(). + Str("LINK amount", linkTotalBalance.String()). + Str("Returning to", defaultWallet). + Msg("Returning LINK for fulfilled requests") + err = coordinator.OracleWithdraw( + common.HexToAddress(defaultWallet), + linkTotalBalance, + ) + if err != nil { + return fmt.Errorf("Error withdrawing LINK from coordinator to default wallet, err: %w", err) + } + nativeTotalBalance, err := coordinator.GetNativeTokenTotalBalance(context.Background()) + if err != nil { + return fmt.Errorf("Error getting NATIVE total balance, err: %w", err) + } + l.Info(). + Str("Native Token amount", linkTotalBalance.String()). + Str("Returning to", defaultWallet). 
+ Msg("Returning Native Token for fulfilled requests") + err = coordinator.OracleWithdrawNative( + common.HexToAddress(defaultWallet), + nativeTotalBalance, + ) + if err != nil { + return fmt.Errorf("Error withdrawing NATIVE from coordinator to default wallet, err: %w", err) + } + return nil +} + +func retreiveLoadTestMetrics( consumer contracts.VRFv2PlusLoadTestConsumer, metricsChannel chan *contracts.VRFLoadTestMetrics, metricsErrorChannel chan error, @@ -906,7 +1010,7 @@ func logRandRequest( coordinator string, subID *big.Int, isNativeBilling bool, - vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig, + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig, l zerolog.Logger) { l.Debug(). Str("Consumer", consumer). diff --git a/integration-tests/benchmark/keeper_test.go b/integration-tests/benchmark/keeper_test.go index a3db60f3b3..d5f4461012 100644 --- a/integration-tests/benchmark/keeper_test.go +++ b/integration-tests/benchmark/keeper_test.go @@ -11,13 +11,13 @@ import ( "github.com/stretchr/testify/require" - env_client "github.com/smartcontractkit/chainlink-env/client" - "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/pkg/cdk8s/blockscout" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" - "github.com/smartcontractkit/chainlink-env/pkg/helm/reorg" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + env_client "github.com/smartcontractkit/chainlink-testing-framework/k8s/client" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/cdk8s/blockscout" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/reorg" 
"github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" @@ -161,6 +161,7 @@ func TestAutomationBenchmark(t *testing.T) { RegistryVersions: registryVersions, KeeperRegistrySettings: &contracts.KeeperRegistrySettings{ PaymentPremiumPPB: uint32(0), + FlatFeeMicroLINK: uint32(40000), BlockCountPerTurn: big.NewInt(100), CheckGasLimit: uint32(45_000_000), //45M StalenessSeconds: big.NewInt(90_000), @@ -225,7 +226,7 @@ func addRegistry(registryToTest string) []eth_contracts.KeeperRegistryVersion { case "2_0-Multiple": return repeatRegistries(eth_contracts.RegistryVersion_2_0, NumberOfRegistries) case "2_1-Multiple": - return repeatRegistries(eth_contracts.RegistryVersion_1_0, NumberOfRegistries) + return repeatRegistries(eth_contracts.RegistryVersion_2_1, NumberOfRegistries) default: return []eth_contracts.KeeperRegistryVersion{eth_contracts.RegistryVersion_2_0} } @@ -241,13 +242,13 @@ func repeatRegistries(registryVersion eth_contracts.KeeperRegistryVersion, numbe var networkConfig = map[string]NetworkConfig{ "SimulatedGeth": { - upkeepSLA: int64(20), + upkeepSLA: int64(120), //2 minutes blockTime: time.Second, deltaStage: 30 * time.Second, funding: big.NewFloat(100_000), }, "geth": { - upkeepSLA: int64(20), + upkeepSLA: int64(120), //2 minutes blockTime: time.Second, deltaStage: 30 * time.Second, funding: big.NewFloat(100_000), @@ -282,6 +283,18 @@ var networkConfig = map[string]NetworkConfig{ deltaStage: time.Duration(0), funding: big.NewFloat(ChainlinkNodeFunding), }, + "BaseGoerli": { + upkeepSLA: int64(60), + blockTime: 2 * time.Second, + deltaStage: 20 * time.Second, + funding: big.NewFloat(ChainlinkNodeFunding), + }, + "ArbitrumSepolia": { + upkeepSLA: int64(120), + blockTime: time.Second, + deltaStage: 20 * time.Second, + funding: big.NewFloat(ChainlinkNodeFunding), + }, } func getEnv(key, fallback string) string { @@ -298,7 +311,7 @@ func getEnv(key, fallback string) string { func 
SetupAutomationBenchmarkEnv(t *testing.T) (*environment.Environment, blockchain.EVMNetwork) { l := logging.GetTestLogger(t) - testNetwork := networks.SelectedNetwork // Environment currently being used to run benchmark test on + testNetwork := networks.MustGetSelectedNetworksFromEnv()[0] // Environment currently being used to run benchmark test on blockTime := "1" networkDetailTOML := `MinIncomingConfirmations = 1` diff --git a/integration-tests/ccip-tests/actions/ccip_helpers.go b/integration-tests/ccip-tests/actions/ccip_helpers.go index 2935474221..ba21566e88 100644 --- a/integration-tests/ccip-tests/actions/ccip_helpers.go +++ b/integration-tests/ccip-tests/actions/ccip_helpers.go @@ -22,9 +22,9 @@ import ( "go.uber.org/atomic" "golang.org/x/sync/errgroup" - "github.com/smartcontractkit/chainlink-env/environment" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/contracts" diff --git a/integration-tests/ccip-tests/chaos/ccip_test.go b/integration-tests/ccip-tests/chaos/ccip_test.go index 06c9703021..7746ebeddc 100644 --- a/integration-tests/ccip-tests/chaos/ccip_test.go +++ b/integration-tests/ccip-tests/chaos/ccip_test.go @@ -4,12 +4,13 @@ import ( "testing" "time" - "github.com/smartcontractkit/chainlink-env/chaos" - a "github.com/smartcontractkit/chainlink-env/pkg/alias" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos" "github.com/smartcontractkit/chainlink-testing-framework/logging" + "github.com/smartcontractkit/chainlink-testing-framework/utils" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/ccip/integration-tests/ccip-tests/testconfig" + 
"github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testconfig" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testsetups" ) @@ -31,9 +32,9 @@ func TestChaosCCIP(t *testing.T) { testName: "CCIP works after rpc is down for NetworkA @network-chaos", chaosFunc: chaos.NewNetworkPartition, chaosProps: &chaos.Props{ - FromLabels: &map[string]*string{actions.ChaosGroupNetworkACCIPGeth: a.Str("1")}, + FromLabels: &map[string]*string{actions.ChaosGroupNetworkACCIPGeth: utils.Ptr("1")}, // chainlink-0 is default label set for all cll nodes - ToLabels: &map[string]*string{"app": a.Str("chainlink-0")}, + ToLabels: &map[string]*string{"app": utils.Ptr("chainlink-0")}, DurationStr: "1m", }, waitForChaosRecovery: true, @@ -42,8 +43,8 @@ func TestChaosCCIP(t *testing.T) { testName: "CCIP works after rpc is down for NetworkB @network-chaos", chaosFunc: chaos.NewNetworkPartition, chaosProps: &chaos.Props{ - FromLabels: &map[string]*string{actions.ChaosGroupNetworkBCCIPGeth: a.Str("1")}, - ToLabels: &map[string]*string{"app": a.Str("chainlink-0")}, + FromLabels: &map[string]*string{actions.ChaosGroupNetworkBCCIPGeth: utils.Ptr("1")}, + ToLabels: &map[string]*string{"app": utils.Ptr("chainlink-0")}, DurationStr: "1m", }, waitForChaosRecovery: true, @@ -52,8 +53,8 @@ func TestChaosCCIP(t *testing.T) { testName: "CCIP works after 2 rpc's are down for all cll nodes @network-chaos", chaosFunc: chaos.NewNetworkPartition, chaosProps: &chaos.Props{ - FromLabels: &map[string]*string{"geth": a.Str(actions.ChaosGroupCCIPGeth)}, - ToLabels: &map[string]*string{"app": a.Str("chainlink-0")}, + FromLabels: &map[string]*string{"geth": utils.Ptr(actions.ChaosGroupCCIPGeth)}, + ToLabels: &map[string]*string{"app": utils.Ptr("chainlink-0")}, DurationStr: "1m", }, waitForChaosRecovery: true, @@ -62,7 +63,7 @@ func TestChaosCCIP(t *testing.T) { testName: "CCIP Commit works after majority of 
CL nodes are recovered from pod failure @pod-chaos", chaosFunc: chaos.NewFailPods, chaosProps: &chaos.Props{ - LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaultyPlus: a.Str("1")}, + LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaultyPlus: utils.Ptr("1")}, DurationStr: "1m", }, waitForChaosRecovery: true, @@ -71,7 +72,7 @@ func TestChaosCCIP(t *testing.T) { testName: "CCIP Execution works after majority of CL nodes are recovered from pod failure @pod-chaos", chaosFunc: chaos.NewFailPods, chaosProps: &chaos.Props{ - LabelsSelector: &map[string]*string{actions.ChaosGroupExecutionFaultyPlus: a.Str("1")}, + LabelsSelector: &map[string]*string{actions.ChaosGroupExecutionFaultyPlus: utils.Ptr("1")}, DurationStr: "1m", }, waitForChaosRecovery: true, @@ -80,7 +81,7 @@ func TestChaosCCIP(t *testing.T) { testName: "CCIP Commit works while minority of CL nodes are in failed state for pod failure @pod-chaos", chaosFunc: chaos.NewFailPods, chaosProps: &chaos.Props{ - LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaulty: a.Str("1")}, + LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaulty: utils.Ptr("1")}, DurationStr: "90s", }, waitForChaosRecovery: false, @@ -89,7 +90,7 @@ func TestChaosCCIP(t *testing.T) { testName: "CCIP Execution works while minority of CL nodes are in failed state for pod failure @pod-chaos", chaosFunc: chaos.NewFailPods, chaosProps: &chaos.Props{ - LabelsSelector: &map[string]*string{actions.ChaosGroupExecutionFaulty: a.Str("1")}, + LabelsSelector: &map[string]*string{actions.ChaosGroupExecutionFaulty: utils.Ptr("1")}, DurationStr: "90s", }, waitForChaosRecovery: false, diff --git a/integration-tests/ccip-tests/load/ccip_test.go b/integration-tests/ccip-tests/load/ccip_test.go index 14ae998615..0e4e26821d 100644 --- a/integration-tests/ccip-tests/load/ccip_test.go +++ b/integration-tests/ccip-tests/load/ccip_test.go @@ -6,12 +6,12 @@ import ( "time" "github.com/rs/zerolog/log" - 
"github.com/smartcontractkit/chainlink-env/chaos" - a "github.com/smartcontractkit/chainlink-env/pkg/alias" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/ccip/integration-tests/utils" + "github.com/smartcontractkit/chainlink/integration-tests/utils" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" "github.com/smartcontractkit/chainlink/v2/core/store/models" ) @@ -82,8 +82,8 @@ func TestLoadCCIPStableRequestTriggeringWithNetworkChaos(t *testing.T) { chaosId, err := testEnv.K8Env.Chaos.Run( chaos.NewNetworkLatency( testEnv.K8Env.Cfg.Namespace, &chaos.Props{ - FromLabels: &map[string]*string{"geth": a.Str(actions.ChaosGroupCCIPGeth)}, - ToLabels: &map[string]*string{"app": a.Str("chainlink-0")}, + FromLabels: &map[string]*string{"geth": utils.Ptr(actions.ChaosGroupCCIPGeth)}, + ToLabels: &map[string]*string{"app": utils.Ptr("chainlink-0")}, DurationStr: testArgs.TestCfg.TestGroupInput.TestDuration.String(), Delay: "300ms", })) @@ -112,7 +112,7 @@ func TestLoadCCIPStableWithMajorityNodeFailure(t *testing.T) { ChaosName: "CCIP works after majority of CL nodes are recovered from pod failure @pod-chaos", ChaosFunc: chaos.NewFailPods, ChaosProps: &chaos.Props{ - LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaultyPlus: a.Str("1")}, + LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaultyPlus: utils.Ptr("1")}, DurationStr: "2m", }, }, @@ -166,7 +166,7 @@ func TestLoadCCIPStableWithMinorityNodeFailure(t *testing.T) { ChaosName: "CCIP works while minority of CL nodes are in failed state for pod failure @pod-chaos", ChaosFunc: chaos.NewFailPods, ChaosProps: &chaos.Props{ - LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaulty: a.Str("1")}, + LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaulty: utils.Ptr("1")}, DurationStr: "4m", }, }, @@ 
-218,7 +218,7 @@ func TestLoadCCIPStableWithPodChaosDiffCommitAndExec(t *testing.T) { ChaosName: "CCIP Commit works after majority of CL nodes are recovered from pod failure @pod-chaos", ChaosFunc: chaos.NewFailPods, ChaosProps: &chaos.Props{ - LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaultyPlus: a.Str("1")}, + LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaultyPlus: utils.Ptr("1")}, DurationStr: "2m", }, }, @@ -226,7 +226,7 @@ func TestLoadCCIPStableWithPodChaosDiffCommitAndExec(t *testing.T) { ChaosName: "CCIP Execution works after majority of CL nodes are recovered from pod failure @pod-chaos", ChaosFunc: chaos.NewFailPods, ChaosProps: &chaos.Props{ - LabelsSelector: &map[string]*string{actions.ChaosGroupExecutionFaultyPlus: a.Str("1")}, + LabelsSelector: &map[string]*string{actions.ChaosGroupExecutionFaultyPlus: utils.Ptr("1")}, DurationStr: "2m", }, }, @@ -234,7 +234,7 @@ func TestLoadCCIPStableWithPodChaosDiffCommitAndExec(t *testing.T) { ChaosName: "CCIP Commit works while minority of CL nodes are in failed state for pod failure @pod-chaos", ChaosFunc: chaos.NewFailPods, ChaosProps: &chaos.Props{ - LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaulty: a.Str("1")}, + LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaulty: utils.Ptr("1")}, DurationStr: "4m", }, }, @@ -242,7 +242,7 @@ func TestLoadCCIPStableWithPodChaosDiffCommitAndExec(t *testing.T) { ChaosName: "CCIP Execution works while minority of CL nodes are in failed state for pod failure @pod-chaos", ChaosFunc: chaos.NewFailPods, ChaosProps: &chaos.Props{ - LabelsSelector: &map[string]*string{actions.ChaosGroupExecutionFaulty: a.Str("1")}, + LabelsSelector: &map[string]*string{actions.ChaosGroupExecutionFaulty: utils.Ptr("1")}, DurationStr: "4m", }, }, diff --git a/integration-tests/ccip-tests/load/helper.go b/integration-tests/ccip-tests/load/helper.go index 4b1f740634..7beba53259 100644 --- a/integration-tests/ccip-tests/load/helper.go +++ 
b/integration-tests/ccip-tests/load/helper.go @@ -11,13 +11,14 @@ import ( "github.com/AlekSi/pointer" "github.com/rs/zerolog" - "github.com/smartcontractkit/chainlink-env/chaos" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos" "github.com/smartcontractkit/wasp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" - "github.com/smartcontractkit/ccip/integration-tests/ccip-tests/testconfig" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testconfig" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testsetups" ) diff --git a/integration-tests/ccip-tests/smoke/ccip_test.go b/integration-tests/ccip-tests/smoke/ccip_test.go index b49d641a55..2559f5eb57 100644 --- a/integration-tests/ccip-tests/smoke/ccip_test.go +++ b/integration-tests/ccip-tests/smoke/ccip_test.go @@ -10,10 +10,10 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/ccip/integration-tests/ccip-tests/testconfig" - "github.com/smartcontractkit/ccip/integration-tests/utils" "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testconfig" "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testsetups" + "github.com/smartcontractkit/chainlink/integration-tests/utils" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/lock_release_token_pool" ) diff --git a/integration-tests/ccip-tests/testsetups/ccip.go b/integration-tests/ccip-tests/testsetups/ccip.go index bae36b031a..866c9141ab 100644 --- a/integration-tests/ccip-tests/testsetups/ccip.go +++ b/integration-tests/ccip-tests/testsetups/ccip.go @@ -17,18 
+17,18 @@ import ( "github.com/pkg/errors" "github.com/rs/zerolog" chainselectors "github.com/smartcontractkit/chain-selectors" - "github.com/smartcontractkit/chainlink-env/config" - "github.com/smartcontractkit/chainlink-env/environment" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/config" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" "github.com/smartcontractkit/chainlink-testing-framework/networks" - "github.com/smartcontractkit/chainlink-testing-framework/utils" "github.com/stretchr/testify/require" "go.uber.org/atomic" "go.uber.org/multierr" "go.uber.org/zap/zapcore" "golang.org/x/sync/errgroup" - "github.com/smartcontractkit/ccip/integration-tests/ccip-tests/testconfig" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testconfig" + integrationactions "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/contracts/laneconfig" @@ -743,7 +743,7 @@ func CCIPDefaultTestSetUp( return } lggr.Info().Msg("Tearing down the environment") - err = integrationactions.TeardownSuite(t, ccipEnv.K8Env, utils.ProjectRoot, ccipEnv.CLNodes, setUpArgs.Reporter, + err = integrationactions.TeardownSuite(t, ccipEnv.K8Env, ccipEnv.CLNodes, setUpArgs.Reporter, zapcore.ErrorLevel, chains...) 
require.NoError(t, err, "Environment teardown shouldn't fail") } else { diff --git a/integration-tests/ccip-tests/testsetups/test_env.go b/integration-tests/ccip-tests/testsetups/test_env.go index b1ce9aa409..224504fba5 100644 --- a/integration-tests/ccip-tests/testsetups/test_env.go +++ b/integration-tests/ccip-tests/testsetups/test_env.go @@ -7,12 +7,12 @@ import ( "testing" "github.com/AlekSi/pointer" - "github.com/smartcontractkit/chainlink-env/client" - "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/pkg/cdk8s/blockscout" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/reorg" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/client" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/cdk8s/blockscout" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/reorg" "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/types/config/node" diff --git a/integration-tests/ccip-tests/types/config/node/core.go b/integration-tests/ccip-tests/types/config/node/core.go index 165f0b1471..a34e0c5799 100644 --- a/integration-tests/ccip-tests/types/config/node/core.go +++ b/integration-tests/ccip-tests/types/config/node/core.go @@ -8,8 +8,8 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/blockchain" - itutils "github.com/smartcontractkit/ccip/integration-tests/utils" "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" + itutils "github.com/smartcontractkit/chainlink/integration-tests/utils" evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" 
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink" "github.com/smartcontractkit/chainlink/v2/core/utils" diff --git a/integration-tests/chaos/automation_chaos_test.go b/integration-tests/chaos/automation_chaos_test.go index 6f2cacdb03..6ebf14d806 100644 --- a/integration-tests/chaos/automation_chaos_test.go +++ b/integration-tests/chaos/automation_chaos_test.go @@ -1,7 +1,6 @@ package chaos import ( - "context" "fmt" "math/big" "testing" @@ -11,13 +10,12 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" - "github.com/smartcontractkit/chainlink-env/chaos" - "github.com/smartcontractkit/chainlink-env/environment" - a "github.com/smartcontractkit/chainlink-env/pkg/alias" - "github.com/smartcontractkit/chainlink-env/pkg/cdk8s/blockscout" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/cdk8s/blockscout" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" "github.com/smartcontractkit/chainlink-testing-framework/utils" @@ -26,6 +24,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" eth_contracts "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" + it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils" ) var ( @@ -42,7 +41,7 @@ ListenAddresses = ["0.0.0.0:6690"]` defaultAutomationSettings = 
map[string]interface{}{ "replicas": 6, - "toml": client.AddNetworksConfig(baseTOML, networks.SelectedNetwork), + "toml": client.AddNetworksConfig(baseTOML, networks.MustGetSelectedNetworksFromEnv()[0]), "db": map[string]interface{}{ "stateful": true, "capacity": "1Gi", @@ -60,9 +59,10 @@ ListenAddresses = ["0.0.0.0:6690"]` } defaultEthereumSettings = ðereum.Props{ - NetworkName: networks.SelectedNetwork.Name, - Simulated: networks.SelectedNetwork.Simulated, - WsURLs: networks.SelectedNetwork.URLs, + // utils.MustGetSelectedNetworksFromEnv() + NetworkName: networks.MustGetSelectedNetworksFromEnv()[0].Name, + Simulated: networks.MustGetSelectedNetworksFromEnv()[0].Simulated, + WsURLs: networks.MustGetSelectedNetworksFromEnv()[0].URLs, Values: map[string]interface{}{ "resources": map[string]interface{}{ "requests": map[string]interface{}{ @@ -116,6 +116,7 @@ func TestAutomationChaos(t *testing.T) { } for name, registryVersion := range registryVersions { + registryVersion := registryVersion t.Run(name, func(t *testing.T) { t.Parallel() @@ -131,7 +132,7 @@ func TestAutomationChaos(t *testing.T) { chainlink.New(0, defaultAutomationSettings), chaos.NewFailPods, &chaos.Props{ - LabelsSelector: &map[string]*string{ChaosGroupMinority: a.Str("1")}, + LabelsSelector: &map[string]*string{ChaosGroupMinority: utils.Ptr("1")}, DurationStr: "1m", }, }, @@ -140,7 +141,7 @@ func TestAutomationChaos(t *testing.T) { chainlink.New(0, defaultAutomationSettings), chaos.NewFailPods, &chaos.Props{ - LabelsSelector: &map[string]*string{ChaosGroupMajority: a.Str("1")}, + LabelsSelector: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")}, DurationStr: "1m", }, }, @@ -149,9 +150,9 @@ func TestAutomationChaos(t *testing.T) { chainlink.New(0, defaultAutomationSettings), chaos.NewFailPods, &chaos.Props{ - LabelsSelector: &map[string]*string{ChaosGroupMajority: a.Str("1")}, + LabelsSelector: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")}, DurationStr: "1m", - ContainerNames: 
&[]*string{a.Str("chainlink-db")}, + ContainerNames: &[]*string{utils.Ptr("chainlink-db")}, }, }, NetworkChaosFailMajorityNetwork: { @@ -159,8 +160,8 @@ func TestAutomationChaos(t *testing.T) { chainlink.New(0, defaultAutomationSettings), chaos.NewNetworkPartition, &chaos.Props{ - FromLabels: &map[string]*string{ChaosGroupMajority: a.Str("1")}, - ToLabels: &map[string]*string{ChaosGroupMinority: a.Str("1")}, + FromLabels: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")}, + ToLabels: &map[string]*string{ChaosGroupMinority: utils.Ptr("1")}, DurationStr: "1m", }, }, @@ -169,19 +170,19 @@ func TestAutomationChaos(t *testing.T) { chainlink.New(0, defaultAutomationSettings), chaos.NewNetworkPartition, &chaos.Props{ - FromLabels: &map[string]*string{"app": a.Str("geth")}, - ToLabels: &map[string]*string{ChaosGroupMajorityPlus: a.Str("1")}, + FromLabels: &map[string]*string{"app": utils.Ptr("geth")}, + ToLabels: &map[string]*string{ChaosGroupMajorityPlus: utils.Ptr("1")}, DurationStr: "1m", }, }, } - for n, tst := range testCases { - name := n - testCase := tst + for name, testCase := range testCases { + name := name + testCase := testCase t.Run(fmt.Sprintf("Automation_%s", name), func(t *testing.T) { t.Parallel() - network := networks.SelectedNetwork // Need a new copy of the network for each test + network := networks.MustGetSelectedNetworksFromEnv()[0] // Need a new copy of the network for each test testEnvironment := environment. 
New(&environment.Config{ @@ -223,7 +224,7 @@ func TestAutomationChaos(t *testing.T) { if chainClient != nil { chainClient.GasStats().PrintStats() } - err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.PanicLevel, chainClient) + err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.PanicLevel, chainClient) require.NoError(t, err, "Error tearing down environment") }) @@ -268,7 +269,7 @@ func TestAutomationChaos(t *testing.T) { gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 10 for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(it_utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) expect := 5 l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") @@ -283,7 +284,7 @@ func TestAutomationChaos(t *testing.T) { gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 10 for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(it_utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) expect := 10 l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") diff --git a/integration-tests/chaos/ocr2vrf_chaos_test.go b/integration-tests/chaos/ocr2vrf_chaos_test.go index 1d7f61f783..8739a5960a 100644 --- a/integration-tests/chaos/ocr2vrf_chaos_test.go +++ b/integration-tests/chaos/ocr2vrf_chaos_test.go @@ -10,12 +10,11 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" - 
"github.com/smartcontractkit/chainlink-env/chaos" - "github.com/smartcontractkit/chainlink-env/environment" - a "github.com/smartcontractkit/chainlink-env/pkg/alias" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" "github.com/smartcontractkit/chainlink-testing-framework/utils" @@ -26,12 +25,13 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/config" "github.com/smartcontractkit/chainlink/integration-tests/contracts" + it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestOCR2VRFChaos(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) - loadedNetwork := networks.SelectedNetwork + loadedNetwork := networks.MustGetSelectedNetworksFromEnv()[0] defaultOCR2VRFSettings := map[string]interface{}{ "replicas": 6, @@ -68,7 +68,7 @@ func TestOCR2VRFChaos(t *testing.T) { chainlink.New(0, defaultOCR2VRFSettings), chaos.NewFailPods, &chaos.Props{ - LabelsSelector: &map[string]*string{ChaosGroupMinority: a.Str("1")}, + LabelsSelector: &map[string]*string{ChaosGroupMinority: utils.Ptr("1")}, DurationStr: "1m", }, }, @@ -78,7 +78,7 @@ func TestOCR2VRFChaos(t *testing.T) { // chainlink.New(0, defaultOCR2VRFSettings), // chaos.NewFailPods, // &chaos.Props{ - // LabelsSelector: &map[string]*string{ChaosGroupMajority: a.Str("1")}, + // LabelsSelector: 
&map[string]*string{ChaosGroupMajority: utils.Ptr("1")}, // DurationStr: "1m", // }, //}, @@ -88,9 +88,9 @@ func TestOCR2VRFChaos(t *testing.T) { // chainlink.New(0, defaultOCR2VRFSettings), // chaos.NewFailPods, // &chaos.Props{ - // LabelsSelector: &map[string]*string{ChaosGroupMajority: a.Str("1")}, + // LabelsSelector: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")}, // DurationStr: "1m", - // ContainerNames: &[]*string{a.Str("chainlink-db")}, + // ContainerNames: &[]*string{utils.Ptr("chainlink-db")}, // }, //}, //NetworkChaosFailMajorityNetwork: { @@ -98,8 +98,8 @@ func TestOCR2VRFChaos(t *testing.T) { // chainlink.New(0, defaultOCR2VRFSettings), // chaos.NewNetworkPartition, // &chaos.Props{ - // FromLabels: &map[string]*string{ChaosGroupMajority: a.Str("1")}, - // ToLabels: &map[string]*string{ChaosGroupMinority: a.Str("1")}, + // FromLabels: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")}, + // ToLabels: &map[string]*string{ChaosGroupMinority: utils.Ptr("1")}, // DurationStr: "1m", // }, //}, @@ -108,8 +108,8 @@ func TestOCR2VRFChaos(t *testing.T) { // chainlink.New(0, defaultOCR2VRFSettings), // chaos.NewNetworkPartition, // &chaos.Props{ - // FromLabels: &map[string]*string{"app": a.Str("geth")}, - // ToLabels: &map[string]*string{ChaosGroupMajority: a.Str("1")}, + // FromLabels: &map[string]*string{"app": utils.Ptr("geth")}, + // ToLabels: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")}, // DurationStr: "1m", // }, //}, @@ -119,7 +119,7 @@ func TestOCR2VRFChaos(t *testing.T) { testCase := tc t.Run(fmt.Sprintf("OCR2VRF_%s", testCaseName), func(t *testing.T) { t.Parallel() - testNetwork := networks.SelectedNetwork // Need a new copy of the network for each test + testNetwork := networks.MustGetSelectedNetworksFromEnv()[0] // Need a new copy of the network for each test testEnvironment := environment. 
New(&environment.Config{ NamespacePrefix: fmt.Sprintf( @@ -150,7 +150,7 @@ func TestOCR2VRFChaos(t *testing.T) { require.NoError(t, err, "Retrieving on-chain wallet addresses for chainlink nodes shouldn't fail") t.Cleanup(func() { - err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.PanicLevel, chainClient) + err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.PanicLevel, chainClient) require.NoError(t, err, "Error tearing down environment") }) @@ -186,7 +186,7 @@ func TestOCR2VRFChaos(t *testing.T) { ) for i := uint16(0); i < ocr2vrf_constants.NumberOfRandomWordsToRequest; i++ { - randomness, err := consumerContract.GetRandomnessByRequestId(nil, requestID, big.NewInt(int64(i))) + randomness, err := consumerContract.GetRandomnessByRequestId(it_utils.TestContext(t), requestID, big.NewInt(int64(i))) require.NoError(t, err) l.Info().Interface("Random Number", randomness).Interface("Randomness Number Index", i).Msg("Randomness retrieved from Consumer contract") require.NotEqual(t, 0, randomness.Uint64(), "Randomness retrieved from Consumer contract give an answer other than 0") @@ -213,7 +213,7 @@ func TestOCR2VRFChaos(t *testing.T) { ) for i := uint16(0); i < ocr2vrf_constants.NumberOfRandomWordsToRequest; i++ { - randomness, err := consumerContract.GetRandomnessByRequestId(nil, requestID, big.NewInt(int64(i))) + randomness, err := consumerContract.GetRandomnessByRequestId(it_utils.TestContext(t), requestID, big.NewInt(int64(i))) require.NoError(t, err, "Error getting Randomness result from Consumer Contract") l.Info().Interface("Random Number", randomness).Interface("Randomness Number Index", i).Msg("Randomness retrieved from Consumer contract") require.NotEqual(t, 0, randomness.Uint64(), "Randomness retrieved from Consumer contract give an answer other than 0") diff --git a/integration-tests/chaos/ocr_chaos_test.go b/integration-tests/chaos/ocr_chaos_test.go index f3ee12046f..76e25d9200 100644 
--- a/integration-tests/chaos/ocr_chaos_test.go +++ b/integration-tests/chaos/ocr_chaos_test.go @@ -1,7 +1,6 @@ package chaos import ( - "context" "fmt" "math/big" "os" @@ -11,15 +10,14 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" - "github.com/smartcontractkit/chainlink-env/chaos" - "github.com/smartcontractkit/chainlink-env/environment" - a "github.com/smartcontractkit/chainlink-env/pkg/alias" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" - "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver" - mockservercfg "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver" + mockservercfg "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" "github.com/smartcontractkit/chainlink-testing-framework/utils" @@ -28,6 +26,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/config" "github.com/smartcontractkit/chainlink/integration-tests/contracts" + it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils" ) var ( @@ -53,7 +52,7 @@ var ( ) func TestMain(m *testing.M) { - defaultOCRSettings["toml"] = client.AddNetworksConfig(config.BaseOCRP2PV1Config, 
networks.SelectedNetwork) + defaultOCRSettings["toml"] = client.AddNetworksConfig(config.BaseOCRP2PV1Config, networks.MustGetSelectedNetworksFromEnv()[0]) os.Exit(m.Run()) } @@ -75,14 +74,14 @@ func TestOCRChaos(t *testing.T) { // and chaos.NewNetworkPartition method (https://chaos-mesh.org/docs/simulate-network-chaos-on-kubernetes/) // in order to regenerate Go bindings if k8s version will be updated // you can pull new CRD spec from your current cluster and check README here - // https://github.com/smartcontractkit/chainlink-env/blob/master/README.md + // https://github.com/smartcontractkit/chainlink-testing-framework/blob/master/k8s/README.md NetworkChaosFailMajorityNetwork: { ethereum.New(nil), chainlink.New(0, defaultOCRSettings), chaos.NewNetworkPartition, &chaos.Props{ - FromLabels: &map[string]*string{ChaosGroupMajority: a.Str("1")}, - ToLabels: &map[string]*string{ChaosGroupMinority: a.Str("1")}, + FromLabels: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")}, + ToLabels: &map[string]*string{ChaosGroupMinority: utils.Ptr("1")}, DurationStr: "1m", }, }, @@ -91,8 +90,8 @@ func TestOCRChaos(t *testing.T) { chainlink.New(0, defaultOCRSettings), chaos.NewNetworkPartition, &chaos.Props{ - FromLabels: &map[string]*string{"app": a.Str("geth")}, - ToLabels: &map[string]*string{ChaosGroupMajorityPlus: a.Str("1")}, + FromLabels: &map[string]*string{"app": utils.Ptr("geth")}, + ToLabels: &map[string]*string{ChaosGroupMajorityPlus: utils.Ptr("1")}, DurationStr: "1m", }, }, @@ -101,7 +100,7 @@ func TestOCRChaos(t *testing.T) { chainlink.New(0, defaultOCRSettings), chaos.NewFailPods, &chaos.Props{ - LabelsSelector: &map[string]*string{ChaosGroupMinority: a.Str("1")}, + LabelsSelector: &map[string]*string{ChaosGroupMinority: utils.Ptr("1")}, DurationStr: "1m", }, }, @@ -110,7 +109,7 @@ func TestOCRChaos(t *testing.T) { chainlink.New(0, defaultOCRSettings), chaos.NewFailPods, &chaos.Props{ - LabelsSelector: &map[string]*string{ChaosGroupMajority: a.Str("1")}, + 
LabelsSelector: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")}, DurationStr: "1m", }, }, @@ -119,9 +118,9 @@ func TestOCRChaos(t *testing.T) { chainlink.New(0, defaultOCRSettings), chaos.NewFailPods, &chaos.Props{ - LabelsSelector: &map[string]*string{ChaosGroupMajority: a.Str("1")}, + LabelsSelector: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")}, DurationStr: "1m", - ContainerNames: &[]*string{a.Str("chainlink-db")}, + ContainerNames: &[]*string{utils.Ptr("chainlink-db")}, }, }, } @@ -165,7 +164,7 @@ func TestOCRChaos(t *testing.T) { if chainClient != nil { chainClient.GasStats().PrintStats() } - err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.PanicLevel, chainClient) + err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.PanicLevel, chainClient) require.NoError(t, err, "Error tearing down environment") }) @@ -181,7 +180,7 @@ func TestOCRChaos(t *testing.T) { err = actions.FundChainlinkNodes(chainlinkNodes, chainClient, big.NewFloat(10)) require.NoError(t, err) - ocrInstances, err := actions.DeployOCRContracts(1, lt, cd, bootstrapNode, workerNodes, chainClient) + ocrInstances, err := actions.DeployOCRContracts(1, lt, cd, workerNodes, chainClient) require.NoError(t, err) err = chainClient.WaitForEvents() require.NoError(t, err) @@ -196,7 +195,7 @@ func TestOCRChaos(t *testing.T) { err := ocr.RequestNewRound() require.NoError(t, err, "Error requesting new round") } - round, err := ocrInstances[0].GetLatestRound(context.Background()) + round, err := ocrInstances[0].GetLatestRound(it_utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred()) l.Info().Int64("RoundID", round.RoundId.Int64()).Msg("Latest OCR Round") if round.RoundId.Int64() == chaosStartRound && !chaosApplied { diff --git a/integration-tests/client/chainlink.go b/integration-tests/client/chainlink.go index 3fe663a33b..81c587b46e 100644 --- a/integration-tests/client/chainlink.go +++ 
b/integration-tests/client/chainlink.go @@ -121,11 +121,11 @@ func (c *ChainlinkClient) MustCreateJob(spec JobSpec) (*Job, error) { if err != nil { return nil, err } - return job, VerifyStatusCode(resp.StatusCode, http.StatusOK) + return job, VerifyStatusCode(resp.RawResponse.StatusCode, http.StatusOK) } // CreateJob creates a Chainlink job based on the provided spec struct -func (c *ChainlinkClient) CreateJob(spec JobSpec) (*Job, *http.Response, error) { +func (c *ChainlinkClient) CreateJob(spec JobSpec) (*Job, *resty.Response, error) { job := &Job{} specString, err := spec.String() if err != nil { @@ -142,7 +142,7 @@ func (c *ChainlinkClient) CreateJob(spec JobSpec) (*Job, *http.Response, error) if err != nil { return nil, nil, err } - return job, resp.RawResponse, err + return job, resp, err } // ReadJobs reads all jobs from the Chainlink node @@ -306,6 +306,19 @@ func (c *ChainlinkClient) ReadBridge(name string) (*BridgeType, *http.Response, return &bt, resp.RawResponse, err } +// ReadBridges reads bridges from the Chainlink node +func (c *ChainlinkClient) ReadBridges() (*ResponseSlice, *resty.Response, error) { + result := &ResponseSlice{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Getting all bridges") + resp, err := c.APIClient.R(). + SetResult(&result). 
+ Get("/v2/bridge_types") + if err != nil { + return nil, nil, err + } + return result, resp, err +} + // DeleteBridge deletes a bridge on the Chainlink node based on the provided name func (c *ChainlinkClient) DeleteBridge(name string) (*http.Response, error) { c.l.Info().Str(NodeURL, c.Config.URL).Str("Name", name).Msg("Deleting Bridge") @@ -885,8 +898,16 @@ func (c *ChainlinkClient) CreateCSAKey() (*CSAKey, *http.Response, error) { return csaKey, resp.RawResponse, err } +func (c *ChainlinkClient) MustReadCSAKeys() (*CSAKeys, *resty.Response, error) { + csaKeys, res, err := c.ReadCSAKeys() + if err != nil { + return nil, res, err + } + return csaKeys, res, VerifyStatusCodeWithResponse(res, http.StatusOK) +} + // ReadCSAKeys reads CSA keys from the Chainlink node -func (c *ChainlinkClient) ReadCSAKeys() (*CSAKeys, *http.Response, error) { +func (c *ChainlinkClient) ReadCSAKeys() (*CSAKeys, *resty.Response, error) { csaKeys := &CSAKeys{} c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading CSA Keys") resp, err := c.APIClient.R(). 
@@ -898,7 +919,7 @@ func (c *ChainlinkClient) ReadCSAKeys() (*CSAKeys, *http.Response, error) { if err != nil { return nil, nil, err } - return csaKeys, resp.RawResponse, err + return csaKeys, resp, err } // CreateEI creates an EI on the Chainlink node based on the provided attributes and returns the respective secrets @@ -1109,6 +1130,19 @@ func VerifyStatusCode(actStatusCd, expStatusCd int) error { return nil } +func VerifyStatusCodeWithResponse(res *resty.Response, expStatusCd int) error { + actStatusCd := res.RawResponse.StatusCode + if actStatusCd != expStatusCd { + return fmt.Errorf( + "unexpected response code, got %d, expected %d, response: %s", + actStatusCd, + expStatusCd, + res.Body(), + ) + } + return nil +} + func CreateNodeKeysBundle(nodes []*ChainlinkClient, chainName string, chainId string) ([]NodeKeysBundle, []*CLNodesWithKeys, error) { nkb := make([]NodeKeysBundle, 0) var clNodes []*CLNodesWithKeys @@ -1217,3 +1251,23 @@ func (c *ChainlinkClient) GetForwarders() (*Forwarders, *http.Response, error) { } return response, resp.RawResponse, err } + +// ReplayLogPollerFromBlock replays the log poller from the given block number +func (c *ChainlinkClient) ReplayLogPollerFromBlock(fromBlock, evmChainID int64) (*ReplayResponse, *http.Response, error) { + specObj := &ReplayResponse{} + c.l.Info().Str(NodeURL, c.Config.URL).Int64("From block", fromBlock).Int64("EVM chain ID", evmChainID).Msg("Replaying Log Poller from block") + resp, err := c.APIClient.R(). + SetResult(&specObj). + SetQueryParams(map[string]string{ + "evmChainID": fmt.Sprint(evmChainID), + }). + SetPathParams(map[string]string{ + "fromBlock": fmt.Sprint(fromBlock), + }). 
+ Post("/v2/replay_from_block/{fromBlock}") + if err != nil { + return nil, nil, err + } + + return specObj, resp.RawResponse, err +} diff --git a/integration-tests/client/chainlink_config_builder.go b/integration-tests/client/chainlink_config_builder.go index 9c1050300b..13cc1e7fe9 100644 --- a/integration-tests/client/chainlink_config_builder.go +++ b/integration-tests/client/chainlink_config_builder.go @@ -4,8 +4,8 @@ import ( "fmt" "os" - "github.com/smartcontractkit/chainlink-env/config" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/config" ) const ( diff --git a/integration-tests/client/chainlink_k8s.go b/integration-tests/client/chainlink_k8s.go index 4aa7c6d0fe..27fd956103 100644 --- a/integration-tests/client/chainlink_k8s.go +++ b/integration-tests/client/chainlink_k8s.go @@ -8,7 +8,7 @@ import ( "github.com/rs/zerolog/log" - "github.com/smartcontractkit/chainlink-env/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" ) type ChainlinkK8sClient struct { @@ -63,7 +63,7 @@ func (c *ChainlinkK8sClient) UpgradeVersion(testEnvironment *environment.Environ }, }, } - testEnvironment, err := testEnvironment.UpdateHelm(c.ChartName, upgradeVals) + _, err := testEnvironment.UpdateHelm(c.ChartName, upgradeVals) return err } diff --git a/integration-tests/client/chainlink_models.go b/integration-tests/client/chainlink_models.go index 6013e13e0f..c6d1209d2e 100644 --- a/integration-tests/client/chainlink_models.go +++ b/integration-tests/client/chainlink_models.go @@ -9,6 +9,7 @@ import ( "gopkg.in/guregu/null.v4" "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/utils" ) // EIServiceConfig represents External Initiator service config @@ -1407,3 +1408,16 @@ type ForwarderAttributes struct { CreatedAt time.Time `json:"createdAt"` UpdatedAt time.Time `json:"updatedAt"` } + +type 
ReplayResponse struct { + Data ReplayResponseData `json:"data"` +} + +type ReplayResponseData struct { + Attributes ReplayResponseAttributes `json:"attributes"` +} + +type ReplayResponseAttributes struct { + Message string `json:"message"` + EVMChainID *utils.Big `json:"evmChainID"` +} diff --git a/integration-tests/config/config.go b/integration-tests/config/config.go index 44c108b0d7..1da8254e0e 100644 --- a/integration-tests/config/config.go +++ b/integration-tests/config/config.go @@ -8,7 +8,6 @@ Enabled = true [P2P.V2] Enabled = false -[P2P] [P2P.V1] Enabled = true ListenIP = '0.0.0.0' diff --git a/integration-tests/contracts/contract_deployer.go b/integration-tests/contracts/contract_deployer.go index bdf63d1919..45195d327e 100644 --- a/integration-tests/contracts/contract_deployer.go +++ b/integration-tests/contracts/contract_deployer.go @@ -12,11 +12,12 @@ import ( "github.com/rs/zerolog" "github.com/rs/zerolog/log" - "github.com/smartcontractkit/chainlink-testing-framework/blockchain" "github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator" "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator" ocrConfigHelper "github.com/smartcontractkit/libocr/offchainreporting/confighelper" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/functions/generated/functions_load_test_client" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/functions/generated/functions_v1_events_mock" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_consumer_benchmark" @@ -45,6 +46,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" registry21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper_2_1" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface" + le 
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_triggered_streams_lookup_wrapper" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_upkeep_counter_wrapper" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/mock_aggregator_proxy" @@ -138,6 +140,7 @@ type ContractDeployer interface { DeployMercuryVerifierProxyContract(accessControllerAddr common.Address) (MercuryVerifierProxy, error) DeployMercuryFeeManager(linkAddress common.Address, nativeAddress common.Address, proxyAddress common.Address, rewardManagerAddress common.Address) (MercuryFeeManager, error) DeployMercuryRewardManager(linkAddress common.Address) (MercuryRewardManager, error) + DeployLogEmitterContract() (LogEmitter, error) } // NewContractDeployer returns an instance of a contract deployer based on the client type @@ -169,6 +172,12 @@ func NewContractDeployer(bcClient blockchain.EVMClient, logger zerolog.Logger) ( return &PolygonZkEvmContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil case *blockchain.LineaClient: return &LineaContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.FantomClient: + return &FantomContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.KromaClient: + return &KromaContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.WeMixClient: + return &WeMixContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil } return nil, errors.New("unknown blockchain client implementation for contract deployer, register blockchain client in NewContractDeployer") } @@ -232,6 +241,18 @@ type LineaContractDeployer struct { *EthereumContractDeployer } +type FantomContractDeployer struct { + *EthereumContractDeployer +} + +type KromaContractDeployer struct { + *EthereumContractDeployer +} + +type 
WeMixContractDeployer struct { + *EthereumContractDeployer +} + // NewEthereumContractDeployer returns an instantiated instance of the ETH contract deployer func NewEthereumContractDeployer(ethClient blockchain.EVMClient, logger zerolog.Logger) *EthereumContractDeployer { return &EthereumContractDeployer{ @@ -848,34 +869,41 @@ func (e *EthereumContractDeployer) LoadKeeperRegistrar(address common.Address, r client: e.client, registrar20: instance.(*keeper_registrar_wrapper2_0.KeeperRegistrar), }, err - } else { - instance, err := e.client.LoadContract("AutomationRegistrar", address, func( - address common.Address, - backend bind.ContractBackend, - ) (interface{}, error) { - return registrar21.NewAutomationRegistrar(address, backend) - }) - if err != nil { - return nil, err - } - return &EthereumKeeperRegistrar{ - address: &address, - client: e.client, - registrar21: instance.(*registrar21.AutomationRegistrar), - }, err } + instance, err := e.client.LoadContract("AutomationRegistrar", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return registrar21.NewAutomationRegistrar(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistrar{ + address: &address, + client: e.client, + registrar21: instance.(*registrar21.AutomationRegistrar), + }, err } func (e *EthereumContractDeployer) DeployKeeperRegistry( opts *KeeperRegistryOpts, ) (KeeperRegistry, error) { var mode uint8 - switch e.client.GetChainID() { + switch e.client.GetChainID().Int64() { //Arbitrum payment model - case big.NewInt(421613): + //Goerli Arbitrum + case 421613: + mode = uint8(1) + //Sepolia Arbitrum + case 421614: mode = uint8(1) //Optimism payment model - case big.NewInt(420): + //Goerli Optimism + case 420: + mode = uint8(2) + //Goerli Base + case 84531: mode = uint8(2) default: mode = uint8(0) @@ -1599,3 +1627,21 @@ func (e *EthereumContractDeployer) DeployWERC20Mock() (WERC20Mock, error) { l: e.l, }, err } + 
+func (e *EthereumContractDeployer) DeployLogEmitterContract() (LogEmitter, error) { + address, _, instance, err := e.client.DeployContract("Log Emitter", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return le.DeployLogEmitter(auth, backend) + }) + if err != nil { + return nil, err + } + return &LogEmitterContract{ + client: e.client, + instance: instance.(*le.LogEmitter), + address: *address, + l: e.l, + }, err +} diff --git a/integration-tests/contracts/contract_loader.go b/integration-tests/contracts/contract_loader.go index 9e889f8f1f..9a2f20226d 100644 --- a/integration-tests/contracts/contract_loader.go +++ b/integration-tests/contracts/contract_loader.go @@ -65,6 +65,8 @@ func NewContractLoader(bcClient blockchain.EVMClient, logger zerolog.Logger) (Co return &OptimismContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil case *blockchain.PolygonZkEvmClient: return &PolygonZkEvmContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + case *blockchain.WeMixClient: + return &WeMixContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil } return nil, errors.New("unknown blockchain client implementation for contract Loader, register blockchain client in NewContractLoader") } @@ -108,6 +110,11 @@ type PolygonZKEVMContractLoader struct { *EthereumContractLoader } +// WeMixContractLoader wraps for WeMix +type WeMixContractLoader struct { + *EthereumContractLoader +} + // NewEthereumContractLoader returns an instantiated instance of the ETH contract Loader func NewEthereumContractLoader(ethClient blockchain.EVMClient, logger zerolog.Logger) *EthereumContractLoader { return &EthereumContractLoader{ diff --git a/integration-tests/contracts/contract_models.go b/integration-tests/contracts/contract_models.go index 51fce7cb12..4c8d610fa1 100644 --- a/integration-tests/contracts/contract_models.go +++ b/integration-tests/contracts/contract_models.go @@ 
-400,3 +400,13 @@ type WERC20Mock interface { Transfer(to string, amount *big.Int) error Mint(account common.Address, amount *big.Int) (*types.Transaction, error) } + +type LogEmitter interface { + Address() common.Address + EmitLogInts(ints []int) (*types.Transaction, error) + EmitLogIntsIndexed(ints []int) (*types.Transaction, error) + EmitLogStrings(strings []string) (*types.Transaction, error) + EmitLogInt(payload int) (*types.Transaction, error) + EmitLogIntIndexed(payload int) (*types.Transaction, error) + EmitLogString(strings string) (*types.Transaction, error) +} diff --git a/integration-tests/contracts/contract_vrf_models.go b/integration-tests/contracts/contract_vrf_models.go index 656cabb92e..baee2ccd92 100644 --- a/integration-tests/contracts/contract_vrf_models.go +++ b/integration-tests/contracts/contract_vrf_models.go @@ -73,18 +73,24 @@ type VRFCoordinatorV2_5 interface { publicProvingKey [2]*big.Int, ) error HashOfKey(ctx context.Context, pubKey [2]*big.Int) ([32]byte, error) - CreateSubscription() error + CreateSubscription() (*types.Transaction, error) GetActiveSubscriptionIds(ctx context.Context, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) Migrate(subId *big.Int, coordinatorAddress string) error RegisterMigratableCoordinator(migratableCoordinatorAddress string) error AddConsumer(subId *big.Int, consumerAddress string) error FundSubscriptionWithNative(subId *big.Int, nativeTokenAmount *big.Int) error Address() string + PendingRequestsExist(ctx context.Context, subID *big.Int) (bool, error) GetSubscription(ctx context.Context, subID *big.Int) (vrf_coordinator_v2_5.GetSubscription, error) + OwnerCancelSubscription(subID *big.Int) (*types.Transaction, error) + CancelSubscription(subID *big.Int, to common.Address) (*types.Transaction, error) + OracleWithdraw(recipient common.Address, amount *big.Int) error + OracleWithdrawNative(recipient common.Address, amount *big.Int) error GetNativeTokenTotalBalance(ctx context.Context) 
(*big.Int, error) GetLinkTotalBalance(ctx context.Context) (*big.Int, error) - FindSubscriptionID() (*big.Int, error) + FindSubscriptionID(subID *big.Int) (*big.Int, error) WaitForSubscriptionCreatedEvent(timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCreated, error) + WaitForSubscriptionCanceledEvent(subID *big.Int, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCanceled, error) WaitForRandomWordsFulfilledEvent(subID []*big.Int, requestID []*big.Int, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) WaitForRandomWordsRequestedEvent(keyHash [][32]byte, subID []*big.Int, sender []common.Address, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested, error) WaitForMigrationCompletedEvent(timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25MigrationCompleted, error) @@ -256,12 +262,13 @@ type RequestStatus struct { } type LoadTestRequestStatus struct { - Fulfilled bool - RandomWords []*big.Int - requestTimestamp *big.Int - fulfilmentTimestamp *big.Int - requestBlockNumber *big.Int - fulfilmentBlockNumber *big.Int + Fulfilled bool + RandomWords []*big.Int + // Currently Unused November 8, 2023, Might be used in near future, will remove if not. 
+ // requestTimestamp *big.Int + // fulfilmentTimestamp *big.Int + // requestBlockNumber *big.Int + // fulfilmentBlockNumber *big.Int } type VRFLoadTestMetrics struct { diff --git a/integration-tests/contracts/ethereum_contracts.go b/integration-tests/contracts/ethereum_contracts.go index 5b3a93fe0c..9cb858fe00 100644 --- a/integration-tests/contracts/ethereum_contracts.go +++ b/integration-tests/contracts/ethereum_contracts.go @@ -13,10 +13,14 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/rs/zerolog/log" + "github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator" + "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator" + ocrConfigHelper "github.com/smartcontractkit/libocr/offchainreporting/confighelper" + ocrTypes "github.com/smartcontractkit/libocr/offchainreporting/types" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/functions/generated/functions_coordinator" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/functions/generated/functions_load_test_client" @@ -44,10 +48,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/werc20_mock" - "github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator" - "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator" - ocrConfigHelper "github.com/smartcontractkit/libocr/offchainreporting/confighelper" - ocrTypes "github.com/smartcontractkit/libocr/offchainreporting/types" "github.com/smartcontractkit/chainlink/integration-tests/client" eth_contracts 
"github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" @@ -940,7 +940,7 @@ func (f *EthereumFluxAggregator) PaymentAmount(ctx context.Context) (*big.Int, e return payment, nil } -func (f *EthereumFluxAggregator) RequestNewRound(ctx context.Context) error { +func (f *EthereumFluxAggregator) RequestNewRound(_ context.Context) error { opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) if err != nil { return err @@ -979,7 +979,7 @@ func (f *EthereumFluxAggregator) WatchSubmissionReceived(ctx context.Context, ev } } -func (f *EthereumFluxAggregator) SetRequesterPermissions(ctx context.Context, addr common.Address, authorized bool, roundsDelay uint32) error { +func (f *EthereumFluxAggregator) SetRequesterPermissions(_ context.Context, addr common.Address, authorized bool, roundsDelay uint32) error { opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) if err != nil { return err @@ -1020,7 +1020,7 @@ func (f *EthereumFluxAggregator) LatestRoundID(ctx context.Context) (*big.Int, e } func (f *EthereumFluxAggregator) WithdrawPayment( - ctx context.Context, + _ context.Context, from common.Address, to common.Address, amount *big.Int) error { @@ -2162,11 +2162,11 @@ func (e *EthereumFunctionsRouter) CreateSubscriptionWithConsumer(consumer string topicOneInputs := abi.Arguments{fabi.Events["SubscriptionCreated"].Inputs[0]} topicOneHash := []common.Hash{r.Logs[0].Topics[1:][0]} if err := abi.ParseTopicsIntoMap(topicsMap, topicOneInputs, topicOneHash); err != nil { - return 0, errors.Wrap(err, "failed to decode topic value") + return 0, fmt.Errorf("failed to decode topic value, err: %w", err) } e.l.Info().Interface("NewTopicsDecoded", topicsMap).Send() if topicsMap["subscriptionId"] == 0 { - return 0, errors.New("failed to decode subscription ID after creation") + return 0, fmt.Errorf("failed to decode subscription ID after creation") } return topicsMap["subscriptionId"].(uint64), nil } diff --git 
a/integration-tests/contracts/ethereum_keeper_contracts.go b/integration-tests/contracts/ethereum_keeper_contracts.go index 135b016ee5..2c0250e745 100644 --- a/integration-tests/contracts/ethereum_keeper_contracts.go +++ b/integration-tests/contracts/ethereum_keeper_contracts.go @@ -250,25 +250,25 @@ func (rcs *KeeperRegistrySettings) EncodeOnChainConfig(registrar string, registr encodedOnchainConfig, err := utilsABI.Methods["_onChainConfig"].Inputs.Pack(&onchainConfigStruct) return encodedOnchainConfig, err - } else { - configType := goabi.MustNewType("tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,address registrar)") - onchainConfig, err := goabi.Encode(map[string]interface{}{ - "paymentPremiumPPB": rcs.PaymentPremiumPPB, - "flatFeeMicroLink": rcs.FlatFeeMicroLINK, - "checkGasLimit": rcs.CheckGasLimit, - "stalenessSeconds": rcs.StalenessSeconds, - "gasCeilingMultiplier": rcs.GasCeilingMultiplier, - "minUpkeepSpend": rcs.MinUpkeepSpend, - "maxPerformGas": rcs.MaxPerformGas, - "maxCheckDataSize": rcs.MaxCheckDataSize, - "maxPerformDataSize": rcs.MaxPerformDataSize, - "fallbackGasPrice": rcs.FallbackGasPrice, - "fallbackLinkPrice": rcs.FallbackLinkPrice, - "transcoder": common.Address{}, - "registrar": registrar, - }, configType) - return onchainConfig, err } + configType := goabi.MustNewType("tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,address registrar)") + onchainConfig, err := goabi.Encode(map[string]interface{}{ + "paymentPremiumPPB": rcs.PaymentPremiumPPB, + "flatFeeMicroLink": 
rcs.FlatFeeMicroLINK, + "checkGasLimit": rcs.CheckGasLimit, + "stalenessSeconds": rcs.StalenessSeconds, + "gasCeilingMultiplier": rcs.GasCeilingMultiplier, + "minUpkeepSpend": rcs.MinUpkeepSpend, + "maxPerformGas": rcs.MaxPerformGas, + "maxCheckDataSize": rcs.MaxCheckDataSize, + "maxPerformDataSize": rcs.MaxPerformDataSize, + "fallbackGasPrice": rcs.FallbackGasPrice, + "fallbackLinkPrice": rcs.FallbackLinkPrice, + "transcoder": common.Address{}, + "registrar": registrar, + }, configType) + return onchainConfig, err + } func (v *EthereumKeeperRegistry) RegistryOwnerAddress() common.Address { @@ -276,6 +276,7 @@ func (v *EthereumKeeperRegistry) RegistryOwnerAddress() common.Address { Pending: false, } + //nolint: exhaustive switch v.version { case ethereum.RegistryVersion_2_1: ownerAddress, _ := v.registry2_1.Owner(callOpts) @@ -283,6 +284,8 @@ func (v *EthereumKeeperRegistry) RegistryOwnerAddress() common.Address { case ethereum.RegistryVersion_2_0: ownerAddress, _ := v.registry2_0.Owner(callOpts) return ownerAddress + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1, ethereum.RegistryVersion_1_2, ethereum.RegistryVersion_1_3: + return common.HexToAddress(v.client.GetDefaultWallet().Address()) } return common.HexToAddress(v.client.GetDefaultWallet().Address()) @@ -664,7 +667,7 @@ func (v *EthereumKeeperRegistry) GetKeeperInfo(ctx context.Context, keeperAddr s info, err = v.registry1_2.GetKeeperInfo(opts, common.HexToAddress(keeperAddr)) case ethereum.RegistryVersion_1_3: info, err = v.registry1_3.GetKeeperInfo(opts, common.HexToAddress(keeperAddr)) - case ethereum.RegistryVersion_2_0: + case ethereum.RegistryVersion_2_0, ethereum.RegistryVersion_2_1: // this is not used anywhere return nil, fmt.Errorf("not supported") } @@ -710,6 +713,8 @@ func (v *EthereumKeeperRegistry) SetKeepers(keepers []string, payees []string, o ocrConfig.OffchainConfigVersion, ocrConfig.OffchainConfig, ) + case ethereum.RegistryVersion_2_1: + return fmt.Errorf("not supported") 
} if err != nil { @@ -760,6 +765,8 @@ func (v *EthereumKeeperRegistry) RegisterUpkeep(target string, gasLimit uint32, checkData, nil, //offchain config ) + case ethereum.RegistryVersion_2_1: + return fmt.Errorf("not supported") } if err != nil { @@ -877,6 +884,8 @@ func (v *EthereumKeeperRegistry) GetKeeperList(ctx context.Context) ([]string, e return []string{}, err } list = state.Transmitters + case ethereum.RegistryVersion_2_1: + return nil, fmt.Errorf("not supported") } if err != nil { @@ -1112,6 +1121,7 @@ func (v *EthereumKeeperRegistry) ParseUpkeepPerformedLog(log *types.Log) (*Upkee // ParseStaleUpkeepReportLog Parses Stale upkeep report log func (v *EthereumKeeperRegistry) ParseStaleUpkeepReportLog(log *types.Log) (*StaleUpkeepReportLog, error) { + //nolint:exhaustive switch v.version { case ethereum.RegistryVersion_2_0: parsedLog, err := v.registry2_0.ParseStaleUpkeepReport(*log) @@ -1129,7 +1139,6 @@ func (v *EthereumKeeperRegistry) ParseStaleUpkeepReportLog(log *types.Log) (*Sta return &StaleUpkeepReportLog{ Id: parsedLog.Id, }, nil - } return nil, fmt.Errorf("keeper registry version %d is not supported", v.version) } @@ -1850,7 +1859,7 @@ func (v *EthereumKeeperConsumerPerformance) GetUpkeepCount(ctx context.Context) return eligible, err } -func (v *EthereumKeeperConsumerPerformance) SetCheckGasToBurn(ctx context.Context, gas *big.Int) error { +func (v *EthereumKeeperConsumerPerformance) SetCheckGasToBurn(_ context.Context, gas *big.Int) error { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -1862,7 +1871,7 @@ func (v *EthereumKeeperConsumerPerformance) SetCheckGasToBurn(ctx context.Contex return v.client.ProcessTransaction(tx) } -func (v *EthereumKeeperConsumerPerformance) SetPerformGasToBurn(ctx context.Context, gas *big.Int) error { +func (v *EthereumKeeperConsumerPerformance) SetPerformGasToBurn(_ context.Context, gas *big.Int) error { opts, err := 
v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -1897,7 +1906,7 @@ func (v *EthereumKeeperPerformDataCheckerConsumer) Counter(ctx context.Context) return cnt, nil } -func (v *EthereumKeeperPerformDataCheckerConsumer) SetExpectedData(ctx context.Context, expectedData []byte) error { +func (v *EthereumKeeperPerformDataCheckerConsumer) SetExpectedData(_ context.Context, expectedData []byte) error { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -2041,31 +2050,23 @@ func (v *EthereumKeeperRegistrar) EncodeRegisterRequest(name string, email []byt common.HexToAddress(senderAddr), ) - if err != nil { - return nil, err - } - return req, nil - } else { - req, err := registrarABI.Pack( - "register", - name, - email, - common.HexToAddress(upkeepAddr), - gasLimit, - common.HexToAddress(adminAddr), - uint8(0), // trigger type - checkData, - []byte{}, // triggerConfig - []byte{}, // offchainConfig - amount, - common.HexToAddress(senderAddr), - ) - - if err != nil { - return nil, err - } - return req, nil + return req, err } + req, err := registrarABI.Pack( + "register", + name, + email, + common.HexToAddress(upkeepAddr), + gasLimit, + common.HexToAddress(adminAddr), + uint8(0), // trigger type + checkData, + []byte{}, // triggerConfig + []byte{}, // offchainConfig + amount, + common.HexToAddress(senderAddr), + ) + return req, err } registryABI, err := abi.JSON(strings.NewReader(keeper_registrar_wrapper1_2.KeeperRegistrarMetaData.ABI)) if err != nil { diff --git a/integration-tests/contracts/ethereum_ocr2vrf_contracts.go b/integration-tests/contracts/ethereum_ocr2vrf_contracts.go index e8149b2125..cb52d1941a 100644 --- a/integration-tests/contracts/ethereum_ocr2vrf_contracts.go +++ b/integration-tests/contracts/ethereum_ocr2vrf_contracts.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/core/types" - "github.com/pkg/errors" "github.com/rs/zerolog/log" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" @@ -230,7 +229,7 @@ func (dkgContract *EthereumDKG) WaitForTransmittedEvent(timeout time.Duration) ( case err = <-subscription.Err(): return nil, err case <-time.After(timeout): - return nil, errors.New("timeout waiting for DKGTransmitted event") + return nil, fmt.Errorf("timeout waiting for DKGTransmitted event") case transmittedEvent := <-transmittedEventsChannel: return transmittedEvent, nil } @@ -250,7 +249,7 @@ func (dkgContract *EthereumDKG) WaitForConfigSetEvent(timeout time.Duration) (*d case err = <-subscription.Err(): return nil, err case <-time.After(timeout): - return nil, errors.New("timeout waiting for DKGConfigSet event") + return nil, fmt.Errorf("timeout waiting for DKGConfigSet event") case configSetEvent := <-configSetEventsChannel: return configSetEvent, nil } @@ -451,7 +450,7 @@ func (consumer *EthereumVRFBeaconConsumer) RequestRandomness( ) (*types.Receipt, error) { opts, err := consumer.client.TransactionOpts(consumer.client.GetDefaultWallet()) if err != nil { - return nil, errors.Wrap(err, "TransactionOpts failed") + return nil, fmt.Errorf("TransactionOpts failed, err: %w", err) } tx, err := consumer.vrfBeaconConsumer.TestRequestRandomness( opts, @@ -460,20 +459,20 @@ func (consumer *EthereumVRFBeaconConsumer) RequestRandomness( confirmationDelayArg, ) if err != nil { - return nil, errors.Wrap(err, "TestRequestRandomness failed") + return nil, fmt.Errorf("TestRequestRandomness failed, err: %w", err) } err = consumer.client.ProcessTransaction(tx) if err != nil { - return nil, errors.Wrap(err, "ProcessTransaction failed") + return nil, fmt.Errorf("ProcessTransaction failed, err: %w", err) } err = consumer.client.WaitForEvents() if err != nil { - return nil, errors.Wrap(err, "WaitForEvents failed") + return nil, fmt.Errorf("WaitForEvents failed, err: %w", err) } receipt, err := 
consumer.client.GetTxReceipt(tx.Hash()) if err != nil { - return nil, errors.Wrap(err, "GetTxReceipt failed") + return nil, fmt.Errorf("GetTxReceipt failed, err: %w", err) } log.Info().Interface("Sub ID", subID). Interface("Number of Words", numWords). @@ -526,20 +525,20 @@ func (consumer *EthereumVRFBeaconConsumer) RequestRandomnessFulfillment( arguments, ) if err != nil { - return nil, errors.Wrap(err, "TestRequestRandomnessFulfillment failed") + return nil, fmt.Errorf("TestRequestRandomnessFulfillment failed, err: %w", err) } err = consumer.client.ProcessTransaction(tx) if err != nil { - return nil, errors.Wrap(err, "ProcessTransaction failed") + return nil, fmt.Errorf("ProcessTransaction failed, err: %w", err) } err = consumer.client.WaitForEvents() if err != nil { - return nil, errors.Wrap(err, "WaitForEvents failed") + return nil, fmt.Errorf("WaitForEvents failed, err: %w", err) } receipt, err := consumer.client.GetTxReceipt(tx.Hash()) if err != nil { - return nil, errors.Wrap(err, "GetTxReceipt failed") + return nil, fmt.Errorf("GetTxReceipt failed, err: %w", err) } log.Info().Interface("Sub ID", subID). Interface("Number of Words", numWords). 
diff --git a/integration-tests/contracts/ethereum_vrfv2plus_contracts.go b/integration-tests/contracts/ethereum_vrfv2plus_contracts.go index 329d40ef40..330166dc79 100644 --- a/integration-tests/contracts/ethereum_vrfv2plus_contracts.go +++ b/integration-tests/contracts/ethereum_vrfv2plus_contracts.go @@ -12,7 +12,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" - "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_coordinator_v2_5" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_v2plus_load_test_with_metrics" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_v2plus_upgraded_version" @@ -97,6 +96,18 @@ func (v *EthereumVRFCoordinatorV2_5) GetActiveSubscriptionIds(ctx context.Contex return activeSubscriptionIds, nil } +func (v *EthereumVRFCoordinatorV2_5) PendingRequestsExist(ctx context.Context, subID *big.Int) (bool, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + pendingRequestExists, err := v.coordinator.PendingRequestExists(opts, subID) + if err != nil { + return false, err + } + return pendingRequestExists, nil +} + func (v *EthereumVRFCoordinatorV2_5) GetSubscription(ctx context.Context, subID *big.Int) (vrf_coordinator_v2_5.GetSubscription, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), @@ -132,6 +143,75 @@ func (v *EthereumVRFCoordinatorV2_5) GetNativeTokenTotalBalance(ctx context.Cont return totalBalance, nil } +// OwnerCancelSubscription cancels subscription by Coordinator owner +// return funds to sub owner, +// does not check if pending requests for a sub exist +func (v *EthereumVRFCoordinatorV2_5) OwnerCancelSubscription(subID *big.Int) (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + 
tx, err := v.coordinator.OwnerCancelSubscription( + opts, + subID, + ) + if err != nil { + return nil, err + } + return tx, v.client.ProcessTransaction(tx) +} + +// CancelSubscription cancels subscription by Sub owner, +// return funds to specified address, +// checks if pending requests for a sub exist +func (v *EthereumVRFCoordinatorV2_5) CancelSubscription(subID *big.Int, to common.Address) (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := v.coordinator.CancelSubscription( + opts, + subID, + to, + ) + if err != nil { + return nil, err + } + return tx, v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) OracleWithdraw(recipient common.Address, amount *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.OracleWithdraw( + opts, + recipient, + amount, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) OracleWithdrawNative(recipient common.Address, amount *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.OracleWithdrawNative( + opts, + recipient, + amount, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + func (v *EthereumVRFCoordinatorV2_5) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig vrf_coordinator_v2_5.VRFCoordinatorV25FeeConfig) error { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { @@ -183,16 +263,16 @@ func (v *EthereumVRFCoordinatorV2_5) RegisterProvingKey( return v.client.ProcessTransaction(tx) } -func (v *EthereumVRFCoordinatorV2_5) CreateSubscription() error { +func (v 
*EthereumVRFCoordinatorV2_5) CreateSubscription() (*types.Transaction, error) { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { - return err + return nil, err } tx, err := v.coordinator.CreateSubscription(opts) if err != nil { - return err + return nil, err } - return v.client.ProcessTransaction(tx) + return tx, v.client.ProcessTransaction(tx) } func (v *EthereumVRFCoordinatorV2_5) Migrate(subId *big.Int, coordinatorAddress string) error { @@ -251,11 +331,11 @@ func (v *EthereumVRFCoordinatorV2_5) FundSubscriptionWithNative(subId *big.Int, return v.client.ProcessTransaction(tx) } -func (v *EthereumVRFCoordinatorV2_5) FindSubscriptionID() (*big.Int, error) { +func (v *EthereumVRFCoordinatorV2_5) FindSubscriptionID(subID *big.Int) (*big.Int, error) { owner := v.client.GetDefaultWallet().Address() subscriptionIterator, err := v.coordinator.FilterSubscriptionCreated( nil, - nil, + []*big.Int{subID}, ) if err != nil { return nil, err @@ -288,6 +368,26 @@ func (v *EthereumVRFCoordinatorV2_5) WaitForSubscriptionCreatedEvent(timeout tim } } +func (v *EthereumVRFCoordinatorV2_5) WaitForSubscriptionCanceledEvent(subID *big.Int, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCanceled, error) { + eventsChannel := make(chan *vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCanceled) + subscription, err := v.coordinator.WatchSubscriptionCanceled(nil, eventsChannel, []*big.Int{subID}) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for SubscriptionCanceled event") + case sub := <-eventsChannel: + return sub, nil + } + } +} + func (v *EthereumVRFCoordinatorV2_5) WaitForRandomWordsFulfilledEvent(subID []*big.Int, requestID []*big.Int, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) { 
randomWordsFulfilledEventsChannel := make(chan *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled) subscription, err := v.coordinator.WatchRandomWordsFulfilled(nil, randomWordsFulfilledEventsChannel, requestID, subID) diff --git a/integration-tests/contracts/test_contracts.go b/integration-tests/contracts/test_contracts.go new file mode 100644 index 0000000000..3080668da6 --- /dev/null +++ b/integration-tests/contracts/test_contracts.go @@ -0,0 +1,80 @@ +package contracts + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rs/zerolog" + + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + + le "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter" +) + +type LogEmitterContract struct { + address common.Address + client blockchain.EVMClient + instance *le.LogEmitter + l zerolog.Logger +} + +func (e *LogEmitterContract) Address() common.Address { + return e.address +} + +func (e *LogEmitterContract) EmitLogInts(ints []int) (*types.Transaction, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + bigInts := make([]*big.Int, len(ints)) + for i, v := range ints { + bigInts[i] = big.NewInt(int64(v)) + } + tx, err := e.instance.EmitLog1(opts, bigInts) + if err != nil { + return nil, err + } + return tx, e.client.ProcessTransaction(tx) +} + +func (e *LogEmitterContract) EmitLogIntsIndexed(ints []int) (*types.Transaction, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + bigInts := make([]*big.Int, len(ints)) + for i, v := range ints { + bigInts[i] = big.NewInt(int64(v)) + } + tx, err := e.instance.EmitLog2(opts, bigInts) + if err != nil { + return nil, err + } + return tx, e.client.ProcessTransaction(tx) +} + +func (e *LogEmitterContract) EmitLogStrings(strings []string) (*types.Transaction, error) { + 
opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := e.instance.EmitLog3(opts, strings) + if err != nil { + return nil, err + } + return tx, e.client.ProcessTransaction(tx) +} + +func (e *LogEmitterContract) EmitLogInt(payload int) (*types.Transaction, error) { + return e.EmitLogInts([]int{payload}) +} + +func (e *LogEmitterContract) EmitLogIntIndexed(payload int) (*types.Transaction, error) { + return e.EmitLogIntsIndexed([]int{payload}) +} + +func (e *LogEmitterContract) EmitLogString(strings string) (*types.Transaction, error) { + return e.EmitLogStrings([]string{strings}) +} diff --git a/integration-tests/docker/cmd/test_env.go b/integration-tests/docker/cmd/test_env.go index 31b7de5dcd..5fe2001350 100644 --- a/integration-tests/docker/cmd/test_env.go +++ b/integration-tests/docker/cmd/test_env.go @@ -9,10 +9,11 @@ import ( "github.com/rs/zerolog" "github.com/rs/zerolog/log" - "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" - "github.com/smartcontractkit/chainlink/integration-tests/utils" "github.com/spf13/cobra" "github.com/testcontainers/testcontainers-go" + + "github.com/smartcontractkit/chainlink-testing-framework/logging" + "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" ) func main() { @@ -31,7 +32,7 @@ func main() { Use: "cl-cluster", Short: "Basic CL cluster", RunE: func(cmd *cobra.Command, args []string) error { - utils.SetupCoreDockerEnvLogger() + log.Logger = logging.GetLogger(nil, "CORE_DOCKER_ENV_LOG_LEVEL") log.Info().Msg("Starting CL cluster test environment..") _, err := test_env.NewCLTestEnvBuilder(). 
@@ -50,6 +51,7 @@ func main() { return nil }, } + startEnvCmd.AddCommand(startFullEnvCmd) // Set default log level for non-testcontainer code diff --git a/integration-tests/docker/test_env/cl_node.go b/integration-tests/docker/test_env/cl_node.go index bf4d3285dc..3c0a6d3af7 100644 --- a/integration-tests/docker/test_env/cl_node.go +++ b/integration-tests/docker/test_env/cl_node.go @@ -1,15 +1,11 @@ package test_env import ( - "context" - "crypto/ed25519" - "encoding/hex" "fmt" "math/big" "net/url" "os" "strings" - "sync" "testing" "time" @@ -17,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/google/uuid" "github.com/pelletier/go-toml/v2" - "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/rs/zerolog/log" tc "github.com/testcontainers/testcontainers-go" @@ -30,8 +25,6 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/logwatch" "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" - ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2/types" - "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/utils" @@ -49,6 +42,8 @@ type ClNode struct { NodeConfig *chainlink.Config `json:"-"` NodeSecretsConfigTOML string `json:"-"` PostgresDb *test_env.PostgresDb `json:"postgresDb"` + UserEmail string `json:"userEmail"` + UserPassword string `json:"userPassword"` t *testing.T l zerolog.Logger lw *logwatch.LogWatch @@ -86,18 +81,22 @@ func WithLogWatch(lw *logwatch.LogWatch) ClNodeOption { } } -func NewClNode(networks []string, nodeConfig *chainlink.Config, opts ...ClNodeOption) *ClNode { +func NewClNode(networks []string, imageName, imageVersion string, nodeConfig *chainlink.Config, opts ...ClNodeOption) *ClNode { nodeDefaultCName := fmt.Sprintf("%s-%s", "cl-node", uuid.NewString()[0:8]) 
pgDefaultCName := fmt.Sprintf("pg-%s", nodeDefaultCName) pgDb := test_env.NewPostgresDb(networks, test_env.WithPostgresDbContainerName(pgDefaultCName)) n := &ClNode{ EnvComponent: test_env.EnvComponent{ - ContainerName: nodeDefaultCName, - Networks: networks, + ContainerName: nodeDefaultCName, + ContainerImage: imageName, + ContainerVersion: imageVersion, + Networks: networks, }, - NodeConfig: nodeConfig, - PostgresDb: pgDb, - l: log.Logger, + UserEmail: "local@local.com", + UserPassword: "localdevpassword", + NodeConfig: nodeConfig, + PostgresDb: pgDb, + l: log.Logger, } for _, opt := range opts { opt(n) @@ -113,7 +112,7 @@ func (n *ClNode) SetTestLogger(t *testing.T) { // Restart restarts only CL node, DB container is reused func (n *ClNode) Restart(cfg *chainlink.Config) error { - if err := n.Container.Terminate(context.Background()); err != nil { + if err := n.Container.Terminate(utils.TestContext(n.t)); err != nil { return err } n.NodeConfig = cfg @@ -121,12 +120,12 @@ func (n *ClNode) Restart(cfg *chainlink.Config) error { } // UpgradeVersion restarts the cl node with new image and version -func (n *ClNode) UpgradeVersion(cfg *chainlink.Config, newImage, newVersion string) error { +func (n *ClNode) UpgradeVersion(newImage, newVersion string) error { if newVersion == "" { return fmt.Errorf("new version is empty") } if newImage == "" { - newImage = os.Getenv("CHAINLINK_IMAGE") + return fmt.Errorf("new image name is empty") } n.ContainerImage = newImage n.ContainerVersion = newVersion @@ -137,9 +136,9 @@ func (n *ClNode) PrimaryETHAddress() (string, error) { return n.API.PrimaryEthAddress() } -func (n *ClNode) AddBootstrapJob(verifierAddr common.Address, fromBlock uint64, chainId int64, +func (n *ClNode) AddBootstrapJob(verifierAddr common.Address, chainId int64, feedId [32]byte) (*client.Job, error) { - spec := utils.BuildBootstrapSpec(verifierAddr, chainId, fromBlock, feedId) + spec := utils.BuildBootstrapSpec(verifierAddr, chainId, feedId) return 
n.API.MustCreateJob(spec) } @@ -191,13 +190,17 @@ func (n *ClNode) AddMercuryOCRJob(verifierAddr common.Address, fromBlock uint64, } func (n *ClNode) GetContainerName() string { - name, err := n.Container.Name(context.Background()) + name, err := n.Container.Name(utils.TestContext(n.t)) if err != nil { return "" } return strings.Replace(name, "/", "", -1) } +func (n *ClNode) GetAPIClient() *client.ChainlinkClient { + return n.API +} + func (n *ClNode) GetPeerUrl() (string, error) { p2pKeys, err := n.API.MustReadP2PKeys() if err != nil { @@ -276,34 +279,39 @@ func (n *ClNode) StartContainer() error { Logger: l, }) if err != nil { - return errors.Wrap(err, ErrStartCLNodeContainer) + return fmt.Errorf("%s err: %w", ErrStartCLNodeContainer, err) } if n.lw != nil { - if err := n.lw.ConnectContainer(context.Background(), container, "cl-node", true); err != nil { + if err := n.lw.ConnectContainer(utils.TestContext(n.t), container, "cl-node", true); err != nil { return err } } - clEndpoint, err := test_env.GetEndpoint(context.Background(), container, "http") + clEndpoint, err := test_env.GetEndpoint(utils.TestContext(n.t), container, "http") if err != nil { return err } - ip, err := container.ContainerIP(context.Background()) + ip, err := container.ContainerIP(utils.TestContext(n.t)) if err != nil { return err } - n.l.Info().Str("containerName", n.ContainerName). + n.l.Info(). + Str("containerName", n.ContainerName). + Str("containerImage", n.ContainerImage). + Str("containerVersion", n.ContainerVersion). Str("clEndpoint", clEndpoint). Str("clInternalIP", ip). + Str("userEmail", n.UserEmail). + Str("userPassword", n.UserPassword). 
Msg("Started Chainlink Node container") clClient, err := client.NewChainlinkClient(&client.ChainlinkConfig{ URL: clEndpoint, - Email: "local@local.com", - Password: "localdevpassword", + Email: n.UserEmail, + Password: n.UserPassword, InternalIP: ip, }, n.l) if err != nil { - return errors.Wrap(err, ErrConnectNodeClient) + return fmt.Errorf("%s err: %w", ErrConnectNodeClient, err) } clClient.Config.InternalIP = n.ContainerName n.Container = container @@ -360,21 +368,6 @@ func (n *ClNode) getContainerRequest(secrets string) ( adminCredsPath := "/home/admin-credentials.txt" apiCredsPath := "/home/api-credentials.txt" - if n.ContainerImage == "" { - image, ok := os.LookupEnv("CHAINLINK_IMAGE") - if !ok { - return nil, errors.New("CHAINLINK_IMAGE env must be set") - } - n.ContainerImage = image - } - if n.ContainerVersion == "" { - version, ok := os.LookupEnv("CHAINLINK_VERSION") - if !ok { - return nil, errors.New("CHAINLINK_VERSION env must be set") - } - n.ContainerVersion = version - } - return &tc.ContainerRequest{ Name: n.ContainerName, Image: fmt.Sprintf("%s:%s", n.ContainerImage, n.ContainerVersion), @@ -415,83 +408,3 @@ func (n *ClNode) getContainerRequest(secrets string) ( }, }, nil } - -func GetOracleIdentities(chainlinkNodes []*ClNode) ([]int, []confighelper.OracleIdentityExtra) { - S := make([]int, len(chainlinkNodes)) - oracleIdentities := make([]confighelper.OracleIdentityExtra, len(chainlinkNodes)) - sharedSecretEncryptionPublicKeys := make([]ocrtypes.ConfigEncryptionPublicKey, len(chainlinkNodes)) - var wg sync.WaitGroup - for i, cl := range chainlinkNodes { - wg.Add(1) - go func(i int, cl *ClNode) error { - defer wg.Done() - - ocr2Keys, err := cl.API.MustReadOCR2Keys() - if err != nil { - return err - } - var ocr2Config client.OCR2KeyAttributes - for _, key := range ocr2Keys.Data { - if key.Attributes.ChainType == string(chaintype.EVM) { - ocr2Config = key.Attributes - break - } - } - - keys, err := cl.API.MustReadP2PKeys() - if err != nil { - return 
err - } - p2pKeyID := keys.Data[0].Attributes.PeerID - - offchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.OffChainPublicKey, "ocr2off_evm_")) - if err != nil { - return err - } - - offchainPkBytesFixed := [ed25519.PublicKeySize]byte{} - copy(offchainPkBytesFixed[:], offchainPkBytes) - if err != nil { - return err - } - - configPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.ConfigPublicKey, "ocr2cfg_evm_")) - if err != nil { - return err - } - - configPkBytesFixed := [ed25519.PublicKeySize]byte{} - copy(configPkBytesFixed[:], configPkBytes) - if err != nil { - return err - } - - onchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.OnChainPublicKey, "ocr2on_evm_")) - if err != nil { - return err - } - - csaKeys, _, err := cl.API.ReadCSAKeys() - if err != nil { - return err - } - - sharedSecretEncryptionPublicKeys[i] = configPkBytesFixed - oracleIdentities[i] = confighelper.OracleIdentityExtra{ - OracleIdentity: confighelper.OracleIdentity{ - OnchainPublicKey: onchainPkBytes, - OffchainPublicKey: offchainPkBytesFixed, - PeerID: p2pKeyID, - TransmitAccount: ocrtypes.Account(csaKeys.Data[0].ID), - }, - ConfigEncryptionPublicKey: configPkBytesFixed, - } - S[i] = 1 - - return nil - }(i, cl) - } - wg.Wait() - - return S, oracleIdentities -} diff --git a/integration-tests/docker/test_env/cl_node_cluster.go b/integration-tests/docker/test_env/cl_node_cluster.go index 5ae90bb982..08122b5744 100644 --- a/integration-tests/docker/test_env/cl_node_cluster.go +++ b/integration-tests/docker/test_env/cl_node_cluster.go @@ -1,8 +1,9 @@ package test_env import ( + "fmt" + "github.com/ethereum/go-ethereum/common" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" "github.com/smartcontractkit/chainlink/integration-tests/client" @@ -61,7 +62,7 @@ func (c *ClCluster) NodeCSAKeys() ([]string, error) { for _, n := range c.Nodes { csaKeys, err := n.GetNodeCSAKeys() if err != nil { - return nil, errors.Wrap(err, ErrGetNodeCSAKeys) + 
return nil, fmt.Errorf("%s, err: %w", ErrGetNodeCSAKeys, err) } keys = append(keys, csaKeys.Data[0].ID) } diff --git a/integration-tests/docker/test_env/test_env.go b/integration-tests/docker/test_env/test_env.go index aee074d514..9987bab2fe 100644 --- a/integration-tests/docker/test_env/test_env.go +++ b/integration-tests/docker/test_env/test_env.go @@ -1,18 +1,17 @@ package test_env import ( - "context" "encoding/json" "fmt" "io" "math/big" "os" "path/filepath" + "runtime/debug" "testing" "time" "github.com/ethereum/go-ethereum/accounts/keystore" - "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/rs/zerolog/log" tc "github.com/testcontainers/testcontainers-go" @@ -23,11 +22,11 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/logwatch" + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/utils" - "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" ) var ( @@ -40,26 +39,26 @@ type CLClusterTestEnv struct { LogWatch *logwatch.LogWatch /* components */ - ClCluster *ClCluster - Geth *test_env.Geth // for tests using --dev networks - PrivateChain []test_env.PrivateChain // for tests using non-dev networks - MockAdapter *test_env.Killgrave - EVMClient blockchain.EVMClient - ContractDeployer contracts.ContractDeployer - ContractLoader contracts.ContractLoader - l zerolog.Logger - t *testing.T + ClCluster *ClCluster + PrivateChain []test_env.PrivateChain // for tests using non-dev networks -- unify it with new approach + MockAdapter *test_env.Killgrave + EVMClient blockchain.EVMClient + ContractDeployer contracts.ContractDeployer + ContractLoader contracts.ContractLoader + RpcProvider 
test_env.RpcProvider + PrivateEthereumConfig *test_env.EthereumNetwork // new approach to private chains, supporting eth1 and eth2 + l zerolog.Logger + t *testing.T } func NewTestEnv() (*CLClusterTestEnv, error) { - utils.SetupCoreDockerEnvLogger() + log.Logger = logging.GetLogger(nil, "CORE_DOCKER_ENV_LOG_LEVEL") network, err := docker.CreateNetwork(log.Logger) if err != nil { return nil, err } n := []string{network.Name} return &CLClusterTestEnv{ - Geth: test_env.NewGeth(n), MockAdapter: test_env.NewKillgrave(n, ""), Network: network, l: log.Logger, @@ -67,11 +66,10 @@ func NewTestEnv() (*CLClusterTestEnv, error) { } // WithTestEnvConfig sets the test environment cfg. -// Sets up the Geth and MockAdapter containers with the provided cfg. +// Sets up private ethereum chain and MockAdapter containers with the provided cfg. func (te *CLClusterTestEnv) WithTestEnvConfig(cfg *TestEnvConfig) *CLClusterTestEnv { te.Cfg = cfg n := []string{te.Network.Name} - te.Geth = test_env.NewGeth(n, test_env.WithContainerName(te.Cfg.Geth.ContainerName)) te.MockAdapter = test_env.NewKillgrave(n, te.Cfg.MockAdapter.ImpostersPath, test_env.WithContainerName(te.Cfg.MockAdapter.ContainerName)) return te } @@ -79,7 +77,6 @@ func (te *CLClusterTestEnv) WithTestEnvConfig(cfg *TestEnvConfig) *CLClusterTest func (te *CLClusterTestEnv) WithTestLogger(t *testing.T) *CLClusterTestEnv { te.t = t te.l = logging.GetTestLogger(t) - te.Geth.WithTestLogger(t) te.MockAdapter.WithTestLogger(t) return te } @@ -92,6 +89,11 @@ func (te *CLClusterTestEnv) WithPrivateChain(evmNetworks []blockchain.EVMNetwork var chains []test_env.PrivateChain for _, evmNetwork := range evmNetworks { n := evmNetwork + pgc := test_env.NewPrivateGethChain(&n, []string{te.Network.Name}) + if te.t != nil { + pgc.GetPrimaryNode().WithTestLogger(te.t) + } + chains = append(chains, pgc) var privateChain test_env.PrivateChain switch n.SimulationType { case "besu": @@ -99,9 +101,6 @@ func (te *CLClusterTestEnv) 
WithPrivateChain(evmNetworks []blockchain.EVMNetwork default: privateChain = test_env.NewPrivateGethChain(&n, []string{te.Network.Name}) } - if te.t != nil { - privateChain.GetPrimaryNode().WithTestLogger(te.t) - } chains = append(chains, privateChain) } te.PrivateChain = chains @@ -112,7 +111,7 @@ func (te *CLClusterTestEnv) StartPrivateChain() error { for _, chain := range te.PrivateChain { primaryNode := chain.GetPrimaryNode() if primaryNode == nil { - return errors.WithStack(fmt.Errorf("primary node is nil in PrivateChain interface")) + return fmt.Errorf("primary node is nil in PrivateChain interface, stack: %s", string(debug.Stack())) } err := primaryNode.Start() if err != nil { @@ -126,8 +125,26 @@ func (te *CLClusterTestEnv) StartPrivateChain() error { return nil } -func (te *CLClusterTestEnv) StartGeth() (blockchain.EVMNetwork, test_env.InternalDockerUrls, error) { - return te.Geth.StartContainer() +func (te *CLClusterTestEnv) StartEthereumNetwork(cfg *test_env.EthereumNetwork) (blockchain.EVMNetwork, test_env.RpcProvider, error) { + // if environment is being restored from a previous state, use the existing config + // this might fail terribly if temporary folders with chain data on the host machine were removed + if te.Cfg != nil && te.Cfg.EthereumNetwork != nil { + builder := test_env.NewEthereumNetworkBuilder() + c, err := builder.WithExistingConfig(*te.Cfg.EthereumNetwork). + WithTest(te.t). 
+ Build() + if err != nil { + return blockchain.EVMNetwork{}, test_env.RpcProvider{}, err + } + cfg = &c + } + n, rpc, err := cfg.Start() + + if err != nil { + return blockchain.EVMNetwork{}, test_env.RpcProvider{}, err + } + + return n, rpc, nil } func (te *CLClusterTestEnv) StartMockAdapter() error { @@ -140,7 +157,7 @@ func (te *CLClusterTestEnv) StartClCluster(nodeConfig *chainlink.Config, count i } else { te.ClCluster = &ClCluster{} for i := 0; i < count; i++ { - ocrNode := NewClNode([]string{te.Network.Name}, nodeConfig, + ocrNode := NewClNode([]string{te.Network.Name}, os.Getenv("CHAINLINK_IMAGE"), os.Getenv("CHAINLINK_VERSION"), nodeConfig, WithSecrets(secretsConfig), ) te.ClCluster.Nodes = append(te.ClCluster.Nodes, ocrNode) @@ -162,8 +179,9 @@ func (te *CLClusterTestEnv) StartClCluster(nodeConfig *chainlink.Config, count i func (te *CLClusterTestEnv) FundChainlinkNodes(amount *big.Float) error { for _, cl := range te.ClCluster.Nodes { if err := cl.Fund(te.EVMClient, amount); err != nil { - return errors.Wrap(err, ErrFundCLNode) + return fmt.Errorf("%s, err: %w", ErrFundCLNode, err) } + time.Sleep(5 * time.Second) } return te.EVMClient.WaitForEvents() } @@ -178,12 +196,14 @@ func (te *CLClusterTestEnv) Terminate() error { func (te *CLClusterTestEnv) Cleanup() error { te.l.Info().Msg("Cleaning up test environment") if te.t == nil { - return errors.New("cannot cleanup test environment without a testing.T") + return fmt.Errorf("cannot cleanup test environment without a testing.T") } if te.ClCluster == nil || len(te.ClCluster.Nodes) == 0 { - return errors.New("chainlink nodes are nil, unable cleanup chainlink nodes") + return fmt.Errorf("chainlink nodes are nil, unable cleanup chainlink nodes") } + te.logWhetherAllContainersAreRunning() + // TODO: This is an imperfect and temporary solution, see TT-590 for a more sustainable solution // Collect logs if the test fails, or if we just want them if te.t.Failed() || os.Getenv("TEST_LOG_COLLECT") == "true" { @@ 
-193,7 +213,7 @@ func (te *CLClusterTestEnv) Cleanup() error { } if te.EVMClient == nil { - return errors.New("evm client is nil, unable to return funds from chainlink nodes during cleanup") + return fmt.Errorf("evm client is nil, unable to return funds from chainlink nodes during cleanup") } else if te.EVMClient.NetworkSimulated() { te.l.Info(). Str("Network Name", te.EVMClient.GetNetworkName()). @@ -213,6 +233,21 @@ func (te *CLClusterTestEnv) Cleanup() error { return nil } +func (te *CLClusterTestEnv) logWhetherAllContainersAreRunning() { + for _, node := range te.ClCluster.Nodes { + isCLRunning := node.Container.IsRunning() + isDBRunning := node.PostgresDb.Container.IsRunning() + + if !isCLRunning { + te.l.Warn().Str("Node", node.ContainerName).Msg("Chainlink node was not running, when test ended") + } + + if !isDBRunning { + te.l.Warn().Str("Node", node.ContainerName).Msg("Postgres DB is not running, when test ended") + } + } +} + // collectTestLogs collects the logs from all the Chainlink nodes in the test environment and writes them to local files func (te *CLClusterTestEnv) collectTestLogs() error { te.l.Info().Msg("Collecting test logs") @@ -231,7 +266,7 @@ func (te *CLClusterTestEnv) collectTestLogs() error { return err } defer logFile.Close() - logReader, err := node.Container.Logs(context.Background()) + logReader, err := node.Container.Logs(utils.TestContext(te.t)) if err != nil { return err } diff --git a/integration-tests/docker/test_env/test_env_builder.go b/integration-tests/docker/test_env/test_env_builder.go index 066af340b6..77c5669015 100644 --- a/integration-tests/docker/test_env/test_env_builder.go +++ b/integration-tests/docker/test_env/test_env_builder.go @@ -4,9 +4,9 @@ import ( "fmt" "math/big" "os" + "runtime/debug" "testing" - "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -15,11 +15,11 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/logging" 
"github.com/smartcontractkit/chainlink-testing-framework/logwatch" "github.com/smartcontractkit/chainlink-testing-framework/networks" - "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" + evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" ) type CleanUpType string @@ -31,23 +31,26 @@ const ( ) type CLTestEnvBuilder struct { - hasLogWatch bool - hasGeth bool - hasKillgrave bool - hasForwarders bool - clNodeConfig *chainlink.Config - secretsConfig string - nonDevGethNetworks []blockchain.EVMNetwork - clNodesCount int - customNodeCsaKeys []string - defaultNodeCsaKeys []string - l zerolog.Logger - t *testing.T - te *CLClusterTestEnv - isNonEVM bool - - cleanUpType CleanUpType - cleanUpCustomFn func() + hasLogWatch bool + // hasGeth bool + hasKillgrave bool + hasForwarders bool + clNodeConfig *chainlink.Config + secretsConfig string + nonDevGethNetworks []blockchain.EVMNetwork + clNodesCount int + customNodeCsaKeys []string + defaultNodeCsaKeys []string + l zerolog.Logger + t *testing.T + te *CLClusterTestEnv + isNonEVM bool + cleanUpType CleanUpType + cleanUpCustomFn func() + chainOptionsFn []ChainOption + evmClientNetworkOption []EVMClientNetworkOption + ethereumNetwork *test_env.EthereumNetwork + /* funding */ ETHFunds *big.Float } @@ -116,8 +119,27 @@ func (b *CLTestEnvBuilder) WithFunding(eth *big.Float) *CLTestEnvBuilder { return b } +// deprecated +// left only for backward compatibility func (b *CLTestEnvBuilder) WithGeth() *CLTestEnvBuilder { - b.hasGeth = true + ethBuilder := test_env.NewEthereumNetworkBuilder() + cfg, err := ethBuilder. + WithConsensusType(test_env.ConsensusType_PoW). + WithExecutionLayer(test_env.ExecutionLayer_Geth). + WithTest(b.t). 
+ Build() + + if err != nil { + panic(err) + } + + b.ethereumNetwork = &cfg + + return b +} + +func (b *CLTestEnvBuilder) WithPrivateEthereumNetwork(en test_env.EthereumNetwork) *CLTestEnvBuilder { + b.ethereumNetwork = &en return b } @@ -163,6 +185,24 @@ func (b *CLTestEnvBuilder) WithCustomCleanup(customFn func()) *CLTestEnvBuilder return b } +type ChainOption = func(*evmcfg.Chain) *evmcfg.Chain + +func (b *CLTestEnvBuilder) WithChainOptions(opts ...ChainOption) *CLTestEnvBuilder { + b.chainOptionsFn = make([]ChainOption, 0) + b.chainOptionsFn = append(b.chainOptionsFn, opts...) + + return b +} + +type EVMClientNetworkOption = func(*blockchain.EVMNetwork) *blockchain.EVMNetwork + +func (b *CLTestEnvBuilder) EVMClientNetworkOptions(opts ...EVMClientNetworkOption) *CLTestEnvBuilder { + b.evmClientNetworkOption = make([]EVMClientNetworkOption, 0) + b.evmClientNetworkOption = append(b.evmClientNetworkOption, opts...) + + return b +} + func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { if b.te == nil { var err error @@ -171,13 +211,6 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { return nil, err } } - b.l.Info(). - Bool("hasGeth", b.hasGeth). - Bool("hasKillgrave", b.hasKillgrave). - Int("clNodesCount", b.clNodesCount). - Strs("customNodeCsaKeys", b.customNodeCsaKeys). - Strs("defaultNodeCsaKeys", b.defaultNodeCsaKeys). 
- Msg("Building CL cluster test environment..") var err error if b.t != nil { @@ -210,7 +243,7 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { case CleanUpTypeNone: b.l.Warn().Msg("test environment won't be cleaned up") case "": - return b.te, errors.WithMessage(errors.New("explicit cleanup type must be set when building test environment"), "test environment builder failed") + return b.te, fmt.Errorf("test environment builder failed: %w", fmt.Errorf("explicit cleanup type must be set when building test environment")) } if b.nonDevGethNetworks != nil { @@ -223,14 +256,14 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { for i, n := range b.te.PrivateChain { primaryNode := n.GetPrimaryNode() if primaryNode == nil { - return b.te, errors.WithStack(fmt.Errorf("primary node is nil in PrivateChain interface")) + return b.te, fmt.Errorf("primary node is nil in PrivateChain interface, stack: %s", string(debug.Stack())) } nonDevNetworks = append(nonDevNetworks, *n.GetNetworkConfig()) nonDevNetworks[i].URLs = []string{primaryNode.GetInternalWsUrl()} nonDevNetworks[i].HTTPURLs = []string{primaryNode.GetInternalHttpUrl()} } if nonDevNetworks == nil { - return nil, errors.New("cannot create nodes with custom config without nonDevNetworks") + return nil, fmt.Errorf("cannot create nodes with custom config without nonDevNetworks") } err = b.te.StartClCluster(b.clNodeConfig, b.clNodesCount, b.secretsConfig) @@ -239,17 +272,29 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { } return b.te, nil } - networkConfig := networks.SelectedNetwork - var internalDockerUrls test_env.InternalDockerUrls - if b.hasGeth && networkConfig.Simulated { - networkConfig, internalDockerUrls, err = b.te.StartGeth() + + networkConfig := networks.MustGetSelectedNetworksFromEnv()[0] + var rpcProvider test_env.RpcProvider + if b.ethereumNetwork != nil && networkConfig.Simulated { + // TODO here we should save the ethereum network config to te.Cfg, but it 
doesn't exist at this point + // in general it seems we have no methods for saving config to file and we only load it from file + but I don't know how that config file is to be created or whether anyone has ever done that + var enCfg test_env.EthereumNetwork + b.ethereumNetwork.DockerNetworkNames = []string{b.te.Network.Name} + networkConfig, rpcProvider, err = b.te.StartEthereumNetwork(b.ethereumNetwork) if err != nil { return nil, err } - + b.te.RpcProvider = rpcProvider + b.te.PrivateEthereumConfig = &enCfg } if !b.isNonEVM { + if b.evmClientNetworkOption != nil && len(b.evmClientNetworkOption) > 0 { + for _, fn := range b.evmClientNetworkOption { + fn(&networkConfig) + } + } bc, err := blockchain.NewEVMClientFromNetwork(networkConfig, b.l) if err != nil { return nil, err @@ -287,14 +332,22 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { var httpUrls []string var wsUrls []string if networkConfig.Simulated { - httpUrls = []string{internalDockerUrls.HttpUrl} - wsUrls = []string{internalDockerUrls.WsUrl} + httpUrls = rpcProvider.PrivateHttpUrls() + wsUrls = rpcProvider.PrivateWsUrsl() } else { httpUrls = networkConfig.HTTPURLs wsUrls = networkConfig.URLs } node.SetChainConfig(cfg, wsUrls, httpUrls, networkConfig, b.hasForwarders) + + if b.chainOptionsFn != nil && len(b.chainOptionsFn) > 0 { + for _, fn := range b.chainOptionsFn { + for _, evmCfg := range cfg.EVM { + fn(&evmCfg.Chain) + } + } + } } err := b.te.StartClCluster(cfg, b.clNodesCount, b.secretsConfig)
+ if b.te.PrivateEthereumConfig != nil { + enDesc = b.te.PrivateEthereumConfig.Describe() + } else { + enDesc = "none" + } + + b.l.Info(). + Str("privateEthereumNetwork", enDesc). + Bool("hasKillgrave", b.hasKillgrave). + Int("clNodesCount", b.clNodesCount). + Strs("customNodeCsaKeys", b.customNodeCsaKeys). + Strs("defaultNodeCsaKeys", b.defaultNodeCsaKeys). + Msg("Building CL cluster test environment..") + return b.te, nil } diff --git a/integration-tests/docker/test_env/test_env_config.go b/integration-tests/docker/test_env/test_env_config.go index 1a0c8d5c86..0902deb0c2 100644 --- a/integration-tests/docker/test_env/test_env_config.go +++ b/integration-tests/docker/test_env/test_env_config.go @@ -3,14 +3,16 @@ package test_env import ( "encoding/json" + cte "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" env "github.com/smartcontractkit/chainlink/integration-tests/types/envcommon" ) type TestEnvConfig struct { - Networks []string `json:"networks"` - Geth GethConfig `json:"geth"` - MockAdapter MockAdapterConfig `json:"mock_adapter"` - ClCluster *ClCluster `json:"clCluster"` + Networks []string `json:"networks"` + Geth GethConfig `json:"geth"` + MockAdapter MockAdapterConfig `json:"mock_adapter"` + ClCluster *ClCluster `json:"clCluster"` + EthereumNetwork *cte.EthereumNetwork `json:"private_ethereum_config"` } type MockAdapterConfig struct { diff --git a/integration-tests/go.mod b/integration-tests/go.mod index 8e9eddd3d6..4ea02e0ac1 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -1,4 +1,4 @@ -module github.com/smartcontractkit/ccip/integration-tests +module github.com/smartcontractkit/chainlink/integration-tests go 1.21 @@ -6,6 +6,7 @@ go 1.21 replace github.com/smartcontractkit/chainlink/v2 => ../ require ( + cosmossdk.io/errors v1.0.0 github.com/AlekSi/pointer v1.1.0 github.com/K-Phoen/grabana v0.21.17 github.com/cli/go-gh/v2 v2.0.0 @@ -19,16 +20,16 @@ require ( github.com/pelletier/go-toml/v2 v2.1.0 
github.com/pkg/errors v0.9.1 github.com/rs/zerolog v1.30.0 + github.com/scylladb/go-reflectx v1.0.1 github.com/segmentio/ksuid v1.0.4 github.com/slack-go/slack v0.12.2 github.com/smartcontractkit/chain-selectors v1.0.3 - github.com/smartcontractkit/chainlink-env v0.38.3 - github.com/smartcontractkit/chainlink-testing-framework v1.17.12-0.20231021045507-b80c9afbc467 - github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20230828183543-6d0939746966 + github.com/smartcontractkit/chainlink-testing-framework v1.18.6 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 - github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545 + github.com/smartcontractkit/libocr v0.0.0-20231107151413-13e0202ae8d7 github.com/smartcontractkit/ocr2keepers v0.7.28 github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687 + github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 github.com/smartcontractkit/wasp v0.3.2-0.20231007012020-8f5eb29299d7 github.com/spf13/cobra v1.7.0 @@ -56,7 +57,6 @@ require ( cosmossdk.io/api v0.3.1 // indirect cosmossdk.io/core v0.5.1 // indirect cosmossdk.io/depinject v1.0.0-alpha.3 // indirect - cosmossdk.io/errors v1.0.0 // indirect cosmossdk.io/math v1.0.1 // indirect dario.cat/mergo v1.0.0 // indirect filippo.io/edwards25519 v1.0.0 // indirect @@ -151,7 +151,7 @@ require ( github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fvbommel/sortorder v1.0.2 // indirect - github.com/fxamacker/cbor/v2 v2.4.0 // indirect + github.com/fxamacker/cbor/v2 v2.5.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/gagliardetto/binary v0.7.1 // indirect github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27 // indirect @@ -185,15 +185,15 @@ require ( github.com/go-playground/universal-translator v0.18.1 // 
indirect github.com/go-playground/validator/v10 v10.14.0 // indirect github.com/go-stack/stack v1.8.1 // indirect - github.com/go-webauthn/revoke v0.1.9 // indirect - github.com/go-webauthn/webauthn v0.8.2 // indirect + github.com/go-webauthn/webauthn v0.8.6 // indirect + github.com/go-webauthn/x v0.1.4 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.3 // indirect github.com/gogo/status v1.1.1 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/golang/glog v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect @@ -202,7 +202,7 @@ require ( github.com/google/gnostic v0.6.9 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/go-tpm v0.3.3 // indirect + github.com/google/go-tpm v0.9.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect @@ -332,7 +332,7 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/miekg/dns v1.1.55 // indirect github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect @@ -386,11 +386,11 @@ require ( github.com/prometheus/alertmanager v0.25.1 // indirect github.com/prometheus/client_golang v1.17.0 // indirect github.com/prometheus/client_model v0.5.0 // 
indirect - github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/exporter-toolkit v0.10.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/prometheus/prometheus v0.46.0 // indirect + github.com/prometheus/prometheus v0.47.2 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect @@ -398,7 +398,6 @@ require ( github.com/russross/blackfriday v1.6.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sasha-s/go-deadlock v0.3.1 // indirect - github.com/scylladb/go-reflectx v1.0.1 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver v2.4.0+incompatible // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect @@ -410,7 +409,6 @@ require ( github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231020230319-2ede955d1dc9 // indirect github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05 // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb // indirect - github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb // indirect github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 // indirect github.com/smartcontractkit/wsrpc v0.7.2 // indirect github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect @@ -428,7 +426,7 @@ require ( github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125 // indirect github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a // indirect github.com/tidwall/btree v1.6.0 // indirect - github.com/tidwall/gjson v1.16.0 // indirect + github.com/tidwall/gjson v1.17.0 // indirect github.com/tidwall/match v1.1.1 // indirect 
github.com/tidwall/pretty v1.2.0 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect @@ -449,7 +447,6 @@ require ( github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.1.0 // indirect - github.com/yuin/goldmark v1.4.13 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zondax/hid v0.9.1 // indirect github.com/zondax/ledger-go v0.14.1 // indirect @@ -474,17 +471,16 @@ require ( go.uber.org/ratelimit v0.2.0 // indirect golang.org/x/arch v0.4.0 // indirect golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect - golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/mod v0.13.0 // indirect golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/oauth2 v0.12.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/term v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.14.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect - gonum.org/v1/gonum v0.13.0 // indirect + gonum.org/v1/gonum v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index bb982ef198..bf47fd56a5 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -987,8 +987,8 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo= github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/fxamacker/cbor/v2 v2.4.0 
h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= -github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= +github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/gagliardetto/binary v0.6.1/go.mod h1:aOfYkc20U0deHaHn/LVZXiqlkDbFAX0FpTlDhsXa0S0= @@ -1128,10 +1128,10 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-webauthn/revoke v0.1.9 h1:gSJ1ckA9VaKA2GN4Ukp+kiGTk1/EXtaDb1YE8RknbS0= -github.com/go-webauthn/revoke v0.1.9/go.mod h1:j6WKPnv0HovtEs++paan9g3ar46gm1NarktkXBaPR+w= -github.com/go-webauthn/webauthn v0.8.2 h1:8KLIbpldjz9KVGHfqEgJNbkhd7bbRXhNw4QWFJE15oA= -github.com/go-webauthn/webauthn v0.8.2/go.mod h1:d+ezx/jMCNDiqSMzOchuynKb9CVU1NM9BumOnokfcVQ= +github.com/go-webauthn/webauthn v0.8.6 h1:bKMtL1qzd2WTFkf1mFTVbreYrwn7dsYmEPjTq6QN90E= +github.com/go-webauthn/webauthn v0.8.6/go.mod h1:emwVLMCI5yx9evTTvr0r+aOZCdWJqMfbRhF0MufyUog= +github.com/go-webauthn/x v0.1.4 h1:sGmIFhcY70l6k7JIDfnjVBiAAFEssga5lXIUXe0GtAs= +github.com/go-webauthn/x v0.1.4/go.mod h1:75Ug0oK6KYpANh5hDOanfDI+dvPWHk788naJVG/37H8= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -1184,9 
+1184,12 @@ github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRs github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= +github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= @@ -1261,12 +1264,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/go-tpm v0.1.2-0.20190725015402-ae6dd98980d4/go.mod h1:H9HbmUG2YgV/PHITkO7p6wxEEj/v5nlsVWIwumwH2NI= -github.com/google/go-tpm v0.3.0/go.mod h1:iVLWvrPp/bHeEkxTFi9WG6K9w0iy2yIszHwZGHPbzAw= -github.com/google/go-tpm v0.3.3 h1:P/ZFNBZYXRxc+z7i5uyd8VP7MaDteuLZInzrH2idRGo= -github.com/google/go-tpm v0.3.3/go.mod h1:9Hyn3rgnzWF9XBWVk6ml6A6hNkbWjNFlDQL51BeghL4= -github.com/google/go-tpm-tools 
v0.0.0-20190906225433-1614c142f845/go.mod h1:AVfHadzbdzHo54inR2x1v640jdi1YSi3NauM2DUsxk0= -github.com/google/go-tpm-tools v0.2.0/go.mod h1:npUd03rQ60lxN7tzeBJreG38RvWwme2N1reF/eeiBk4= +github.com/google/go-tpm v0.9.0 h1:sQF6YqWMi+SCXpsmS3fd21oPy/vSddwZry4JnmltHVk= +github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -1346,7 +1345,6 @@ github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyC github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -1985,8 +1983,9 @@ github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJK github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= 
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= @@ -2263,8 +2262,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0= @@ -2376,22 +2375,20 @@ github.com/smartcontractkit/chain-selectors v1.0.3 h1:wVED4QEvATtSRi95Ow77C9Cu6G github.com/smartcontractkit/chain-selectors v1.0.3/go.mod h1:WBhLlODF5b95vvx2tdKK55vGACg1+qZpuBhOGu1UXVo= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20230913032705-f924d753cc47 h1:vdieOW3CZGdD2R5zvCSMS+0vksyExPN3/Fa1uVfld/A= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20230913032705-f924d753cc47/go.mod 
h1:xMwqRdj5vqYhCJXgKVqvyAwdcqM6ZAEhnwEQ4Khsop8= -github.com/smartcontractkit/chainlink-env v0.38.3 h1:ZtOnwkG622R0VCTxL5V09AnT/QXhlFwkGTjd0Lsfpfg= -github.com/smartcontractkit/chainlink-env v0.38.3/go.mod h1:7z4sw/hN8TxioQCLwFqQdhK3vaOV0a22Qe99z4bRUcg= github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231020230319-2ede955d1dc9 h1:fFD5SgSJtnXvkGLK3CExNKpUIz4sGrNNkKv3Ljw63Hk= github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231020230319-2ede955d1dc9/go.mod h1:M9U1JV7IQi8Sfj4JR1qSi1tIh6omgW78W/8SHN/8BUQ= github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05 h1:DaPSVnxe7oz1QJ+AVIhQWs1W3ubQvwvGo9NbHpMs1OQ= github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05/go.mod h1:o0Pn1pbaUluboaK6/yhf8xf7TiFCkyFl6WUOdwqamuU= github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb h1:HiluOfEVGOQTM6BTDImOqYdMZZ7qq7fkZ3TJdmItNr8= github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb/go.mod h1:/30flFG4L/iCYAFeA3DUzR0xuHSxAMONiWTzyzvsNwo= -github.com/smartcontractkit/chainlink-testing-framework v1.17.12-0.20231021045507-b80c9afbc467 h1:Yir34O6l4QPz/jl1d5mzbxNc6vSWJlb5q3DoqKW6KkE= -github.com/smartcontractkit/chainlink-testing-framework v1.17.12-0.20231021045507-b80c9afbc467/go.mod h1:RWlmjwnjIGbQAnRfKwe02Ife82nNI3rZmdI0zgkfbyk= +github.com/smartcontractkit/chainlink-testing-framework v1.18.6 h1:UL3DxsPflSRALP62rsg5v3NdOsa8RHGhHMUImoWDD6k= +github.com/smartcontractkit/chainlink-testing-framework v1.18.6/go.mod h1:zScXRqmvbyTFUooyLYrOp4+V/sFPUbFJNRc72YmnuIk= github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306 h1:ko88+ZznniNJZbZPWAvHQU8SwKAdHngdDZ+pvVgB5ss= github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU= 
github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0= -github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545 h1:qOsw2ETQD/Sb/W2xuYn2KPWjvvsWA0C+l19rWFq8iNg= -github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0= +github.com/smartcontractkit/libocr v0.0.0-20231107151413-13e0202ae8d7 h1:21V61XOYSxpFmFqlhr5IaEh1uQ1F6CewJ30D/U/P34c= +github.com/smartcontractkit/libocr v0.0.0-20231107151413-13e0202ae8d7/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0= github.com/smartcontractkit/ocr2keepers v0.7.28 h1:dufAiYl4+uly9aH0+6GkS2jYzHGujq7tg0LYQE+x6JU= github.com/smartcontractkit/ocr2keepers v0.7.28/go.mod h1:1QGzJURnoWpysguPowOe2bshV0hNp1YX10HHlhDEsas= github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687 h1:NwC3SOc25noBTe1KUQjt45fyTIuInhoE2UfgcHAdihM= @@ -2427,7 +2424,6 @@ github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= @@ -2438,7 +2434,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper 
v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= @@ -2488,8 +2483,8 @@ github.com/thlib/go-timezone-local v0.0.0-20210907160436-ef149e42d28e/go.mod h1: github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.16.0 h1:SyXa+dsSPpUlcwEDuKuEBJEz5vzTvOea+9rjyYodQFg= -github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= +github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -2586,7 +2581,6 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= @@ -2773,7 +2767,6 @@ 
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -2811,7 +2804,6 @@ golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2910,8 +2902,8 @@ golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod 
h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -3017,7 +3009,6 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210629170331-7dc0b73dc9fb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -3202,8 +3193,8 @@ gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJ gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= -gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= -gonum.org/v1/gonum 
v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= +gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= +gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= @@ -3417,7 +3408,6 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go. google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= diff --git a/integration-tests/load/functions/config.go b/integration-tests/load/functions/config.go index 5c622401ab..ad7e7446af 100644 --- a/integration-tests/load/functions/config.go +++ b/integration-tests/load/functions/config.go @@ -1,12 +1,14 @@ package loadfunctions import ( + "fmt" + "math/big" + "os" + "github.com/pelletier/go-toml/v2" - "github.com/pkg/errors" "github.com/rs/zerolog/log" + "github.com/smartcontractkit/chainlink/v2/core/store/models" - "math/big" - "os" ) const ( @@ -103,22 +105,21 @@ func ReadConfig() (*PerformanceConfig, error) { var cfg *PerformanceConfig d, err := os.ReadFile(DefaultConfigFilename) if err != nil { - return nil, errors.Wrap(err, ErrReadPerfConfig) + return nil, fmt.Errorf("%s, err: %w", ErrReadPerfConfig, err) } err 
= toml.Unmarshal(d, &cfg) if err != nil { - return nil, errors.Wrap(err, ErrUnmarshalPerfConfig) + return nil, fmt.Errorf("%s, err: %w", ErrUnmarshalPerfConfig, err) } log.Debug().Interface("PerformanceConfig", cfg).Msg("Parsed performance config") mpk := os.Getenv("MUMBAI_KEYS") murls := os.Getenv("MUMBAI_URLS") snet := os.Getenv("SELECTED_NETWORKS") if mpk == "" || murls == "" || snet == "" { - return nil, errors.New( + return nil, fmt.Errorf( "ensure variables are set:\nMUMBAI_KEYS variable, private keys, comma separated\nSELECTED_NETWORKS=MUMBAI\nMUMBAI_URLS variable, websocket urls, comma separated", ) - } else { - cfg.MumbaiPrivateKey = mpk } + cfg.MumbaiPrivateKey = mpk return cfg, nil } diff --git a/integration-tests/load/functions/functions_test.go b/integration-tests/load/functions/functions_test.go index 7822035208..dc52846d3c 100644 --- a/integration-tests/load/functions/functions_test.go +++ b/integration-tests/load/functions/functions_test.go @@ -1,10 +1,11 @@ package loadfunctions import ( - "github.com/smartcontractkit/wasp" - "github.com/stretchr/testify/require" "testing" "time" + + "github.com/smartcontractkit/wasp" + "github.com/stretchr/testify/require" ) func TestFunctionsLoad(t *testing.T) { diff --git a/integration-tests/load/functions/gateway.go b/integration-tests/load/functions/gateway.go index aefe4fbedc..ac5f895ac1 100644 --- a/integration-tests/load/functions/gateway.go +++ b/integration-tests/load/functions/gateway.go @@ -8,16 +8,17 @@ import ( "encoding/hex" "encoding/json" "fmt" + "time" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/ecies" "github.com/go-resty/resty/v2" - "github.com/pkg/errors" "github.com/rs/zerolog/log" + "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy" + "github.com/smartcontractkit/chainlink/v2/core/services/gateway/api" "github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/functions" "github.com/smartcontractkit/chainlink/v2/core/services/s4" - 
"github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy" - "time" ) type RPCResponse struct { @@ -115,7 +116,7 @@ func UploadS4Secrets(rc *resty.Client, s4Cfg *S4SecretsCfg) (uint8, uint64, erro log.Debug().Interface("Result", result).Msg("S4 secrets_set response result") for _, nodeResponse := range result.Result.Body.Payload.NodeResponses { if !nodeResponse.Body.Payload.Success { - return 0, 0, fmt.Errorf("node response was not succesful") + return 0, 0, fmt.Errorf("node response was not successful") } } return uint8(envelope.SlotID), envelope.Version, nil @@ -182,12 +183,12 @@ func EncryptS4Secrets(deployerPk *ecdsa.PrivateKey, tdh2Pk *tdh2easy.PublicKey, donKey = bytes.Join([][]byte{b, donKey}, nil) donPubKey, err := crypto.UnmarshalPubkey(donKey) if err != nil { - return "", errors.Wrap(err, "failed to unmarshal DON key") + return "", fmt.Errorf("failed to unmarshal DON key: %w", err) } eciesDONPubKey := ecies.ImportECDSAPublic(donPubKey) signature, err := deployerPk.Sign(rand.Reader, []byte(msgJSON), nil) if err != nil { - return "", errors.Wrap(err, "failed to sign the msg with Ethereum key") + return "", fmt.Errorf("failed to sign the msg with Ethereum key: %w", err) } signedSecrets, err := json.Marshal(struct { Signature []byte `json:"signature"` @@ -197,29 +198,29 @@ func EncryptS4Secrets(deployerPk *ecdsa.PrivateKey, tdh2Pk *tdh2easy.PublicKey, Message: msgJSON, }) if err != nil { - return "", errors.Wrap(err, "failed to marshal signed secrets") + return "", fmt.Errorf("failed to marshal signed secrets: %w", err) } ct, err := ecies.Encrypt(rand.Reader, eciesDONPubKey, signedSecrets, nil, nil) if err != nil { - return "", errors.Wrap(err, "failed to encrypt with DON key") + return "", fmt.Errorf("failed to encrypt with DON key: %w", err) } ct0xFormat, err := json.Marshal(map[string]interface{}{"0x0": base64.StdEncoding.EncodeToString(ct)}) if err != nil { - return "", errors.Wrap(err, "failed to marshal DON key encrypted format") + return "", 
fmt.Errorf("failed to marshal DON key encrypted format: %w", err) } ctTDH2Format, err := tdh2easy.Encrypt(tdh2Pk, ct0xFormat) if err != nil { - return "", errors.Wrap(err, "failed to encrypt with TDH2 public key") + return "", fmt.Errorf("failed to encrypt with TDH2 public key: %w", err) } tdh2Message, err := ctTDH2Format.Marshal() if err != nil { - return "", errors.Wrap(err, "failed to marshal TDH2 encrypted msg") + return "", fmt.Errorf("failed to marshal TDH2 encrypted msg: %w", err) } finalMsg, err := json.Marshal(map[string]interface{}{ "encryptedSecrets": "0x" + hex.EncodeToString(tdh2Message), }) if err != nil { - return "", errors.Wrap(err, "failed to marshal secrets msg") + return "", fmt.Errorf("failed to marshal secrets msg: %w", err) } return string(finalMsg), nil } diff --git a/integration-tests/load/functions/gateway_gun.go b/integration-tests/load/functions/gateway_gun.go index fd13922d0a..3dafb458a5 100644 --- a/integration-tests/load/functions/gateway_gun.go +++ b/integration-tests/load/functions/gateway_gun.go @@ -3,14 +3,15 @@ package loadfunctions import ( "crypto/ecdsa" "fmt" - "github.com/go-resty/resty/v2" - "github.com/rs/zerolog/log" - "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy" - "github.com/smartcontractkit/wasp" "math/rand" "os" "strconv" "time" + + "github.com/go-resty/resty/v2" + "github.com/rs/zerolog/log" + "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy" + "github.com/smartcontractkit/wasp" ) /* SingleFunctionCallGun is a gun that constantly requests randomness for one feed */ diff --git a/integration-tests/load/functions/onchain_monitoring.go b/integration-tests/load/functions/onchain_monitoring.go index 0a8b4cef46..c4b4bdb78c 100644 --- a/integration-tests/load/functions/onchain_monitoring.go +++ b/integration-tests/load/functions/onchain_monitoring.go @@ -1,10 +1,11 @@ package loadfunctions import ( - "github.com/rs/zerolog/log" - "github.com/smartcontractkit/wasp" "testing" "time" + + "github.com/rs/zerolog/log" + 
"github.com/smartcontractkit/wasp" ) /* Monitors on-chain stats of LoadConsumer and pushes them to Loki every second */ diff --git a/integration-tests/load/functions/request_gun.go b/integration-tests/load/functions/request_gun.go index d9987eaa75..bd4cf5f35a 100644 --- a/integration-tests/load/functions/request_gun.go +++ b/integration-tests/load/functions/request_gun.go @@ -13,16 +13,15 @@ const ( ) type SingleFunctionCallGun struct { - ft *FunctionsTest - mode TestMode - times uint32 - source string - slotID uint8 - slotVersion uint64 - encryptedSecrets []byte - args []string - subscriptionId uint64 - jobId [32]byte + ft *FunctionsTest + mode TestMode + times uint32 + source string + slotID uint8 + slotVersion uint64 + args []string + subscriptionId uint64 + jobId [32]byte } func NewSingleFunctionCallGun( diff --git a/integration-tests/load/functions/setup.go b/integration-tests/load/functions/setup.go index 5d44cbc698..c0be47ca83 100644 --- a/integration-tests/load/functions/setup.go +++ b/integration-tests/load/functions/setup.go @@ -2,6 +2,7 @@ package loadfunctions import ( "crypto/ecdsa" + "fmt" "math/big" mrand "math/rand" "os" @@ -10,11 +11,11 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/go-resty/resty/v2" - "github.com/pkg/errors" "github.com/rs/zerolog/log" + "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" "github.com/smartcontractkit/chainlink-testing-framework/networks" - "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy" "github.com/smartcontractkit/chainlink/integration-tests/contracts" chainlinkutils "github.com/smartcontractkit/chainlink/v2/core/utils" @@ -50,7 +51,7 @@ type S4SecretsCfg struct { } func SetupLocalLoadTestEnv(cfg *PerformanceConfig) (*FunctionsTest, error) { - bc, err := blockchain.NewEVMClientFromNetwork(networks.SelectedNetwork, log.Logger) + bc, err := blockchain.NewEVMClientFromNetwork(networks.MustGetSelectedNetworksFromEnv()[0], 
log.Logger) if err != nil { return nil, err } @@ -91,41 +92,41 @@ func SetupLocalLoadTestEnv(cfg *PerformanceConfig) (*FunctionsTest, error) { log.Info().Msg("Creating new subscription") subID, err := router.CreateSubscriptionWithConsumer(loadTestClient.Address()) if err != nil { - return nil, errors.Wrap(err, "failed to create a new subscription") + return nil, fmt.Errorf("failed to create a new subscription: %w", err) } encodedSubId, err := chainlinkutils.ABIEncode(`[{"type":"uint64"}]`, subID) if err != nil { - return nil, errors.Wrap(err, "failed to encode subscription ID for funding") + return nil, fmt.Errorf("failed to encode subscription ID for funding: %w", err) } _, err = lt.TransferAndCall(router.Address(), big.NewInt(0).Mul(cfg.Common.Funding.SubFunds, big.NewInt(1e18)), encodedSubId) if err != nil { - return nil, errors.Wrap(err, "failed to transferAndCall router, LINK funding") + return nil, fmt.Errorf("failed to transferAndCall router, LINK funding: %w", err) } cfg.Common.SubscriptionID = subID } pKey, pubKey, err := parseEthereumPrivateKey(os.Getenv("MUMBAI_KEYS")) if err != nil { - return nil, errors.Wrap(err, "failed to load Ethereum private key") + return nil, fmt.Errorf("failed to load Ethereum private key: %w", err) } tpk, err := coord.GetThresholdPublicKey() if err != nil { - return nil, errors.Wrap(err, "failed to get Threshold public key") + return nil, fmt.Errorf("failed to get Threshold public key: %w", err) } log.Info().Hex("ThresholdPublicKeyBytesHex", tpk).Msg("Loaded coordinator keys") donPubKey, err := coord.GetDONPublicKey() if err != nil { - return nil, errors.Wrap(err, "failed to get DON public key") + return nil, fmt.Errorf("failed to get DON public key: %w", err) } log.Info().Hex("DONPublicKeyHex", donPubKey).Msg("Loaded DON key") tdh2pk, err := ParseTDH2Key(tpk) if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal tdh2 public key") + return nil, fmt.Errorf("failed to unmarshal tdh2 public key: %w", err) } var 
encryptedSecrets string if cfg.Common.Secrets != "" { encryptedSecrets, err = EncryptS4Secrets(pKey, tdh2pk, donPubKey, cfg.Common.Secrets) if err != nil { - return nil, errors.Wrap(err, "failed to generate tdh2 secrets") + return nil, fmt.Errorf("failed to generate tdh2 secrets: %w", err) } slotID, slotVersion, err := UploadS4Secrets(resty.New(), &S4SecretsCfg{ GatewayURL: cfg.Common.GatewayURL, @@ -139,7 +140,7 @@ func SetupLocalLoadTestEnv(cfg *PerformanceConfig) (*FunctionsTest, error) { S4SetPayload: encryptedSecrets, }) if err != nil { - return nil, errors.Wrap(err, "failed to upload secrets to S4") + return nil, fmt.Errorf("failed to upload secrets to S4: %w", err) } cfg.Common.SecretsSlotID = slotID cfg.Common.SecretsVersionID = slotVersion @@ -168,13 +169,13 @@ func SetupLocalLoadTestEnv(cfg *PerformanceConfig) (*FunctionsTest, error) { func parseEthereumPrivateKey(pk string) (*ecdsa.PrivateKey, *ecdsa.PublicKey, error) { pKey, err := crypto.HexToECDSA(pk) if err != nil { - return nil, nil, errors.Wrap(err, "failed to convert Ethereum key from hex") + return nil, nil, fmt.Errorf("failed to convert Ethereum key from hex: %w", err) } publicKey := pKey.Public() pubKey, ok := publicKey.(*ecdsa.PublicKey) if !ok { - return nil, nil, errors.Wrap(err, "failed to get public key from Ethereum private key") + return nil, nil, fmt.Errorf("failed to get public key from Ethereum private key: %w", err) } log.Info().Str("Address", crypto.PubkeyToAddress(*pubKey).Hex()).Msg("Parsed private key for address") return pKey, pubKey, nil diff --git a/integration-tests/load/log_poller/config.toml b/integration-tests/load/log_poller/config.toml new file mode 100644 index 0000000000..2e32800194 --- /dev/null +++ b/integration-tests/load/log_poller/config.toml @@ -0,0 +1,22 @@ +[general] +generator = "looped" +contracts = 10 +events_per_tx = 10 + +[chaos] +experiment_count = 10 + +[looped] +[looped.contract] +execution_count = 300 + +[looped.fuzz] +min_emit_wait_time_ms = 100 
+max_emit_wait_time_ms = 500 + +[wasp] +[wasp.load] +call_timeout = "3m" +rate_limit_unit_duration = "2s" +LPS = 30 +duration = "1m" \ No newline at end of file diff --git a/integration-tests/load/log_poller/log_poller_test.go b/integration-tests/load/log_poller/log_poller_test.go new file mode 100644 index 0000000000..04366848f0 --- /dev/null +++ b/integration-tests/load/log_poller/log_poller_test.go @@ -0,0 +1,25 @@ +package logpoller + +import ( + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + + "github.com/stretchr/testify/require" + + lp_helpers "github.com/smartcontractkit/chainlink/integration-tests/universal/log_poller" +) + +func TestLoadTestLogPoller(t *testing.T) { + cfg, err := lp_helpers.ReadConfig(lp_helpers.DefaultConfigFilename) + require.NoError(t, err) + + eventsToEmit := []abi.Event{} + for _, event := range lp_helpers.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit + + lp_helpers.ExecuteBasicLogPollerTest(t, cfg) +} diff --git a/integration-tests/load/vrfv2/cmd/dashboard.go b/integration-tests/load/vrfv2/cmd/dashboard.go index 3035da0422..0fb7be2b78 100644 --- a/integration-tests/load/vrfv2/cmd/dashboard.go +++ b/integration-tests/load/vrfv2/cmd/dashboard.go @@ -1,6 +1,8 @@ package main import ( + "os" + "github.com/K-Phoen/grabana/dashboard" "github.com/K-Phoen/grabana/logs" "github.com/K-Phoen/grabana/row" @@ -8,7 +10,6 @@ import ( "github.com/K-Phoen/grabana/timeseries" "github.com/K-Phoen/grabana/timeseries/axis" "github.com/smartcontractkit/wasp" - "os" ) func main() { diff --git a/integration-tests/load/vrfv2/config.go b/integration-tests/load/vrfv2/config.go index ee5f3ff80d..0a595f753c 100644 --- a/integration-tests/load/vrfv2/config.go +++ b/integration-tests/load/vrfv2/config.go @@ -1,12 +1,14 @@ package loadvrfv2 import ( + "fmt" + "math/big" + "os" + "github.com/pelletier/go-toml/v2" - "github.com/pkg/errors" "github.com/rs/zerolog/log" + 
"github.com/smartcontractkit/chainlink/v2/core/store/models" - "math/big" - "os" ) const ( @@ -63,11 +65,11 @@ func ReadConfig() (*PerformanceConfig, error) { var cfg *PerformanceConfig d, err := os.ReadFile(DefaultConfigFilename) if err != nil { - return nil, errors.Wrap(err, ErrReadPerfConfig) + return nil, fmt.Errorf("%s, err: %w", ErrReadPerfConfig, err) } err = toml.Unmarshal(d, &cfg) if err != nil { - return nil, errors.Wrap(err, ErrUnmarshalPerfConfig) + return nil, fmt.Errorf("%s, err: %w", ErrUnmarshalPerfConfig, err) } log.Debug().Interface("PerformanceConfig", cfg).Msg("Parsed performance config") return cfg, nil diff --git a/integration-tests/load/vrfv2/gun.go b/integration-tests/load/vrfv2/gun.go index d6a8977738..8100baaa7f 100644 --- a/integration-tests/load/vrfv2/gun.go +++ b/integration-tests/load/vrfv2/gun.go @@ -1,9 +1,10 @@ package loadvrfv2 import ( + "github.com/smartcontractkit/wasp" + "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions" vrfConst "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions/vrfv2_constants" - "github.com/smartcontractkit/wasp" ) /* SingleHashGun is a gun that constantly requests randomness for one feed */ @@ -21,7 +22,7 @@ func SingleFeedGun(contracts *vrfv2_actions.VRFV2Contracts, keyHash [32]byte) *S } // Call implements example gun call, assertions on response bodies should be done here -func (m *SingleHashGun) Call(l *wasp.Generator) *wasp.CallResult { +func (m *SingleHashGun) Call(_ *wasp.Generator) *wasp.CallResult { err := m.contracts.LoadTestConsumer.RequestRandomness( m.keyHash, vrfConst.SubID, diff --git a/integration-tests/load/vrfv2/onchain_monitoring.go b/integration-tests/load/vrfv2/onchain_monitoring.go index b4503d27fa..879c7089e1 100644 --- a/integration-tests/load/vrfv2/onchain_monitoring.go +++ b/integration-tests/load/vrfv2/onchain_monitoring.go @@ -1,12 +1,14 @@ package loadvrfv2 import ( - "context" - "github.com/rs/zerolog/log" - 
"github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions" - "github.com/smartcontractkit/wasp" "testing" "time" + + "github.com/rs/zerolog/log" + "github.com/smartcontractkit/wasp" + + "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) /* Monitors on-chain stats of LoadConsumer and pushes them to Loki every second */ @@ -34,7 +36,7 @@ func MonitorLoadStats(t *testing.T, vrfv2Contracts *vrfv2_actions.VRFV2Contracts } for { time.Sleep(1 * time.Second) - metrics, err := vrfv2Contracts.LoadTestConsumer.GetLoadTestMetrics(context.Background()) + metrics, err := vrfv2Contracts.LoadTestConsumer.GetLoadTestMetrics(utils.TestContext(t)) if err != nil { log.Error().Err(err).Msg(ErrMetrics) } diff --git a/integration-tests/load/vrfv2/vrfv2_test.go b/integration-tests/load/vrfv2/vrfv2_test.go index a9fb80a72a..44325965bd 100644 --- a/integration-tests/load/vrfv2/vrfv2_test.go +++ b/integration-tests/load/vrfv2/vrfv2_test.go @@ -3,9 +3,10 @@ package loadvrfv2 import ( "testing" - "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions" "github.com/smartcontractkit/wasp" "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions" ) func TestVRFV2Load(t *testing.T) { diff --git a/integration-tests/load/vrfv2/vu.go b/integration-tests/load/vrfv2/vu.go index df05a9168e..7eb02ae330 100644 --- a/integration-tests/load/vrfv2/vu.go +++ b/integration-tests/load/vrfv2/vu.go @@ -1,13 +1,15 @@ package loadvrfv2 import ( - "github.com/pkg/errors" + "fmt" + "time" + + "github.com/smartcontractkit/wasp" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions" vrfConst "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions/vrfv2_constants" 
"github.com/smartcontractkit/chainlink/integration-tests/client" - "github.com/smartcontractkit/wasp" - "time" ) /* JobVolumeVU is a "virtual user" that creates a VRFv2 job and constantly requesting new randomness only for this job instance */ @@ -54,7 +56,7 @@ func (m *JobVolumeVU) Clone(_ *wasp.Generator) wasp.VirtualUser { func (m *JobVolumeVU) Setup(_ *wasp.Generator) error { jobs, err := vrfv2_actions.CreateVRFV2Jobs(m.nodes, m.contracts.Coordinator, m.bc, m.minIncomingConfirmations) if err != nil { - return errors.Wrap(err, "failed to create VRFv2 jobs in setup") + return fmt.Errorf("failed to create VRFv2 jobs in setup: %w", err) } m.jobs = jobs m.keyHash = jobs[0].KeyHash diff --git a/integration-tests/load/vrfv2plus/cmd/dashboard.go b/integration-tests/load/vrfv2plus/cmd/dashboard.go index 9a0ba682a1..049ee9ff2e 100644 --- a/integration-tests/load/vrfv2plus/cmd/dashboard.go +++ b/integration-tests/load/vrfv2plus/cmd/dashboard.go @@ -1,6 +1,8 @@ package main import ( + "os" + "github.com/K-Phoen/grabana/dashboard" "github.com/K-Phoen/grabana/logs" "github.com/K-Phoen/grabana/row" @@ -8,7 +10,6 @@ import ( "github.com/K-Phoen/grabana/timeseries" "github.com/K-Phoen/grabana/timeseries/axis" "github.com/smartcontractkit/wasp" - "os" ) func main() { diff --git a/integration-tests/load/vrfv2plus/config.go b/integration-tests/load/vrfv2plus/config.go index 5f3babfeab..96dbf99c6b 100644 --- a/integration-tests/load/vrfv2plus/config.go +++ b/integration-tests/load/vrfv2plus/config.go @@ -2,16 +2,22 @@ package loadvrfv2plus import ( "encoding/base64" + "fmt" + "os" + "github.com/pelletier/go-toml/v2" - "github.com/pkg/errors" "github.com/rs/zerolog/log" + "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus/vrfv2plus_config" "github.com/smartcontractkit/chainlink/v2/core/store/models" - "os" ) const ( DefaultConfigFilename = "config.toml" + SoakTestType = "Soak" + LoadTestType = "Load" + StressTestType = "Stress" + SpikeTestType = "Spike" 
ErrReadPerfConfig = "failed to read TOML config for performance tests" ErrUnmarshalPerfConfig = "failed to unmarshal TOML config for performance tests" @@ -32,13 +38,15 @@ type PerformanceConfig struct { type ExistingEnvConfig struct { CoordinatorAddress string `toml:"coordinator_address"` ConsumerAddress string `toml:"consumer_address"` + LinkAddress string `toml:"link_address"` SubID string `toml:"sub_id"` KeyHash string `toml:"key_hash"` + SubFunding + CreateFundSubsAndAddConsumers bool `toml:"create_fund_subs_and_add_consumers"` } type NewEnvConfig struct { Funding - NumberOfSubToCreate int `toml:"number_of_sub_to_create"` } type Common struct { @@ -46,9 +54,13 @@ type Common struct { } type Funding struct { - NodeFunds float64 `toml:"node_funds"` - SubFundsLink int64 `toml:"sub_funds_link"` - SubFundsNative int64 `toml:"sub_funds_native"` + NodeFunds float64 `toml:"node_funds"` + SubFunding +} + +type SubFunding struct { + SubFundsLink float64 `toml:"sub_funds_link"` + SubFundsNative float64 `toml:"sub_funds_native"` } type Soak struct { @@ -68,6 +80,8 @@ type Spike struct { } type PerformanceTestConfig struct { + NumberOfSubToCreate int `toml:"number_of_sub_to_create"` + RPS int64 `toml:"rps"` //Duration *models.Duration `toml:"duration"` RateLimitUnitDuration *models.Duration `toml:"rate_limit_unit_duration"` @@ -83,42 +97,49 @@ func ReadConfig() (*PerformanceConfig, error) { if rawConfig == "" { d, err = os.ReadFile(DefaultConfigFilename) if err != nil { - return nil, errors.Wrap(err, ErrReadPerfConfig) + return nil, fmt.Errorf("%s, err: %w", ErrReadPerfConfig, err) } } else { d, err = base64.StdEncoding.DecodeString(rawConfig) + if err != nil { + return nil, fmt.Errorf("%s, err: %w", ErrReadPerfConfig, err) + } } err = toml.Unmarshal(d, &cfg) if err != nil { - return nil, errors.Wrap(err, ErrUnmarshalPerfConfig) + return nil, fmt.Errorf("%s, err: %w", ErrUnmarshalPerfConfig, err) } if cfg.Soak.RandomnessRequestCountPerRequest <= 
cfg.Soak.RandomnessRequestCountPerRequestDeviation { - return nil, errors.Wrap(err, ErrDeviationShouldBeLessThanOriginal) + return nil, fmt.Errorf("%s, err: %w", ErrDeviationShouldBeLessThanOriginal, err) } log.Debug().Interface("Config", cfg).Msg("Parsed config") return cfg, nil } -func SetPerformanceTestConfig(vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig, cfg *PerformanceConfig) { - switch os.Getenv("TEST_TYPE") { - case "Soak": +func SetPerformanceTestConfig(testType string, vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig, cfg *PerformanceConfig) { + switch testType { + case SoakTestType: + vrfv2PlusConfig.NumberOfSubToCreate = cfg.Soak.NumberOfSubToCreate vrfv2PlusConfig.RPS = cfg.Soak.RPS vrfv2PlusConfig.RateLimitUnitDuration = cfg.Soak.RateLimitUnitDuration.Duration() vrfv2PlusConfig.RandomnessRequestCountPerRequest = cfg.Soak.RandomnessRequestCountPerRequest vrfv2PlusConfig.RandomnessRequestCountPerRequestDeviation = cfg.Soak.RandomnessRequestCountPerRequestDeviation - case "Load": + case LoadTestType: + vrfv2PlusConfig.NumberOfSubToCreate = cfg.Load.NumberOfSubToCreate vrfv2PlusConfig.RPS = cfg.Load.RPS vrfv2PlusConfig.RateLimitUnitDuration = cfg.Load.RateLimitUnitDuration.Duration() vrfv2PlusConfig.RandomnessRequestCountPerRequest = cfg.Load.RandomnessRequestCountPerRequest vrfv2PlusConfig.RandomnessRequestCountPerRequestDeviation = cfg.Load.RandomnessRequestCountPerRequestDeviation - case "Stress": + case StressTestType: + vrfv2PlusConfig.NumberOfSubToCreate = cfg.Stress.NumberOfSubToCreate vrfv2PlusConfig.RPS = cfg.Stress.RPS vrfv2PlusConfig.RateLimitUnitDuration = cfg.Stress.RateLimitUnitDuration.Duration() vrfv2PlusConfig.RandomnessRequestCountPerRequest = cfg.Stress.RandomnessRequestCountPerRequest vrfv2PlusConfig.RandomnessRequestCountPerRequestDeviation = cfg.Stress.RandomnessRequestCountPerRequestDeviation - case "Spike": + case SpikeTestType: + vrfv2PlusConfig.NumberOfSubToCreate = cfg.Spike.NumberOfSubToCreate vrfv2PlusConfig.RPS = 
cfg.Spike.RPS vrfv2PlusConfig.RateLimitUnitDuration = cfg.Spike.RateLimitUnitDuration.Duration() vrfv2PlusConfig.RandomnessRequestCountPerRequest = cfg.Spike.RandomnessRequestCountPerRequest diff --git a/integration-tests/load/vrfv2plus/config.toml b/integration-tests/load/vrfv2plus/config.toml index 1208423dc0..e3200fafe2 100644 --- a/integration-tests/load/vrfv2plus/config.toml +++ b/integration-tests/load/vrfv2plus/config.toml @@ -3,17 +3,19 @@ minimum_confirmations = 3 [NewEnvConfig] -sub_funds_link = 1000 -sub_funds_native = 1000 +sub_funds_link = 1 +sub_funds_native = 1 node_funds = 10 -number_of_sub_to_create = 10 [ExistingEnvConfig] -coordinator_address = "0x4931Ce2e341398c8eD8A5D0F6ADb920476D6DaBb" -consumer_address = "0x087F232165D9bA1A602f148025e5D0666953F64a" -sub_id = "52116875585187328970776211988181422347535732407068188096422095950800466618218" -key_hash = "0x4c422465ed6a06cfc84575a5437fef7b9dc6263133f648afbe6ae7b2c694d3b3" - +coordinator_address = "0x27b61f155F772b291D1d9B478BeAd37B2Ae447b0" +#consumer_address = "0x087F232165D9bA1A602f148025e5D0666953F64a" +#sub_id = "52116875585187328970776211988181422347535732407068188096422095950800466618218" +key_hash = "0x787d74caea10b2b357790d5b5247c2f63d1d91572a9846f780606e4d953677ae" +create_fund_subs_and_add_consumers = true +link_address = "0x779877A7B0D9E8603169DdbD7836e478b4624789" +sub_funds_link = 3 +sub_funds_native = 1 # 10 RPM - 1 tx request with 1 rand request in each tx every 6 seconds [Soak] @@ -21,6 +23,7 @@ rate_limit_unit_duration = "6s" rps = 1 randomness_request_count_per_request = 1 # amount of randomness requests to make per one TX request randomness_request_count_per_request_deviation = 0 #NOTE - deviation should be less than randomness_request_count_per_request setting +number_of_sub_to_create = 1 # approx 60 RPM - 1 tx request with 4 rand requests in each tx every 3 seconds [Load] @@ -28,13 +31,15 @@ rate_limit_unit_duration = "3s" rps = 1 randomness_request_count_per_request = 3 # 
amount of randomness requests to make per one TX request randomness_request_count_per_request_deviation = 2 #NOTE - deviation should be less than randomness_request_count_per_request setting +number_of_sub_to_create = 1 # approx 540 RPM - 3 tx requests per second with 4 rand requests in each tx [Stress] -rate_limit_unit_duration = "0" +rate_limit_unit_duration = "1s" rps = 3 randomness_request_count_per_request = 4 # amount of randomness requests to make per one TX request randomness_request_count_per_request_deviation = 0 #NOTE - deviation should be less than randomness_request_count_per_request setting +number_of_sub_to_create = 1 # approx 150 RPM - 1 tx request with 150 rand requests in each tx every 60 seconds [Spike] @@ -42,3 +47,4 @@ rate_limit_unit_duration = "1m" rps = 1 randomness_request_count_per_request = 150 # amount of randomness requests to make per one TX request randomness_request_count_per_request_deviation = 0 #NOTE - deviation should be less than randomness_request_count_per_request setting +number_of_sub_to_create = 1 \ No newline at end of file diff --git a/integration-tests/load/vrfv2plus/gun.go b/integration-tests/load/vrfv2plus/gun.go index c9947fa32f..8ab278b73e 100644 --- a/integration-tests/load/vrfv2plus/gun.go +++ b/integration-tests/load/vrfv2plus/gun.go @@ -1,12 +1,14 @@ package loadvrfv2plus import ( + "math/big" + "math/rand" + "github.com/rs/zerolog" + "github.com/smartcontractkit/wasp" + "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus" "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus/vrfv2plus_config" - "github.com/smartcontractkit/wasp" - "math/big" - "math/rand" ) /* SingleHashGun is a gun that constantly requests randomness for one feed */ @@ -15,7 +17,7 @@ type SingleHashGun struct { contracts *vrfv2plus.VRFV2_5Contracts keyHash [32]byte subIDs []*big.Int - vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig logger 
zerolog.Logger } @@ -23,7 +25,7 @@ func NewSingleHashGun( contracts *vrfv2plus.VRFV2_5Contracts, keyHash [32]byte, subIDs []*big.Int, - vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig, + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig, logger zerolog.Logger, ) *SingleHashGun { return &SingleHashGun{ @@ -36,7 +38,7 @@ func NewSingleHashGun( } // Call implements example gun call, assertions on response bodies should be done here -func (m *SingleHashGun) Call(l *wasp.Generator) *wasp.CallResult { +func (m *SingleHashGun) Call(_ *wasp.Generator) *wasp.CallResult { //todo - should work with multiple consumers and consumers having different keyhashes and wallets //randomly increase/decrease randomness request count per TX @@ -52,6 +54,7 @@ func (m *SingleHashGun) Call(l *wasp.Generator) *wasp.CallResult { randBool(), randomnessRequestCountPerRequest, m.vrfv2PlusConfig, + m.vrfv2PlusConfig.RandomWordsFulfilledEventTimeout, m.logger, ) if err != nil { diff --git a/integration-tests/load/vrfv2plus/onchain_monitoring.go b/integration-tests/load/vrfv2plus/onchain_monitoring.go index 0ae27fe6be..c911546af0 100644 --- a/integration-tests/load/vrfv2plus/onchain_monitoring.go +++ b/integration-tests/load/vrfv2plus/onchain_monitoring.go @@ -2,11 +2,13 @@ package loadvrfv2plus import ( "context" - "github.com/rs/zerolog/log" - "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus" - "github.com/smartcontractkit/wasp" "testing" "time" + + "github.com/rs/zerolog/log" + "github.com/smartcontractkit/wasp" + + "github.com/smartcontractkit/chainlink/integration-tests/contracts" ) /* Monitors on-chain stats of LoadConsumer and pushes them to Loki every second */ @@ -18,11 +20,12 @@ const ( ErrLokiPush = "failed to push monitoring metrics to Loki" ) -func MonitorLoadStats(lc *wasp.LokiClient, vrfv2PlusContracts *vrfv2plus.VRFV2_5Contracts, labels map[string]string) { +func MonitorLoadStats(lc *wasp.LokiClient, consumer contracts.VRFv2PlusLoadTestConsumer, labels 
map[string]string) { go func() { for { time.Sleep(1 * time.Second) - SendLoadTestMetricsToLoki(vrfv2PlusContracts, lc, labels) + metrics := GetLoadTestMetrics(consumer) + SendMetricsToLoki(metrics, lc, labels) } }() } @@ -38,13 +41,16 @@ func UpdateLabels(labels map[string]string, t *testing.T) map[string]string { return updatedLabels } -func SendLoadTestMetricsToLoki(vrfv2PlusContracts *vrfv2plus.VRFV2_5Contracts, lc *wasp.LokiClient, updatedLabels map[string]string) { - //todo - should work with multiple consumers and consumers having different keyhashes and wallets - metrics, err := vrfv2PlusContracts.LoadTestConsumers[0].GetLoadTestMetrics(context.Background()) - if err != nil { - log.Error().Err(err).Msg(ErrMetrics) - } +func SendMetricsToLoki(metrics *contracts.VRFLoadTestMetrics, lc *wasp.LokiClient, updatedLabels map[string]string) { if err := lc.HandleStruct(wasp.LabelsMapToModel(updatedLabels), time.Now(), metrics); err != nil { log.Error().Err(err).Msg(ErrLokiPush) } } + +func GetLoadTestMetrics(consumer contracts.VRFv2PlusLoadTestConsumer) *contracts.VRFLoadTestMetrics { + metrics, err := consumer.GetLoadTestMetrics(context.Background()) + if err != nil { + log.Error().Err(err).Msg(ErrMetrics) + } + return metrics +} diff --git a/integration-tests/load/vrfv2plus/vrfv2plus_test.go b/integration-tests/load/vrfv2plus/vrfv2plus_test.go index 5c3ea6e8c6..e7734fee0d 100644 --- a/integration-tests/load/vrfv2plus/vrfv2plus_test.go +++ b/integration-tests/load/vrfv2plus/vrfv2plus_test.go @@ -1,18 +1,22 @@ package loadvrfv2plus import ( - "context" - "github.com/ethereum/go-ethereum/common" - "github.com/kelseyhightower/envconfig" - "github.com/smartcontractkit/chainlink-testing-framework/logging" - "github.com/smartcontractkit/wasp" - "github.com/stretchr/testify/require" "math/big" "os" "sync" "testing" "time" + "github.com/ethereum/go-ethereum/common" + "github.com/kelseyhightower/envconfig" + "github.com/rs/zerolog/log" + "github.com/smartcontractkit/wasp" + 
"github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-testing-framework/logging" + "github.com/smartcontractkit/chainlink/integration-tests/testreporters" + "github.com/smartcontractkit/chainlink/integration-tests/utils" + "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus" "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus/vrfv2plus_config" @@ -20,21 +24,47 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" ) -func TestVRFV2PlusLoad(t *testing.T) { +var ( + env *test_env.CLClusterTestEnv + vrfv2PlusContracts *vrfv2plus.VRFV2_5Contracts + vrfv2PlusData *vrfv2plus.VRFV2PlusData + subIDs []*big.Int + eoaWalletAddress string + + labels = map[string]string{ + "branch": "vrfv2Plus_healthcheck", + "commit": "vrfv2Plus_healthcheck", + } + + testType = os.Getenv("TEST_TYPE") +) + +func TestVRFV2PlusPerformance(t *testing.T) { cfg, err := ReadConfig() require.NoError(t, err) var vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig err = envconfig.Process("VRFV2PLUS", &vrfv2PlusConfig) require.NoError(t, err) - SetPerformanceTestConfig(&vrfv2PlusConfig, cfg) + testReporter := &testreporters.VRFV2PlusTestReporter{} + + SetPerformanceTestConfig(testType, &vrfv2PlusConfig, cfg) l := logging.GetTestLogger(t) //todo: temporary solution with envconfig and toml config until VRF-662 is implemented vrfv2PlusConfig.MinimumConfirmations = cfg.Common.MinimumConfirmations + lokiConfig := wasp.NewEnvLokiConfig() + lc, err := wasp.NewLokiClient(lokiConfig) + if err != nil { + l.Error().Err(err).Msg(ErrLokiClient) + return + } + + updatedLabels := UpdateLabels(labels, t) + l.Info(). - Str("Test Type", os.Getenv("TEST_TYPE")). + Str("Test Type", testType). Str("Test Duration", vrfv2PlusConfig.TestDuration.Truncate(time.Second).String()). Int64("RPS", vrfv2PlusConfig.RPS). 
Str("RateLimitUnitDuration", vrfv2PlusConfig.RateLimitUnitDuration.String()). @@ -43,21 +73,48 @@ func TestVRFV2PlusLoad(t *testing.T) { Bool("UseExistingEnv", vrfv2PlusConfig.UseExistingEnv). Msg("Performance Test Configuration") - var env *test_env.CLClusterTestEnv - var vrfv2PlusContracts *vrfv2plus.VRFV2_5Contracts - var vrfv2PlusData *vrfv2plus.VRFV2PlusData - var subIDs []*big.Int - if vrfv2PlusConfig.UseExistingEnv { //todo: temporary solution with envconfig and toml config until VRF-662 is implemented vrfv2PlusConfig.CoordinatorAddress = cfg.ExistingEnvConfig.CoordinatorAddress vrfv2PlusConfig.ConsumerAddress = cfg.ExistingEnvConfig.ConsumerAddress + vrfv2PlusConfig.LinkAddress = cfg.ExistingEnvConfig.LinkAddress + vrfv2PlusConfig.SubscriptionFundingAmountLink = cfg.ExistingEnvConfig.SubFunding.SubFundsLink + vrfv2PlusConfig.SubscriptionFundingAmountNative = cfg.ExistingEnvConfig.SubFunding.SubFundsNative vrfv2PlusConfig.SubID = cfg.ExistingEnvConfig.SubID vrfv2PlusConfig.KeyHash = cfg.ExistingEnvConfig.KeyHash env, err = test_env.NewCLTestEnvBuilder(). WithTestLogger(t). - WithoutCleanup(). + WithCustomCleanup( + func() { + teardown(t, vrfv2PlusContracts.LoadTestConsumers[0], lc, updatedLabels, testReporter, testType, vrfv2PlusConfig) + if env.EVMClient.NetworkSimulated() { + l.Info(). + Str("Network Name", env.EVMClient.GetNetworkName()). + Msg("Network is a simulated network. Skipping fund return for Coordinator Subscriptions.") + } else { + //cancel subs and return funds to sub owner + for _, subID := range subIDs { + l.Info(). + Str("Returning funds from SubID", subID.String()). + Str("Returning funds to", eoaWalletAddress). 
+ Msg("Canceling subscription and returning funds to subscription owner") + pendingRequestsExist, err := vrfv2PlusContracts.Coordinator.PendingRequestsExist(utils.TestContext(t), subID) + if err != nil { + l.Error().Err(err).Msg("Error checking if pending requests exist") + } + if !pendingRequestsExist { + _, err := vrfv2PlusContracts.Coordinator.CancelSubscription(subID, common.HexToAddress(eoaWalletAddress)) + if err != nil { + l.Error().Err(err).Msg("Error canceling subscription") + } + } else { + l.Error().Str("Sub ID", subID.String()).Msg("Pending requests exist for subscription, cannot cancel subscription and return funds") + } + + } + } + }). Build() require.NoError(t, err, "error creating test env") @@ -65,17 +122,36 @@ func TestVRFV2PlusLoad(t *testing.T) { coordinator, err := env.ContractLoader.LoadVRFCoordinatorV2_5(vrfv2PlusConfig.CoordinatorAddress) require.NoError(t, err) - consumer, err := env.ContractLoader.LoadVRFv2PlusLoadTestConsumer(vrfv2PlusConfig.ConsumerAddress) - require.NoError(t, err) + var consumers []contracts.VRFv2PlusLoadTestConsumer + if cfg.ExistingEnvConfig.CreateFundSubsAndAddConsumers { + linkToken, err := env.ContractLoader.LoadLINKToken(vrfv2PlusConfig.LinkAddress) + require.NoError(t, err) + consumers, err = vrfv2plus.DeployVRFV2PlusConsumers(env.ContractDeployer, coordinator, 1) + require.NoError(t, err) + subIDs, err = vrfv2plus.CreateFundSubsAndAddConsumers( + env, + vrfv2PlusConfig, + linkToken, + coordinator, + consumers, + vrfv2PlusConfig.NumberOfSubToCreate, + ) + require.NoError(t, err) + } else { + consumer, err := env.ContractLoader.LoadVRFv2PlusLoadTestConsumer(vrfv2PlusConfig.ConsumerAddress) + require.NoError(t, err) + consumers = append(consumers, consumer) + var ok bool + subID, ok := new(big.Int).SetString(vrfv2PlusConfig.SubID, 10) + require.True(t, ok) + subIDs = append(subIDs, subID) + } vrfv2PlusContracts = &vrfv2plus.VRFV2_5Contracts{ Coordinator: coordinator, - LoadTestConsumers: 
[]contracts.VRFv2PlusLoadTestConsumer{consumer}, + LoadTestConsumers: consumers, BHS: nil, } - var ok bool - subID, ok := new(big.Int).SetString(vrfv2PlusConfig.SubID, 10) - require.True(t, ok) vrfv2PlusData = &vrfv2plus.VRFV2PlusData{ VRFV2PlusKeyData: vrfv2plus.VRFV2PlusKeyData{ @@ -87,19 +163,43 @@ func TestVRFV2PlusLoad(t *testing.T) { PrimaryEthAddress: "", ChainID: nil, } - subIDs = append(subIDs, subID) + } else { //todo: temporary solution with envconfig and toml config until VRF-662 is implemented vrfv2PlusConfig.ChainlinkNodeFunding = cfg.NewEnvConfig.NodeFunds vrfv2PlusConfig.SubscriptionFundingAmountLink = cfg.NewEnvConfig.Funding.SubFundsLink vrfv2PlusConfig.SubscriptionFundingAmountNative = cfg.NewEnvConfig.Funding.SubFundsNative - numberOfSubToCreate := cfg.NewEnvConfig.NumberOfSubToCreate env, err = test_env.NewCLTestEnvBuilder(). WithTestLogger(t). WithGeth(). WithCLNodes(1). WithFunding(big.NewFloat(vrfv2PlusConfig.ChainlinkNodeFunding)). - WithStandardCleanup(). + WithCustomCleanup( + func() { + teardown(t, vrfv2PlusContracts.LoadTestConsumers[0], lc, updatedLabels, testReporter, testType, vrfv2PlusConfig) + + if env.EVMClient.NetworkSimulated() { + l.Info(). + Str("Network Name", env.EVMClient.GetNetworkName()). + Msg("Network is a simulated network. Skipping fund return for Coordinator Subscriptions.") + } else { + for _, subID := range subIDs { + l.Info(). + Str("Returning funds from SubID", subID.String()). + Str("Returning funds to", eoaWalletAddress). 
+ Msg("Canceling subscription and returning funds to subscription owner") + _, err := vrfv2PlusContracts.Coordinator.CancelSubscription(subID, common.HexToAddress(eoaWalletAddress)) + if err != nil { + l.Error().Err(err).Msg("Error canceling subscription") + } + } + //err = vrfv2plus.ReturnFundsForFulfilledRequests(env.EVMClient, vrfv2PlusContracts.Coordinator, l) + //l.Error().Err(err).Msg("Error returning funds for fulfilled requests") + } + if err := env.Cleanup(); err != nil { + l.Error().Err(err).Msg("Error cleaning up test environment") + } + }). WithLogWatcher(). Build() @@ -113,29 +213,28 @@ func TestVRFV2PlusLoad(t *testing.T) { linkToken, err := actions.DeployLINKToken(env.ContractDeployer) require.NoError(t, err, "error deploying LINK contract") - vrfv2PlusContracts, subIDs, vrfv2PlusData, err = vrfv2plus.SetupVRFV2_5Environment(env, &vrfv2PlusConfig, linkToken, mockETHLinkFeed, 1, numberOfSubToCreate) + vrfv2PlusContracts, subIDs, vrfv2PlusData, err = vrfv2plus.SetupVRFV2_5Environment( + env, + vrfv2PlusConfig, + linkToken, + mockETHLinkFeed, + //register proving key against EOA address in order to return funds to this address + env.EVMClient.GetDefaultWallet().Address(), + 1, + vrfv2PlusConfig.NumberOfSubToCreate, + l, + ) require.NoError(t, err, "error setting up VRF v2_5 env") } + eoaWalletAddress = env.EVMClient.GetDefaultWallet().Address() - l.Debug().Int("Number of Subs", len(subIDs)).Msg("Subs Involved in Load Test") + l.Debug().Int("Number of Subs", len(subIDs)).Msg("Subs involved in the test") for _, subID := range subIDs { - subscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID) + subscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID) require.NoError(t, err, "error getting subscription information for subscription %s", subID.String()) vrfv2plus.LogSubDetails(l, subscription, subID, vrfv2PlusContracts.Coordinator) } - labels := map[string]string{ - "branch": 
"vrfv2Plus_healthcheck", - "commit": "vrfv2Plus_healthcheck", - } - - lokiConfig := wasp.NewEnvLokiConfig() - lc, err := wasp.NewLokiClient(lokiConfig) - if err != nil { - l.Error().Err(err).Msg(ErrLokiClient) - return - } - singleFeedConfig := &wasp.Config{ T: t, LoadType: wasp.RPS, @@ -145,7 +244,7 @@ func TestVRFV2PlusLoad(t *testing.T) { vrfv2PlusContracts, vrfv2PlusData.KeyHash, subIDs, - &vrfv2PlusConfig, + vrfv2PlusConfig, l, ), Labels: labels, @@ -156,11 +255,10 @@ func TestVRFV2PlusLoad(t *testing.T) { consumer := vrfv2PlusContracts.LoadTestConsumers[0] err = consumer.ResetMetrics() require.NoError(t, err) - updatedLabels := UpdateLabels(labels, t) - MonitorLoadStats(lc, vrfv2PlusContracts, updatedLabels) + MonitorLoadStats(lc, consumer, updatedLabels) // is our "job" stable at all, no memory leaks, no flaking performance under some RPS? - t.Run("vrfv2plus soak test", func(t *testing.T) { + t.Run("vrfv2plus performance test", func(t *testing.T) { singleFeedConfig.Schedule = wasp.Plain( vrfv2PlusConfig.RPS, @@ -172,17 +270,45 @@ func TestVRFV2PlusLoad(t *testing.T) { require.NoError(t, err) var wg sync.WaitGroup - wg.Add(1) - requestCount, fulfilmentCount, err := vrfv2plus.WaitForRequestCountEqualToFulfilmentCount(consumer, 30*time.Second, &wg) + //todo - timeout should be configurable depending on the perf test type + requestCount, fulfilmentCount, err := vrfv2plus.WaitForRequestCountEqualToFulfilmentCount(consumer, 2*time.Minute, &wg) + require.NoError(t, err) + wg.Wait() + l.Info(). Interface("Request Count", requestCount). Interface("Fulfilment Count", fulfilmentCount). 
Msg("Final Request/Fulfilment Stats") - require.NoError(t, err) - wg.Wait() - //send final results - SendLoadTestMetricsToLoki(vrfv2PlusContracts, lc, updatedLabels) }) +} +func teardown( + t *testing.T, + consumer contracts.VRFv2PlusLoadTestConsumer, + lc *wasp.LokiClient, + updatedLabels map[string]string, + testReporter *testreporters.VRFV2PlusTestReporter, + testType string, + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig, +) { + //send final results to Loki + metrics := GetLoadTestMetrics(consumer) + SendMetricsToLoki(metrics, lc, updatedLabels) + //set report data for Slack notification + testReporter.SetReportData( + testType, + metrics.RequestCount, + metrics.FulfilmentCount, + metrics.AverageFulfillmentInMillions, + metrics.SlowestFulfillment, + metrics.FastestFulfillment, + vrfv2PlusConfig, + ) + + // send Slack notification + err := testReporter.SendSlackNotification(t, nil) + if err != nil { + log.Warn().Err(err).Msg("Error sending Slack notification") + } } diff --git a/integration-tests/migration/upgrade_version_test.go b/integration-tests/migration/upgrade_version_test.go index bf97f43d05..c851f36ec6 100644 --- a/integration-tests/migration/upgrade_version_test.go +++ b/integration-tests/migration/upgrade_version_test.go @@ -1,13 +1,12 @@ package migration import ( + "os" "testing" - "github.com/smartcontractkit/chainlink-testing-framework/utils" "github.com/stretchr/testify/require" - "os" - + "github.com/smartcontractkit/chainlink-testing-framework/utils" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" ) diff --git a/integration-tests/performance/cron_test.go b/integration-tests/performance/cron_test.go index 959cb0fa3e..7e90d29221 100644 --- a/integration-tests/performance/cron_test.go +++ b/integration-tests/performance/cron_test.go @@ -12,15 +12,14 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" - "github.com/smartcontractkit/chainlink-env/environment" - 
"github.com/smartcontractkit/chainlink-env/logging" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" - "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver" - mockservercfg "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" - "github.com/smartcontractkit/chainlink-testing-framework/utils" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver" + mockservercfg "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg" + "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" @@ -44,7 +43,7 @@ func CleanupPerformanceTest( if chainClient != nil { chainClient.GasStats().PrintStats() } - err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, &testReporter, zapcore.PanicLevel, chainClient) + err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, &testReporter, zapcore.PanicLevel, chainClient) require.NoError(t, err, "Error tearing down environment") } @@ -109,7 +108,7 @@ func TestCronPerformance(t *testing.T) { func setupCronTest(t *testing.T) (testEnvironment *environment.Environment) { logging.Init() - network := networks.SelectedNetwork + network := networks.MustGetSelectedNetworksFromEnv()[0] evmConfig := ethereum.New(nil) if !network.Simulated { evmConfig = ethereum.New(ðereum.Props{ diff --git a/integration-tests/performance/directrequest_test.go b/integration-tests/performance/directrequest_test.go index 
4ff2b85619..1a3f1d2a01 100644 --- a/integration-tests/performance/directrequest_test.go +++ b/integration-tests/performance/directrequest_test.go @@ -1,7 +1,6 @@ package performance import ( - "context" "fmt" "math/big" "strings" @@ -11,13 +10,13 @@ import ( "github.com/onsi/gomega" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" - "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver" - mockservercfg "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver" + mockservercfg "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" @@ -25,6 +24,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/testsetups" + "github.com/smartcontractkit/chainlink/integration-tests/utils" "github.com/google/uuid" ) @@ -108,7 +108,7 @@ func TestDirectRequestPerformance(t *testing.T) { gom := gomega.NewGomegaWithT(t) gom.Eventually(func(g gomega.Gomega) { - d, err := consumer.Data(context.Background()) + d, err := consumer.Data(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting data from consumer 
contract shouldn't fail") g.Expect(d).ShouldNot(gomega.BeNil(), "Expected the initial on chain data to be nil") l.Debug().Int64("Data", d.Int64()).Msg("Found on chain") @@ -129,7 +129,7 @@ func TestDirectRequestPerformance(t *testing.T) { } func setupDirectRequestTest(t *testing.T) (testEnvironment *environment.Environment) { - network := networks.SelectedNetwork + network := networks.MustGetSelectedNetworksFromEnv()[0] evmConfig := ethereum.New(nil) if !network.Simulated { evmConfig = ethereum.New(ðereum.Props{ diff --git a/integration-tests/performance/flux_test.go b/integration-tests/performance/flux_test.go index 3f9db27c10..18b13ab907 100644 --- a/integration-tests/performance/flux_test.go +++ b/integration-tests/performance/flux_test.go @@ -1,7 +1,6 @@ package performance import ( - "context" "fmt" "math/big" "strings" @@ -12,13 +11,13 @@ import ( "github.com/google/uuid" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" - "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver" - mockservercfg "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver" + mockservercfg "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" @@ -26,6 +25,7 @@ import ( 
"github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/testsetups" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestFluxPerformance(t *testing.T) { @@ -83,7 +83,7 @@ func TestFluxPerformance(t *testing.T) { require.NoError(t, err, "Setting oracle options in the Flux Aggregator contract shouldn't fail") err = chainClient.WaitForEvents() require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail") - oracles, err := fluxInstance.GetOracles(context.Background()) + oracles, err := fluxInstance.GetOracles(utils.TestContext(t)) require.NoError(t, err, "Getting oracle details from the Flux aggregator contract shouldn't fail") l.Info().Str("Oracles", strings.Join(oracles, ",")).Msg("Oracles set") @@ -120,7 +120,7 @@ func TestFluxPerformance(t *testing.T) { chainClient.AddHeaderEventSubscription(fluxInstance.Address(), fluxRound) err = chainClient.WaitForEvents() require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail") - data, err := fluxInstance.GetContractData(context.Background()) + data, err := fluxInstance.GetContractData(utils.TestContext(t)) require.NoError(t, err, "Getting contract data from flux aggregator contract shouldn't fail") l.Info().Interface("Data", data).Msg("Round data") require.Equal(t, int64(1e5), data.LatestRoundData.Answer.Int64(), @@ -140,7 +140,7 @@ func TestFluxPerformance(t *testing.T) { require.NoError(t, err, "Setting value path in mock server shouldn't fail") err = chainClient.WaitForEvents() require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail") - data, err = fluxInstance.GetContractData(context.Background()) + data, err = fluxInstance.GetContractData(utils.TestContext(t)) require.NoError(t, err, "Getting contract data from flux aggregator contract shouldn't fail") require.Equal(t, int64(1e10), 
data.LatestRoundData.Answer.Int64(), "Expected latest round answer to be %d, but found %d", int64(1e10), data.LatestRoundData.Answer.Int64()) @@ -153,7 +153,7 @@ func TestFluxPerformance(t *testing.T) { l.Info().Interface("data", data).Msg("Round data") for _, oracleAddr := range nodeAddresses { - payment, _ := fluxInstance.WithdrawablePayment(context.Background(), oracleAddr) + payment, _ := fluxInstance.WithdrawablePayment(utils.TestContext(t), oracleAddr) require.Equal(t, int64(2), payment.Int64(), "Expected flux aggregator contract's withdrawable payment to be %d, but found %d", int64(2), payment.Int64()) } @@ -173,7 +173,7 @@ func TestFluxPerformance(t *testing.T) { } func setupFluxTest(t *testing.T) (testEnvironment *environment.Environment, testNetwork blockchain.EVMNetwork) { - testNetwork = networks.SelectedNetwork + testNetwork = networks.MustGetSelectedNetworksFromEnv()[0] evmConf := ethereum.New(nil) if !testNetwork.Simulated { evmConf = ethereum.New(ðereum.Props{ diff --git a/integration-tests/performance/keeper_test.go b/integration-tests/performance/keeper_test.go index 08ea95b434..8e273a96f6 100644 --- a/integration-tests/performance/keeper_test.go +++ b/integration-tests/performance/keeper_test.go @@ -2,7 +2,6 @@ package performance //revive:disable:dot-imports import ( - "context" "fmt" "math/big" "strings" @@ -12,12 +11,12 @@ import ( "github.com/onsi/gomega" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - eth "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" - "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver" - mockservercfg "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + 
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + eth "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver" + mockservercfg "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" @@ -26,6 +25,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" "github.com/smartcontractkit/chainlink/integration-tests/testsetups" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) var keeperDefaultRegistryConfig = contracts.KeeperRegistrySettings{ @@ -74,7 +74,7 @@ func TestKeeperPerformance(t *testing.T) { gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 10 for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(10)), "Expected consumer counter to be greater than 10, but got %d", counter.Int64()) @@ -84,7 +84,7 @@ func TestKeeperPerformance(t *testing.T) { // Cancel all the registered upkeeps via the registry for i := 0; i < len(upkeepIDs); i++ { - err := registry.CancelUpkeep(upkeepIDs[i]) + err = registry.CancelUpkeep(upkeepIDs[i]) require.NoError(t, err, "Could not cancel upkeep at index %d", i) } @@ -95,7 +95,7 @@ func TestKeeperPerformance(t *testing.T) { for i := 0; i < len(upkeepIDs); i++ { // Obtain the amount of times the upkeep has been executed so far - 
countersAfterCancellation[i], err = consumers[i].Counter(context.Background()) + countersAfterCancellation[i], err = consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) l.Info().Int("Index", i).Int64("Upkeeps Performed", countersAfterCancellation[i].Int64()).Msg("Cancelled Upkeep") } @@ -103,7 +103,7 @@ func TestKeeperPerformance(t *testing.T) { gom.Consistently(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { // Expect the counter to remain constant because the upkeep was cancelled, so it shouldn't increase anymore - latestCounter, err := consumers[i].Counter(context.Background()) + latestCounter, err := consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterCancellation[i].Int64()), "Expected consumer counter to remain constant at %d, but got %d", @@ -134,7 +134,7 @@ func setupKeeperTest( contracts.ContractDeployer, contracts.LinkToken, ) { - network := networks.SelectedNetwork + network := networks.MustGetSelectedNetworksFromEnv()[0] evmConfig := eth.New(nil) if !network.Simulated { evmConfig = eth.New(ð.Props{ diff --git a/integration-tests/performance/ocr_test.go b/integration-tests/performance/ocr_test.go index f468a0e037..47879cebb8 100644 --- a/integration-tests/performance/ocr_test.go +++ b/integration-tests/performance/ocr_test.go @@ -1,7 +1,6 @@ package performance import ( - "context" "fmt" "math/big" "strings" @@ -10,13 +9,13 @@ import ( "github.com/stretchr/testify/require" - "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" - "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver" - mockservercfg "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver-cfg" 
"github.com/smartcontractkit/chainlink-testing-framework/blockchain" ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver" + mockservercfg "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" @@ -25,6 +24,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/testsetups" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestOCRBasic(t *testing.T) { @@ -53,7 +53,7 @@ func TestOCRBasic(t *testing.T) { err = actions.FundChainlinkNodes(chainlinkNodes, chainClient, big.NewFloat(.05)) require.NoError(t, err, "Error funding Chainlink nodes") - ocrInstances, err := actions.DeployOCRContracts(1, linkTokenContract, contractDeployer, bootstrapNode, workerNodes, chainClient) + ocrInstances, err := actions.DeployOCRContracts(1, linkTokenContract, contractDeployer, workerNodes, chainClient) require.NoError(t, err) err = chainClient.WaitForEvents() require.NoError(t, err, "Error waiting for events") @@ -64,7 +64,7 @@ func TestOCRBasic(t *testing.T) { err = actions.StartNewRound(1, ocrInstances, chainClient, l) require.NoError(t, err) - answer, err := ocrInstances[0].GetLatestAnswer(context.Background()) + answer, err := ocrInstances[0].GetLatestAnswer(utils.TestContext(t)) require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail") require.Equal(t, int64(5), answer.Int64(), "Expected 
latest answer from OCR contract to be 5 but got %d", answer.Int64()) @@ -73,7 +73,7 @@ func TestOCRBasic(t *testing.T) { err = actions.StartNewRound(2, ocrInstances, chainClient, l) require.NoError(t, err) - answer, err = ocrInstances[0].GetLatestAnswer(context.Background()) + answer, err = ocrInstances[0].GetLatestAnswer(utils.TestContext(t)) require.NoError(t, err, "Error getting latest OCR answer") require.Equal(t, int64(10), answer.Int64(), "Expected latest answer from OCR contract to be 10 but got %d", answer.Int64()) } @@ -90,7 +90,7 @@ func TestOCRBasic(t *testing.T) { } func setupOCRTest(t *testing.T) (testEnvironment *environment.Environment, testNetwork blockchain.EVMNetwork) { - testNetwork = networks.SelectedNetwork + testNetwork = networks.MustGetSelectedNetworksFromEnv()[0] evmConfig := ethereum.New(nil) if !testNetwork.Simulated { evmConfig = ethereum.New(ðereum.Props{ diff --git a/integration-tests/performance/vrf_test.go b/integration-tests/performance/vrf_test.go index 510e378eb8..7a38a45495 100644 --- a/integration-tests/performance/vrf_test.go +++ b/integration-tests/performance/vrf_test.go @@ -1,7 +1,6 @@ package performance import ( - "context" "fmt" "math/big" "strings" @@ -12,10 +11,10 @@ import ( "github.com/onsi/gomega" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" @@ -23,6 +22,7 @@ import ( 
"github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/testsetups" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestVRFBasic(t *testing.T) { @@ -97,7 +97,7 @@ func TestVRFBasic(t *testing.T) { encodedProvingKeys := make([][2]*big.Int, 0) encodedProvingKeys = append(encodedProvingKeys, provingKey) - requestHash, err := coordinator.HashOfKey(context.Background(), encodedProvingKeys[0]) + requestHash, err := coordinator.HashOfKey(utils.TestContext(t), encodedProvingKeys[0]) require.NoError(t, err, "Getting Hash of encoded proving keys shouldn't fail") err = consumer.RequestRandomness(requestHash, big.NewInt(1)) require.NoError(t, err, "Requesting randomness shouldn't fail") @@ -108,7 +108,7 @@ func TestVRFBasic(t *testing.T) { jobRuns, err := chainlinkNodes[0].MustReadRunsByJob(job.Data.ID) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Job execution shouldn't fail") - out, err := consumer.RandomnessOutput(context.Background()) + out, err := consumer.RandomnessOutput(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting the randomness output of the consumer shouldn't fail") // Checks that the job has actually run g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically(">=", 1), @@ -135,7 +135,7 @@ func TestVRFBasic(t *testing.T) { } func setupVRFTest(t *testing.T) (testEnvironment *environment.Environment, testNetwork blockchain.EVMNetwork) { - testNetwork = networks.SelectedNetwork + testNetwork = networks.MustGetSelectedNetworksFromEnv()[0] evmConfig := ethereum.New(nil) if !testNetwork.Simulated { evmConfig = ethereum.New(ðereum.Props{ diff --git a/integration-tests/reorg/automation_reorg_test.go b/integration-tests/reorg/automation_reorg_test.go index ccea490654..58cd147201 100644 --- a/integration-tests/reorg/automation_reorg_test.go +++ 
b/integration-tests/reorg/automation_reorg_test.go @@ -2,28 +2,28 @@ package reorg //revive:disable:dot-imports import ( - "context" "fmt" "math/big" "testing" "time" "github.com/onsi/gomega" - "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/pkg/cdk8s/blockscout" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/reorg" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/cdk8s/blockscout" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/reorg" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" - "github.com/smartcontractkit/chainlink-testing-framework/utils" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zapcore" "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" + it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils" ) var ( @@ -47,7 +47,7 @@ HistoryDepth = 400 [EVM.GasEstimator] Mode = 'FixedPrice' LimitDefault = 5_000_000` - activeEVMNetwork = networks.SelectedNetwork + activeEVMNetwork = networks.MustGetSelectedNetworksFromEnv()[0] defaultAutomationSettings = map[string]interface{}{ "toml": client.AddNetworkDetailedConfig(baseTOML, networkTOML, activeEVMNetwork), "db": map[string]interface{}{ @@ -132,9 +132,11 @@ func TestAutomationReorg(t *testing.T) { } for name, registryVersion 
:= range registryVersions { + name := name + registryVersion := registryVersion t.Run(name, func(t *testing.T) { t.Parallel() - network := networks.SelectedNetwork + network := networks.MustGetSelectedNetworksFromEnv()[0] defaultAutomationSettings["replicas"] = numberOfNodes cd := chainlink.New(0, defaultAutomationSettings) @@ -166,7 +168,7 @@ func TestAutomationReorg(t *testing.T) { // Register cleanup for any test t.Cleanup(func() { - err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.PanicLevel, chainClient) + err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.PanicLevel, chainClient) require.NoError(t, err, "Error tearing down environment") }) @@ -208,7 +210,7 @@ func TestAutomationReorg(t *testing.T) { gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 5 for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(it_utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) expect := 5 l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") @@ -239,7 +241,7 @@ func TestAutomationReorg(t *testing.T) { gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analyzing their counters and checking they reach 10 for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(it_utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) expect := 10 l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") @@ -249,13 +251,14 @@ func TestAutomationReorg(t *testing.T) { }, "5m", 
"1s").Should(gomega.Succeed()) l.Info().Msg("Upkeep performed during unstable chain, waiting for reorg to finish") - rc.WaitDepthReached() + err = rc.WaitDepthReached() + require.NoError(t, err) l.Info().Msg("Reorg finished, chain should be stable now. Expecting upkeeps to keep getting performed") gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analyzing their counters and checking they reach 20 for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(it_utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) expect := 20 l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") diff --git a/integration-tests/reorg/log_poller_maybe_reorg_test.go b/integration-tests/reorg/log_poller_maybe_reorg_test.go new file mode 100644 index 0000000000..d319e39aa2 --- /dev/null +++ b/integration-tests/reorg/log_poller_maybe_reorg_test.go @@ -0,0 +1,43 @@ +package reorg + +import ( + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + + logpoller "github.com/smartcontractkit/chainlink/integration-tests/universal/log_poller" +) + +func TestLogPollerFromEnv(t *testing.T) { + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 2, + EventsPerTx: 100, + UseFinalityTag: true, + }, + LoopedConfig: &logpoller.LoopedConfig{ + ContractConfig: logpoller.ContractConfig{ + ExecutionCount: 100, + }, + FuzzConfig: logpoller.FuzzConfig{ + MinEmitWaitTimeMs: 400, + MaxEmitWaitTimeMs: 600, + }, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit + err := cfg.OverrideFromEnv() + if err != nil { + t.Errorf("failed to override config from env: %v", err) + 
t.FailNow() + } + + logpoller.ExecuteCILogPollerTest(t, &cfg) +} diff --git a/integration-tests/reorg/reorg_confirmer.go b/integration-tests/reorg/reorg_confirmer.go index 6647816c97..2193131680 100644 --- a/integration-tests/reorg/reorg_confirmer.go +++ b/integration-tests/reorg/reorg_confirmer.go @@ -2,20 +2,21 @@ package reorg import ( "context" + "fmt" "math/big" "sync" "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" - "github.com/pkg/errors" "github.com/rs/zerolog/log" - "github.com/smartcontractkit/chainlink-env/chaos" - "github.com/smartcontractkit/chainlink-env/environment" - a "github.com/smartcontractkit/chainlink-env/pkg/alias" - "github.com/smartcontractkit/chainlink-env/pkg/helm/reorg" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/reorg" + + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) // The steps are: @@ -69,7 +70,7 @@ type ReorgController struct { // NewReorgController creates a type that can create reorg chaos and confirm reorg has happened func NewReorgController(cfg *ReorgConfig) (*ReorgController, error) { if len(cfg.Network.GetClients()) == 1 { - return nil, errors.New("need at least 3 nodes to re-org") + return nil, fmt.Errorf("need at least 3 nodes to re-org") } ctx, ctxCancel := context.WithTimeout(context.Background(), cfg.Timeout) rc := &ReorgController{ @@ -164,7 +165,7 @@ func (rc *ReorgController) VerifyReorgComplete() error { } } if rc.currentVerifiedBlocks+1 < rc.ReorgDepth { - return errors.New("Reorg depth has not met") + return fmt.Errorf("Reorg depth has not met") } return nil } @@ -216,7 +217,7 @@ func (rc *ReorgController) Wait() error { if rc.complete { return nil } - return errors.New("timeout waiting for reorg to complete") + return fmt.Errorf("timeout 
waiting for reorg to complete") } // forkNetwork stomp the network between target reorged node and the rest @@ -231,8 +232,8 @@ func (rc *ReorgController) forkNetwork(header blockchain.NodeHeader) error { rc.cfg.Env.Cfg.Namespace, &chaos.Props{ DurationStr: "999h", - FromLabels: &map[string]*string{"app": a.Str(reorg.TXNodesAppLabel)}, - ToLabels: &map[string]*string{"app": a.Str(reorg.MinerNodesAppLabel)}, + FromLabels: &map[string]*string{"app": utils.Ptr(reorg.TXNodesAppLabel)}, + ToLabels: &map[string]*string{"app": utils.Ptr(reorg.MinerNodesAppLabel)}, }, )) rc.chaosExperimentName = expName diff --git a/integration-tests/reorg/reorg_test.go b/integration-tests/reorg/reorg_test.go index 74468b9253..d5fefdbc56 100644 --- a/integration-tests/reorg/reorg_test.go +++ b/integration-tests/reorg/reorg_test.go @@ -1,7 +1,6 @@ package reorg import ( - "context" "fmt" "math/big" "os" @@ -13,24 +12,25 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" - "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/logging" - "github.com/smartcontractkit/chainlink-env/pkg/cdk8s/blockscout" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver" - mockservercfg "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver-cfg" - "github.com/smartcontractkit/chainlink-env/pkg/helm/reorg" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" - "github.com/smartcontractkit/chainlink-testing-framework/utils" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/cdk8s/blockscout" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver" + mockservercfg 
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/reorg" + "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/onsi/gomega" "github.com/rs/zerolog/log" + "github.com/smartcontractkit/chainlink-testing-framework/networks" "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" + it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils" ) const ( @@ -85,7 +85,7 @@ func CleanupReorgTest( if chainClient != nil { chainClient.GasStats().PrintStats() } - err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.PanicLevel, chainClient) + err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.PanicLevel, chainClient) require.NoError(t, err, "Error tearing down environment") } @@ -221,7 +221,7 @@ func TestDirectRequestReorg(t *testing.T) { gom := gomega.NewGomegaWithT(t) gom.Eventually(func(g gomega.Gomega) { - d, err := consumer.Data(context.Background()) + d, err := consumer.Data(it_utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting data from consumer contract shouldn't fail") g.Expect(d).ShouldNot(gomega.BeNil(), "Expected the initial on chain data to be nil") log.Debug().Int64("Data", d.Int64()).Msg("Found on chain") diff --git a/integration-tests/runner_helpers.go b/integration-tests/runner_helpers.go index 43268a703a..def2ebdc1d 100644 --- a/integration-tests/runner_helpers.go +++ b/integration-tests/runner_helpers.go @@ -122,7 +122,7 @@ func collectBranchesAndTags(results chan []string, errChan chan error) { go func() { stdOut, stdErr, err := gh.Exec("api", fmt.Sprintf("repos/%s/branches", chainlinkRepo), "-q", ".[][\"name\"]", "--paginate") if err != nil { - errChan <- fmt.Errorf("%v: %s", 
err, stdErr.String()) + errChan <- fmt.Errorf("%w: %s", err, stdErr.String()) } branches := strings.Split(stdOut.String(), "\n") cleanBranches := []string{} @@ -139,7 +139,7 @@ func collectBranchesAndTags(results chan []string, errChan chan error) { go func() { stdOut, stdErr, err := gh.Exec("api", fmt.Sprintf("repos/%s/tags", chainlinkRepo), "-q", ".[][\"name\"]", "--paginate") if err != nil { - errChan <- fmt.Errorf("%v: %s", err, stdErr.String()) + errChan <- fmt.Errorf("%w: %s", err, stdErr.String()) } tags := strings.Split(stdOut.String(), "\n") cleanTags := []string{} diff --git a/integration-tests/smoke/automation_test.go b/integration-tests/smoke/automation_test.go index 9b79e1bea4..1a093a8815 100644 --- a/integration-tests/smoke/automation_test.go +++ b/integration-tests/smoke/automation_test.go @@ -1,7 +1,6 @@ package smoke import ( - "context" "encoding/json" "fmt" "math/big" @@ -11,9 +10,8 @@ import ( "testing" "time" - "github.com/kelseyhightower/envconfig" - "github.com/ethereum/go-ethereum/common" + "github.com/kelseyhightower/envconfig" "github.com/onsi/gomega" "github.com/stretchr/testify/require" @@ -33,7 +31,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" - it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) var utilsABI = cltypes.MustGetABI(automation_utils_2_1.AutomationUtilsABI) @@ -79,9 +77,9 @@ var ( func TestMain(m *testing.M) { logging.Init() - fmt.Printf("Running Smoke Test on %s\n", networks.SelectedNetwork.Name) // Print to get around disabled logging - fmt.Printf("Chainlink Image %s\n", os.Getenv("CHAINLINK_IMAGE")) // Print to get around disabled logging - fmt.Printf("Chainlink Version %s\n", os.Getenv("CHAINLINK_VERSION")) // Print to get around disabled 
logging + fmt.Printf("Running Smoke Test on %s\n", networks.MustGetSelectedNetworksFromEnv()[0].Name) // Print to get around disabled logging + fmt.Printf("Chainlink Image %s\n", os.Getenv("CHAINLINK_IMAGE")) // Print to get around disabled logging + fmt.Printf("Chainlink Version %s\n", os.Getenv("CHAINLINK_VERSION")) // Print to get around disabled logging os.Exit(m.Run()) } @@ -111,7 +109,6 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) { upgradeImage string upgradeVersion string err error - testName = "basic-upkeep" ) if nodeUpgrade { upgradeImage = os.Getenv("UPGRADE_IMAGE") @@ -119,7 +116,6 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) { if len(upgradeImage) == 0 || len(upgradeVersion) == 0 { t.Fatal("UPGRADE_IMAGE and UPGRADE_VERSION must be set to upgrade nodes") } - testName = "node-upgrade" } // Use the name to determine if this is a log trigger or mercury @@ -129,7 +125,7 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) { isMercury := isMercuryV02 || isMercuryV03 chainClient, _, contractDeployer, linkToken, registry, registrar, testEnv := setupAutomationTestDocker( - t, testName, registryVersion, defaultOCRRegistryConfig, nodeUpgrade, isMercuryV02, isMercuryV03, + t, registryVersion, defaultOCRRegistryConfig, isMercuryV02, isMercuryV03, ) consumers, upkeepIDs := actions.DeployConsumers( @@ -173,7 +169,7 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) { gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analyzing their counters for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) expect := 5 l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep Index", i).Msg("Number of upkeeps performed") @@ -188,13 +184,14 @@ func SetupAutomationBasic(t *testing.T, 
nodeUpgrade bool) { expect := 5 // Upgrade the nodes one at a time and check that the upkeeps are still being performed for i := 0; i < 5; i++ { - actions.UpgradeChainlinkNodeVersionsLocal(upgradeImage, upgradeVersion, testEnv.ClCluster.Nodes[i]) + err = actions.UpgradeChainlinkNodeVersionsLocal(upgradeImage, upgradeVersion, testEnv.ClCluster.Nodes[i]) + require.NoError(t, err, "Error when upgrading node %d", i) time.Sleep(time.Second * 10) expect = expect + 5 gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are increasing by 5 in each step within 5 minutes for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") g.Expect(counter.Int64()).Should(gomega.BeNumerically(">=", int64(expect)), @@ -217,7 +214,7 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) { for i := 0; i < len(upkeepIDs); i++ { // Obtain the amount of times the upkeep has been executed so far - countersAfterCancellation[i], err = consumers[i].Counter(context.Background()) + countersAfterCancellation[i], err = consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) l.Info().Int64("Upkeep Count", countersAfterCancellation[i].Int64()).Int("Upkeep Index", i).Msg("Cancelled upkeep") } @@ -226,7 +223,7 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) { gom.Consistently(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { // Expect the counter to remain constant (At most increase by 1 to account for stale performs) because the upkeep was cancelled - latestCounter, err := consumers[i].Counter(context.Background()) + 
latestCounter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterCancellation[i].Int64()+1), "Expected consumer counter to remain less than or equal to %d, but got %d", @@ -242,7 +239,7 @@ func TestSetUpkeepTriggerConfig(t *testing.T) { l := logging.GetTestLogger(t) chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker( - t, "set-trigger-config", ethereum.RegistryVersion_2_1, defaultOCRRegistryConfig, false, false, false, + t, ethereum.RegistryVersion_2_1, defaultOCRRegistryConfig, false, false, ) consumers, upkeepIDs := actions.DeployConsumers( @@ -272,7 +269,7 @@ func TestSetUpkeepTriggerConfig(t *testing.T) { gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analyzing their counters for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) expect := 5 l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep Index", i).Msg("Number of upkeeps performed") @@ -329,7 +326,7 @@ func TestSetUpkeepTriggerConfig(t *testing.T) { time.Sleep(10 * time.Second) for i := 0; i < len(upkeepIDs); i++ { // Obtain the amount of times the upkeep has been executed so far - countersAfterSetNoMatch[i], err = consumers[i].Counter(context.Background()) + countersAfterSetNoMatch[i], err = consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) l.Info().Int64("Upkeep Count", countersAfterSetNoMatch[i].Int64()).Int("Upkeep Index", i).Msg("Upkeep") } @@ -339,7 +336,7 @@ func TestSetUpkeepTriggerConfig(t *testing.T) { for i := 0; i < 
len(upkeepIDs); i++ { // Expect the counter to remain constant (At most increase by 2 to account for stale performs) because the upkeep trigger config is not met bufferCount := int64(2) - latestCounter, err := consumers[i].Counter(context.Background()) + latestCounter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterSetNoMatch[i].Int64()+bufferCount), "Expected consumer counter to remain less than or equal to %d, but got %d", @@ -375,7 +372,7 @@ func TestSetUpkeepTriggerConfig(t *testing.T) { for i := 0; i < len(upkeepIDs); i++ { // Obtain the amount of times the upkeep has been executed so far - countersAfterSetMatch[i], err = consumers[i].Counter(context.Background()) + countersAfterSetMatch[i], err = consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) l.Info().Int64("Upkeep Count", countersAfterSetMatch[i].Int64()).Int("Upkeep Index", i).Msg("Upkeep") } @@ -394,7 +391,7 @@ func TestSetUpkeepTriggerConfig(t *testing.T) { gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analyzing their counters for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) expect := int64(5) l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep Index", i).Msg("Number of upkeeps performed") @@ -417,7 +414,7 @@ func TestAutomationAddFunds(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker( - t, "add-funds", registryVersion, defaultOCRRegistryConfig, false, false, false, 
+ t, registryVersion, defaultOCRRegistryConfig, false, false, ) consumers, upkeepIDs := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, defaultAmountOfUpkeeps, big.NewInt(1), automationDefaultUpkeepGasLimit, false, false) @@ -425,7 +422,7 @@ func TestAutomationAddFunds(t *testing.T) { gom := gomega.NewGomegaWithT(t) // Since the upkeep is currently underfunded, check that it doesn't get executed gom.Consistently(func(g gomega.Gomega) { - counter, err := consumers[0].Counter(context.Background()) + counter, err := consumers[0].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)), "Expected consumer counter to remain zero, but got %d", counter.Int64()) @@ -445,7 +442,7 @@ func TestAutomationAddFunds(t *testing.T) { // Now the new upkeep should be performing because we added enough funds gom.Eventually(func(g gomega.Gomega) { - counter, err := consumers[0].Counter(context.Background()) + counter, err := consumers[0].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), "Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64()) @@ -468,7 +465,7 @@ func TestAutomationPauseUnPause(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker( - t, "pause-unpause", registryVersion, defaultOCRRegistryConfig, false, false, false, + t, registryVersion, defaultOCRRegistryConfig, false, false, ) consumers, upkeepIDs := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, false, false) @@ -477,7 +474,7 @@ func 
TestAutomationPauseUnPause(t *testing.T) { gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 5 for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(5)), "Expected consumer counter to be greater than 5, but got %d", counter.Int64()) @@ -497,7 +494,7 @@ func TestAutomationPauseUnPause(t *testing.T) { var countersAfterPause = make([]*big.Int, len(upkeepIDs)) for i := 0; i < len(upkeepIDs); i++ { // Obtain the amount of times the upkeep has been executed so far - countersAfterPause[i], err = consumers[i].Counter(context.Background()) + countersAfterPause[i], err = consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) l.Info().Int("Upkeep Index", i).Int64("Upkeeps Performed", countersAfterPause[i].Int64()).Msg("Paused Upkeep") } @@ -506,7 +503,7 @@ func TestAutomationPauseUnPause(t *testing.T) { for i := 0; i < len(upkeepIDs); i++ { // In most cases counters should remain constant, but there might be a straggling perform tx which // gets committed later and increases counter by 1 - latestCounter, err := consumers[i].Counter(context.Background()) + latestCounter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterPause[i].Int64()+1), "Expected consumer counter not have increased more than %d, but got %d", @@ -526,7 +523,7 @@ func TestAutomationPauseUnPause(t *testing.T) { gom.Eventually(func(g gomega.Gomega) { // Check if the 
upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 + numbers of performing before pause for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", countersAfterPause[i].Int64()+1), "Expected consumer counter to be greater than %d, but got %d", countersAfterPause[i].Int64()+1, counter.Int64()) @@ -551,7 +548,7 @@ func TestAutomationRegisterUpkeep(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker( - t, "register-upkeep", registryVersion, defaultOCRRegistryConfig, false, false, false, + t, registryVersion, defaultOCRRegistryConfig, false, false, ) consumers, upkeepIDs := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, false, false) @@ -562,7 +559,7 @@ func TestAutomationRegisterUpkeep(t *testing.T) { // store the value of their initial counters in order to compare later on that the value increased. gom.Eventually(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) initialCounters[i] = counter g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), @@ -582,7 +579,7 @@ func TestAutomationRegisterUpkeep(t *testing.T) { // Test that the newly registered upkeep is also performing. 
gom.Eventually(func(g gomega.Gomega) { - counter, err := newUpkeep.Counter(context.Background()) + counter, err := newUpkeep.Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling newly deployed upkeep's counter shouldn't fail") g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), "Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64()) @@ -591,7 +588,7 @@ func TestAutomationRegisterUpkeep(t *testing.T) { gom.Eventually(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { - currentCounter, err := consumers[i].Counter(context.Background()) + currentCounter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") l.Info(). @@ -622,7 +619,7 @@ func TestAutomationPauseRegistry(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker( - t, "pause-registry", registryVersion, defaultOCRRegistryConfig, false, false, false, + t, registryVersion, defaultOCRRegistryConfig, false, false, ) consumers, upkeepIDs := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, false, false) @@ -631,7 +628,7 @@ func TestAutomationPauseRegistry(t *testing.T) { // Observe that the upkeeps which are initially registered are performing gom.Eventually(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), "Expected consumer counter to be greater than 0, but got %d") @@ -647,7 
+644,7 @@ func TestAutomationPauseRegistry(t *testing.T) { // Store how many times each upkeep performed once the registry was successfully paused var countersAfterPause = make([]*big.Int, len(upkeepIDs)) for i := 0; i < len(upkeepIDs); i++ { - countersAfterPause[i], err = consumers[i].Counter(context.Background()) + countersAfterPause[i], err = consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) } @@ -655,7 +652,7 @@ func TestAutomationPauseRegistry(t *testing.T) { // because they are no longer getting serviced gom.Consistently(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { - latestCounter, err := consumers[i].Counter(context.Background()) + latestCounter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterPause[i].Int64()), "Expected consumer counter to remain constant at %d, but got %d", @@ -680,7 +677,7 @@ func TestAutomationKeeperNodesDown(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) chainClient, chainlinkNodes, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker( - t, "keeper-nodes-down", registryVersion, defaultOCRRegistryConfig, false, false, false, + t, registryVersion, defaultOCRRegistryConfig, false, false, ) consumers, upkeepIDs := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, false, false) @@ -692,7 +689,7 @@ func TestAutomationKeeperNodesDown(t *testing.T) { // Watch upkeeps being performed and store their counters in order to compare them later in the test gom.Eventually(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, 
err := consumers[i].Counter(utils.TestContext(t)) initialCounters[i] = counter g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), @@ -711,7 +708,7 @@ func TestAutomationKeeperNodesDown(t *testing.T) { // Assert that upkeeps are still performed and their counters have increased gom.Eventually(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { - currentCounter, err := consumers[i].Counter(context.Background()) + currentCounter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", initialCounters[i].Int64()), "Expected counter to have increased from initial value of %s, but got %s", @@ -732,7 +729,7 @@ func TestAutomationKeeperNodesDown(t *testing.T) { // See how many times each upkeep was executed var countersAfterNoMoreNodes = make([]*big.Int, len(upkeepIDs)) for i := 0; i < len(upkeepIDs); i++ { - countersAfterNoMoreNodes[i], err = consumers[i].Counter(context.Background()) + countersAfterNoMoreNodes[i], err = consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) l.Info().Int("Upkeep Index", i).Int64("Performed", countersAfterNoMoreNodes[i].Int64()).Msg("Upkeeps Performed") } @@ -741,7 +738,7 @@ func TestAutomationKeeperNodesDown(t *testing.T) { // all the nodes were taken down gom.Consistently(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { - latestCounter, err := consumers[i].Counter(context.Background()) + latestCounter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", 
countersAfterNoMoreNodes[i].Int64()+1), "Expected consumer counter to not have increased more than %d, but got %d", @@ -765,7 +762,7 @@ func TestAutomationPerformSimulation(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker( - t, "perform-simulation", registryVersion, defaultOCRRegistryConfig, false, false, false, + t, registryVersion, defaultOCRRegistryConfig, false, false, ) consumersPerformance, _ := actions.DeployPerformanceConsumers( @@ -790,7 +787,7 @@ func TestAutomationPerformSimulation(t *testing.T) { // Initially performGas is set high, so performUpkeep reverts and no upkeep should be performed gom.Consistently(func(g gomega.Gomega) { // Consumer count should remain at 0 - cnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") g.Expect(cnt.Int64()).Should(gomega.Equal(int64(0)), "Expected consumer counter to remain constant at %d, but got %d", 0, cnt.Int64(), @@ -798,14 +795,14 @@ func TestAutomationPerformSimulation(t *testing.T) { }, "2m", "1s").Should(gomega.Succeed()) // ~1m for setup, 1m assertion // Set performGas on consumer to be low, so that performUpkeep starts becoming successful - err := consumerPerformance.SetPerformGasToBurn(context.Background(), big.NewInt(100000)) + err := consumerPerformance.SetPerformGasToBurn(utils.TestContext(t), big.NewInt(100000)) require.NoError(t, err, "Perform gas should be set successfully on consumer") err = chainClient.WaitForEvents() require.NoError(t, err, "Error waiting for set perform gas tx") // Upkeep should now start performing gom.Eventually(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) 
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)), "Expected consumer counter to be greater than 0, but got %d", cnt.Int64(), @@ -829,7 +826,7 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) chainClient, chainlinkNodes, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker( - t, "gas-limit", registryVersion, defaultOCRRegistryConfig, false, false, false, + t, registryVersion, defaultOCRRegistryConfig, false, false, ) consumersPerformance, upkeepIDs := actions.DeployPerformanceConsumers( @@ -855,7 +852,7 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) { // Initially performGas is set higher than defaultUpkeepGasLimit, so no upkeep should be performed gom.Consistently(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") g.Expect(cnt.Int64()).Should( gomega.Equal(int64(0)), @@ -871,7 +868,7 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) { // Upkeep should now start performing gom.Eventually(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)), "Expected consumer counter to be greater than 0, but got %d", cnt.Int64(), @@ -879,19 +876,19 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) { }, "2m", "1s").Should(gomega.Succeed()) // ~1m to perform once, 1m buffer // Now increase the checkGasBurn on consumer, upkeep should stop performing - err = 
consumerPerformance.SetCheckGasToBurn(context.Background(), big.NewInt(3000000)) + err = consumerPerformance.SetCheckGasToBurn(utils.TestContext(t), big.NewInt(3000000)) require.NoError(t, err, "Check gas burn should be set successfully on consumer") err = chainClient.WaitForEvents() require.NoError(t, err, "Error waiting for SetCheckGasToBurn tx") // Get existing performed count - existingCnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + existingCnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) require.NoError(t, err, "Calling consumer's counter shouldn't fail") l.Info().Int64("Upkeep counter", existingCnt.Int64()).Msg("Upkeep counter when check gas increased") // In most cases count should remain constant, but it might increase by upto 1 due to pending perform gom.Consistently(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") g.Expect(cnt.Int64()).Should( gomega.BeNumerically("<=", existingCnt.Int64()+1), @@ -899,7 +896,7 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) { ) }, "1m", "1s").Should(gomega.Succeed()) - existingCnt, err = consumerPerformance.GetUpkeepCount(context.Background()) + existingCnt, err = consumerPerformance.GetUpkeepCount(utils.TestContext(t)) require.NoError(t, err, "Calling consumer's counter shouldn't fail") existingCntInt := existingCnt.Int64() l.Info().Int64("Upkeep counter", existingCntInt).Msg("Upkeep counter when consistently block finished") @@ -919,7 +916,7 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) { // Upkeep should start performing again, and it should get regularly performed gom.Eventually(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) 
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", existingCntInt), "Expected consumer counter to be greater than %d, but got %d", existingCntInt, cnt.Int64(), @@ -943,7 +940,7 @@ func TestUpdateCheckData(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker( - t, "update-check-data", registryVersion, defaultOCRRegistryConfig, false, false, false, + t, registryVersion, defaultOCRRegistryConfig, false, false, ) performDataChecker, upkeepIDs := actions.DeployPerformDataCheckerConsumers( @@ -963,7 +960,7 @@ func TestUpdateCheckData(t *testing.T) { gom.Consistently(func(g gomega.Gomega) { // expect the counter to remain 0 because perform data does not match for i := 0; i < len(upkeepIDs); i++ { - counter, err := performDataChecker[i].Counter(context.Background()) + counter, err := performDataChecker[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve perform data checker"+ " for upkeep at index "+strconv.Itoa(i)) g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)), @@ -982,7 +979,7 @@ func TestUpdateCheckData(t *testing.T) { // retrieve new check data for all upkeeps for i := 0; i < len(upkeepIDs); i++ { - upkeep, err := registry.GetUpkeepInfo(context.Background(), upkeepIDs[i]) + upkeep, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepIDs[i]) require.NoError(t, err, "Failed to get upkeep info at index %d", i) require.Equal(t, []byte(automationExpectedData), upkeep.CheckData, "Upkeep data not as expected") } @@ -990,7 +987,7 @@ func TestUpdateCheckData(t *testing.T) { gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 for i := 0; i < len(upkeepIDs); i++ { - counter, err := 
performDataChecker[i].Counter(context.Background()) + counter, err := performDataChecker[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve perform data checker counter"+ " for upkeep at index "+strconv.Itoa(i)) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), @@ -1008,10 +1005,8 @@ type TestConfig struct { func setupAutomationTestDocker( t *testing.T, - testName string, registryVersion ethereum.KeeperRegistryVersion, registryConfig contracts.KeeperRegistrySettings, - statefulDb bool, isMercuryV02 bool, isMercuryV03 bool, ) ( @@ -1028,16 +1023,16 @@ func setupAutomationTestDocker( l := logging.GetTestLogger(t) // Add registry version to config registryConfig.RegistryVersion = registryVersion - network := networks.SelectedNetwork + network := networks.MustGetSelectedNetworksFromEnv()[0] // build the node config clNodeConfig := node.NewConfig(node.NewBaseConfig()) syncInterval := models.MustMakeDuration(5 * time.Minute) - clNodeConfig.Feature.LogPoller = it_utils.Ptr[bool](true) - clNodeConfig.OCR2.Enabled = it_utils.Ptr[bool](true) - clNodeConfig.Keeper.TurnLookBack = it_utils.Ptr[int64](int64(0)) + clNodeConfig.Feature.LogPoller = utils.Ptr[bool](true) + clNodeConfig.OCR2.Enabled = utils.Ptr[bool](true) + clNodeConfig.Keeper.TurnLookBack = utils.Ptr[int64](int64(0)) clNodeConfig.Keeper.Registry.SyncInterval = &syncInterval - clNodeConfig.Keeper.Registry.PerformGasOverhead = it_utils.Ptr[uint32](uint32(150000)) + clNodeConfig.Keeper.Registry.PerformGasOverhead = utils.Ptr[uint32](uint32(150000)) clNodeConfig.P2P.V2.AnnounceAddresses = &[]string{"0.0.0.0:6690"} clNodeConfig.P2P.V2.ListenAddresses = &[]string{"0.0.0.0:6690"} @@ -1071,8 +1066,8 @@ func setupAutomationTestDocker( var httpUrls []string var wsUrls []string if network.Simulated { - httpUrls = []string{env.Geth.InternalHttpUrl} - wsUrls = []string{env.Geth.InternalWsUrl} + httpUrls = []string{env.RpcProvider.PrivateHttpUrls()[0]} + wsUrls 
= []string{env.RpcProvider.PrivateWsUrsl()[0]} } else { httpUrls = network.HTTPURLs wsUrls = network.URLs @@ -1087,11 +1082,13 @@ func setupAutomationTestDocker( if isMercuryV02 { output := `{"chainlinkBlob":"0x0001c38d71fed6c320b90e84b6f559459814d068e2a1700adc931ca9717d4fe70000000000000000000000000000000000000000000000000000000001a80b52b4bf1233f9cb71144a253a1791b202113c4ab4a92fa1b176d684b4959666ff8200000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001004254432d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000645570be000000000000000000000000000000000000000000000000000002af2b818dc5000000000000000000000000000000000000000000000000000002af2426faf3000000000000000000000000000000000000000000000000000002af32dc209700000000000000000000000000000000000000000000000000000000012130f8df0a9745bb6ad5e2df605e158ba8ad8a33ef8a0acf9851f0f01668a3a3f2b68600000000000000000000000000000000000000000000000000000000012130f60000000000000000000000000000000000000000000000000000000000000002c4a7958dce105089cf5edb68dad7dcfe8618d7784eb397f97d5a5fade78c11a58275aebda478968e545f7e3657aba9dcbe8d44605e4c6fde3e24edd5e22c94270000000000000000000000000000000000000000000000000000000000000002459c12d33986018a8959566d145225f0c4a4e61a9a3f50361ccff397899314f0018162cf10cd89897635a0bb62a822355bd199d09f4abe76e4d05261bb44733d"}` - env.MockAdapter.SetStringValuePath("/client", []string{http.MethodGet, http.MethodPost}, map[string]string{"Content-Type": "application/json"}, output) + err = env.MockAdapter.SetStringValuePath("/client", []string{http.MethodGet, http.MethodPost}, map[string]string{"Content-Type": "application/json"}, output) + require.NoError(t, err) } if isMercuryV03 { output := 
`{"reports":[{"feedID":"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000","validFromTimestamp":0,"observationsTimestamp":0,"fullReport":"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb0000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c911963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"}]}` - env.MockAdapter.SetStringValuePath("/api/v1/reports/bulk", []string{http.MethodGet, http.MethodPost}, map[string]string{"Content-Type": "application/json"}, output) + err = env.MockAdapter.SetStringValuePath("/api/v1/reports/bulk", []string{http.MethodGet, http.MethodPost}, map[string]string{"Content-Type": "application/json"}, output) + require.NoError(t, 
err) } } else { env, err = test_env.NewCLTestEnvBuilder(). diff --git a/integration-tests/smoke/flux_test.go b/integration-tests/smoke/flux_test.go index 8c2b3638bf..2997ff1c74 100644 --- a/integration-tests/smoke/flux_test.go +++ b/integration-tests/smoke/flux_test.go @@ -1,7 +1,6 @@ package smoke import ( - "context" "fmt" "math/big" "net/http" @@ -19,6 +18,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestFluxBasic(t *testing.T) { @@ -74,7 +74,7 @@ func TestFluxBasic(t *testing.T) { err = env.EVMClient.WaitForEvents() require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail") - oracles, err := fluxInstance.GetOracles(context.Background()) + oracles, err := fluxInstance.GetOracles(utils.TestContext(t)) require.NoError(t, err, "Getting oracle details from the Flux aggregator contract shouldn't fail") l.Info().Str("Oracles", strings.Join(oracles, ",")).Msg("Oracles set") @@ -108,7 +108,7 @@ func TestFluxBasic(t *testing.T) { env.EVMClient.AddHeaderEventSubscription(fluxInstance.Address(), fluxRound) err = env.EVMClient.WaitForEvents() require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail") - data, err := fluxInstance.GetContractData(context.Background()) + data, err := fluxInstance.GetContractData(utils.TestContext(t)) require.NoError(t, err, "Getting contract data from flux aggregator contract shouldn't fail") require.Equal(t, int64(1e5), data.LatestRoundData.Answer.Int64(), "Expected latest round answer to be %d, but found %d", int64(1e5), data.LatestRoundData.Answer.Int64()) @@ -127,7 +127,7 @@ func TestFluxBasic(t *testing.T) { require.NoError(t, err, "Setting value path in mock server shouldn't fail") err = env.EVMClient.WaitForEvents() require.NoError(t, err, 
"Waiting for event subscriptions in nodes shouldn't fail") - data, err = fluxInstance.GetContractData(context.Background()) + data, err = fluxInstance.GetContractData(utils.TestContext(t)) require.NoError(t, err, "Getting contract data from flux aggregator contract shouldn't fail") require.Equal(t, int64(1e10), data.LatestRoundData.Answer.Int64(), "Expected latest round answer to be %d, but found %d", int64(1e10), data.LatestRoundData.Answer.Int64()) @@ -140,7 +140,7 @@ func TestFluxBasic(t *testing.T) { l.Info().Interface("data", data).Msg("Round data") for _, oracleAddr := range nodeAddresses { - payment, _ := fluxInstance.WithdrawablePayment(context.Background(), oracleAddr) + payment, _ := fluxInstance.WithdrawablePayment(utils.TestContext(t), oracleAddr) require.Equal(t, int64(2), payment.Int64(), "Expected flux aggregator contract's withdrawable payment to be %d, but found %d", int64(2), payment.Int64()) } diff --git a/integration-tests/smoke/forwarder_ocr_test.go b/integration-tests/smoke/forwarder_ocr_test.go index 727b83a601..7203e03178 100644 --- a/integration-tests/smoke/forwarder_ocr_test.go +++ b/integration-tests/smoke/forwarder_ocr_test.go @@ -1,7 +1,6 @@ package smoke import ( - "context" "math/big" "testing" @@ -12,6 +11,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestForwarderOCRBasic(t *testing.T) { @@ -72,7 +72,7 @@ func TestForwarderOCRBasic(t *testing.T) { err = env.EVMClient.WaitForEvents() require.NoError(t, err, "Error waiting for events") - answer, err := ocrInstances[0].GetLatestAnswer(context.Background()) + answer, err := ocrInstances[0].GetLatestAnswer(utils.TestContext(t)) require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail") require.Equal(t, int64(5), answer.Int64(), "Expected latest answer from OCR contract to be 5 but got 
%d", answer.Int64()) @@ -83,7 +83,7 @@ func TestForwarderOCRBasic(t *testing.T) { err = env.EVMClient.WaitForEvents() require.NoError(t, err, "Error waiting for events") - answer, err = ocrInstances[0].GetLatestAnswer(context.Background()) + answer, err = ocrInstances[0].GetLatestAnswer(utils.TestContext(t)) require.NoError(t, err, "Error getting latest OCR answer") require.Equal(t, int64(10), answer.Int64(), "Expected latest answer from OCR contract to be 10 but got %d", answer.Int64()) } diff --git a/integration-tests/smoke/forwarders_ocr2_test.go b/integration-tests/smoke/forwarders_ocr2_test.go index baa5a781f6..be87eb5629 100644 --- a/integration-tests/smoke/forwarders_ocr2_test.go +++ b/integration-tests/smoke/forwarders_ocr2_test.go @@ -1,7 +1,6 @@ package smoke import ( - "context" "fmt" "math/big" "net/http" @@ -17,6 +16,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestForwarderOCR2Basic(t *testing.T) { @@ -92,7 +92,7 @@ func TestForwarderOCR2Basic(t *testing.T) { err = actions.StartNewOCR2Round(1, ocrInstances, env.EVMClient, time.Minute*10, l) require.NoError(t, err) - answer, err := ocrInstances[0].GetLatestAnswer(context.Background()) + answer, err := ocrInstances[0].GetLatestAnswer(utils.TestContext(t)) require.NoError(t, err, "Getting latest answer from OCRv2 contract shouldn't fail") require.Equal(t, int64(5), answer.Int64(), "Expected latest answer from OCRw contract to be 5 but got %d", answer.Int64()) @@ -103,7 +103,7 @@ func TestForwarderOCR2Basic(t *testing.T) { err = actions.StartNewOCR2Round(int64(i), ocrInstances, env.EVMClient, time.Minute*10, l) require.NoError(t, err) - answer, err = ocrInstances[0].GetLatestAnswer(context.Background()) + answer, err = 
ocrInstances[0].GetLatestAnswer(utils.TestContext(t)) require.NoError(t, err, "Error getting latest OCRv2 answer") require.Equal(t, int64(ocrRoundVal), answer.Int64(), fmt.Sprintf("Expected latest answer from OCRv2 contract to be %d but got %d", ocrRoundVal, answer.Int64())) } diff --git a/integration-tests/smoke/keeper_test.go b/integration-tests/smoke/keeper_test.go index d42944fd55..b28ab1ff10 100644 --- a/integration-tests/smoke/keeper_test.go +++ b/integration-tests/smoke/keeper_test.go @@ -1,7 +1,6 @@ package smoke import ( - "context" "fmt" "math/big" "strconv" @@ -23,6 +22,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) const ( @@ -109,7 +109,7 @@ func TestKeeperBasicSmoke(t *testing.T) { gom.Eventually(func(g gomega.Gomega) error { // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 10 for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(10)), "Expected consumer counter to be greater than 10, but got %d", counter.Int64()) @@ -131,7 +131,7 @@ func TestKeeperBasicSmoke(t *testing.T) { for i := 0; i < len(upkeepIDs); i++ { // Obtain the amount of times the upkeep has been executed so far - countersAfterCancellation[i], err = consumers[i].Counter(context.Background()) + countersAfterCancellation[i], err = consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) l.Info().Int("Index", 
i).Int64("Upkeeps Performed", countersAfterCancellation[i].Int64()).Msg("Cancelled Upkeep") } @@ -139,7 +139,7 @@ func TestKeeperBasicSmoke(t *testing.T) { gom.Consistently(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { // Expect the counter to remain constant because the upkeep was cancelled, so it shouldn't increase anymore - latestCounter, err := consumers[i].Counter(context.Background()) + latestCounter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterCancellation[i].Int64()), "Expected consumer counter to remain constant at %d, but got %d", @@ -187,11 +187,11 @@ func TestKeeperBlockCountPerTurn(t *testing.T) { // Wait for upkeep to be performed twice by different keepers (buddies) gom.Eventually(func(g gomega.Gomega) error { - counter, err := consumers[0].Counter(context.Background()) + counter, err := consumers[0].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Number of upkeeps performed") - upkeepInfo, err := registry.GetUpkeepInfo(context.Background(), upkeepID) + upkeepInfo, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepID) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") latestKeeper := upkeepInfo.LastKeeper @@ -205,7 +205,7 @@ func TestKeeperBlockCountPerTurn(t *testing.T) { }, "1m", "1s").Should(gomega.Succeed()) gom.Eventually(func(g gomega.Gomega) error { - upkeepInfo, err := registry.GetUpkeepInfo(context.Background(), upkeepID) + upkeepInfo, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepID) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") latestKeeper := upkeepInfo.LastKeeper @@ -219,7 +219,7 @@ func TestKeeperBlockCountPerTurn(t 
*testing.T) { // Expect no new keepers to perform for a while gom.Consistently(func(g gomega.Gomega) { - upkeepInfo, err := registry.GetUpkeepInfo(context.Background(), upkeepID) + upkeepInfo, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepID) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") latestKeeper := upkeepInfo.LastKeeper @@ -235,11 +235,11 @@ func TestKeeperBlockCountPerTurn(t *testing.T) { // Expect a new keeper to perform gom.Eventually(func(g gomega.Gomega) error { - counter, err := consumers[0].Counter(context.Background()) + counter, err := consumers[0].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Num upkeeps performed") - upkeepInfo, err := registry.GetUpkeepInfo(context.Background(), upkeepID) + upkeepInfo, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepID) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") latestKeeper := upkeepInfo.LastKeeper @@ -296,7 +296,7 @@ func TestKeeperSimulation(t *testing.T) { // Initially performGas is set high, so performUpkeep reverts and no upkeep should be performed gom.Consistently(func(g gomega.Gomega) { // Consumer count should remain at 0 - cnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") g.Expect(cnt.Int64()).Should( gomega.Equal(int64(0)), @@ -304,20 +304,20 @@ func TestKeeperSimulation(t *testing.T) { ) // Not even reverted upkeeps should be performed. 
Last keeper for the upkeep should be 0 address - upkeepInfo, err := registry.GetUpkeepInfo(context.Background(), upkeepID) + upkeepInfo, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepID) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") g.Expect(upkeepInfo.LastKeeper).Should(gomega.Equal(actions.ZeroAddress.String()), "Last keeper should be zero address") }, "1m", "1s").Should(gomega.Succeed()) // Set performGas on consumer to be low, so that performUpkeep starts becoming successful - err = consumerPerformance.SetPerformGasToBurn(context.Background(), big.NewInt(100000)) + err = consumerPerformance.SetPerformGasToBurn(utils.TestContext(t), big.NewInt(100000)) require.NoError(t, err, "Error setting PerformGasToBurn") err = chainClient.WaitForEvents() require.NoError(t, err, "Error waiting to set PerformGasToBurn") // Upkeep should now start performing gom.Eventually(func(g gomega.Gomega) error { - cnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)), "Expected consumer counter to be greater than 0, but got %d", cnt.Int64(), @@ -368,7 +368,7 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) { // Initially performGas is set higher than defaultUpkeepGasLimit, so no upkeep should be performed gom.Consistently(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") g.Expect(cnt.Int64()).Should( gomega.Equal(int64(0)), @@ -384,7 +384,7 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) { // Upkeep should now start performing gom.Eventually(func(g gomega.Gomega) error { - cnt, err := 
consumerPerformance.GetUpkeepCount(context.Background()) + cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)), "Expected consumer counter to be greater than 0, but got %d", cnt.Int64(), @@ -393,13 +393,13 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) { }, "1m", "1s").Should(gomega.Succeed()) // Now increase the checkGasBurn on consumer, upkeep should stop performing - err = consumerPerformance.SetCheckGasToBurn(context.Background(), big.NewInt(3000000)) + err = consumerPerformance.SetCheckGasToBurn(utils.TestContext(t), big.NewInt(3000000)) require.NoError(t, err, "Error setting CheckGasToBurn") err = chainClient.WaitForEvents() require.NoError(t, err, "Error waiting for SetCheckGasToBurn tx") // Get existing performed count - existingCnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + existingCnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) require.NoError(t, err, "Error calling consumer's counter") l.Info().Int64("Upkeep counter", existingCnt.Int64()).Msg("Check Gas Increased") @@ -407,7 +407,7 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) { // gets committed later. Since every keeper node cannot have more than 1 straggling tx, it // is sufficient to check that the upkeep count does not increase by more than 6. 
gom.Consistently(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") g.Expect(cnt.Int64()).Should( gomega.BeNumerically("<=", existingCnt.Int64()+numUpkeepsAllowedForStragglingTxs), @@ -415,7 +415,7 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) { ) }, "3m", "1s").Should(gomega.Succeed()) - existingCnt, err = consumerPerformance.GetUpkeepCount(context.Background()) + existingCnt, err = consumerPerformance.GetUpkeepCount(utils.TestContext(t)) require.NoError(t, err, "Error calling consumer's counter") existingCntInt := existingCnt.Int64() l.Info().Int64("Upkeep counter", existingCntInt).Msg("Upkeep counter when consistently block finished") @@ -430,7 +430,7 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) { // Upkeep should start performing again, and it should get regularly performed gom.Eventually(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(context.Background()) + cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", existingCntInt), "Expected consumer counter to be greater than %d, but got %d", existingCntInt, cnt.Int64(), @@ -478,7 +478,7 @@ func TestKeeperRegisterUpkeep(t *testing.T) { // store the value of their initial counters in order to compare later on that the value increased. 
gom.Eventually(func(g gomega.Gomega) error { for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) initialCounters[i] = counter g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter"+ " for upkeep at index "+strconv.Itoa(i)) @@ -500,7 +500,7 @@ func TestKeeperRegisterUpkeep(t *testing.T) { // Test that the newly registered upkeep is also performing. gom.Eventually(func(g gomega.Gomega) error { - counter, err := newUpkeep.Counter(context.Background()) + counter, err := newUpkeep.Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling newly deployed upkeep's counter shouldn't fail") g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), "Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64()) @@ -510,7 +510,7 @@ func TestKeeperRegisterUpkeep(t *testing.T) { gom.Eventually(func(g gomega.Gomega) error { for i := 0; i < len(upkeepIDs); i++ { - currentCounter, err := consumers[i].Counter(context.Background()) + currentCounter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") l.Info(). 
@@ -563,7 +563,7 @@ func TestKeeperAddFunds(t *testing.T) { // Since the upkeep is currently underfunded, check that it doesn't get executed gom.Consistently(func(g gomega.Gomega) { - counter, err := consumers[0].Counter(context.Background()) + counter, err := consumers[0].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)), "Expected consumer counter to remain zero, but got %d", counter.Int64()) @@ -583,7 +583,7 @@ func TestKeeperAddFunds(t *testing.T) { // Now the new upkeep should be performing because we added enough funds gom.Eventually(func(g gomega.Gomega) { - counter, err := consumers[0].Counter(context.Background()) + counter, err := consumers[0].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), "Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64()) @@ -628,7 +628,7 @@ func TestKeeperRemove(t *testing.T) { // Make sure the upkeeps are running before we remove a keeper gom.Eventually(func(g gomega.Gomega) error { for upkeepID := 0; upkeepID < len(upkeepIDs); upkeepID++ { - counter, err := consumers[upkeepID].Counter(context.Background()) + counter, err := consumers[upkeepID].Counter(utils.TestContext(t)) initialCounters[upkeepID] = counter g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter"+ " for upkeep with ID "+strconv.Itoa(upkeepID)) @@ -637,7 +637,7 @@ func TestKeeperRemove(t *testing.T) { return nil }, "1m", "1s").Should(gomega.Succeed()) - keepers, err := registry.GetKeeperList(context.Background()) + keepers, err := registry.GetKeeperList(utils.TestContext(t)) require.NoError(t, err, "Error getting list of Keepers") // Remove the first keeper from the list @@ -660,7 +660,7 @@ func TestKeeperRemove(t *testing.T) { 
// The upkeeps should still perform and their counters should have increased compared to the first check gom.Eventually(func(g gomega.Gomega) error { for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(counter.Cmp(initialCounters[i]) == 1, "Expected consumer counter to be greater "+ "than initial counter which was %s, but got %s", initialCounters[i], counter) @@ -705,7 +705,7 @@ func TestKeeperPauseRegistry(t *testing.T) { // Observe that the upkeeps which are initially registered are performing gom.Eventually(func(g gomega.Gomega) error { for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), "Expected consumer counter to be greater than 0, but got %d") @@ -722,7 +722,7 @@ func TestKeeperPauseRegistry(t *testing.T) { // Store how many times each upkeep performed once the registry was successfully paused var countersAfterPause = make([]*big.Int, len(upkeepIDs)) for i := 0; i < len(upkeepIDs); i++ { - countersAfterPause[i], err = consumers[i].Counter(context.Background()) + countersAfterPause[i], err = consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Error retrieving consumer at index %d", i) } @@ -730,7 +730,7 @@ func TestKeeperPauseRegistry(t *testing.T) { // because they are no longer getting serviced gom.Consistently(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { - latestCounter, err := consumers[i].Counter(context.Background()) + latestCounter, err := consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, 
"Error retrieving consumer contract at index %d", i) g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterPause[i].Int64()), "Expected consumer counter to remain constant at %d, but got %d", @@ -791,7 +791,7 @@ func TestKeeperMigrateRegistry(t *testing.T) { // Check that the first upkeep from the first registry is performing (before being migrated) gom.Eventually(func(g gomega.Gomega) error { - counterBeforeMigration, err := consumers[0].Counter(context.Background()) + counterBeforeMigration, err := consumers[0].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") g.Expect(counterBeforeMigration.Int64()).Should(gomega.BeNumerically(">", int64(0)), "Expected consumer counter to be greater than 0, but got %s", counterBeforeMigration) @@ -810,12 +810,12 @@ func TestKeeperMigrateRegistry(t *testing.T) { err = chainClient.WaitForEvents() require.NoError(t, err, "Error waiting to pause first registry") - counterAfterMigration, err := consumers[0].Counter(context.Background()) + counterAfterMigration, err := consumers[0].Counter(utils.TestContext(t)) require.NoError(t, err, "Error calling consumer's counter") // Check that once we migrated the upkeep, the counter has increased gom.Eventually(func(g gomega.Gomega) error { - currentCounter, err := consumers[0].Counter(context.Background()) + currentCounter, err := consumers[0].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", counterAfterMigration.Int64()), "Expected counter to have increased, but stayed constant at %s", counterAfterMigration) @@ -860,7 +860,7 @@ func TestKeeperNodeDown(t *testing.T) { // Watch upkeeps being performed and store their counters in order to compare them later in the test gom.Eventually(func(g gomega.Gomega) error { for i := 0; i < len(upkeepIDs); i++ { - counter, err := 
consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) initialCounters[i] = counter g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), @@ -882,7 +882,7 @@ func TestKeeperNodeDown(t *testing.T) { // Assert that upkeeps are still performed and their counters have increased gom.Eventually(func(g gomega.Gomega) error { for i := 0; i < len(upkeepIDs); i++ { - currentCounter, err := consumers[i].Counter(context.Background()) + currentCounter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", initialCounters[i].Int64()), "Expected counter to have increased from initial value of %s, but got %s", @@ -908,7 +908,7 @@ func TestKeeperNodeDown(t *testing.T) { // See how many times each upkeep was executed var countersAfterNoMoreNodes = make([]*big.Int, len(upkeepIDs)) for i := 0; i < len(upkeepIDs); i++ { - countersAfterNoMoreNodes[i], err = consumers[i].Counter(context.Background()) + countersAfterNoMoreNodes[i], err = consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Error retrieving consumer counter %d", i) l.Info(). Int("Index", i). @@ -921,7 +921,7 @@ func TestKeeperNodeDown(t *testing.T) { // so a +6 on the upper limit side should be sufficient. 
gom.Consistently(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { - latestCounter, err := consumers[i].Counter(context.Background()) + latestCounter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterNoMoreNodes[i].Int64()+numUpkeepsAllowedForStragglingTxs, @@ -964,7 +964,7 @@ func TestKeeperPauseUnPauseUpkeep(t *testing.T) { gom.Eventually(func(g gomega.Gomega) error { // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(5)), "Expected consumer counter to be greater than 5, but got %d", counter.Int64()) @@ -985,7 +985,7 @@ func TestKeeperPauseUnPauseUpkeep(t *testing.T) { var countersAfterPause = make([]*big.Int, len(upkeepIDs)) for i := 0; i < len(upkeepIDs); i++ { // Obtain the amount of times the upkeep has been executed so far - countersAfterPause[i], err = consumers[i].Counter(context.Background()) + countersAfterPause[i], err = consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Error retrieving upkeep count at index %d", i) l.Info(). Int("Index", i). @@ -998,7 +998,7 @@ func TestKeeperPauseUnPauseUpkeep(t *testing.T) { // In most cases counters should remain constant, but there might be a straggling perform tx which // gets committed later. Since every keeper node cannot have more than 1 straggling tx, it // is sufficient to check that the upkeep count does not increase by more than 6. 
- latestCounter, err := consumers[i].Counter(context.Background()) + latestCounter, err := consumers[i].Counter(utils.TestContext(t)) require.NoError(t, err, "Error retrieving counter at index %d", i) g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterPause[i].Int64()+numUpkeepsAllowedForStragglingTxs), "Expected consumer counter not have increased more than %d, but got %d", @@ -1018,7 +1018,7 @@ func TestKeeperPauseUnPauseUpkeep(t *testing.T) { gom.Eventually(func(g gomega.Gomega) error { // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 + numbers of performing before pause for i := 0; i < len(upkeepIDs); i++ { - counter, err := consumers[i].Counter(context.Background()) + counter, err := consumers[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter"+ " for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(5)+countersAfterPause[i].Int64()), @@ -1055,7 +1055,7 @@ func TestKeeperUpdateCheckData(t *testing.T) { gom.Consistently(func(g gomega.Gomega) { // expect the counter to remain 0 because perform data does not match for i := 0; i < len(upkeepIDs); i++ { - counter, err := performDataChecker[i].Counter(context.Background()) + counter, err := performDataChecker[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve perform data checker for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)), "Expected perform data checker counter to be 0, but got %d", counter.Int64()) @@ -1073,7 +1073,7 @@ func TestKeeperUpdateCheckData(t *testing.T) { // retrieve new check data for all upkeeps for i := 0; i < len(upkeepIDs); i++ { - upkeep, err := registry.GetUpkeepInfo(context.Background(), upkeepIDs[i]) + upkeep, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepIDs[i]) require.NoError(t, err, "Error 
getting upkeep info from index %d", i) require.Equal(t, []byte(keeperExpectedData), upkeep.CheckData, "Check data not as expected") } @@ -1081,7 +1081,7 @@ func TestKeeperUpdateCheckData(t *testing.T) { gom.Eventually(func(g gomega.Gomega) error { // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 for i := 0; i < len(upkeepIDs); i++ { - counter, err := performDataChecker[i].Counter(context.Background()) + counter, err := performDataChecker[i].Counter(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve perform data checker counter for upkeep at index %d", i) g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(5)), "Expected perform data checker counter to be greater than 5, but got %d", counter.Int64()) diff --git a/integration-tests/smoke/log_poller_test.go b/integration-tests/smoke/log_poller_test.go new file mode 100644 index 0000000000..03a287ee6b --- /dev/null +++ b/integration-tests/smoke/log_poller_test.go @@ -0,0 +1,261 @@ +package smoke + +import ( + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + + logpoller "github.com/smartcontractkit/chainlink/integration-tests/universal/log_poller" +) + +// consistency test with no network disruptions with approximate emission of 1500-1600 logs per second for ~110-120 seconds +// 6 filters are registered +func TestLogPollerFewFiltersFixedDepth(t *testing.T) { + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 2, + EventsPerTx: 4, + UseFinalityTag: false, + }, + LoopedConfig: &logpoller.LoopedConfig{ + ContractConfig: logpoller.ContractConfig{ + ExecutionCount: 100, + }, + FuzzConfig: logpoller.FuzzConfig{ + MinEmitWaitTimeMs: 200, + MaxEmitWaitTimeMs: 500, + }, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + 
cfg.General.EventsToEmit = eventsToEmit + + logpoller.ExecuteBasicLogPollerTest(t, &cfg) +} + +func TestLogPollerFewFiltersFinalityTag(t *testing.T) { + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 2, + EventsPerTx: 4, + UseFinalityTag: true, + }, + LoopedConfig: &logpoller.LoopedConfig{ + ContractConfig: logpoller.ContractConfig{ + ExecutionCount: 100, + }, + FuzzConfig: logpoller.FuzzConfig{ + MinEmitWaitTimeMs: 200, + MaxEmitWaitTimeMs: 500, + }, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit + + logpoller.ExecuteBasicLogPollerTest(t, &cfg) +} + +// consistency test with no network disruptions with approximate emission of 1000-1100 logs per second for ~110-120 seconds +// 900 filters are registered +func TestLogManyFiltersPollerFixedDepth(t *testing.T) { + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 300, + EventsPerTx: 3, + UseFinalityTag: false, + }, + LoopedConfig: &logpoller.LoopedConfig{ + ContractConfig: logpoller.ContractConfig{ + ExecutionCount: 30, + }, + FuzzConfig: logpoller.FuzzConfig{ + MinEmitWaitTimeMs: 200, + MaxEmitWaitTimeMs: 500, + }, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit + + logpoller.ExecuteBasicLogPollerTest(t, &cfg) +} + +func TestLogManyFiltersPollerFinalityTag(t *testing.T) { + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 300, + EventsPerTx: 3, + UseFinalityTag: true, + }, + LoopedConfig: &logpoller.LoopedConfig{ + ContractConfig: logpoller.ContractConfig{ + ExecutionCount: 30, + }, + FuzzConfig: logpoller.FuzzConfig{ + MinEmitWaitTimeMs: 200, + 
MaxEmitWaitTimeMs: 500, + }, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit + + logpoller.ExecuteBasicLogPollerTest(t, &cfg) +} + +// consistency test that introduces random distruptions by pausing either Chainlink or Postgres containers for random interval of 5-20 seconds +// with approximate emission of 520-550 logs per second for ~110 seconds +// 6 filters are registered +func TestLogPollerWithChaosFixedDepth(t *testing.T) { + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 2, + EventsPerTx: 100, + UseFinalityTag: false, + }, + LoopedConfig: &logpoller.LoopedConfig{ + ContractConfig: logpoller.ContractConfig{ + ExecutionCount: 100, + }, + FuzzConfig: logpoller.FuzzConfig{ + MinEmitWaitTimeMs: 200, + MaxEmitWaitTimeMs: 500, + }, + }, + ChaosConfig: &logpoller.ChaosConfig{ + ExperimentCount: 10, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit + + logpoller.ExecuteBasicLogPollerTest(t, &cfg) +} + +func TestLogPollerWithChaosFinalityTag(t *testing.T) { + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 2, + EventsPerTx: 100, + UseFinalityTag: true, + }, + LoopedConfig: &logpoller.LoopedConfig{ + ContractConfig: logpoller.ContractConfig{ + ExecutionCount: 100, + }, + FuzzConfig: logpoller.FuzzConfig{ + MinEmitWaitTimeMs: 200, + MaxEmitWaitTimeMs: 500, + }, + }, + ChaosConfig: &logpoller.ChaosConfig{ + ExperimentCount: 10, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit + + logpoller.ExecuteBasicLogPollerTest(t, &cfg) +} 
+ +// consistency test that registers filters after events were emitted and then triggers replay via API +// unfortunately there is no way to make sure that logs that are indexed are only picked up by replay +// and not by backup poller +// with approximate emission of 24 logs per second for ~110 seconds +// 6 filters are registered +func TestLogPollerReplayFixedDepth(t *testing.T) { + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 2, + EventsPerTx: 4, + UseFinalityTag: false, + }, + LoopedConfig: &logpoller.LoopedConfig{ + ContractConfig: logpoller.ContractConfig{ + ExecutionCount: 100, + }, + FuzzConfig: logpoller.FuzzConfig{ + MinEmitWaitTimeMs: 200, + MaxEmitWaitTimeMs: 500, + }, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit + consistencyTimeout := "5m" + + logpoller.ExecuteLogPollerReplay(t, &cfg, consistencyTimeout) +} + +func TestLogPollerReplayFinalityTag(t *testing.T) { + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 2, + EventsPerTx: 4, + UseFinalityTag: true, + }, + LoopedConfig: &logpoller.LoopedConfig{ + ContractConfig: logpoller.ContractConfig{ + ExecutionCount: 100, + }, + FuzzConfig: logpoller.FuzzConfig{ + MinEmitWaitTimeMs: 200, + MaxEmitWaitTimeMs: 500, + }, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit + consistencyTimeout := "5m" + + logpoller.ExecuteLogPollerReplay(t, &cfg, consistencyTimeout) +} diff --git a/integration-tests/smoke/ocr2_test.go b/integration-tests/smoke/ocr2_test.go index d82d84a207..5950e9febb 100644 --- a/integration-tests/smoke/ocr2_test.go +++ b/integration-tests/smoke/ocr2_test.go @@ -1,31 +1,21 @@ 
package smoke import ( - "context" "fmt" "math/big" "net/http" - "strings" "testing" "time" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" - "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver" - mockservercfg "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver-cfg" - "github.com/smartcontractkit/chainlink-testing-framework/blockchain" "github.com/smartcontractkit/chainlink-testing-framework/logging" - "github.com/smartcontractkit/chainlink-testing-framework/networks" "github.com/smartcontractkit/chainlink/integration-tests/actions" - "github.com/smartcontractkit/chainlink/integration-tests/client" - "github.com/smartcontractkit/chainlink/integration-tests/config" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) // Tests a basic OCRv2 median feed @@ -83,7 +73,7 @@ func TestOCRv2Basic(t *testing.T) { err = actions.StartNewOCR2Round(1, aggregatorContracts, env.EVMClient, time.Minute*5, l) require.NoError(t, err, "Error starting new OCR2 round") - roundData, err := aggregatorContracts[0].GetRound(context.Background(), big.NewInt(1)) + roundData, err := aggregatorContracts[0].GetRound(utils.TestContext(t), big.NewInt(1)) require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail") require.Equal(t, int64(5), roundData.Answer.Int64(), "Expected latest answer from OCR contract to be 5 but got %d", @@ -95,49 +85,10 @@ func TestOCRv2Basic(t *testing.T) { err = actions.StartNewOCR2Round(2, aggregatorContracts, env.EVMClient, time.Minute*5, l) require.NoError(t, err) - roundData, err = 
aggregatorContracts[0].GetRound(context.Background(), big.NewInt(2)) + roundData, err = aggregatorContracts[0].GetRound(utils.TestContext(t), big.NewInt(2)) require.NoError(t, err, "Error getting latest OCR answer") require.Equal(t, int64(10), roundData.Answer.Int64(), "Expected latest answer from OCR contract to be 10 but got %d", roundData.Answer.Int64(), ) } - -func setupOCR2Test(t *testing.T, forwardersEnabled bool) ( - testEnvironment *environment.Environment, - testNetwork blockchain.EVMNetwork, -) { - testNetwork = networks.SelectedNetwork - evmConfig := ethereum.New(nil) - if !testNetwork.Simulated { - evmConfig = ethereum.New(ðereum.Props{ - NetworkName: testNetwork.Name, - Simulated: testNetwork.Simulated, - WsURLs: testNetwork.URLs, - }) - } - - var toml string - if forwardersEnabled { - toml = client.AddNetworkDetailedConfig(config.BaseOCR2Config, config.ForwarderNetworkDetailConfig, testNetwork) - } else { - toml = client.AddNetworksConfig(config.BaseOCR2Config, testNetwork) - } - - chainlinkChart := chainlink.New(0, map[string]interface{}{ - "replicas": 6, - "toml": toml, - }) - - testEnvironment = environment.New(&environment.Config{ - NamespacePrefix: fmt.Sprintf("smoke-ocr2-%s", strings.ReplaceAll(strings.ToLower(testNetwork.Name), " ", "-")), - Test: t, - }). - AddHelm(mockservercfg.New(nil)). - AddHelm(mockserver.New(nil)). - AddHelm(evmConfig). 
- AddHelm(chainlinkChart) - err := testEnvironment.Run() - require.NoError(t, err, "Error running test environment") - return testEnvironment, testNetwork -} diff --git a/integration-tests/smoke/ocr2vrf_test.go b/integration-tests/smoke/ocr2vrf_test.go index 8148863918..57bd5412b1 100644 --- a/integration-tests/smoke/ocr2vrf_test.go +++ b/integration-tests/smoke/ocr2vrf_test.go @@ -9,13 +9,12 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" - "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - eth "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + eth "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" - "github.com/smartcontractkit/chainlink-testing-framework/utils" "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/actions/ocr2vrf_actions" @@ -23,6 +22,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/config" "github.com/smartcontractkit/chainlink/integration-tests/contracts" + it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestOCR2VRFRedeemModel(t *testing.T) { @@ -44,7 +44,7 @@ func TestOCR2VRFRedeemModel(t *testing.T) { require.NoError(t, err, "Retreiving on-chain wallet addresses for chainlink nodes shouldn't fail") t.Cleanup(func() { - err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.ErrorLevel, chainClient) + err := 
actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.ErrorLevel, chainClient) require.NoError(t, err, "Error tearing down environment") }) @@ -80,7 +80,7 @@ func TestOCR2VRFRedeemModel(t *testing.T) { ) for i := uint16(0); i < ocr2vrf_constants.NumberOfRandomWordsToRequest; i++ { - randomness, err := consumerContract.GetRandomnessByRequestId(nil, requestID, big.NewInt(int64(i))) + randomness, err := consumerContract.GetRandomnessByRequestId(it_utils.TestContext(t), requestID, big.NewInt(int64(i))) require.NoError(t, err) l.Info().Interface("Random Number", randomness).Interface("Randomness Number Index", i).Msg("Randomness retrieved from Consumer contract") require.NotEqual(t, 0, randomness.Uint64(), "Randomness retrieved from Consumer contract give an answer other than 0") @@ -106,7 +106,7 @@ func TestOCR2VRFFulfillmentModel(t *testing.T) { require.NoError(t, err, "Retreiving on-chain wallet addresses for chainlink nodes shouldn't fail") t.Cleanup(func() { - err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.ErrorLevel, chainClient) + err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.ErrorLevel, chainClient) require.NoError(t, err, "Error tearing down environment") }) @@ -141,7 +141,7 @@ func TestOCR2VRFFulfillmentModel(t *testing.T) { ) for i := uint16(0); i < ocr2vrf_constants.NumberOfRandomWordsToRequest; i++ { - randomness, err := consumerContract.GetRandomnessByRequestId(nil, requestID, big.NewInt(int64(i))) + randomness, err := consumerContract.GetRandomnessByRequestId(it_utils.TestContext(t), requestID, big.NewInt(int64(i))) require.NoError(t, err, "Error getting Randomness result from Consumer Contract") l.Info().Interface("Random Number", randomness).Interface("Randomness Number Index", i).Msg("Randomness Fulfillment retrieved from Consumer contract") require.NotEqual(t, 0, randomness.Uint64(), "Randomness Fulfillment retrieved from Consumer contract give an answer 
other than 0") @@ -149,7 +149,7 @@ func TestOCR2VRFFulfillmentModel(t *testing.T) { } func setupOCR2VRFEnvironment(t *testing.T) (testEnvironment *environment.Environment, testNetwork blockchain.EVMNetwork) { - testNetwork = networks.SelectedNetwork + testNetwork = networks.MustGetSelectedNetworksFromEnv()[0] evmConfig := eth.New(nil) if !testNetwork.Simulated { evmConfig = eth.New(ð.Props{ diff --git a/integration-tests/smoke/ocr_test.go b/integration-tests/smoke/ocr_test.go index 8d71c5d08f..45205565e2 100644 --- a/integration-tests/smoke/ocr_test.go +++ b/integration-tests/smoke/ocr_test.go @@ -1,7 +1,6 @@ package smoke import ( - "context" "math/big" "testing" @@ -11,6 +10,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestOCRBasic(t *testing.T) { @@ -22,7 +22,7 @@ func TestOCRBasic(t *testing.T) { WithGeth(). WithMockAdapter(). WithCLNodes(6). - WithFunding(big.NewFloat(.1)). + WithFunding(big.NewFloat(.01)). WithStandardCleanup(). 
Build() require.NoError(t, err) @@ -46,7 +46,7 @@ func TestOCRBasic(t *testing.T) { err = actions.StartNewRound(1, ocrInstances, env.EVMClient, l) require.NoError(t, err) - answer, err := ocrInstances[0].GetLatestAnswer(context.Background()) + answer, err := ocrInstances[0].GetLatestAnswer(utils.TestContext(t)) require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail") require.Equal(t, int64(5), answer.Int64(), "Expected latest answer from OCR contract to be 5 but got %d", answer.Int64()) @@ -55,7 +55,7 @@ func TestOCRBasic(t *testing.T) { err = actions.StartNewRound(2, ocrInstances, env.EVMClient, l) require.NoError(t, err) - answer, err = ocrInstances[0].GetLatestAnswer(context.Background()) + answer, err = ocrInstances[0].GetLatestAnswer(utils.TestContext(t)) require.NoError(t, err, "Error getting latest OCR answer") require.Equal(t, int64(10), answer.Int64(), "Expected latest answer from OCR contract to be 10 but got %d", answer.Int64()) } diff --git a/integration-tests/smoke/runlog_test.go b/integration-tests/smoke/runlog_test.go index f29cb4bc89..20389da378 100644 --- a/integration-tests/smoke/runlog_test.go +++ b/integration-tests/smoke/runlog_test.go @@ -1,7 +1,6 @@ package smoke import ( - "context" "fmt" "math/big" "net/http" @@ -16,6 +15,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestRunLogBasic(t *testing.T) { @@ -87,7 +87,7 @@ func TestRunLogBasic(t *testing.T) { gom := gomega.NewGomegaWithT(t) gom.Eventually(func(g gomega.Gomega) { - d, err := consumer.Data(context.Background()) + d, err := consumer.Data(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting data from consumer contract shouldn't fail") g.Expect(d).ShouldNot(gomega.BeNil(), "Expected the initial on chain data to be nil") l.Debug().Int64("Data", 
d.Int64()).Msg("Found on chain") diff --git a/integration-tests/smoke/vrf_test.go b/integration-tests/smoke/vrf_test.go index 444d1ce20e..61d2c5cdd7 100644 --- a/integration-tests/smoke/vrf_test.go +++ b/integration-tests/smoke/vrf_test.go @@ -1,7 +1,6 @@ package smoke import ( - "context" "fmt" "math/big" "testing" @@ -17,6 +16,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv1" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestVRFBasic(t *testing.T) { @@ -81,7 +81,7 @@ func TestVRFBasic(t *testing.T) { encodedProvingKeys := make([][2]*big.Int, 0) encodedProvingKeys = append(encodedProvingKeys, provingKey) - requestHash, err := contracts.Coordinator.HashOfKey(context.Background(), encodedProvingKeys[0]) + requestHash, err := contracts.Coordinator.HashOfKey(utils.TestContext(t), encodedProvingKeys[0]) require.NoError(t, err, "Getting Hash of encoded proving keys shouldn't fail") err = contracts.Consumer.RequestRandomness(requestHash, big.NewInt(1)) require.NoError(t, err, "Requesting randomness shouldn't fail") @@ -92,7 +92,7 @@ func TestVRFBasic(t *testing.T) { jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(job.Data.ID) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Job execution shouldn't fail") - out, err := contracts.Consumer.RandomnessOutput(context.Background()) + out, err := contracts.Consumer.RandomnessOutput(utils.TestContext(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting the randomness output of the consumer shouldn't fail") // Checks that the job has actually run g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically(">=", 1), diff --git a/integration-tests/smoke/vrfv2_test.go b/integration-tests/smoke/vrfv2_test.go index c960bb6c69..714ed752a3 100644 --- a/integration-tests/smoke/vrfv2_test.go +++ 
b/integration-tests/smoke/vrfv2_test.go @@ -1,7 +1,6 @@ package smoke import ( - "context" "math/big" "testing" "time" @@ -16,6 +15,7 @@ import ( vrfConst "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions/vrfv2_constants" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestVRFv2Basic(t *testing.T) { @@ -97,11 +97,11 @@ func TestVRFv2Basic(t *testing.T) { jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(vrfV2jobs[0].Job.Data.ID) g.Expect(err).ShouldNot(gomega.HaveOccurred()) g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically("==", 1)) - lastRequestID, err = vrfv2Contracts.LoadTestConsumer.GetLastRequestId(context.Background()) + lastRequestID, err = vrfv2Contracts.LoadTestConsumer.GetLastRequestId(utils.TestContext(t)) l.Debug().Interface("Last Request ID", lastRequestID).Msg("Last Request ID Received") g.Expect(err).ShouldNot(gomega.HaveOccurred()) - status, err := vrfv2Contracts.LoadTestConsumer.GetRequestStatus(context.Background(), lastRequestID) + status, err := vrfv2Contracts.LoadTestConsumer.GetRequestStatus(utils.TestContext(t), lastRequestID) g.Expect(err).ShouldNot(gomega.HaveOccurred()) g.Expect(status.Fulfilled).Should(gomega.BeTrue()) l.Debug().Interface("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status") diff --git a/integration-tests/smoke/vrfv2plus_test.go b/integration-tests/smoke/vrfv2plus_test.go index c2cc0878b6..cfeca0a66a 100644 --- a/integration-tests/smoke/vrfv2plus_test.go +++ b/integration-tests/smoke/vrfv2plus_test.go @@ -1,19 +1,19 @@ package smoke import ( - "context" + "fmt" "math/big" "testing" "time" - "github.com/kelseyhightower/envconfig" - "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_v2plus_upgraded_version" + 
"github.com/smartcontractkit/chainlink/integration-tests/utils" "github.com/ethereum/go-ethereum/common" - "github.com/pkg/errors" + "github.com/kelseyhightower/envconfig" "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink-testing-framework/logging" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_v2plus_upgraded_version" "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus" @@ -46,17 +46,21 @@ func TestVRFv2Plus(t *testing.T) { linkToken, err := actions.DeployLINKToken(env.ContractDeployer) require.NoError(t, err, "error deploying LINK contract") - vrfv2PlusContracts, subIDs, vrfv2PlusData, err := vrfv2plus.SetupVRFV2_5Environment(env, &vrfv2PlusConfig, linkToken, mockETHLinkFeed, 1, 1) + // register proving key against oracle address (sending key) in order to test oracleWithdraw + defaultWalletAddress := env.EVMClient.GetDefaultWallet().Address() + + vrfv2PlusContracts, subIDs, vrfv2PlusData, err := vrfv2plus.SetupVRFV2_5Environment(env, vrfv2PlusConfig, linkToken, mockETHLinkFeed, defaultWalletAddress, 1, 1, l) require.NoError(t, err, "error setting up VRF v2_5 env") subID := subIDs[0] - subscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID) + subscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID) require.NoError(t, err, "error getting subscription information") vrfv2plus.LogSubDetails(l, subscription, subID, vrfv2PlusContracts.Coordinator) - t.Run("VRFV2 Plus With Link Billing", func(t *testing.T) { + t.Run("Link Billing", func(t *testing.T) { + testConfig := vrfv2PlusConfig var isNativeBilling = false subBalanceBeforeRequest := subscription.Balance @@ -70,14 +74,15 @@ func TestVRFv2Plus(t *testing.T) { vrfv2PlusData, subID, isNativeBilling, - vrfv2PlusConfig.RandomnessRequestCountPerRequest, - &vrfv2PlusConfig, + 
testConfig.RandomnessRequestCountPerRequest, + testConfig, + testConfig.RandomWordsFulfilledEventTimeout, l, ) require.NoError(t, err, "error requesting randomness and waiting for fulfilment") expectedSubBalanceJuels := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment) - subscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID) + subscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID) require.NoError(t, err, "error getting subscription information") subBalanceAfterRequest := subscription.Balance require.Equal(t, expectedSubBalanceJuels, subBalanceAfterRequest) @@ -86,19 +91,19 @@ func TestVRFv2Plus(t *testing.T) { require.NoError(t, err, "error reading job runs") require.Equal(t, len(jobRunsBeforeTest.Data)+1, len(jobRuns.Data)) - status, err := vrfv2PlusContracts.LoadTestConsumers[0].GetRequestStatus(context.Background(), randomWordsFulfilledEvent.RequestId) + status, err := vrfv2PlusContracts.LoadTestConsumers[0].GetRequestStatus(utils.TestContext(t), randomWordsFulfilledEvent.RequestId) require.NoError(t, err, "error getting rand request status") require.True(t, status.Fulfilled) l.Debug().Bool("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status") - require.Equal(t, vrfv2PlusConfig.NumberOfWords, uint32(len(status.RandomWords))) + require.Equal(t, testConfig.NumberOfWords, uint32(len(status.RandomWords))) for _, w := range status.RandomWords { l.Info().Str("Output", w.String()).Msg("Randomness fulfilled") require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0") } }) - - t.Run("VRFV2 Plus With Native Billing", func(t *testing.T) { + t.Run("Native Billing", func(t *testing.T) { + testConfig := vrfv2PlusConfig var isNativeBilling = true subNativeTokenBalanceBeforeRequest := subscription.NativeBalance @@ -112,13 +117,14 @@ func TestVRFv2Plus(t *testing.T) { vrfv2PlusData, subID, isNativeBilling, 
- vrfv2PlusConfig.RandomnessRequestCountPerRequest, - &vrfv2PlusConfig, + testConfig.RandomnessRequestCountPerRequest, + testConfig, + testConfig.RandomWordsFulfilledEventTimeout, l, ) require.NoError(t, err, "error requesting randomness and waiting for fulfilment") expectedSubBalanceWei := new(big.Int).Sub(subNativeTokenBalanceBeforeRequest, randomWordsFulfilledEvent.Payment) - subscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID) + subscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID) require.NoError(t, err) subBalanceAfterRequest := subscription.NativeBalance require.Equal(t, expectedSubBalanceWei, subBalanceAfterRequest) @@ -127,125 +133,457 @@ func TestVRFv2Plus(t *testing.T) { require.NoError(t, err, "error reading job runs") require.Equal(t, len(jobRunsBeforeTest.Data)+1, len(jobRuns.Data)) - status, err := vrfv2PlusContracts.LoadTestConsumers[0].GetRequestStatus(context.Background(), randomWordsFulfilledEvent.RequestId) + status, err := vrfv2PlusContracts.LoadTestConsumers[0].GetRequestStatus(utils.TestContext(t), randomWordsFulfilledEvent.RequestId) require.NoError(t, err, "error getting rand request status") require.True(t, status.Fulfilled) l.Debug().Bool("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status") - require.Equal(t, vrfv2PlusConfig.NumberOfWords, uint32(len(status.RandomWords))) + require.Equal(t, testConfig.NumberOfWords, uint32(len(status.RandomWords))) for _, w := range status.RandomWords { l.Info().Str("Output", w.String()).Msg("Randomness fulfilled") require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0") } }) + t.Run("Direct Funding (VRFV2PlusWrapper)", func(t *testing.T) { + testConfig := vrfv2PlusConfig + wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperEnvironment( + env, + testConfig, + linkToken, + mockETHLinkFeed, + vrfv2PlusContracts.Coordinator, + 
vrfv2PlusData.KeyHash, + 1, + ) + require.NoError(t, err) - wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperEnvironment( - env, - &vrfv2PlusConfig, - linkToken, - mockETHLinkFeed, - vrfv2PlusContracts.Coordinator, - vrfv2PlusData.KeyHash, - 1, - ) - require.NoError(t, err) + t.Run("Link Billing", func(t *testing.T) { + testConfig := vrfv2PlusConfig + var isNativeBilling = false + + wrapperConsumerJuelsBalanceBeforeRequest, err := linkToken.BalanceOf(utils.TestContext(t), wrapperContracts.LoadTestConsumers[0].Address()) + require.NoError(t, err, "error getting wrapper consumer balance") + + wrapperSubscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), wrapperSubID) + require.NoError(t, err, "error getting subscription information") + subBalanceBeforeRequest := wrapperSubscription.Balance + + randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment( + wrapperContracts.LoadTestConsumers[0], + vrfv2PlusContracts.Coordinator, + vrfv2PlusData, + wrapperSubID, + isNativeBilling, + testConfig, + testConfig.RandomWordsFulfilledEventTimeout, + l, + ) + require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + + expectedSubBalanceJuels := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment) + wrapperSubscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), wrapperSubID) + require.NoError(t, err, "error getting subscription information") + subBalanceAfterRequest := wrapperSubscription.Balance + require.Equal(t, expectedSubBalanceJuels, subBalanceAfterRequest) + + consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(utils.TestContext(t), randomWordsFulfilledEvent.RequestId) + require.NoError(t, err, "error getting rand request status") + require.True(t, consumerStatus.Fulfilled) + + expectedWrapperConsumerJuelsBalance := new(big.Int).Sub(wrapperConsumerJuelsBalanceBeforeRequest, 
consumerStatus.Paid) + + wrapperConsumerJuelsBalanceAfterRequest, err := linkToken.BalanceOf(utils.TestContext(t), wrapperContracts.LoadTestConsumers[0].Address()) + require.NoError(t, err, "error getting wrapper consumer balance") + require.Equal(t, expectedWrapperConsumerJuelsBalance, wrapperConsumerJuelsBalanceAfterRequest) + + //todo: uncomment when VRF-651 will be fixed + //require.Equal(t, 1, consumerStatus.Paid.Cmp(randomWordsFulfilledEvent.Payment), "Expected Consumer contract pay more than the Coordinator Sub") + vrfv2plus.LogFulfillmentDetailsLinkBilling(l, wrapperConsumerJuelsBalanceBeforeRequest, wrapperConsumerJuelsBalanceAfterRequest, consumerStatus, randomWordsFulfilledEvent) + + require.Equal(t, testConfig.NumberOfWords, uint32(len(consumerStatus.RandomWords))) + for _, w := range consumerStatus.RandomWords { + l.Info().Str("Output", w.String()).Msg("Randomness fulfilled") + require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0") + } + }) + t.Run("Native Billing", func(t *testing.T) { + testConfig := vrfv2PlusConfig + var isNativeBilling = true + + wrapperConsumerBalanceBeforeRequestWei, err := env.EVMClient.BalanceAt(utils.TestContext(t), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address())) + require.NoError(t, err, "error getting wrapper consumer balance") + + wrapperSubscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), wrapperSubID) + require.NoError(t, err, "error getting subscription information") + subBalanceBeforeRequest := wrapperSubscription.NativeBalance + + randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment( + wrapperContracts.LoadTestConsumers[0], + vrfv2PlusContracts.Coordinator, + vrfv2PlusData, + wrapperSubID, + isNativeBilling, + testConfig, + testConfig.RandomWordsFulfilledEventTimeout, + l, + ) + require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + + 
expectedSubBalanceWei := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment) + wrapperSubscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), wrapperSubID) + require.NoError(t, err, "error getting subscription information") + subBalanceAfterRequest := wrapperSubscription.NativeBalance + require.Equal(t, expectedSubBalanceWei, subBalanceAfterRequest) + + consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(utils.TestContext(t), randomWordsFulfilledEvent.RequestId) + require.NoError(t, err, "error getting rand request status") + require.True(t, consumerStatus.Fulfilled) + + expectedWrapperConsumerWeiBalance := new(big.Int).Sub(wrapperConsumerBalanceBeforeRequestWei, consumerStatus.Paid) + + wrapperConsumerBalanceAfterRequestWei, err := env.EVMClient.BalanceAt(utils.TestContext(t), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address())) + require.NoError(t, err, "error getting wrapper consumer balance") + require.Equal(t, expectedWrapperConsumerWeiBalance, wrapperConsumerBalanceAfterRequestWei) + + //todo: uncomment when VRF-651 will be fixed + //require.Equal(t, 1, consumerStatus.Paid.Cmp(randomWordsFulfilledEvent.Payment), "Expected Consumer contract pay more than the Coordinator Sub") + vrfv2plus.LogFulfillmentDetailsNativeBilling(l, wrapperConsumerBalanceBeforeRequestWei, wrapperConsumerBalanceAfterRequestWei, consumerStatus, randomWordsFulfilledEvent) + + require.Equal(t, testConfig.NumberOfWords, uint32(len(consumerStatus.RandomWords))) + for _, w := range consumerStatus.RandomWords { + l.Info().Str("Output", w.String()).Msg("Randomness fulfilled") + require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0") + } + }) + }) + t.Run("Canceling Sub And Returning Funds", func(t *testing.T) { + testConfig := vrfv2PlusConfig + subIDsForCancelling, err := vrfv2plus.CreateFundSubsAndAddConsumers( + env, + testConfig, + linkToken, + 
vrfv2PlusContracts.Coordinator, + vrfv2PlusContracts.LoadTestConsumers, + 1, + ) + require.NoError(t, err) + subIDForCancelling := subIDsForCancelling[0] - t.Run("VRFV2 Plus With Direct Funding (VRFV2PlusWrapper) - Link Billing", func(t *testing.T) { - var isNativeBilling = false + testWalletAddress, err := actions.GenerateWallet() + require.NoError(t, err) + + testWalletBalanceNativeBeforeSubCancelling, err := env.EVMClient.BalanceAt(utils.TestContext(t), testWalletAddress) + require.NoError(t, err) - wrapperConsumerJuelsBalanceBeforeRequest, err := linkToken.BalanceOf(context.Background(), wrapperContracts.LoadTestConsumers[0].Address()) - require.NoError(t, err, "error getting wrapper consumer balance") + testWalletBalanceLinkBeforeSubCancelling, err := linkToken.BalanceOf(utils.TestContext(t), testWalletAddress.String()) + require.NoError(t, err) - wrapperSubscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), wrapperSubID) + subscriptionForCancelling, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subIDForCancelling) require.NoError(t, err, "error getting subscription information") - subBalanceBeforeRequest := wrapperSubscription.Balance - randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment( - wrapperContracts.LoadTestConsumers[0], + subBalanceLink := subscriptionForCancelling.Balance + subBalanceNative := subscriptionForCancelling.NativeBalance + l.Info(). + Str("Subscription Amount Native", subBalanceNative.String()). + Str("Subscription Amount Link", subBalanceLink.String()). + Str("Returning funds from SubID", subIDForCancelling.String()). + Str("Returning funds to", testWalletAddress.String()). 
+ Msg("Canceling subscription and returning funds to subscription owner") + tx, err := vrfv2PlusContracts.Coordinator.CancelSubscription(subIDForCancelling, testWalletAddress) + require.NoError(t, err, "Error canceling subscription") + + subscriptionCanceledEvent, err := vrfv2PlusContracts.Coordinator.WaitForSubscriptionCanceledEvent(subIDForCancelling, time.Second*30) + require.NoError(t, err, "error waiting for subscription canceled event") + + cancellationTxReceipt, err := env.EVMClient.GetTxReceipt(tx.Hash()) + require.NoError(t, err, "error getting tx cancellation Tx Receipt") + + txGasUsed := new(big.Int).SetUint64(cancellationTxReceipt.GasUsed) + cancellationTxFeeWei := new(big.Int).Mul(txGasUsed, cancellationTxReceipt.EffectiveGasPrice) + + l.Info(). + Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()). + Str("Effective Gas Price", cancellationTxReceipt.EffectiveGasPrice.String()). + Uint64("Gas Used", cancellationTxReceipt.GasUsed). + Msg("Cancellation TX Receipt") + + l.Info(). + Str("Returned Subscription Amount Native", subscriptionCanceledEvent.AmountNative.String()). + Str("Returned Subscription Amount Link", subscriptionCanceledEvent.AmountLink.String()). + Str("SubID", subscriptionCanceledEvent.SubId.String()). + Str("Returned to", subscriptionCanceledEvent.To.String()). 
+ Msg("Subscription Canceled Event") + + require.Equal(t, subBalanceNative, subscriptionCanceledEvent.AmountNative, "SubscriptionCanceled event native amount is not equal to sub amount while canceling subscription") + require.Equal(t, subBalanceLink, subscriptionCanceledEvent.AmountLink, "SubscriptionCanceled event LINK amount is not equal to sub amount while canceling subscription") + + testWalletBalanceNativeAfterSubCancelling, err := env.EVMClient.BalanceAt(utils.TestContext(t), testWalletAddress) + require.NoError(t, err) + + testWalletBalanceLinkAfterSubCancelling, err := linkToken.BalanceOf(utils.TestContext(t), testWalletAddress.String()) + require.NoError(t, err) + + //Verify that sub was deleted from Coordinator + _, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subIDForCancelling) + require.Error(t, err, "error not occurred when trying to get deleted subscription from old Coordinator after sub migration") + + subFundsReturnedNativeActual := new(big.Int).Sub(testWalletBalanceNativeAfterSubCancelling, testWalletBalanceNativeBeforeSubCancelling) + subFundsReturnedLinkActual := new(big.Int).Sub(testWalletBalanceLinkAfterSubCancelling, testWalletBalanceLinkBeforeSubCancelling) + + subFundsReturnedNativeExpected := new(big.Int).Sub(subBalanceNative, cancellationTxFeeWei) + deltaSpentOnCancellationTxFee := new(big.Int).Sub(subBalanceNative, subFundsReturnedNativeActual) + l.Info(). + Str("Sub Balance - Native", subBalanceNative.String()). + Str("Delta Spent On Cancellation Tx Fee - `NativeBalance - subFundsReturnedNativeActual`", deltaSpentOnCancellationTxFee.String()). + Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()). + Str("Sub Funds Returned Actual - Native", subFundsReturnedNativeActual.String()). + Str("Sub Funds Returned Expected - `NativeBalance - cancellationTxFeeWei`", subFundsReturnedNativeExpected.String()). + Str("Sub Funds Returned Actual - Link", subFundsReturnedLinkActual.String()). 
+ Str("Sub Balance - Link", subBalanceLink.String()). + Msg("Sub funds returned") + + //todo - this fails on SIMULATED env as tx cost is calculated different as for testnets and it's not receipt.EffectiveGasPrice*receipt.GasUsed + //require.Equal(t, subFundsReturnedNativeExpected, subFundsReturnedNativeActual, "Returned funds are not equal to sub balance that was cancelled") + require.Equal(t, 1, testWalletBalanceNativeAfterSubCancelling.Cmp(testWalletBalanceNativeBeforeSubCancelling), "Native funds were not returned after sub cancellation") + require.Equal(t, 0, subBalanceLink.Cmp(subFundsReturnedLinkActual), "Returned LINK funds are not equal to sub balance that was cancelled") + + }) + t.Run("Owner Canceling Sub And Returning Funds While Having Pending Requests", func(t *testing.T) { + testConfig := vrfv2PlusConfig + //underfund subs in order rand fulfillments to fail + testConfig.SubscriptionFundingAmountNative = float64(0.000000000000000001) //1 Wei + testConfig.SubscriptionFundingAmountLink = float64(0.000000000000000001) //1 Juels + + subIDsForCancelling, err := vrfv2plus.CreateFundSubsAndAddConsumers( + env, + testConfig, + linkToken, vrfv2PlusContracts.Coordinator, - vrfv2PlusData, - wrapperSubID, - isNativeBilling, - &vrfv2PlusConfig, - l, + vrfv2PlusContracts.LoadTestConsumers, + 1, ) - require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + require.NoError(t, err) - expectedSubBalanceJuels := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment) - wrapperSubscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), wrapperSubID) + subIDForCancelling := subIDsForCancelling[0] + + subscriptionForCancelling, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subIDForCancelling) require.NoError(t, err, "error getting subscription information") - subBalanceAfterRequest := wrapperSubscription.Balance - require.Equal(t, expectedSubBalanceJuels, 
subBalanceAfterRequest) - consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(context.Background(), randomWordsFulfilledEvent.RequestId) - require.NoError(t, err, "error getting rand request status") - require.True(t, consumerStatus.Fulfilled) + vrfv2plus.LogSubDetails(l, subscriptionForCancelling, subIDForCancelling, vrfv2PlusContracts.Coordinator) - expectedWrapperConsumerJuelsBalance := new(big.Int).Sub(wrapperConsumerJuelsBalanceBeforeRequest, consumerStatus.Paid) + activeSubscriptionIdsBeforeSubCancellation, err := vrfv2PlusContracts.Coordinator.GetActiveSubscriptionIds(utils.TestContext(t), big.NewInt(0), big.NewInt(0)) + require.NoError(t, err) - wrapperConsumerJuelsBalanceAfterRequest, err := linkToken.BalanceOf(context.Background(), wrapperContracts.LoadTestConsumers[0].Address()) - require.NoError(t, err, "error getting wrapper consumer balance") - require.Equal(t, expectedWrapperConsumerJuelsBalance, wrapperConsumerJuelsBalanceAfterRequest) + require.True(t, utils.BigIntSliceContains(activeSubscriptionIdsBeforeSubCancellation, subIDForCancelling)) - //todo: uncomment when VRF-651 will be fixed - //require.Equal(t, 1, consumerStatus.Paid.Cmp(randomWordsFulfilledEvent.Payment), "Expected Consumer contract pay more than the Coordinator Sub") - vrfv2plus.LogFulfillmentDetailsLinkBilling(l, wrapperConsumerJuelsBalanceBeforeRequest, wrapperConsumerJuelsBalanceAfterRequest, consumerStatus, randomWordsFulfilledEvent) + pendingRequestsExist, err := vrfv2PlusContracts.Coordinator.PendingRequestsExist(utils.TestContext(t), subIDForCancelling) + require.NoError(t, err) + require.False(t, pendingRequestsExist, "Pending requests should not exist") - require.Equal(t, vrfv2PlusConfig.NumberOfWords, uint32(len(consumerStatus.RandomWords))) - for _, w := range consumerStatus.RandomWords { - l.Info().Str("Output", w.String()).Msg("Randomness fulfilled") - require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 
0") - } - }) + _, err = vrfv2plus.RequestRandomnessAndWaitForFulfillment( + vrfv2PlusContracts.LoadTestConsumers[0], + vrfv2PlusContracts.Coordinator, + vrfv2PlusData, + subIDForCancelling, + false, + testConfig.RandomnessRequestCountPerRequest, + testConfig, + 5*time.Second, + l, + ) - t.Run("VRFV2 Plus With Direct Funding (VRFV2PlusWrapper) - Native Billing", func(t *testing.T) { - var isNativeBilling = true + require.Error(t, err, "error should occur for waiting for fulfilment due to low sub balance") - wrapperConsumerBalanceBeforeRequestWei, err := env.EVMClient.BalanceAt(context.Background(), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address())) - require.NoError(t, err, "error getting wrapper consumer balance") + _, err = vrfv2plus.RequestRandomnessAndWaitForFulfillment( + vrfv2PlusContracts.LoadTestConsumers[0], + vrfv2PlusContracts.Coordinator, + vrfv2PlusData, + subIDForCancelling, + true, + testConfig.RandomnessRequestCountPerRequest, + testConfig, + testConfig.RandomWordsFulfilledEventTimeout, + l, + ) + + require.Error(t, err, "error should occur for waiting for fulfilment due to low sub balance") + + pendingRequestsExist, err = vrfv2PlusContracts.Coordinator.PendingRequestsExist(utils.TestContext(t), subIDForCancelling) + require.NoError(t, err) + require.True(t, pendingRequestsExist, "Pending requests should exist after unfulfilled rand requests due to low sub balance") + + walletBalanceNativeBeforeSubCancelling, err := env.EVMClient.BalanceAt(utils.TestContext(t), common.HexToAddress(defaultWalletAddress)) + require.NoError(t, err) - wrapperSubscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), wrapperSubID) + walletBalanceLinkBeforeSubCancelling, err := linkToken.BalanceOf(utils.TestContext(t), defaultWalletAddress) + require.NoError(t, err) + + subscriptionForCancelling, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subIDForCancelling) require.NoError(t, err, "error 
getting subscription information") - subBalanceBeforeRequest := wrapperSubscription.NativeBalance - randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment( - wrapperContracts.LoadTestConsumers[0], + subBalanceLink := subscriptionForCancelling.Balance + subBalanceNative := subscriptionForCancelling.NativeBalance + l.Info(). + Str("Subscription Amount Native", subBalanceNative.String()). + Str("Subscription Amount Link", subBalanceLink.String()). + Str("Returning funds from SubID", subIDForCancelling.String()). + Str("Returning funds to", defaultWalletAddress). + Msg("Canceling subscription and returning funds to subscription owner") + tx, err := vrfv2PlusContracts.Coordinator.OwnerCancelSubscription(subIDForCancelling) + require.NoError(t, err, "Error canceling subscription") + + subscriptionCanceledEvent, err := vrfv2PlusContracts.Coordinator.WaitForSubscriptionCanceledEvent(subIDForCancelling, time.Second*30) + require.NoError(t, err, "error waiting for subscription canceled event") + + cancellationTxReceipt, err := env.EVMClient.GetTxReceipt(tx.Hash()) + require.NoError(t, err, "error getting tx cancellation Tx Receipt") + + txGasUsed := new(big.Int).SetUint64(cancellationTxReceipt.GasUsed) + cancellationTxFeeWei := new(big.Int).Mul(txGasUsed, cancellationTxReceipt.EffectiveGasPrice) + + l.Info(). + Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()). + Str("Effective Gas Price", cancellationTxReceipt.EffectiveGasPrice.String()). + Uint64("Gas Used", cancellationTxReceipt.GasUsed). + Msg("Cancellation TX Receipt") + + l.Info(). + Str("Returned Subscription Amount Native", subscriptionCanceledEvent.AmountNative.String()). + Str("Returned Subscription Amount Link", subscriptionCanceledEvent.AmountLink.String()). + Str("SubID", subscriptionCanceledEvent.SubId.String()). + Str("Returned to", subscriptionCanceledEvent.To.String()). 
+ Msg("Subscription Canceled Event") + + require.Equal(t, subBalanceNative, subscriptionCanceledEvent.AmountNative, "SubscriptionCanceled event native amount is not equal to sub amount while canceling subscription") + require.Equal(t, subBalanceLink, subscriptionCanceledEvent.AmountLink, "SubscriptionCanceled event LINK amount is not equal to sub amount while canceling subscription") + + walletBalanceNativeAfterSubCancelling, err := env.EVMClient.BalanceAt(utils.TestContext(t), common.HexToAddress(defaultWalletAddress)) + require.NoError(t, err) + + walletBalanceLinkAfterSubCancelling, err := linkToken.BalanceOf(utils.TestContext(t), defaultWalletAddress) + require.NoError(t, err) + + //Verify that sub was deleted from Coordinator + _, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subIDForCancelling) + fmt.Println("err", err) + require.Error(t, err, "error not occurred when trying to get deleted subscription from old Coordinator after sub migration") + + subFundsReturnedNativeActual := new(big.Int).Sub(walletBalanceNativeAfterSubCancelling, walletBalanceNativeBeforeSubCancelling) + subFundsReturnedLinkActual := new(big.Int).Sub(walletBalanceLinkAfterSubCancelling, walletBalanceLinkBeforeSubCancelling) + + subFundsReturnedNativeExpected := new(big.Int).Sub(subBalanceNative, cancellationTxFeeWei) + deltaSpentOnCancellationTxFee := new(big.Int).Sub(subBalanceNative, subFundsReturnedNativeActual) + l.Info(). + Str("Sub Balance - Native", subBalanceNative.String()). + Str("Delta Spent On Cancellation Tx Fee - `NativeBalance - subFundsReturnedNativeActual`", deltaSpentOnCancellationTxFee.String()). + Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()). + Str("Sub Funds Returned Actual - Native", subFundsReturnedNativeActual.String()). + Str("Sub Funds Returned Expected - `NativeBalance - cancellationTxFeeWei`", subFundsReturnedNativeExpected.String()). + Str("Sub Funds Returned Actual - Link", subFundsReturnedLinkActual.String()). 
+ Str("Sub Balance - Link", subBalanceLink.String()). + Str("walletBalanceNativeBeforeSubCancelling", walletBalanceNativeBeforeSubCancelling.String()). + Str("walletBalanceNativeAfterSubCancelling", walletBalanceNativeAfterSubCancelling.String()). + Msg("Sub funds returned") + + //todo - need to use different wallet for each test to verify exact amount of Native/LINK returned + //todo - as defaultWallet is used in other tests in parallel which might affect the balance + //require.Equal(t, 1, walletBalanceNativeAfterSubCancelling.Cmp(walletBalanceNativeBeforeSubCancelling), "Native funds were not returned after sub cancellation") + + //todo - this fails on SIMULATED env as tx cost is calculated different as for testnets and it's not receipt.EffectiveGasPrice*receipt.GasUsed + //require.Equal(t, subFundsReturnedNativeExpected, subFundsReturnedNativeActual, "Returned funds are not equal to sub balance that was cancelled") + require.Equal(t, 0, subBalanceLink.Cmp(subFundsReturnedLinkActual), "Returned LINK funds are not equal to sub balance that was cancelled") + + activeSubscriptionIdsAfterSubCancellation, err := vrfv2PlusContracts.Coordinator.GetActiveSubscriptionIds(utils.TestContext(t), big.NewInt(0), big.NewInt(0)) + require.NoError(t, err, "error getting active subscription ids") + + require.False( + t, + utils.BigIntSliceContains(activeSubscriptionIdsAfterSubCancellation, subIDForCancelling), + "Active subscription ids should not contain sub id after sub cancellation", + ) + }) + t.Run("Oracle Withdraw", func(t *testing.T) { + testConfig := vrfv2PlusConfig + subIDsForOracleWithDraw, err := vrfv2plus.CreateFundSubsAndAddConsumers( + env, + testConfig, + linkToken, + vrfv2PlusContracts.Coordinator, + vrfv2PlusContracts.LoadTestConsumers, + 1, + ) + require.NoError(t, err) + subIDForOracleWithdraw := subIDsForOracleWithDraw[0] + + fulfilledEventLink, err := vrfv2plus.RequestRandomnessAndWaitForFulfillment( + vrfv2PlusContracts.LoadTestConsumers[0], 
vrfv2PlusContracts.Coordinator, vrfv2PlusData, - wrapperSubID, - isNativeBilling, - &vrfv2PlusConfig, + subIDForOracleWithdraw, + false, + testConfig.RandomnessRequestCountPerRequest, + testConfig, + testConfig.RandomWordsFulfilledEventTimeout, l, ) - require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + require.NoError(t, err) - expectedSubBalanceWei := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment) - wrapperSubscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), wrapperSubID) - require.NoError(t, err, "error getting subscription information") - subBalanceAfterRequest := wrapperSubscription.NativeBalance - require.Equal(t, expectedSubBalanceWei, subBalanceAfterRequest) + fulfilledEventNative, err := vrfv2plus.RequestRandomnessAndWaitForFulfillment( + vrfv2PlusContracts.LoadTestConsumers[0], + vrfv2PlusContracts.Coordinator, + vrfv2PlusData, + subIDForOracleWithdraw, + true, + testConfig.RandomnessRequestCountPerRequest, + testConfig, + testConfig.RandomWordsFulfilledEventTimeout, + l, + ) + require.NoError(t, err) + amountToWithdrawLink := fulfilledEventLink.Payment - consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(context.Background(), randomWordsFulfilledEvent.RequestId) - require.NoError(t, err, "error getting rand request status") - require.True(t, consumerStatus.Fulfilled) + defaultWalletBalanceNativeBeforeOracleWithdraw, err := env.EVMClient.BalanceAt(utils.TestContext(t), common.HexToAddress(defaultWalletAddress)) + require.NoError(t, err) - expectedWrapperConsumerWeiBalance := new(big.Int).Sub(wrapperConsumerBalanceBeforeRequestWei, consumerStatus.Paid) + defaultWalletBalanceLinkBeforeOracleWithdraw, err := linkToken.BalanceOf(utils.TestContext(t), defaultWalletAddress) + require.NoError(t, err) - wrapperConsumerBalanceAfterRequestWei, err := env.EVMClient.BalanceAt(context.Background(), 
common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address())) - require.NoError(t, err, "error getting wrapper consumer balance") - require.Equal(t, expectedWrapperConsumerWeiBalance, wrapperConsumerBalanceAfterRequestWei) + l.Info(). + Str("Returning to", defaultWalletAddress). + Str("Amount", amountToWithdrawLink.String()). + Msg("Invoking Oracle Withdraw for LINK") - //todo: uncomment when VRF-651 will be fixed - //require.Equal(t, 1, consumerStatus.Paid.Cmp(randomWordsFulfilledEvent.Payment), "Expected Consumer contract pay more than the Coordinator Sub") - vrfv2plus.LogFulfillmentDetailsNativeBilling(l, wrapperConsumerBalanceBeforeRequestWei, wrapperConsumerBalanceAfterRequestWei, consumerStatus, randomWordsFulfilledEvent) + err = vrfv2PlusContracts.Coordinator.OracleWithdraw( + common.HexToAddress(defaultWalletAddress), + amountToWithdrawLink, + ) + require.NoError(t, err, "error withdrawing LINK from coordinator to default wallet") + amountToWithdrawNative := fulfilledEventNative.Payment - require.Equal(t, vrfv2PlusConfig.NumberOfWords, uint32(len(consumerStatus.RandomWords))) - for _, w := range consumerStatus.RandomWords { - l.Info().Str("Output", w.String()).Msg("Randomness fulfilled") - require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0") - } - }) + l.Info(). + Str("Returning to", defaultWalletAddress). + Str("Amount", amountToWithdrawNative.String()). 
+ Msg("Invoking Oracle Withdraw for Native") + + err = vrfv2PlusContracts.Coordinator.OracleWithdrawNative( + common.HexToAddress(defaultWalletAddress), + amountToWithdrawNative, + ) + require.NoError(t, err, "error withdrawing Native tokens from coordinator to default wallet") + + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, vrfv2plus.ErrWaitTXsComplete) + + defaultWalletBalanceNativeAfterOracleWithdraw, err := env.EVMClient.BalanceAt(utils.TestContext(t), common.HexToAddress(defaultWalletAddress)) + require.NoError(t, err) + + defaultWalletBalanceLinkAfterOracleWithdraw, err := linkToken.BalanceOf(utils.TestContext(t), defaultWalletAddress) + require.NoError(t, err) + //not possible to verify exact amount of Native/LINK returned as defaultWallet is used in other tests in parallel which might affect the balance + require.Equal(t, 1, defaultWalletBalanceNativeAfterOracleWithdraw.Cmp(defaultWalletBalanceNativeBeforeOracleWithdraw), "Native funds were not returned after oracle withdraw native") + require.Equal(t, 1, defaultWalletBalanceLinkAfterOracleWithdraw.Cmp(defaultWalletBalanceLinkBeforeOracleWithdraw), "LINK funds were not returned after oracle withdraw") + }) } func TestVRFv2PlusMigration(t *testing.T) { @@ -271,22 +609,25 @@ func TestVRFv2PlusMigration(t *testing.T) { linkAddress, err := actions.DeployLINKToken(env.ContractDeployer) require.NoError(t, err, "error deploying LINK contract") - vrfv2PlusContracts, subIDs, vrfv2PlusData, err := vrfv2plus.SetupVRFV2_5Environment(env, &vrfv2PlusConfig, linkAddress, mockETHLinkFeedAddress, 2, 1) + nativeTokenPrimaryKeyAddress, err := env.ClCluster.NodeAPIs()[0].PrimaryEthAddress() + require.NoError(t, err, "error getting primary eth address") + + vrfv2PlusContracts, subIDs, vrfv2PlusData, err := vrfv2plus.SetupVRFV2_5Environment(env, vrfv2PlusConfig, linkAddress, mockETHLinkFeedAddress, nativeTokenPrimaryKeyAddress, 2, 1, l) require.NoError(t, err, "error setting up VRF v2_5 env") subID := subIDs[0] 
- subscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID) + subscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID) require.NoError(t, err, "error getting subscription information") vrfv2plus.LogSubDetails(l, subscription, subID, vrfv2PlusContracts.Coordinator) - activeSubIdsOldCoordinatorBeforeMigration, err := vrfv2PlusContracts.Coordinator.GetActiveSubscriptionIds(context.Background(), big.NewInt(0), big.NewInt(0)) + activeSubIdsOldCoordinatorBeforeMigration, err := vrfv2PlusContracts.Coordinator.GetActiveSubscriptionIds(utils.TestContext(t), big.NewInt(0), big.NewInt(0)) require.NoError(t, err, "error occurred getting active sub ids") require.Len(t, activeSubIdsOldCoordinatorBeforeMigration, 1, "Active Sub Ids length is not equal to 1") require.Equal(t, subID, activeSubIdsOldCoordinatorBeforeMigration[0]) - oldSubscriptionBeforeMigration, err := vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID) + oldSubscriptionBeforeMigration, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID) require.NoError(t, err, "error getting subscription information") //Migration Process @@ -297,7 +638,7 @@ func TestVRFv2PlusMigration(t *testing.T) { require.NoError(t, err, vrfv2plus.ErrWaitTXsComplete) _, err = vrfv2plus.VRFV2PlusUpgradedVersionRegisterProvingKey(vrfv2PlusData.VRFKey, vrfv2PlusData.PrimaryEthAddress, newCoordinator) - require.NoError(t, err, errors.Wrap(err, vrfv2plus.ErrRegisteringProvingKey)) + require.NoError(t, err, fmt.Errorf("%s, err: %w", vrfv2plus.ErrRegisteringProvingKey, err)) err = newCoordinator.SetConfig( vrfv2PlusConfig.MinimumConfirmations, @@ -310,6 +651,7 @@ func TestVRFv2PlusMigration(t *testing.T) { FulfillmentFlatFeeNativePPM: vrfv2PlusConfig.FulfillmentFlatFeeNativePPM, }, ) + require.NoError(t, err) err = newCoordinator.SetLINKAndLINKNativeFeed(linkAddress.Address(), mockETHLinkFeedAddress.Address()) 
require.NoError(t, err, vrfv2plus.ErrSetLinkNativeLinkFeed) @@ -356,14 +698,14 @@ func TestVRFv2PlusMigration(t *testing.T) { migratedCoordinatorLinkTotalBalanceAfterMigration, migratedCoordinatorEthTotalBalanceAfterMigration, err := vrfv2plus.GetUpgradedCoordinatorTotalBalance(newCoordinator) require.NoError(t, err) - migratedSubscription, err := newCoordinator.GetSubscription(context.Background(), subID) + migratedSubscription, err := newCoordinator.GetSubscription(utils.TestContext(t), subID) require.NoError(t, err, "error getting subscription information") vrfv2plus.LogSubDetailsAfterMigration(l, newCoordinator, subID, migratedSubscription) //Verify that Coordinators were updated in Consumers for _, consumer := range vrfv2PlusContracts.LoadTestConsumers { - coordinatorAddressInConsumerAfterMigration, err := consumer.GetCoordinator(context.Background()) + coordinatorAddressInConsumerAfterMigration, err := consumer.GetCoordinator(utils.TestContext(t)) require.NoError(t, err, "error getting Coordinator from Consumer contract") require.Equal(t, newCoordinator.Address(), coordinatorAddressInConsumerAfterMigration.String()) l.Debug(). @@ -379,13 +721,13 @@ func TestVRFv2PlusMigration(t *testing.T) { require.Equal(t, oldSubscriptionBeforeMigration.Consumers, migratedSubscription.Consumers) //Verify that old sub was deleted from old Coordinator - _, err = vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID) + _, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID) require.Error(t, err, "error not occurred when trying to get deleted subscription from old Coordinator after sub migration") - _, err = vrfv2PlusContracts.Coordinator.GetActiveSubscriptionIds(context.Background(), big.NewInt(0), big.NewInt(0)) + _, err = vrfv2PlusContracts.Coordinator.GetActiveSubscriptionIds(utils.TestContext(t), big.NewInt(0), big.NewInt(0)) require.Error(t, err, "error not occurred getting active sub ids. 
Should occur since it should revert when sub id array is empty") - activeSubIdsMigratedCoordinator, err := newCoordinator.GetActiveSubscriptionIds(context.Background(), big.NewInt(0), big.NewInt(0)) + activeSubIdsMigratedCoordinator, err := newCoordinator.GetActiveSubscriptionIds(utils.TestContext(t), big.NewInt(0), big.NewInt(0)) require.NoError(t, err, "error occurred getting active sub ids") require.Len(t, activeSubIdsMigratedCoordinator, 1, "Active Sub Ids length is not equal to 1 for Migrated Coordinator after migration") require.Equal(t, subID, activeSubIdsMigratedCoordinator[0]) @@ -396,10 +738,10 @@ func TestVRFv2PlusMigration(t *testing.T) { expectedLinkTotalBalanceForOldCoordinator := new(big.Int).Sub(oldCoordinatorLinkTotalBalanceBeforeMigration, oldSubscriptionBeforeMigration.Balance) expectedEthTotalBalanceForOldCoordinator := new(big.Int).Sub(oldCoordinatorEthTotalBalanceBeforeMigration, oldSubscriptionBeforeMigration.NativeBalance) - require.Equal(t, expectedLinkTotalBalanceForMigratedCoordinator, migratedCoordinatorLinkTotalBalanceAfterMigration) - require.Equal(t, expectedEthTotalBalanceForMigratedCoordinator, migratedCoordinatorEthTotalBalanceAfterMigration) - require.Equal(t, expectedLinkTotalBalanceForOldCoordinator, oldCoordinatorLinkTotalBalanceAfterMigration) - require.Equal(t, expectedEthTotalBalanceForOldCoordinator, oldCoordinatorEthTotalBalanceAfterMigration) + require.Equal(t, 0, expectedLinkTotalBalanceForMigratedCoordinator.Cmp(migratedCoordinatorLinkTotalBalanceAfterMigration)) + require.Equal(t, 0, expectedEthTotalBalanceForMigratedCoordinator.Cmp(migratedCoordinatorEthTotalBalanceAfterMigration)) + require.Equal(t, 0, expectedLinkTotalBalanceForOldCoordinator.Cmp(oldCoordinatorLinkTotalBalanceAfterMigration)) + require.Equal(t, 0, expectedEthTotalBalanceForOldCoordinator.Cmp(oldCoordinatorEthTotalBalanceAfterMigration)) //Verify rand requests fulfills with Link Token billing _, err = 
vrfv2plus.RequestRandomnessAndWaitForFulfillmentUpgraded( @@ -408,7 +750,7 @@ func TestVRFv2PlusMigration(t *testing.T) { vrfv2PlusData, subID, false, - &vrfv2PlusConfig, + vrfv2PlusConfig, l, ) require.NoError(t, err, "error requesting randomness and waiting for fulfilment") @@ -420,7 +762,7 @@ func TestVRFv2PlusMigration(t *testing.T) { vrfv2PlusData, subID, true, - &vrfv2PlusConfig, + vrfv2PlusConfig, l, ) require.NoError(t, err, "error requesting randomness and waiting for fulfilment") diff --git a/integration-tests/soak/ocr_test.go b/integration-tests/soak/ocr_test.go index b2375f13ac..9973c23808 100644 --- a/integration-tests/soak/ocr_test.go +++ b/integration-tests/soak/ocr_test.go @@ -16,6 +16,7 @@ func TestOCRSoak(t *testing.T) { // Use this variable to pass in any custom EVM specific TOML values to your Chainlink nodes customNetworkTOML := `` // Uncomment below for debugging TOML issues on the node + // network := networks.MustGetSelectedNetworksFromEnv()[0] // fmt.Println("Using Chainlink TOML\n---------------------") // fmt.Println(client.AddNetworkDetailedConfig(config.BaseOCRP2PV1Config, customNetworkTOML, network)) // fmt.Println("---------------------") diff --git a/integration-tests/testreporters/keeper_benchmark.go b/integration-tests/testreporters/keeper_benchmark.go index c800eb37be..e9f2eaad7c 100644 --- a/integration-tests/testreporters/keeper_benchmark.go +++ b/integration-tests/testreporters/keeper_benchmark.go @@ -183,7 +183,7 @@ func (k *KeeperBenchmarkTestReporter) WriteReport(folderLocation string) error { } for contractIndex, report := range k.Reports { - avg, median, ninetyPct, ninetyNinePct, max := intListStats(report.AllCheckDelays) + avg, median, ninetyPct, ninetyNinePct, max = intListStats(report.AllCheckDelays) err = keeperReportWriter.Write([]string{ fmt.Sprint(contractIndex), report.RegistryAddress, @@ -305,6 +305,8 @@ func (k *KeeperBenchmarkTestReporter) SendSlackNotification(t *testing.T, slackC } // intListStats helper 
calculates some statistics on an int list: avg, median, 90pct, 99pct, max +// +//nolint:revive func intListStats(in []int64) (float64, int64, int64, int64, int64) { length := len(in) if length == 0 { diff --git a/integration-tests/testreporters/ocr.go b/integration-tests/testreporters/ocr.go index a04718ea22..abbb261fa7 100644 --- a/integration-tests/testreporters/ocr.go +++ b/integration-tests/testreporters/ocr.go @@ -67,9 +67,7 @@ func (e *OCRRoundState) Time() time.Time { // CSV returns a CSV representation of the test state and all events func (e *OCRRoundState) CSV() [][]string { rows := [][]string{{e.StartTime.Format("2006-01-02 15:04:05.00 MST"), fmt.Sprintf("Expecting new Answer: %d", e.Answer)}} - for _, anomaly := range e.anomalies { - rows = append(rows, anomaly) - } + rows = append(rows, e.anomalies...) return rows } diff --git a/integration-tests/testreporters/profile.go b/integration-tests/testreporters/profile.go index 9ac7713e94..ab9dec138e 100644 --- a/integration-tests/testreporters/profile.go +++ b/integration-tests/testreporters/profile.go @@ -54,7 +54,7 @@ func (c *ChainlinkProfileTestReporter) WriteReport(folderLocation string) error } // SendNotification hasn't been implemented for this test -func (c *ChainlinkProfileTestReporter) SendSlackNotification(t *testing.T, slackClient *slack.Client) error { +func (c *ChainlinkProfileTestReporter) SendSlackNotification(_ *testing.T, _ *slack.Client) error { log.Warn().Msg("No Slack notification integration for Chainlink profile tests") return nil } diff --git a/integration-tests/testreporters/vrfv2plus.go b/integration-tests/testreporters/vrfv2plus.go new file mode 100644 index 0000000000..38220ca882 --- /dev/null +++ b/integration-tests/testreporters/vrfv2plus.go @@ -0,0 +1,92 @@ +package testreporters + +import ( + "fmt" + "math/big" + "os" + "testing" + "time" + + "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus/vrfv2plus_config" + + "github.com/slack-go/slack" + + 
"github.com/smartcontractkit/chainlink-testing-framework/testreporters" +) + +type VRFV2PlusTestReporter struct { + TestType string + RequestCount *big.Int + FulfilmentCount *big.Int + AverageFulfillmentInMillions *big.Int + SlowestFulfillment *big.Int + FastestFulfillment *big.Int + Vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig +} + +func (o *VRFV2PlusTestReporter) SetReportData( + testType string, + RequestCount *big.Int, + FulfilmentCount *big.Int, + AverageFulfillmentInMillions *big.Int, + SlowestFulfillment *big.Int, + FastestFulfillment *big.Int, + vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig, +) { + o.TestType = testType + o.RequestCount = RequestCount + o.FulfilmentCount = FulfilmentCount + o.AverageFulfillmentInMillions = AverageFulfillmentInMillions + o.SlowestFulfillment = SlowestFulfillment + o.FastestFulfillment = FastestFulfillment + o.Vrfv2PlusConfig = &vrfv2PlusConfig +} + +// SendSlackNotification sends a slack message to a slack webhook +func (o *VRFV2PlusTestReporter) SendSlackNotification(t *testing.T, slackClient *slack.Client) error { + if slackClient == nil { + slackClient = slack.New(testreporters.SlackAPIKey) + } + + testFailed := t.Failed() + headerText := fmt.Sprintf(":white_check_mark: VRF %s Test PASSED :white_check_mark:", o.TestType) + if testFailed { + headerText = fmt.Sprintf(":x: VRF %s Test FAILED :x:", o.TestType) + } + + messageBlocks := testreporters.SlackNotifyBlocks(headerText, os.Getenv("SELECTED_NETWORKS"), []string{ + fmt.Sprintf( + "Summary\n"+ + "Perf Test Type: %s\n"+ + "Test Duration set in parameters: %s\n"+ + "Use Existing Env: %t\n"+ + "Request Count: %s\n"+ + "Fulfilment Count: %s\n"+ + "AverageFulfillmentInMillions: %s\n"+ + "Slowest Fulfillment: %s\n"+ + "Fastest Fulfillment: %s \n"+ + "RPS: %d\n"+ + "RateLimitUnitDuration: %s\n"+ + "RandomnessRequestCountPerRequest: %d\n"+ + "RandomnessRequestCountPerRequestDeviation: %d\n", + o.TestType, + o.Vrfv2PlusConfig.TestDuration.Truncate(time.Second).String(), + 
o.Vrfv2PlusConfig.UseExistingEnv, + o.RequestCount.String(), + o.FulfilmentCount.String(), + o.AverageFulfillmentInMillions.String(), + o.SlowestFulfillment.String(), + o.FastestFulfillment.String(), + o.Vrfv2PlusConfig.RPS, + o.Vrfv2PlusConfig.RateLimitUnitDuration.String(), + o.Vrfv2PlusConfig.RandomnessRequestCountPerRequest, + o.Vrfv2PlusConfig.RandomnessRequestCountPerRequestDeviation, + ), + }) + + _, err := testreporters.SendSlackMessage(slackClient, slack.MsgOptionBlocks(messageBlocks...)) + if err != nil { + return err + } + return nil +} diff --git a/integration-tests/testsetups/don_evm_chain.go b/integration-tests/testsetups/don_evm_chain.go index 545d951580..3ade7f0d69 100644 --- a/integration-tests/testsetups/don_evm_chain.go +++ b/integration-tests/testsetups/don_evm_chain.go @@ -6,13 +6,13 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/require" - e "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" - "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver" - mockservercfg "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" + e "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver" + mockservercfg "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" diff --git a/integration-tests/testsetups/keeper_benchmark.go 
b/integration-tests/testsetups/keeper_benchmark.go index 466eb97fdd..bb6c582c13 100644 --- a/integration-tests/testsetups/keeper_benchmark.go +++ b/integration-tests/testsetups/keeper_benchmark.go @@ -21,8 +21,8 @@ import ( "github.com/slack-go/slack" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/chainlink-env/environment" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" "github.com/smartcontractkit/chainlink-testing-framework/logging" reportModel "github.com/smartcontractkit/chainlink-testing-framework/testreporters" @@ -37,6 +37,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" "github.com/smartcontractkit/chainlink/integration-tests/testreporters" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) // KeeperBenchmarkTest builds a test to check that chainlink nodes are able to upkeep a specified amount of Upkeep @@ -229,7 +230,7 @@ func (k *KeeperBenchmarkTest) Run() { "NumberOfRegistries": len(k.keeperRegistries), } inputs := k.Inputs - startingBlock, err := k.chainClient.LatestBlockNumber(context.Background()) + startingBlock, err := k.chainClient.LatestBlockNumber(utils.TestContext(k.t)) require.NoError(k.t, err, "Error getting latest block number") k.startingBlock = big.NewInt(0).SetUint64(startingBlock) startTime := time.Now() @@ -305,7 +306,7 @@ func (k *KeeperBenchmarkTest) Run() { err = fmt.Errorf("initial error") // to ensure our for loop runs at least once ) for err != nil { // This RPC call can possibly time out or otherwise die. Failure is not an option, keep retrying to get our stats. 
- ctx, cancel := context.WithTimeout(context.Background(), timeout) + ctx, cancel := context.WithTimeout(utils.TestContext(k.t), timeout) logs, err = k.chainClient.FilterLogs(ctx, filterQuery) cancel() if err != nil { @@ -407,12 +408,13 @@ func (k *KeeperBenchmarkTest) observeUpkeepEvents() { FromBlock: k.startingBlock, } - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(utils.TestContext(k.t), 5*time.Second) sub, err := k.chainClient.SubscribeFilterLogs(ctx, filterQuery, eventLogs) cancel() require.NoError(k.t, err, "Subscribing to upkeep performed events log shouldn't fail") interruption := make(chan os.Signal, 1) + //nolint:staticcheck //ignore SA1016 we need to send the os.Kill signal signal.Notify(interruption, os.Kill, os.Interrupt, syscall.SIGTERM) go func() { @@ -429,7 +431,7 @@ func (k *KeeperBenchmarkTest) observeUpkeepEvents() { Str("Backoff", backoff.String()). Msg("Error while subscribing to Keeper Event Logs. Resubscribing...") - ctx, cancel := context.WithTimeout(context.Background(), backoff) + ctx, cancel := context.WithTimeout(utils.TestContext(k.t), backoff) sub, err = k.chainClient.SubscribeFilterLogs(ctx, filterQuery, eventLogs) cancel() if err != nil { diff --git a/integration-tests/testsetups/ocr.go b/integration-tests/testsetups/ocr.go index 048f3124ad..3fb9dd9844 100644 --- a/integration-tests/testsetups/ocr.go +++ b/integration-tests/testsetups/ocr.go @@ -26,13 +26,13 @@ import ( "github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator" - "github.com/smartcontractkit/chainlink-env/environment" - "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" - "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" - "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver" - mockservercfg "github.com/smartcontractkit/chainlink-env/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" ctfClient 
"github.com/smartcontractkit/chainlink-testing-framework/client" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver" + mockservercfg "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" reportModel "github.com/smartcontractkit/chainlink-testing-framework/testreporters" @@ -42,6 +42,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/config" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/testreporters" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) const ( @@ -126,7 +127,7 @@ func NewOCRSoakTest(t *testing.T, forwarderFlow bool) (*OCRSoakTest, error) { // DeployEnvironment deploys the test environment, starting all Chainlink nodes and other components for the test func (o *OCRSoakTest) DeployEnvironment(customChainlinkNetworkTOML string) { - network := networks.SelectedNetwork // Environment currently being used to soak test on + network := networks.MustGetSelectedNetworksFromEnv()[0] // Environment currently being used to soak test on nsPre := "soak-ocr-" if o.OperatorForwarderFlow { nsPre = fmt.Sprintf("%sforwarder-", nsPre) @@ -163,9 +164,9 @@ func (o *OCRSoakTest) DeployEnvironment(customChainlinkNetworkTOML string) { } // LoadEnvironment loads an existing test environment using the provided URLs -func (o *OCRSoakTest) LoadEnvironment(chainlinkURLs []string, chainURL, mockServerURL string) { +func (o *OCRSoakTest) LoadEnvironment(chainlinkURLs []string, mockServerURL string) { var ( - network = 
networks.SelectedNetwork + network = networks.MustGetSelectedNetworksFromEnv()[0] err error ) o.chainClient, err = blockchain.ConnectEVMClient(network, o.log) @@ -185,7 +186,7 @@ func (o *OCRSoakTest) Environment() *environment.Environment { func (o *OCRSoakTest) Setup() { var ( err error - network = networks.SelectedNetwork + network = networks.MustGetSelectedNetworksFromEnv()[0] ) // Environment currently being used to soak test on @@ -241,7 +242,6 @@ func (o *OCRSoakTest) Setup() { o.Inputs.NumberOfContracts, linkTokenContract, contractDeployer, - o.bootstrapNode, o.workerNodes, o.chainClient, ) @@ -258,7 +258,7 @@ func (o *OCRSoakTest) Setup() { // Run starts the OCR soak test func (o *OCRSoakTest) Run() { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + ctx, cancel := context.WithTimeout(utils.TestContext(o.t), time.Second*5) latestBlockNum, err := o.chainClient.LatestBlockNumber(ctx) cancel() require.NoError(o.t, err, "Error getting current block number") @@ -343,7 +343,7 @@ func (o *OCRSoakTest) SaveState() error { if err != nil { return err } - // #nosec G306 - let everyone read + //nolint:gosec // G306 - let everyone read if err = os.WriteFile(saveFileLocation, data, 0644); err != nil { return err } @@ -387,7 +387,7 @@ func (o *OCRSoakTest) LoadState() error { o.startTime = testState.StartTime o.startingBlockNum = testState.StartingBlockNum - network := networks.SelectedNetwork + network := networks.MustGetSelectedNetworksFromEnv()[0] o.chainClient, err = blockchain.ConnectEVMClient(network, o.log) if err != nil { return err @@ -468,6 +468,7 @@ func (o *OCRSoakTest) Interrupted() bool { func (o *OCRSoakTest) testLoop(testDuration time.Duration, newValue int) { endTest := time.After(testDuration) interruption := make(chan os.Signal, 1) + //nolint:staticcheck //ignore SA1016 we need to send the os.Kill signal signal.Notify(interruption, os.Kill, os.Interrupt, syscall.SIGTERM) lastValue := 0 newRoundTrigger := time.NewTimer(0) // 
Want to trigger a new round ASAP @@ -558,7 +559,7 @@ func (o *OCRSoakTest) setFilterQuery() { // WARNING: Should only be used for observation and logging. This is not a reliable way to collect events. func (o *OCRSoakTest) observeOCREvents() error { eventLogs := make(chan types.Log) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(utils.TestContext(o.t), 5*time.Second) eventSub, err := o.chainClient.SubscribeFilterLogs(ctx, o.filterQuery, eventLogs) cancel() if err != nil { @@ -592,7 +593,7 @@ func (o *OCRSoakTest) observeOCREvents() error { Str("Backoff", backoff.String()). Interface("Query", o.filterQuery). Msg("Error while subscribed to OCR Logs. Resubscribing") - ctx, cancel = context.WithTimeout(context.Background(), backoff) + ctx, cancel = context.WithTimeout(utils.TestContext(o.t), backoff) eventSub, err = o.chainClient.SubscribeFilterLogs(ctx, o.filterQuery, eventLogs) cancel() if err != nil { @@ -645,12 +646,12 @@ func (o *OCRSoakTest) collectEvents() error { timeout := time.Second * 15 o.log.Info().Interface("Filter Query", o.filterQuery).Str("Timeout", timeout.String()).Msg("Retrieving on-chain events") - ctx, cancel := context.WithTimeout(context.Background(), timeout) + ctx, cancel := context.WithTimeout(utils.TestContext(o.t), timeout) contractEvents, err := o.chainClient.FilterLogs(ctx, o.filterQuery) cancel() for err != nil { o.log.Info().Interface("Filter Query", o.filterQuery).Str("Timeout", timeout.String()).Msg("Retrieving on-chain events") - ctx, cancel := context.WithTimeout(context.Background(), timeout) + ctx, cancel := context.WithTimeout(utils.TestContext(o.t), timeout) contractEvents, err = o.chainClient.FilterLogs(ctx, o.filterQuery) cancel() if err != nil { diff --git a/integration-tests/testsetups/profile.go b/integration-tests/testsetups/profile.go index 6f978cdebe..14fe3d29ae 100644 --- a/integration-tests/testsetups/profile.go +++ 
b/integration-tests/testsetups/profile.go @@ -7,8 +7,8 @@ import ( . "github.com/onsi/gomega" "golang.org/x/sync/errgroup" - "github.com/smartcontractkit/chainlink-env/environment" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" reportModel "github.com/smartcontractkit/chainlink-testing-framework/testreporters" "github.com/smartcontractkit/chainlink/integration-tests/client" diff --git a/integration-tests/testsetups/vrfv2.go b/integration-tests/testsetups/vrfv2.go index cfa26e8f27..8c5fde7216 100644 --- a/integration-tests/testsetups/vrfv2.go +++ b/integration-tests/testsetups/vrfv2.go @@ -14,14 +14,15 @@ import ( "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/chainlink-env/environment" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" "github.com/smartcontractkit/chainlink-testing-framework/logging" reportModel "github.com/smartcontractkit/chainlink-testing-framework/testreporters" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/testreporters" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) // VRFV2SoakTest defines a typical VRFV2 soak test @@ -87,7 +88,8 @@ func (v *VRFV2SoakTest) Run(t *testing.T) { Msg("Starting VRFV2 Soak Test") // set the requests to only run for a certain amount of time - testContext, testCancel := context.WithTimeout(context.Background(), v.Inputs.TestDuration) + ctx := utils.TestContext(t) + testContext, testCancel := context.WithTimeout(ctx, v.Inputs.TestDuration) defer testCancel() v.NumberOfRandRequests = 0 @@ -126,7 +128,7 @@ func (v *VRFV2SoakTest) Run(t *testing.T) { //todo - need to find better way for this time.Sleep(1 * time.Minute) - 
loadTestMetrics, err := v.Inputs.ConsumerContract.GetLoadTestMetrics(nil) + loadTestMetrics, err := v.Inputs.ConsumerContract.GetLoadTestMetrics(ctx) if err != nil { l.Error().Err(err).Msg("Error Occurred when getting Load Test Metrics from Consumer contract") } diff --git a/integration-tests/types/envcommon/common.go b/integration-tests/types/envcommon/common.go index 607c481f33..bdabcaf96b 100644 --- a/integration-tests/types/envcommon/common.go +++ b/integration-tests/types/envcommon/common.go @@ -2,7 +2,7 @@ package envcommon import ( "encoding/json" - "io/ioutil" + "io" "os" ) @@ -12,7 +12,7 @@ func ParseJSONFile(path string, v any) error { return err } defer jsonFile.Close() - b, _ := ioutil.ReadAll(jsonFile) + b, _ := io.ReadAll(jsonFile) err = json.Unmarshal(b, v) if err != nil { return err diff --git a/integration-tests/universal/log_poller/config.go b/integration-tests/universal/log_poller/config.go new file mode 100644 index 0000000000..78a0da46bc --- /dev/null +++ b/integration-tests/universal/log_poller/config.go @@ -0,0 +1,249 @@ +package logpoller + +import ( + "fmt" + "os" + "strconv" + + "cosmossdk.io/errors" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/pelletier/go-toml/v2" + "github.com/rs/zerolog/log" + + "github.com/smartcontractkit/chainlink/v2/core/store/models" +) + +const ( + DefaultConfigFilename = "config.toml" + + ErrReadPerfConfig = "failed to read TOML config for performance tests" + ErrUnmarshalPerfConfig = "failed to unmarshal TOML config for performance tests" +) + +type GeneratorType = string + +const ( + GeneratorType_WASP = "wasp" + GeneratorType_Looped = "looped" +) + +type Config struct { + General *General `toml:"general"` + ChaosConfig *ChaosConfig `toml:"chaos"` + Wasp *WaspConfig `toml:"wasp"` + LoopedConfig *LoopedConfig `toml:"looped"` +} + +type LoopedConfig struct { + ContractConfig `toml:"contract"` + FuzzConfig `toml:"fuzz"` +} + +type ContractConfig struct { + ExecutionCount int 
`toml:"execution_count"` +} + +type FuzzConfig struct { + MinEmitWaitTimeMs int `toml:"min_emit_wait_time_ms"` + MaxEmitWaitTimeMs int `toml:"max_emit_wait_time_ms"` +} + +type General struct { + Generator string `toml:"generator"` + EventsToEmit []abi.Event `toml:"-"` + Contracts int `toml:"contracts"` + EventsPerTx int `toml:"events_per_tx"` + UseFinalityTag bool `toml:"use_finality_tag"` +} + +type ChaosConfig struct { + ExperimentCount int `toml:"experiment_count"` +} + +type WaspConfig struct { + Load *Load `toml:"load"` +} + +type Load struct { + RPS int64 `toml:"rps"` + LPS int64 `toml:"lps"` + RateLimitUnitDuration *models.Duration `toml:"rate_limit_unit_duration"` + Duration *models.Duration `toml:"duration"` + CallTimeout *models.Duration `toml:"call_timeout"` +} + +func ReadConfig(configName string) (*Config, error) { + var cfg *Config + d, err := os.ReadFile(configName) + if err != nil { + return nil, errors.Wrap(err, ErrReadPerfConfig) + } + err = toml.Unmarshal(d, &cfg) + if err != nil { + return nil, errors.Wrap(err, ErrUnmarshalPerfConfig) + } + + if err := cfg.validate(); err != nil { + return nil, err + } + + log.Debug().Interface("Config", cfg).Msg("Parsed config") + return cfg, nil +} + +func (c *Config) OverrideFromEnv() error { + if contr := os.Getenv("CONTRACTS"); contr != "" { + c.General.Contracts = mustParseInt(contr) + } + + if eventsPerTx := os.Getenv("EVENTS_PER_TX"); eventsPerTx != "" { + c.General.EventsPerTx = mustParseInt(eventsPerTx) + } + + if useFinalityTag := os.Getenv("USE_FINALITY_TAG"); useFinalityTag != "" { + c.General.UseFinalityTag = mustParseBool(useFinalityTag) + } + + if duration := os.Getenv("LOAD_DURATION"); duration != "" { + d, err := models.ParseDuration(duration) + if err != nil { + return err + } + + if c.General.Generator == GeneratorType_WASP { + c.Wasp.Load.Duration = &d + } else { + // this is completely arbitrary and practice shows that even with this values + // test executes much longer than specified, 
probably due to network latency + c.LoopedConfig.FuzzConfig.MinEmitWaitTimeMs = 400 + c.LoopedConfig.FuzzConfig.MaxEmitWaitTimeMs = 600 + // divide by 4 based on past runs, but we should do it in a better way + c.LoopedConfig.ContractConfig.ExecutionCount = int(d.Duration().Seconds() / 4) + } + } + + return nil +} + +func (c *Config) validate() error { + if c.General == nil { + return fmt.Errorf("General config is nil") + } + + err := c.General.validate() + if err != nil { + return fmt.Errorf("General config validation failed: %w", err) + } + + switch c.General.Generator { + case GeneratorType_WASP: + if c.Wasp == nil { + return fmt.Errorf("wasp config is nil") + } + if c.Wasp.Load == nil { + return fmt.Errorf("wasp load config is nil") + } + + err = c.Wasp.validate() + if err != nil { + return fmt.Errorf("wasp config validation failed: %w", err) + } + case GeneratorType_Looped: + if c.LoopedConfig == nil { + return fmt.Errorf("looped config is nil") + } + + err = c.LoopedConfig.validate() + if err != nil { + return fmt.Errorf("looped config validation failed: %w", err) + } + default: + return fmt.Errorf("unknown generator type: %s", c.General.Generator) + } + + return nil +} + +func (g *General) validate() error { + if g.Generator == "" { + return fmt.Errorf("generator is empty") + } + + if g.Contracts == 0 { + return fmt.Errorf("contracts is 0, but must be > 0") + } + + if g.EventsPerTx == 0 { + return fmt.Errorf("events_per_tx is 0, but must be > 0") + } + + return nil +} + +func (w *WaspConfig) validate() error { + if w.Load == nil { + return fmt.Errorf("Load config is nil") + } + + err := w.Load.validate() + if err != nil { + return fmt.Errorf("Load config validation failed: %w", err) + } + + return nil +} + +func (l *Load) validate() error { + if l.RPS == 0 && l.LPS == 0 { + return fmt.Errorf("either RPS or LPS needs to be set") + } + + if l.RPS != 0 && l.LPS != 0 { + return fmt.Errorf("only one of RPS or LPS can be set") + } + + if l.Duration == nil { + 
return fmt.Errorf("duration is nil") + } + + if l.CallTimeout == nil { + return fmt.Errorf("call_timeout is nil") + } + if l.RateLimitUnitDuration == nil { + return fmt.Errorf("rate_limit_unit_duration is nil") + } + + return nil +} + +func (l *LoopedConfig) validate() error { + if l.ExecutionCount == 0 { + return fmt.Errorf("execution_count is 0, but must be > 0") + } + + if l.MinEmitWaitTimeMs == 0 { + return fmt.Errorf("min_emit_wait_time_ms is 0, but must be > 0") + } + + if l.MaxEmitWaitTimeMs == 0 { + return fmt.Errorf("max_emit_wait_time_ms is 0, but must be > 0") + } + + return nil +} + +func mustParseInt(s string) int { + i, err := strconv.Atoi(s) + if err != nil { + panic(err) + } + return i +} + +func mustParseBool(s string) bool { + b, err := strconv.ParseBool(s) + if err != nil { + panic(err) + } + return b +} diff --git a/integration-tests/universal/log_poller/gun.go b/integration-tests/universal/log_poller/gun.go new file mode 100644 index 0000000000..39286f1b53 --- /dev/null +++ b/integration-tests/universal/log_poller/gun.go @@ -0,0 +1,79 @@ +package logpoller + +import ( + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/rs/zerolog" + + "github.com/smartcontractkit/wasp" + + "github.com/smartcontractkit/chainlink/integration-tests/contracts" +) + +/* LogEmitterGun is a gun that constantly emits logs from a contract */ +type LogEmitterGun struct { + contract *contracts.LogEmitter + eventsToEmit []abi.Event + logger zerolog.Logger + eventsPerTx int +} + +type Counter struct { + mu *sync.Mutex + value int +} + +func NewLogEmitterGun( + contract *contracts.LogEmitter, + eventsToEmit []abi.Event, + eventsPerTx int, + logger zerolog.Logger, +) *LogEmitterGun { + return &LogEmitterGun{ + contract: contract, + eventsToEmit: eventsToEmit, + eventsPerTx: eventsPerTx, + logger: logger, + } +} + +func (m *LogEmitterGun) Call(l *wasp.Generator) *wasp.CallResult { + localCounter := 0 + logEmitter := (*m.contract) + address := 
logEmitter.Address() + for _, event := range m.eventsToEmit { + m.logger.Debug().Str("Emitter address", address.String()).Str("Event type", event.Name).Msg("Emitting log from emitter") + var err error + switch event.Name { + case "Log1": + _, err = logEmitter.EmitLogInts(getIntSlice(m.eventsPerTx)) + case "Log2": + _, err = logEmitter.EmitLogIntsIndexed(getIntSlice(m.eventsPerTx)) + case "Log3": + _, err = logEmitter.EmitLogStrings(getStringSlice(m.eventsPerTx)) + default: + err = fmt.Errorf("unknown event name: %s", event.Name) + } + + if err != nil { + return &wasp.CallResult{Error: err.Error(), Failed: true} + } + localCounter++ + } + + // I don't think that will work as expected, I should atomically read the value and save it, so maybe just a mutex? + if counter, ok := l.InputSharedData().(*Counter); ok { + counter.mu.Lock() + defer counter.mu.Unlock() + counter.value += localCounter + } else { + return &wasp.CallResult{ + Error: "SharedData did not contain a Counter", + Failed: true, + } + } + + return &wasp.CallResult{} +} diff --git a/integration-tests/universal/log_poller/helpers.go b/integration-tests/universal/log_poller/helpers.go new file mode 100644 index 0000000000..c483529d2a --- /dev/null +++ b/integration-tests/universal/log_poller/helpers.go @@ -0,0 +1,1122 @@ +package logpoller + +import ( + "bytes" + "context" + "fmt" + "math/big" + "math/rand" + "sort" + "strings" + "sync" + "testing" + "time" + + geth "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + geth_types "github.com/ethereum/go-ethereum/core/types" + "github.com/smartcontractkit/sqlx" + + "github.com/rs/zerolog" + "github.com/scylladb/go-reflectx" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/wasp" + + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + ctf_test_env "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" + 
"github.com/smartcontractkit/chainlink-testing-framework/logging" + "github.com/smartcontractkit/chainlink-testing-framework/networks" + + evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + cltypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_utils_2_1" + le "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter" + core_logger "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/pg" + "github.com/smartcontractkit/chainlink/v2/core/store/models" + + "github.com/smartcontractkit/chainlink/integration-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/client" + + "github.com/smartcontractkit/chainlink/integration-tests/contracts" + "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" + "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" + "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" + + it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils" +) + +var ( + EmitterABI, _ = abi.JSON(strings.NewReader(le.LogEmitterABI)) + automationUtilsABI = cltypes.MustGetABI(automation_utils_2_1.AutomationUtilsABI) + bytes0 = [32]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } // bytes representation of 0x0000000000000000000000000000000000000000000000000000000000000000 + +) + +var registerSingleTopicFilter = func(registry contracts.KeeperRegistry, upkeepID *big.Int, emitterAddress common.Address, topic common.Hash) error { + logTriggerConfigStruct := automation_utils_2_1.LogTriggerConfig{ + ContractAddress: emitterAddress, + FilterSelector: 0, + Topic0: topic, + Topic1: bytes0, + Topic2: bytes0, + Topic3: bytes0, + } + 
encodedLogTriggerConfig, err := automationUtilsABI.Methods["_logTriggerConfig"].Inputs.Pack(&logTriggerConfigStruct) + if err != nil { + return err + } + + err = registry.SetUpkeepTriggerConfig(upkeepID, encodedLogTriggerConfig) + if err != nil { + return err + } + + return nil +} + +// Currently Unused November 8, 2023, Might be useful in the near future so keeping it here for now +// this is not really possible, log trigger doesn't support multiple topics, even if log poller does +// var registerMultipleTopicsFilter = func(registry contracts.KeeperRegistry, upkeepID *big.Int, emitterAddress common.Address, topics []abi.Event) error { +// if len(topics) > 4 { +// return errors.New("Cannot register more than 4 topics") +// } + +// var getTopic = func(topics []abi.Event, i int) common.Hash { +// if i > len(topics)-1 { +// return bytes0 +// } + +// return topics[i].ID +// } + +// var getFilterSelector = func(topics []abi.Event) (uint8, error) { +// switch len(topics) { +// case 0: +// return 0, errors.New("Cannot register filter with 0 topics") +// case 1: +// return 0, nil +// case 2: +// return 1, nil +// case 3: +// return 3, nil +// case 4: +// return 7, nil +// default: +// return 0, errors.New("Cannot register filter with more than 4 topics") +// } +// } + +// filterSelector, err := getFilterSelector(topics) +// if err != nil { +// return err +// } + +// logTriggerConfigStruct := automation_utils_2_1.LogTriggerConfig{ +// ContractAddress: emitterAddress, +// FilterSelector: filterSelector, +// Topic0: getTopic(topics, 0), +// Topic1: getTopic(topics, 1), +// Topic2: getTopic(topics, 2), +// Topic3: getTopic(topics, 3), +// } +// encodedLogTriggerConfig, err := automationUtilsABI.Methods["_logTriggerConfig"].Inputs.Pack(&logTriggerConfigStruct) +// if err != nil { +// return err +// } + +// err = registry.SetUpkeepTriggerConfig(upkeepID, encodedLogTriggerConfig) +// if err != nil { +// return err +// } + +// return nil +// } + +func NewOrm(logger 
core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_test_env.PostgresDb) (*logpoller.DbORM, *sqlx.DB, error) { + dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable", "127.0.0.1", postgresDb.ExternalPort, postgresDb.User, postgresDb.Password, postgresDb.DbName) + db, err := sqlx.Open("postgres", dsn) + if err != nil { + return nil, db, err + } + + db.MapperFunc(reflectx.CamelToSnakeASCII) + return logpoller.NewORM(chainID, db, logger, pg.NewQConfig(false)), db, nil +} + +type ExpectedFilter struct { + emitterAddress common.Address + topic common.Hash +} + +func getExpectedFilters(logEmitters []*contracts.LogEmitter, cfg *Config) []ExpectedFilter { + expectedFilters := make([]ExpectedFilter, 0) + for _, emitter := range logEmitters { + for _, event := range cfg.General.EventsToEmit { + expectedFilters = append(expectedFilters, ExpectedFilter{ + emitterAddress: (*emitter).Address(), + topic: event.ID, + }) + } + } + + return expectedFilters +} + +var nodeHasExpectedFilters = func(expectedFilters []ExpectedFilter, logger core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_test_env.PostgresDb) (bool, error) { + orm, db, err := NewOrm(logger, chainID, postgresDb) + if err != nil { + return false, err + } + + defer db.Close() + knownFilters, err := orm.LoadFilters() + if err != nil { + return false, err + } + + for _, expectedFilter := range expectedFilters { + filterFound := false + for _, knownFilter := range knownFilters { + if bytes.Equal(expectedFilter.emitterAddress.Bytes(), knownFilter.Addresses[0].Bytes()) && bytes.Equal(expectedFilter.topic.Bytes(), knownFilter.EventSigs[0].Bytes()) { + filterFound = true + break + } + } + + if !filterFound { + return false, fmt.Errorf("no filter found for emitter %s and topic %s", expectedFilter.emitterAddress.String(), expectedFilter.topic.Hex()) + } + } + + return true, nil +} + +var randomWait = func(minMilliseconds, maxMilliseconds int) { + 
rand.New(rand.NewSource(time.Now().UnixNano())) + randomMilliseconds := rand.Intn(maxMilliseconds-minMilliseconds+1) + minMilliseconds + time.Sleep(time.Duration(randomMilliseconds) * time.Millisecond) +} + +type LogEmitterChannel struct { + logsEmitted int + err error + // unused + // currentIndex int +} + +func getIntSlice(length int) []int { + result := make([]int, length) + for i := 0; i < length; i++ { + result[i] = i + } + + return result +} + +func getStringSlice(length int) []string { + result := make([]string, length) + for i := 0; i < length; i++ { + result[i] = "amazing event" + } + + return result +} + +var emitEvents = func(ctx context.Context, l zerolog.Logger, logEmitter *contracts.LogEmitter, cfg *Config, wg *sync.WaitGroup, results chan LogEmitterChannel) { + address := (*logEmitter).Address().String() + localCounter := 0 + select { + case <-ctx.Done(): + l.Warn().Str("Emitter address", address).Msg("Context cancelled, not emitting events") + return + default: + defer wg.Done() + for i := 0; i < cfg.LoopedConfig.ExecutionCount; i++ { + for _, event := range cfg.General.EventsToEmit { + l.Debug().Str("Emitter address", address).Str("Event type", event.Name).Str("index", fmt.Sprintf("%d/%d", (i+1), cfg.LoopedConfig.ExecutionCount)).Msg("Emitting log from emitter") + var err error + switch event.Name { + case "Log1": + _, err = (*logEmitter).EmitLogInts(getIntSlice(cfg.General.EventsPerTx)) + case "Log2": + _, err = (*logEmitter).EmitLogIntsIndexed(getIntSlice(cfg.General.EventsPerTx)) + case "Log3": + _, err = (*logEmitter).EmitLogStrings(getStringSlice(cfg.General.EventsPerTx)) + default: + err = fmt.Errorf("unknown event name: %s", event.Name) + } + + if err != nil { + results <- LogEmitterChannel{ + logsEmitted: 0, + err: err, + } + return + } + localCounter += cfg.General.EventsPerTx + + randomWait(cfg.LoopedConfig.FuzzConfig.MinEmitWaitTimeMs, cfg.LoopedConfig.FuzzConfig.MaxEmitWaitTimeMs) + } + + if (i+1)%10 == 0 { + l.Info().Str("Emitter 
address", address).Str("Index", fmt.Sprintf("%d/%d", i+1, cfg.LoopedConfig.ExecutionCount)).Msg("Emitted all three events") + } + } + + l.Info().Str("Emitter address", address).Int("Total logs emitted", localCounter).Msg("Finished emitting events") + + results <- LogEmitterChannel{ + logsEmitted: localCounter, + err: nil, + } + } +} + +var chainHasFinalisedEndBlock = func(l zerolog.Logger, evmClient blockchain.EVMClient, endBlock int64) (bool, error) { + effectiveEndBlock := endBlock + 1 + lastFinalisedBlockHeader, err := evmClient.GetLatestFinalizedBlockHeader(context.Background()) + if err != nil { + return false, err + } + + l.Info().Int64("Last finalised block header", lastFinalisedBlockHeader.Number.Int64()).Int64("End block", effectiveEndBlock).Int64("Blocks left till end block", effectiveEndBlock-lastFinalisedBlockHeader.Number.Int64()).Msg("Waiting for the finalized block to move beyond end block") + + return lastFinalisedBlockHeader.Number.Int64() > effectiveEndBlock, nil +} + +var logPollerHasFinalisedEndBlock = func(endBlock int64, chainID *big.Int, l zerolog.Logger, coreLogger core_logger.SugaredLogger, nodes *test_env.ClCluster) (bool, error) { + wg := &sync.WaitGroup{} + + type boolQueryResult struct { + nodeName string + hasFinalised bool + err error + } + + endBlockCh := make(chan boolQueryResult, len(nodes.Nodes)-1) + ctx, cancelFn := context.WithCancel(context.Background()) + + for i := 1; i < len(nodes.Nodes); i++ { + wg.Add(1) + + go func(clNode *test_env.ClNode, r chan boolQueryResult) { + defer wg.Done() + select { + case <-ctx.Done(): + return + default: + orm, db, err := NewOrm(coreLogger, chainID, clNode.PostgresDb) + if err != nil { + r <- boolQueryResult{ + nodeName: clNode.ContainerName, + hasFinalised: false, + err: err, + } + } + + defer db.Close() + + latestBlock, err := orm.SelectLatestBlock() + if err != nil { + r <- boolQueryResult{ + nodeName: clNode.ContainerName, + hasFinalised: false, + err: err, + } + } + + r <- 
boolQueryResult{ + nodeName: clNode.ContainerName, + hasFinalised: latestBlock.FinalizedBlockNumber > endBlock, + err: nil, + } + + } + }(nodes.Nodes[i], endBlockCh) + } + + var err error + allFinalisedCh := make(chan bool, 1) + + go func() { + foundMap := make(map[string]bool, 0) + for r := range endBlockCh { + if r.err != nil { + err = r.err + cancelFn() + return + } + + foundMap[r.nodeName] = r.hasFinalised + if r.hasFinalised { + l.Info().Str("Node name", r.nodeName).Msg("CL node has finalised end block") + } else { + l.Warn().Str("Node name", r.nodeName).Msg("CL node has not finalised end block yet") + } + + if len(foundMap) == len(nodes.Nodes)-1 { + allFinalised := true + for _, v := range foundMap { + if !v { + allFinalised = false + break + } + } + + allFinalisedCh <- allFinalised + return + } + } + }() + + wg.Wait() + close(endBlockCh) + + return <-allFinalisedCh, err +} + +var clNodesHaveExpectedLogCount = func(startBlock, endBlock int64, chainID *big.Int, expectedLogCount int, expectedFilters []ExpectedFilter, l zerolog.Logger, coreLogger core_logger.SugaredLogger, nodes *test_env.ClCluster) (bool, error) { + wg := &sync.WaitGroup{} + + type logQueryResult struct { + nodeName string + logCount int + hasExpectedCount bool + err error + } + + queryCh := make(chan logQueryResult, len(nodes.Nodes)-1) + ctx, cancelFn := context.WithCancel(context.Background()) + + for i := 1; i < len(nodes.Nodes); i++ { + wg.Add(1) + + go func(clNode *test_env.ClNode, r chan logQueryResult) { + defer wg.Done() + select { + case <-ctx.Done(): + return + default: + orm, db, err := NewOrm(coreLogger, chainID, clNode.PostgresDb) + if err != nil { + r <- logQueryResult{ + nodeName: clNode.ContainerName, + logCount: 0, + hasExpectedCount: false, + err: err, + } + } + + defer db.Close() + foundLogsCount := 0 + + for _, filter := range expectedFilters { + logs, err := orm.SelectLogs(startBlock, endBlock, filter.emitterAddress, filter.topic) + if err != nil { + r <- logQueryResult{ + 
nodeName:         clNode.ContainerName,
					logCount:         0,
					hasExpectedCount: false,
					err:              err,
				}
			}

			foundLogsCount += len(logs)
		}

			r <- logQueryResult{
				nodeName:         clNode.ContainerName,
				logCount:         foundLogsCount,
				hasExpectedCount: foundLogsCount >= expectedLogCount,
				err:              err,
			}
		}
		}(nodes.Nodes[i], queryCh)
	}

	var err error
	allFoundCh := make(chan bool, 1)

	// Aggregate per-node results. On the first error we must still unblock the
	// final `<-allFoundCh` receive below, so send `false` before returning.
	go func() {
		foundMap := make(map[string]bool, len(nodes.Nodes)-1)
		for r := range queryCh {
			if r.err != nil {
				err = r.err
				cancelFn()
				allFoundCh <- false // fix: returning without a send deadlocked `<-allFoundCh`
				return
			}

			foundMap[r.nodeName] = r.hasExpectedCount
			if r.hasExpectedCount {
				l.Info().Str("Node name", r.nodeName).Int("Logs count", r.logCount).Msg("Expected log count found in CL node")
			} else {
				l.Warn().Str("Node name", r.nodeName).Str("Found/Expected logs", fmt.Sprintf("%d/%d", r.logCount, expectedLogCount)).Int("Missing logs", expectedLogCount-r.logCount).Msg("Too low log count found in CL node")
			}

			// node 0 is the bootstrap node and is not queried, hence len-1
			if len(foundMap) == len(nodes.Nodes)-1 {
				allFound := true
				for _, v := range foundMap {
					if !v {
						allFound = false
						break
					}
				}

				allFoundCh <- allFound
				return
			}
		}
	}()

	wg.Wait()
	close(queryCh)

	return <-allFoundCh, err
}

// MissingLogs maps a Chainlink node's container name to the EVM logs that were
// present in the EVM node but absent from that node's log poller DB.
type MissingLogs map[string][]geth_types.Log

// IsEmpty reports whether no node is missing any logs.
func (m *MissingLogs) IsEmpty() bool {
	for _, v := range *m {
		if len(v) > 0 {
			return false
		}
	}

	return true
}

// getMissingLogs fetches, for every non-bootstrap CL node, the logs stored in its
// log poller DB for the given block range and compares them against the logs the
// EVM node itself returns, reporting any logs a CL node is missing.
var getMissingLogs = func(startBlock, endBlock int64, logEmitters []*contracts.LogEmitter, evmClient blockchain.EVMClient, clnodeCluster *test_env.ClCluster, l zerolog.Logger, coreLogger core_logger.SugaredLogger, cfg *Config) (MissingLogs, error) {
	wg := &sync.WaitGroup{}

	type dbQueryResult struct {
		err      error
		nodeName string
		logs     []logpoller.Log
	}

	ctx, cancelFn := context.WithCancel(context.Background())
	resultCh := make(chan dbQueryResult, len(clnodeCluster.Nodes)-1)

	for i := 1; i < len(clnodeCluster.Nodes); i++ {
		wg.Add(1)

		go func(ctx context.Context, i int, r chan dbQueryResult) {
			defer wg.Done()
			select {
			case <-ctx.Done():
				l.Warn().Msg("Context cancelled. Terminating fetching logs from log poller's DB")
				return
			default:
				nodeName := clnodeCluster.Nodes[i].ContainerName

				l.Info().Str("Node name", nodeName).Msg("Fetching log poller logs")
				orm, db, err := NewOrm(coreLogger, evmClient.GetChainID(), clnodeCluster.Nodes[i].PostgresDb)
				if err != nil {
					r <- dbQueryResult{
						err:      err,
						nodeName: nodeName,
						logs:     []logpoller.Log{},
					}
					return // fix: previously fell through and used nil orm/db after the failure
				}

				defer db.Close()
				logs := make([]logpoller.Log, 0)

				for j := 0; j < len(logEmitters); j++ {
					address := (*logEmitters[j]).Address()

					for _, event := range cfg.General.EventsToEmit {
						l.Debug().Str("Event name", event.Name).Str("Emitter address", address.String()).Msg("Fetching single emitter's logs")
						result, err := orm.SelectLogs(startBlock, endBlock, address, event.ID)
						if err != nil {
							r <- dbQueryResult{
								err:      err,
								nodeName: nodeName,
								logs:     []logpoller.Log{},
							}
							return // fix: previously kept querying and could send multiple results per node
						}

						sort.Slice(result, func(i, j int) bool {
							return result[i].BlockNumber < result[j].BlockNumber
						})

						logs = append(logs, result...)

						l.Debug().Str("Event name", event.Name).Str("Emitter address", address.String()).Int("Log count", len(result)).Msg("Logs found per node")
					}
				}

				l.Warn().Int("Count", len(logs)).Str("Node name", nodeName).Msg("Fetched log poller logs")

				r <- dbQueryResult{
					err:      nil,
					nodeName: nodeName,
					logs:     logs,
				}
			}
		}(ctx, i, resultCh)
	}

	allLogPollerLogs := make(map[string][]logpoller.Log)
	missingLogs := map[string][]geth_types.Log{}
	var dbError error

	go func() {
		for r := range resultCh {
			if r.err != nil {
				l.Err(r.err).Str("Node name", r.nodeName).Msg("Error fetching logs from log poller's DB")
				dbError = r.err
				cancelFn()
				return
			}
			allLogPollerLogs[r.nodeName] = r.logs
		}
	}()

	wg.Wait()
	close(resultCh)

	if dbError != nil {
		return nil, dbError
	}

	allLogsInEVMNode, err := getEVMLogs(startBlock, endBlock, logEmitters, evmClient, l, cfg)
	if err != nil {
		return nil, err
	}

	wg = &sync.WaitGroup{}

	type missingLogResult struct {
		nodeName string
		logs     []geth_types.Log
	}

	l.Info().Msg("Started comparison of logs from EVM node and CL nodes. This may take a while if there's a lot of logs")
	missingCh := make(chan missingLogResult, len(clnodeCluster.Nodes)-1)
	evmLogCount := len(allLogsInEVMNode)
	for i := 1; i < len(clnodeCluster.Nodes); i++ {
		wg.Add(1)

		go func(i int, result chan missingLogResult) {
			defer wg.Done()
			nodeName := clnodeCluster.Nodes[i].ContainerName
			l.Info().Str("Node name", nodeName).Str("Progress", fmt.Sprintf("0/%d", evmLogCount)).Msg("Comparing single CL node's logs with EVM logs")

			missingLogs := make([]geth_types.Log, 0)
			for i, evmLog := range allLogsInEVMNode {
				logFound := false
				for _, logPollerLog := range allLogPollerLogs[nodeName] {
					if logPollerLog.BlockNumber == int64(evmLog.BlockNumber) && logPollerLog.TxHash == evmLog.TxHash && bytes.Equal(logPollerLog.Data, evmLog.Data) && logPollerLog.LogIndex == int64(evmLog.Index) &&
						logPollerLog.Address == evmLog.Address && logPollerLog.BlockHash == evmLog.BlockHash && bytes.Equal(logPollerLog.Topics[0][:], evmLog.Topics[0].Bytes()) {
						logFound = true
						break // fix: `continue` kept scanning the node's remaining logs after a match
					}
				}

				if i%10000 == 0 && i != 0 {
					l.Info().Str("Node name", nodeName).Str("Progress", fmt.Sprintf("%d/%d", i, evmLogCount)).Msg("Comparing single CL node's logs with EVM logs")
				}

				if !logFound {
					missingLogs = append(missingLogs, evmLog)
				}
			}

			if len(missingLogs) > 0 {
				// fix: "EMV" -> "EVM" typo in log message
				l.Warn().Int("Count", len(missingLogs)).Str("Node name", nodeName).Msg("Some EVM logs were missing from CL node")
			} else {
				l.Info().Str("Node name", nodeName).Msg("All EVM logs were found in CL node")
			}

			result <- missingLogResult{
				nodeName: nodeName,
				logs:     missingLogs,
			}
		}(i, missingCh)
	}

	wg.Wait()
	close(missingCh)

	for v := range missingCh {
		if len(v.logs) > 0 {
			missingLogs[v.nodeName] = v.logs
		}
	}

	expectedTotalLogsEmitted := getExpectedLogCount(cfg)
	if int64(len(allLogsInEVMNode)) != expectedTotalLogsEmitted {
		// fix: actual count goes first, expected second (the arguments were swapped)
		l.Warn().Str("Actual/Expected", fmt.Sprintf("%d/%d", len(allLogsInEVMNode), expectedTotalLogsEmitted)).Msg("Some of the test logs were not found in EVM node. This is a bug in the test")
	}

	return missingLogs, nil
}

// printMissingLogsByType aggregates missing logs by human-readable event name
// and prints one warning per event type.
var printMissingLogsByType = func(missingLogs map[string][]geth_types.Log, l zerolog.Logger, cfg *Config) {
	var findHumanName = func(topic common.Hash) string {
		for _, event := range cfg.General.EventsToEmit {
			if event.ID == topic {
				return event.Name
			}
		}

		return "Unknown event"
	}

	missingByType := make(map[string]int)
	for _, logs := range missingLogs {
		for _, v := range logs {
			humanName := findHumanName(v.Topics[0])
			missingByType[humanName]++
		}
	}

	for k, v := range missingByType {
		l.Warn().Str("Event name", k).Int("Missing count", v).Msg("Missing logs by type")
	}
}

// getEVMLogs queries the EVM node directly for all logs emitted by the test
// contracts in [startBlock, endBlock], sorted by block number per emitter/event.
var getEVMLogs = func(startBlock, endBlock int64, logEmitters []*contracts.LogEmitter, evmClient blockchain.EVMClient, l zerolog.Logger, cfg *Config) ([]geth_types.Log, error) {
	allLogsInEVMNode := make([]geth_types.Log, 0)
	for j := 0; j < len(logEmitters); j++ {
		address := (*logEmitters[j]).Address()
		for _, event := range cfg.General.EventsToEmit {
			l.Debug().Str("Event name", event.Name).Str("Emitter address", address.String()).Msg("Fetching logs from EVM node")
			logsInEVMNode, err := evmClient.FilterLogs(context.Background(), geth.FilterQuery{
				Addresses: []common.Address{(address)},
				Topics:    [][]common.Hash{{event.ID}},
				FromBlock: big.NewInt(startBlock),
				ToBlock:   big.NewInt(endBlock),
			})
			if err != nil {
				return nil, err
			}

			sort.Slice(logsInEVMNode, func(i, j int) bool {
				return logsInEVMNode[i].BlockNumber < logsInEVMNode[j].BlockNumber
			})

			allLogsInEVMNode = append(allLogsInEVMNode, logsInEVMNode...)
			l.Debug().Str("Event name", event.Name).Str("Emitter address", address.String()).Int("Log count", len(logsInEVMNode)).Msg("Logs found in EVM node")
		}
	}

	l.Warn().Int("Count", len(allLogsInEVMNode)).Msg("Logs in EVM node")

	return allLogsInEVMNode, nil
}

// executeGenerator dispatches to the configured load generator and returns the
// total number of logs emitted.
func executeGenerator(t *testing.T, cfg *Config, logEmitters []*contracts.LogEmitter) (int, error) {
	if cfg.General.Generator == GeneratorType_WASP {
		return runWaspGenerator(t, cfg, logEmitters)
	}

	return runLoopedGenerator(t, cfg, logEmitters)
}

// runWaspGenerator emits events using the WASP load tool, deriving an effective
// per-contract RPS from either the configured LPS or RPS.
func runWaspGenerator(t *testing.T, cfg *Config, logEmitters []*contracts.LogEmitter) (int, error) {
	l := logging.GetTestLogger(t)

	var rpsPrime int64

	// if LPS is set, we need to calculate based on contract count and events per transaction
	if cfg.Wasp.Load.LPS > 0 {
		rpsPrime = cfg.Wasp.Load.LPS / int64(cfg.General.Contracts) / int64(cfg.General.EventsPerTx) / int64(len(cfg.General.EventsToEmit))

		if rpsPrime < 1 {
			return 0, fmt.Errorf("invalid load configuration, effective RPS would have been zero. Adjust LPS, contracts count, events per tx or events to emit")
		}
	}

	// if RPS is set simply split it between contracts
	if cfg.Wasp.Load.RPS > 0 {
		rpsPrime = cfg.Wasp.Load.RPS / int64(cfg.General.Contracts)
	}

	counter := &Counter{
		mu:    &sync.Mutex{},
		value: 0,
	}

	p := wasp.NewProfile()

	for _, logEmitter := range logEmitters {
		g, err := wasp.NewGenerator(&wasp.Config{
			T:                     t,
			LoadType:              wasp.RPS,
			GenName:               fmt.Sprintf("log_poller_gen_%s", (*logEmitter).Address().String()),
			RateLimitUnitDuration: cfg.Wasp.Load.RateLimitUnitDuration.Duration(),
			CallTimeout:           cfg.Wasp.Load.CallTimeout.Duration(),
			Schedule: wasp.Plain(
				rpsPrime,
				cfg.Wasp.Load.Duration.Duration(),
			),
			Gun: NewLogEmitterGun(
				logEmitter,
				cfg.General.EventsToEmit,
				cfg.General.EventsPerTx,
				l,
			),
			SharedData: counter,
		})
		p.Add(g, err)
	}

	_, err := p.Run(true)
	if err != nil {
		return 0, err
	}

	return counter.value, nil
}

// runLoopedGenerator emits events from each contract in its own goroutine and
// aggregates the emitted-log counts; it stops all emitters on the first error.
func runLoopedGenerator(t *testing.T, cfg *Config, logEmitters []*contracts.LogEmitter) (int, error) {
	l := logging.GetTestLogger(t)

	// Start emitting events in parallel, each contract is emitting events in a separate goroutine
	// We will stop as soon as we encounter an error
	wg := &sync.WaitGroup{}
	emitterCh := make(chan LogEmitterChannel, len(logEmitters))

	ctx, cancelFn := context.WithCancel(context.Background())
	defer cancelFn()

	for i := 0; i < len(logEmitters); i++ {
		wg.Add(1)
		go emitEvents(ctx, l, logEmitters[i], cfg, wg, emitterCh)
	}

	var emitErr error
	total := 0

	aggrChan := make(chan int, len(logEmitters))
	done := make(chan struct{})

	go func() {
		defer close(done)
		for emitter := range emitterCh {
			if emitter.err != nil {
				emitErr = emitter.err
				cancelFn()
				return
			}
			aggrChan <- emitter.logsEmitted
		}
	}()

	wg.Wait()
	close(emitterCh)
	// fix: wait for the aggregator to finish before reading emitErr/aggrChan;
	// previously, on an emit error fewer than len(logEmitters) counts were sent
	// and the unconditional drain below deadlocked (and emitErr was read racily).
	<-done

	if emitErr != nil {
		return 0, emitErr
	}

	close(aggrChan)
	for c := range aggrChan {
		total += c
	}

	return total, nil
}

func
getExpectedLogCount(cfg *Config) int64 {
	// Expected log count depends on the generator: WASP derives it from the
	// configured rate and duration, the looped generator from execution counts.
	if cfg.General.Generator == GeneratorType_WASP {
		if cfg.Wasp.Load.RPS != 0 {
			return cfg.Wasp.Load.RPS * int64(cfg.Wasp.Load.Duration.Duration().Seconds()) * int64(cfg.General.EventsPerTx)
		}
		return cfg.Wasp.Load.LPS * int64(cfg.Wasp.Load.Duration.Duration().Seconds())
	}

	return int64(len(cfg.General.EventsToEmit) * cfg.LoopedConfig.ExecutionCount * cfg.General.Contracts * cfg.General.EventsPerTx)
}

// chaosPauseSyncFn pauses a randomly chosen component (a CL node or its DB) of a
// randomly chosen non-bootstrap node for a random 5-20s interval, synchronously.
var chaosPauseSyncFn = func(l zerolog.Logger, testEnv *test_env.CLClusterTestEnv) error {
	// fix: the old `rand.New(rand.NewSource(...))` discarded its result, so it
	// never affected the global source; use a dedicated seeded source instead.
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	randomBool := rng.Intn(2) == 0

	// skip index 0 (the bootstrap node)
	randomNode := testEnv.ClCluster.Nodes[rng.Intn(len(testEnv.ClCluster.Nodes)-1)+1]
	var component ctf_test_env.EnvComponent

	if randomBool {
		component = randomNode.EnvComponent
	} else {
		component = randomNode.PostgresDb.EnvComponent
	}

	pauseTimeSec := rng.Intn(20-5) + 5
	l.Info().Str("Container", component.ContainerName).Int("Pause time", pauseTimeSec).Msg("Pausing component")
	pauseTimeDur := time.Duration(pauseTimeSec) * time.Second
	err := component.ChaosPause(l, pauseTimeDur)
	if err != nil {
		return err
	}
	// fix: only log success after the error check
	l.Info().Str("Container", component.ContainerName).Msg("Component unpaused")

	return nil
}

// executeChaosExperiment runs the configured number of pause experiments, one at
// a time, and reports the first error (or nil) exactly once on errorCh.
var executeChaosExperiment = func(l zerolog.Logger, testEnv *test_env.CLClusterTestEnv, cfg *Config, errorCh chan error) {
	if cfg.ChaosConfig == nil || cfg.ChaosConfig.ExperimentCount == 0 {
		errorCh <- nil
		return
	}

	chaosChan := make(chan error, cfg.ChaosConfig.ExperimentCount)

	wg := &sync.WaitGroup{}

	go func() {
		// if we wanted to have more than 1 container paused, we'd need to make sure we aren't trying to pause an already paused one
		guardChan := make(chan struct{}, 1)

		for i := 0; i < cfg.ChaosConfig.ExperimentCount; i++ {
			i := i
			wg.Add(1)
			guardChan <- struct{}{}
			go func() {
				defer func() {
					<-guardChan
					wg.Done()
					// fix: report 1-based progress so the last experiment logs N/N, not N-1/N
					l.Info().Str("Current/Total", fmt.Sprintf("%d/%d", i+1, cfg.ChaosConfig.ExperimentCount)).Msg("Done with experiment")
				}()
				chaosChan <- chaosPauseSyncFn(l, testEnv)
			}()
		}

		wg.Wait()

		close(chaosChan)
	}()

	go func() {
		for err := range chaosChan {
			if err != nil {
				// first real error: report it once and stop consuming
				l.Err(err).Msg("Error encountered during chaos experiment")
				errorCh <- err
				return
			}
		}

		// chaosChan closed without an error: all experiments succeeded
		l.Info().Msg("All chaos experiments finished")
		errorCh <- nil
	}()
}

// GetFinalityDepth returns the hardcoded finality depth for a known chain ID.
var GetFinalityDepth = func(chainId int64) (int64, error) {
	switch chainId {
	case 11155111: // Ethereum Sepolia
		return 50, nil
	case 80001: // Polygon Mumbai
		return 500, nil
	case 1337: // Simulated network
		return 10, nil
	default:
		return 0, fmt.Errorf("no known finality depth for chain %d", chainId)
	}
}

// GetEndBlockToWaitFor returns the block number that must be reached before the
// given end block can be considered final: endBlock+1 with finality tags, or
// endBlock plus the chain's finality depth otherwise.
var GetEndBlockToWaitFor = func(endBlock, chainId int64, cfg *Config) (int64, error) {
	if cfg.General.UseFinalityTag {
		return endBlock + 1, nil
	}

	finalityDepth, err := GetFinalityDepth(chainId)
	if err != nil {
		return 0, err
	}

	return endBlock + finalityDepth, nil
}

const (
	automationDefaultUpkeepGasLimit  = uint32(2500000)
	automationDefaultLinkFunds       = int64(9e18)
	automationDefaultUpkeepsToDeploy = 10
	automationExpectedData           = "abcdef"
	defaultAmountOfUpkeeps           = 2
)

var (
	defaultOCRRegistryConfig = contracts.KeeperRegistrySettings{
		PaymentPremiumPPB: uint32(200000000),
		FlatFeeMicroLINK:  uint32(0),
+ BlockCountPerTurn: big.NewInt(10), + CheckGasLimit: uint32(2500000), + StalenessSeconds: big.NewInt(90000), + GasCeilingMultiplier: uint16(1), + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: uint32(5000000), + FallbackGasPrice: big.NewInt(2e11), + FallbackLinkPrice: big.NewInt(2e18), + MaxCheckDataSize: uint32(5000), + MaxPerformDataSize: uint32(5000), + } + + automationDefaultRegistryConfig = contracts.KeeperRegistrySettings{ + PaymentPremiumPPB: uint32(200000000), + FlatFeeMicroLINK: uint32(0), + BlockCountPerTurn: big.NewInt(10), + CheckGasLimit: uint32(2500000), + StalenessSeconds: big.NewInt(90000), + GasCeilingMultiplier: uint16(1), + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: uint32(5000000), + FallbackGasPrice: big.NewInt(2e11), + FallbackLinkPrice: big.NewInt(2e18), + MaxCheckDataSize: uint32(5000), + MaxPerformDataSize: uint32(5000), + } +) + +func setupLogPollerTestDocker( + t *testing.T, + registryVersion ethereum.KeeperRegistryVersion, + registryConfig contracts.KeeperRegistrySettings, + upkeepsNeeded int, + lpPollingInterval time.Duration, + finalityTagEnabled bool, +) ( + blockchain.EVMClient, + []*client.ChainlinkClient, + contracts.ContractDeployer, + contracts.LinkToken, + contracts.KeeperRegistry, + contracts.KeeperRegistrar, + *test_env.CLClusterTestEnv, +) { + l := logging.GetTestLogger(t) + // Add registry version to config + registryConfig.RegistryVersion = registryVersion + network := networks.MustGetSelectedNetworksFromEnv()[0] + + finalityDepth, err := GetFinalityDepth(network.ChainID) + require.NoError(t, err, "Error getting finality depth") + + // build the node config + clNodeConfig := node.NewConfig(node.NewBaseConfig()) + syncInterval := models.MustMakeDuration(5 * time.Minute) + clNodeConfig.Feature.LogPoller = it_utils.Ptr[bool](true) + clNodeConfig.OCR2.Enabled = it_utils.Ptr[bool](true) + clNodeConfig.Keeper.TurnLookBack = it_utils.Ptr[int64](int64(0)) + clNodeConfig.Keeper.Registry.SyncInterval = &syncInterval + 
clNodeConfig.Keeper.Registry.PerformGasOverhead = it_utils.Ptr[uint32](uint32(150000)) + clNodeConfig.P2P.V2.Enabled = it_utils.Ptr[bool](true) + clNodeConfig.P2P.V2.AnnounceAddresses = &[]string{"0.0.0.0:6690"} + clNodeConfig.P2P.V2.ListenAddresses = &[]string{"0.0.0.0:6690"} + + //launch the environment + var env *test_env.CLClusterTestEnv + chainlinkNodeFunding := 0.5 + l.Debug().Msgf("Funding amount: %f", chainlinkNodeFunding) + clNodesCount := 5 + + var logPolllerSettingsFn = func(chain *evmcfg.Chain) *evmcfg.Chain { + chain.LogPollInterval = models.MustNewDuration(lpPollingInterval) + chain.FinalityDepth = it_utils.Ptr[uint32](uint32(finalityDepth)) + chain.FinalityTagEnabled = it_utils.Ptr[bool](finalityTagEnabled) + return chain + } + + var evmClientSettingsFn = func(network *blockchain.EVMNetwork) *blockchain.EVMNetwork { + network.FinalityDepth = uint64(finalityDepth) + network.FinalityTag = finalityTagEnabled + return network + } + + ethBuilder := ctf_test_env.NewEthereumNetworkBuilder() + cfg, err := ethBuilder. + WithConsensusType(ctf_test_env.ConsensusType_PoS). + WithConsensusLayer(ctf_test_env.ConsensusLayer_Prysm). + WithExecutionLayer(ctf_test_env.ExecutionLayer_Geth). + WithBeaconChainConfig(ctf_test_env.BeaconChainConfig{ + SecondsPerSlot: 8, + SlotsPerEpoch: 2, + }). + Build() + require.NoError(t, err, "Error building ethereum network config") + + env, err = test_env.NewCLTestEnvBuilder(). + WithTestLogger(t). + WithPrivateEthereumNetwork(cfg). + WithCLNodes(clNodesCount). + WithCLNodeConfig(clNodeConfig). + WithFunding(big.NewFloat(chainlinkNodeFunding)). + WithChainOptions(logPolllerSettingsFn). + EVMClientNetworkOptions(evmClientSettingsFn). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err, "Error deploying test environment") + + env.ParallelTransactions(true) + nodeClients := env.ClCluster.NodeAPIs() + workerNodes := nodeClients[1:] + + var linkToken contracts.LinkToken + + switch network.ChainID { + // Simulated + case 1337: + linkToken, err = env.ContractDeployer.DeployLinkTokenContract() + // Ethereum Sepolia + case 11155111: + linkToken, err = env.ContractLoader.LoadLINKToken("0x779877A7B0D9E8603169DdbD7836e478b4624789") + // Polygon Mumbai + case 80001: + linkToken, err = env.ContractLoader.LoadLINKToken("0x326C977E6efc84E512bB9C30f76E30c160eD06FB") + default: + panic("Not implemented") + } + require.NoError(t, err, "Error loading/deploying LINK token") + + linkBalance, err := env.EVMClient.BalanceAt(context.Background(), common.HexToAddress(linkToken.Address())) + require.NoError(t, err, "Error getting LINK balance") + + l.Info().Str("Balance", big.NewInt(0).Div(linkBalance, big.NewInt(1e18)).String()).Msg("LINK balance") + minLinkBalanceSingleNode := big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(9)) + minLinkBalance := big.NewInt(0).Mul(minLinkBalanceSingleNode, big.NewInt(int64(upkeepsNeeded))) + if minLinkBalance.Cmp(linkBalance) < 0 { + require.FailNowf(t, "Not enough LINK", "Not enough LINK to run the test. 
Need at least %s", big.NewInt(0).Div(minLinkBalance, big.NewInt(1e18)).String()) + } + + registry, registrar := actions.DeployAutoOCRRegistryAndRegistrar( + t, + registryVersion, + registryConfig, + linkToken, + env.ContractDeployer, + env.EVMClient, + ) + + // Fund the registry with LINK + err = linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(defaultAmountOfUpkeeps)))) + require.NoError(t, err, "Funding keeper registry contract shouldn't fail") + + err = actions.CreateOCRKeeperJobsLocal(l, nodeClients, registry.Address(), network.ChainID, 0, registryVersion) + require.NoError(t, err, "Error creating OCR Keeper Jobs") + ocrConfig, err := actions.BuildAutoOCR2ConfigVarsLocal(l, workerNodes, registryConfig, registrar.Address(), 30*time.Second, registry.RegistryOwnerAddress()) + require.NoError(t, err, "Error building OCR config vars") + err = registry.SetConfig(automationDefaultRegistryConfig, ocrConfig) + require.NoError(t, err, "Registry config should be set successfully") + require.NoError(t, env.EVMClient.WaitForEvents(), "Waiting for config to be set") + + return env.EVMClient, nodeClients, env.ContractDeployer, linkToken, registry, registrar, env +} diff --git a/integration-tests/universal/log_poller/scenarios.go b/integration-tests/universal/log_poller/scenarios.go new file mode 100644 index 0000000000..886547d46e --- /dev/null +++ b/integration-tests/universal/log_poller/scenarios.go @@ -0,0 +1,496 @@ +package logpoller + +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-testing-framework/logging" + "github.com/smartcontractkit/chainlink/integration-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/contracts" + "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" + "github.com/smartcontractkit/chainlink/integration-tests/utils" + core_logger 
"github.com/smartcontractkit/chainlink/v2/core/logger"
)

// ExecuteBasicLogPollerTest deploys upkeeps and log emitter contracts, emits
// events (optionally while chaos experiments pause random containers) and then
// verifies that every CL node's log poller stored exactly the same logs as the
// EVM node did for the emission block range.
func ExecuteBasicLogPollerTest(t *testing.T, cfg *Config) {
	l := logging.GetTestLogger(t)
	coreLogger := core_logger.TestLogger(t) //needed by ORM ¯\_(ツ)_/¯

	// len() is safe on a nil slice, so the previous explicit nil check was redundant
	if len(cfg.General.EventsToEmit) == 0 {
		l.Warn().Msg("No events to emit specified, using all events from log emitter contract")
		for _, event := range EmitterABI.Events {
			cfg.General.EventsToEmit = append(cfg.General.EventsToEmit, event)
		}
	}

	l.Info().Msg("Starting basic log poller test")

	var (
		err           error
		upKeepsNeeded = cfg.General.Contracts * len(cfg.General.EventsToEmit)
	)

	chainClient, _, contractDeployer, linkToken, registry, registrar, testEnv := setupLogPollerTestDocker(
		t, ethereum.RegistryVersion_2_1, defaultOCRRegistryConfig, upKeepsNeeded, 500*time.Millisecond, cfg.General.UseFinalityTag,
	)

	_, upkeepIDs := actions.DeployConsumers(
		t,
		registry,
		registrar,
		linkToken,
		contractDeployer,
		chainClient,
		upKeepsNeeded,
		big.NewInt(automationDefaultLinkFunds),
		automationDefaultUpkeepGasLimit,
		true,
		false,
	)

	// Deploy Log Emitter contracts
	logEmitters := make([]*contracts.LogEmitter, 0)
	for i := 0; i < cfg.General.Contracts; i++ {
		logEmitter, err := testEnv.ContractDeployer.DeployLogEmitterContract()
		// fix: check the error before using/appending the contract
		require.NoError(t, err, "Error deploying log emitter contract")
		logEmitters = append(logEmitters, &logEmitter)
		l.Info().Str("Contract address", logEmitter.Address().Hex()).Msg("Log emitter contract deployed")
		time.Sleep(200 * time.Millisecond)
	}

	// Register log triggered upkeep for each combination of log emitter contract and event signature (topic)
	// We need to register a separate upkeep for each event signature, because log trigger doesn't support multiple topics (even if log poller does)
	for i := 0; i < len(upkeepIDs); i++ {
		emitterAddress := (*logEmitters[i%cfg.General.Contracts]).Address()
		upkeepID := upkeepIDs[i]
		topicId := cfg.General.EventsToEmit[i%len(cfg.General.EventsToEmit)].ID

		l.Info().Int("Upkeep id", int(upkeepID.Int64())).Str("Emitter address", emitterAddress.String()).Str("Topic", topicId.Hex()).Msg("Registering log trigger for log emitter")
		err = registerSingleTopicFilter(registry, upkeepID, emitterAddress, topicId)
		// fix: check the registration error before the random wait, not after it
		require.NoError(t, err, "Error registering log trigger for log emitter")
		randomWait(50, 200)
	}

	err = chainClient.WaitForEvents()
	require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps")

	// Make sure that all nodes have expected filters registered before starting to emit events
	expectedFilters := getExpectedFilters(logEmitters, cfg)
	gom := gomega.NewGomegaWithT(t)
	gom.Eventually(func(g gomega.Gomega) {
		for i := 1; i < len(testEnv.ClCluster.Nodes); i++ {
			nodeName := testEnv.ClCluster.Nodes[i].ContainerName
			l.Info().Str("Node name", nodeName).Msg("Fetching filters from log poller's DB")

			hasFilters, err := nodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb)
			if err != nil {
				l.Warn().Err(err).Msg("Error checking if node has expected filters. Retrying...")
				return
			}

			g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB")
		}
	}, "30s", "1s").Should(gomega.Succeed())
	l.Info().Msg("All nodes have expected filters registered")
	l.Info().Int("Count", len(expectedFilters)).Msg("Expected filters count")

	// Save block number before starting to emit events, so that we can later use it when querying logs
	sb, err := testEnv.EVMClient.LatestBlockNumber(utils.TestContext(t))
	require.NoError(t, err, "Error getting latest block number")
	startBlock := int64(sb)

	l.Info().Msg("STARTING EVENT EMISSION")
	startTime := time.Now()

	// Start chaos experiments by randomly pausing random containers (Chainlink nodes or their DBs)
	chaosDoneCh := make(chan error, 1)
	go func() {
		executeChaosExperiment(l, testEnv, cfg, chaosDoneCh)
	}()

	totalLogsEmitted, err := executeGenerator(t, cfg, logEmitters)
	endTime := time.Now()
	require.NoError(t, err, "Error executing event generator")

	expectedLogsEmitted := getExpectedLogCount(cfg)
	duration := int(endTime.Sub(startTime).Seconds())
	l.Info().Int("Total logs emitted", totalLogsEmitted).Int64("Expected total logs emitted", expectedLogsEmitted).Str("Duration", fmt.Sprintf("%d sec", duration)).Str("LPS", fmt.Sprintf("%d/sec", totalLogsEmitted/duration)).Msg("FINISHED EVENT EMISSION")

	// Save block number after finishing to emit events, so that we can later use it when querying logs
	eb, err := testEnv.EVMClient.LatestBlockNumber(utils.TestContext(t))
	require.NoError(t, err, "Error getting latest block number")

	endBlock, err := GetEndBlockToWaitFor(int64(eb), testEnv.EVMClient.GetChainID().Int64(), cfg)
	require.NoError(t, err, "Error getting end block to wait for")

	l.Info().Msg("Waiting before proceeding with test until all chaos experiments finish")
	chaosError := <-chaosDoneCh
	require.NoError(t, chaosError, "Error encountered during chaos experiment")

	// Wait until last block in which events were emitted has been finalised
	// how long should we wait here until all logs are processed? wait for block X to be processed by all nodes?
	waitDuration := "15m"
	l.Warn().Str("Duration", waitDuration).Msg("Waiting for logs to be processed by all nodes and for chain to advance beyond finality")

	gom.Eventually(func(g gomega.Gomega) {
		hasAdvanced, err := chainHasFinalisedEndBlock(l, testEnv.EVMClient, endBlock)
		if err != nil {
			l.Warn().Err(err).Msg("Error checking if chain has advanced beyond finality. Retrying...")
		}
		g.Expect(hasAdvanced).To(gomega.BeTrue(), "Chain has not advanced beyond finality")
	}, waitDuration, "30s").Should(gomega.Succeed())

	l.Warn().Str("Duration", "1m").Msg("Waiting for all CL nodes to have end block finalised")
	gom.Eventually(func(g gomega.Gomega) {
		hasFinalised, err := logPollerHasFinalisedEndBlock(endBlock, testEnv.EVMClient.GetChainID(), l, coreLogger, testEnv.ClCluster)
		if err != nil {
			l.Warn().Err(err).Msg("Error checking if nodes have finalised end block. Retrying...")
		}
		g.Expect(hasFinalised).To(gomega.BeTrue(), "Some nodes have not finalised end block")
	}, "1m", "30s").Should(gomega.Succeed())

	gom.Eventually(func(g gomega.Gomega) {
		logCountMatches, err := clNodesHaveExpectedLogCount(startBlock, endBlock, testEnv.EVMClient.GetChainID(), totalLogsEmitted, expectedFilters, l, coreLogger, testEnv.ClCluster)
		if err != nil {
			l.Warn().Err(err).Msg("Error checking if CL nodes have expected log count. Retrying...")
		}
		g.Expect(logCountMatches).To(gomega.BeTrue(), "Not all CL nodes have expected log count")
	}, waitDuration, "5s").Should(gomega.Succeed())

	// Wait until all CL nodes have exactly the same logs emitted by test contracts as the EVM node has
	logConsistencyWaitDuration := "1m"
	l.Warn().Str("Duration", logConsistencyWaitDuration).Msg("Waiting for CL nodes to have all the logs that EVM node has")

	gom.Eventually(func(g gomega.Gomega) {
		missingLogs, err := getMissingLogs(startBlock, endBlock, logEmitters, testEnv.EVMClient, testEnv.ClCluster, l, coreLogger, cfg)
		if err != nil {
			l.Warn().Err(err).Msg("Error getting missing logs. Retrying...")
		}

		if !missingLogs.IsEmpty() {
			printMissingLogsByType(missingLogs, l, cfg)
		}
		g.Expect(missingLogs.IsEmpty()).To(gomega.BeTrue(), "Some CL nodes were missing logs")
	}, logConsistencyWaitDuration, "5s").Should(gomega.Succeed())
}

// ExecuteLogPollerReplay emits events before any filters are registered, then
// registers the filters and triggers a log poller replay, verifying that the
// replay backfills all CL nodes with exactly the logs the EVM node has.
func ExecuteLogPollerReplay(t *testing.T, cfg *Config, consistencyTimeout string) {
	l := logging.GetTestLogger(t)
	coreLogger := core_logger.TestLogger(t) //needed by ORM ¯\_(ツ)_/¯

	// len() is safe on a nil slice, so the previous explicit nil check was redundant
	if len(cfg.General.EventsToEmit) == 0 {
		l.Warn().Msg("No events to emit specified, using all events from log emitter contract")
		for _, event := range EmitterABI.Events {
			cfg.General.EventsToEmit = append(cfg.General.EventsToEmit, event)
		}
	}

	l.Info().Msg("Starting replay log poller test")

	var (
		err           error
		upKeepsNeeded = cfg.General.Contracts * len(cfg.General.EventsToEmit)
	)

	// we set blockBackfillDepth to 0, to make sure nothing will be backfilled and won't interfere with our test
	chainClient, _, contractDeployer, linkToken, registry, registrar, testEnv := setupLogPollerTestDocker(
		t, ethereum.RegistryVersion_2_1, defaultOCRRegistryConfig, upKeepsNeeded, 1000*time.Millisecond, cfg.General.UseFinalityTag)

	_, upkeepIDs := actions.DeployConsumers(
		t,
		registry,
		registrar,
		linkToken,
contractDeployer, + chainClient, + upKeepsNeeded, + big.NewInt(automationDefaultLinkFunds), + automationDefaultUpkeepGasLimit, + true, + false, + ) + + // Deploy Log Emitter contracts + logEmitters := make([]*contracts.LogEmitter, 0) + for i := 0; i < cfg.General.Contracts; i++ { + logEmitter, err := testEnv.ContractDeployer.DeployLogEmitterContract() + logEmitters = append(logEmitters, &logEmitter) + require.NoError(t, err, "Error deploying log emitter contract") + l.Info().Str("Contract address", logEmitter.Address().Hex()).Msg("Log emitter contract deployed") + time.Sleep(200 * time.Millisecond) + } + + //wait for contracts to be uploaded to chain, TODO: could make this wait fluent + time.Sleep(5 * time.Second) + + // Save block number before starting to emit events, so that we can later use it when querying logs + sb, err := testEnv.EVMClient.LatestBlockNumber(utils.TestContext(t)) + require.NoError(t, err, "Error getting latest block number") + startBlock := int64(sb) + + l.Info().Msg("STARTING EVENT EMISSION") + startTime := time.Now() + totalLogsEmitted, err := executeGenerator(t, cfg, logEmitters) + endTime := time.Now() + require.NoError(t, err, "Error executing event generator") + expectedLogsEmitted := getExpectedLogCount(cfg) + duration := int(endTime.Sub(startTime).Seconds()) + l.Info().Int("Total logs emitted", totalLogsEmitted).Int64("Expected total logs emitted", expectedLogsEmitted).Str("Duration", fmt.Sprintf("%d sec", duration)).Str("LPS", fmt.Sprintf("%d/sec", totalLogsEmitted/duration)).Msg("FINISHED EVENT EMISSION") + + // Save block number after finishing to emit events, so that we can later use it when querying logs + eb, err := testEnv.EVMClient.LatestBlockNumber(utils.TestContext(t)) + require.NoError(t, err, "Error getting latest block number") + + endBlock, err := GetEndBlockToWaitFor(int64(eb), testEnv.EVMClient.GetChainID().Int64(), cfg) + require.NoError(t, err, "Error getting end block to wait for") + + // Lets make sure no logs are 
in DB yet + expectedFilters := getExpectedFilters(logEmitters, cfg) + logCountMatches, err := clNodesHaveExpectedLogCount(startBlock, endBlock, testEnv.EVMClient.GetChainID(), 0, expectedFilters, l, coreLogger, testEnv.ClCluster) + require.NoError(t, err, "Error checking if CL nodes have expected log count") + require.True(t, logCountMatches, "Some CL nodes already had logs in DB") + l.Info().Msg("No logs were saved by CL nodes yet, as expected. Proceeding.") + + // Register log triggered upkeep for each combination of log emitter contract and event signature (topic) + // We need to register a separate upkeep for each event signature, because log trigger doesn't support multiple topics (even if log poller does) + for i := 0; i < len(upkeepIDs); i++ { + emitterAddress := (*logEmitters[i%cfg.General.Contracts]).Address() + upkeepID := upkeepIDs[i] + topicId := cfg.General.EventsToEmit[i%len(cfg.General.EventsToEmit)].ID + + l.Info().Int("Upkeep id", int(upkeepID.Int64())).Str("Emitter address", emitterAddress.String()).Str("Topic", topicId.Hex()).Msg("Registering log trigger for log emitter") + err = registerSingleTopicFilter(registry, upkeepID, emitterAddress, topicId) + require.NoError(t, err, "Error registering log trigger for log emitter") + } + + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps") + + // Make sure that all nodes have expected filters registered before starting to emit events + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + for i := 1; i < len(testEnv.ClCluster.Nodes); i++ { + nodeName := testEnv.ClCluster.Nodes[i].ContainerName + l.Info().Str("Node name", nodeName).Msg("Fetching filters from log poller's DB") + + hasFilters, err := nodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb) + if err != nil { + l.Warn().Err(err).Msg("Error checking if node has expected filters. 
Retrying...") + return + } + + g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB") + } + }, "30s", "1s").Should(gomega.Succeed()) + l.Info().Msg("All nodes have expected filters registered") + l.Info().Int("Count", len(expectedFilters)).Msg("Expected filters count") + + l.Warn().Str("Duration", "1m").Msg("Waiting for all CL nodes to have end block finalised") + gom.Eventually(func(g gomega.Gomega) { + hasFinalised, err := logPollerHasFinalisedEndBlock(endBlock, testEnv.EVMClient.GetChainID(), l, coreLogger, testEnv.ClCluster) + if err != nil { + l.Warn().Err(err).Msg("Error checking if nodes have finalised end block. Retrying...") + } + g.Expect(hasFinalised).To(gomega.BeTrue(), "Some nodes have not finalised end block") + }, "1m", "30s").Should(gomega.Succeed()) + + // Trigger replay + l.Info().Msg("Triggering log poller's replay") + for i := 1; i < len(testEnv.ClCluster.Nodes); i++ { + nodeName := testEnv.ClCluster.Nodes[i].ContainerName + response, _, err := testEnv.ClCluster.Nodes[i].API.ReplayLogPollerFromBlock(startBlock, testEnv.EVMClient.GetChainID().Int64()) + require.NoError(t, err, "Error triggering log poller's replay on node %s", nodeName) + require.Equal(t, "Replay started", response.Data.Attributes.Message, "Unexpected response message from log poller's replay") + } + + l.Warn().Str("Duration", consistencyTimeout).Msg("Waiting for replay logs to be processed by all nodes") + + gom.Eventually(func(g gomega.Gomega) { + logCountMatches, err := clNodesHaveExpectedLogCount(startBlock, endBlock, testEnv.EVMClient.GetChainID(), totalLogsEmitted, expectedFilters, l, coreLogger, testEnv.ClCluster) + if err != nil { + l.Warn().Err(err).Msg("Error checking if CL nodes have expected log count. 
Retrying...")
+		}
+		g.Expect(logCountMatches).To(gomega.BeTrue(), "Not all CL nodes have expected log count")
+	}, consistencyTimeout, "30s").Should(gomega.Succeed())
+
+	// Wait until all CL nodes have exactly the same logs emitted by test contracts as the EVM node has
+	l.Warn().Str("Duration", consistencyTimeout).Msg("Waiting for CL nodes to have all the logs that EVM node has")
+
+	gom.Eventually(func(g gomega.Gomega) {
+		missingLogs, err := getMissingLogs(startBlock, endBlock, logEmitters, testEnv.EVMClient, testEnv.ClCluster, l, coreLogger, cfg)
+		if err != nil {
+			l.Warn().Err(err).Msg("Error getting missing logs. Retrying...")
+		}
+
+		if !missingLogs.IsEmpty() {
+			printMissingLogsByType(missingLogs, l, cfg)
+		}
+		g.Expect(missingLogs.IsEmpty()).To(gomega.BeTrue(), "Some CL nodes were missing logs")
+	}, consistencyTimeout, "10s").Should(gomega.Succeed())
+}
+
+// FinalityBlockFn resolves, for a given chain ID and the last block in which
+// events were emitted, the block number to wait for.
+// NOTE(review): semantics inferred from the signature only — confirm against implementations.
+type FinalityBlockFn = func(chainId int64, endBlock int64) (int64, error)
+
+// ExecuteCILogPollerTest deploys an automation registry with log-triggered
+// upkeeps and log emitter contracts, emits events while chaos experiments
+// randomly pause containers, then asserts that every CL node's log poller
+// holds the same logs for the emitter contracts as the EVM node itself.
+func ExecuteCILogPollerTest(t *testing.T, cfg *Config) {
+	l := logging.GetTestLogger(t)
+	coreLogger := core_logger.TestLogger(t) //needed by ORM ¯\_(ツ)_/¯
+
+	// len() of a nil slice is 0, so a single len check covers both "unset" and "empty" (staticcheck S1009).
+	if len(cfg.General.EventsToEmit) == 0 {
+		l.Warn().Msg("No events to emit specified, using all events from log emitter contract")
+		for _, event := range EmitterABI.Events {
+			cfg.General.EventsToEmit = append(cfg.General.EventsToEmit, event)
+		}
+	}
+
+	l.Info().Msg("Starting CI log poller test")
+
+	var (
+		err           error
+		upKeepsNeeded = cfg.General.Contracts * len(cfg.General.EventsToEmit)
+	)
+
+	chainClient, _, contractDeployer, linkToken, registry, registrar, testEnv := setupLogPollerTestDocker(
+		t, ethereum.RegistryVersion_2_1, defaultOCRRegistryConfig, upKeepsNeeded, time.Duration(1000*time.Millisecond), cfg.General.UseFinalityTag,
+	)
+
+	_, upkeepIDs := actions.DeployConsumers(
+		t,
+		registry,
+		registrar,
+		linkToken,
+		contractDeployer,
+		chainClient,
+		upKeepsNeeded,
+		big.NewInt(automationDefaultLinkFunds),
+		automationDefaultUpkeepGasLimit,
+		true,
+		false,
+	)
+
+	// Deploy Log Emitter contracts
+	logEmitters := make([]*contracts.LogEmitter, 0)
+	for i := 0; i < cfg.General.Contracts; i++ {
+		logEmitter, err := testEnv.ContractDeployer.DeployLogEmitterContract()
+		// Check the deploy error before storing or dereferencing the contract handle.
+		require.NoError(t, err, "Error deploying log emitter contract")
+		logEmitters = append(logEmitters, &logEmitter)
+		l.Info().Str("Contract address", logEmitter.Address().Hex()).Msg("Log emitter contract deployed")
+		time.Sleep(200 * time.Millisecond)
+	}
+
+	// Register log triggered upkeep for each combination of log emitter contract and event signature (topic)
+	// We need to register a separate upkeep for each event signature, because log trigger doesn't support multiple topics (even if log poller does)
+	for i := 0; i < len(upkeepIDs); i++ {
+		emitterAddress := (*logEmitters[i%cfg.General.Contracts]).Address()
+		upkeepID := upkeepIDs[i]
+		topicId := cfg.General.EventsToEmit[i%len(cfg.General.EventsToEmit)].ID
+
+		l.Info().Int("Upkeep id", int(upkeepID.Int64())).Str("Emitter address", emitterAddress.String()).Str("Topic", topicId.Hex()).Msg("Registering log trigger for log emitter")
+		err = registerSingleTopicFilter(registry, upkeepID, emitterAddress, topicId)
+		require.NoError(t, err, "Error registering log trigger for log emitter")
+		randomWait(50, 200)
+	}
+
+	err = chainClient.WaitForEvents()
+	require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps")
+
+	// Make sure that all nodes have expected filters registered before starting to emit events
+	expectedFilters := getExpectedFilters(logEmitters, cfg)
+	gom := gomega.NewGomegaWithT(t)
+	gom.Eventually(func(g gomega.Gomega) {
+		for i := 1; i < len(testEnv.ClCluster.Nodes); i++ {
+			nodeName := testEnv.ClCluster.Nodes[i].ContainerName
+			l.Info().Str("Node name", nodeName).Msg("Fetching filters from log poller's DB")
+
+			hasFilters, err := nodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb)
+			if err != nil {
+				l.Warn().Err(err).Msg("Error checking if node has expected filters. Retrying...")
+				return
+			}
+
+			g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB")
+		}
+	}, "1m", "1s").Should(gomega.Succeed())
+	l.Info().Msg("All nodes have expected filters registered")
+	l.Info().Int("Count", len(expectedFilters)).Msg("Expected filters count")
+
+	// Save block number before starting to emit events, so that we can later use it when querying logs
+	sb, err := testEnv.EVMClient.LatestBlockNumber(utils.TestContext(t))
+	require.NoError(t, err, "Error getting latest block number")
+	startBlock := int64(sb)
+
+	l.Info().Msg("STARTING EVENT EMISSION")
+	startTime := time.Now()
+
+	// Start chaos experiments by randomly pausing random containers (Chainlink nodes or their DBs)
+	chaosDoneCh := make(chan error, 1)
+	go func() {
+		executeChaosExperiment(l, testEnv, cfg, chaosDoneCh)
+	}()
+
+	totalLogsEmitted, err := executeGenerator(t, cfg, logEmitters)
+	endTime := time.Now()
+	require.NoError(t, err, "Error executing event generator")
+
+	expectedLogsEmitted := getExpectedLogCount(cfg)
+	duration := int(endTime.Sub(startTime).Seconds())
+	if duration == 0 {
+		// Guard against division by zero in the LPS calculation for sub-second runs.
+		duration = 1
+	}
+	l.Info().Int("Total logs emitted", totalLogsEmitted).Int64("Expected total logs emitted", expectedLogsEmitted).Str("Duration", fmt.Sprintf("%d sec", duration)).Str("LPS", fmt.Sprintf("%d/sec", totalLogsEmitted/duration)).Msg("FINISHED EVENT EMISSION")
+
+	// Save block number after finishing to emit events, so that we can later use it when querying logs
+	eb, err := testEnv.EVMClient.LatestBlockNumber(utils.TestContext(t))
+	require.NoError(t, err, "Error getting latest block number")
+
+	endBlock, err := GetEndBlockToWaitFor(int64(eb), testEnv.EVMClient.GetChainID().Int64(), cfg)
+	require.NoError(t, err, "Error getting end block to wait for")
+
+	l.Info().Msg("Waiting before proceeding with test until all chaos experiments finish")
+	chaosError := <-chaosDoneCh
+	require.NoError(t, chaosError, "Error encountered during chaos experiment")
+
+	// Wait until last block in which events were emitted has been finalised (with buffer)
+	waitDuration := "45m"
+	l.Warn().Str("Duration", waitDuration).Msg("Waiting for chain to advance beyond finality")
+
+	gom.Eventually(func(g gomega.Gomega) {
+		hasAdvanced, err := chainHasFinalisedEndBlock(l, testEnv.EVMClient, endBlock)
+		if err != nil {
+			l.Warn().Err(err).Msg("Error checking if chain has advanced beyond finality. Retrying...")
+		}
+		g.Expect(hasAdvanced).To(gomega.BeTrue(), "Chain has not advanced beyond finality")
+	}, waitDuration, "30s").Should(gomega.Succeed())
+
+	l.Warn().Str("Duration", waitDuration).Msg("Waiting for all CL nodes to have end block finalised")
+	gom.Eventually(func(g gomega.Gomega) {
+		hasFinalised, err := logPollerHasFinalisedEndBlock(endBlock, testEnv.EVMClient.GetChainID(), l, coreLogger, testEnv.ClCluster)
+		if err != nil {
+			l.Warn().Err(err).Msg("Error checking if nodes have finalised end block. Retrying...")
+		}
+		g.Expect(hasFinalised).To(gomega.BeTrue(), "Some nodes have not finalised end block")
+	}, waitDuration, "30s").Should(gomega.Succeed())
+
+	// Wait until all CL nodes have exactly the same logs emitted by test contracts as the EVM node has
+	logConsistencyWaitDuration := "10m"
+	l.Warn().Str("Duration", logConsistencyWaitDuration).Msg("Waiting for CL nodes to have all the logs that EVM node has")
+
+	gom.Eventually(func(g gomega.Gomega) {
+		missingLogs, err := getMissingLogs(startBlock, endBlock, logEmitters, testEnv.EVMClient, testEnv.ClCluster, l, coreLogger, cfg)
+		if err != nil {
+			l.Warn().Err(err).Msg("Error getting missing logs. Retrying...")
+		}
+
+		if !missingLogs.IsEmpty() {
+			printMissingLogsByType(missingLogs, l, cfg)
+		}
+		g.Expect(missingLogs.IsEmpty()).To(gomega.BeTrue(), "Some CL nodes were missing logs")
+	}, logConsistencyWaitDuration, "20s").Should(gomega.Succeed())
+
+	evmLogs, _ := getEVMLogs(startBlock, endBlock, logEmitters, testEnv.EVMClient, l, cfg)
+
+	if totalLogsEmitted != len(evmLogs) {
+		l.Warn().Int("Total logs emitted", totalLogsEmitted).Int("Total logs in EVM", len(evmLogs)).Msg("Test passed, but total logs emitted does not match total logs in EVM")
+	}
+}
diff --git a/integration-tests/utils/cl_node_jobs.go b/integration-tests/utils/cl_node_jobs.go
index 16b0c167cf..65dc6e4e39 100644
--- a/integration-tests/utils/cl_node_jobs.go
+++ b/integration-tests/utils/cl_node_jobs.go
@@ -10,13 +10,14 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/google/uuid"
 	"github.com/lib/pq"
+	"gopkg.in/guregu/null.v4"
+
 	coreClient "github.com/smartcontractkit/chainlink/integration-tests/client"
 	"github.com/smartcontractkit/chainlink/v2/core/services/job"
 	"github.com/smartcontractkit/chainlink/v2/core/store/models"
-	"gopkg.in/guregu/null.v4"
 )
 
-func BuildBootstrapSpec(verifierAddr common.Address, chainID int64, fromBlock uint64, feedId [32]byte) *coreClient.OCR2TaskJobSpec {
+func BuildBootstrapSpec(verifierAddr common.Address, chainID int64, feedId [32]byte) *coreClient.OCR2TaskJobSpec {
 	hash := common.BytesToHash(feedId[:])
 	return &coreClient.OCR2TaskJobSpec{
 		Name: fmt.Sprintf("bootstrap-%s", uuid.NewString()),
diff --git a/integration-tests/utils/common.go b/integration-tests/utils/common.go
index c8243097a7..f13c71cfd9 100644
--- a/integration-tests/utils/common.go
+++ b/integration-tests/utils/common.go
@@ -1,7 +1,10 @@
 package utils
 
 import (
+	"context"
+	"math/big"
 	"net"
+	"testing"
 
 	"github.com/smartcontractkit/chainlink/v2/core/store/models"
 )
@@ -23,3 +26,33 @@ func MustIP(s string) *net.IP {
 	}
 	return &ip
 }
+
+func BigIntSliceContains(slice []*big.Int, b 
*big.Int) bool {
+	// Linear scan comparing values with Cmp (== 0 means numerically equal);
+	// pointer equality on *big.Int would be wrong here.
+	for _, a := range slice {
+		if b.Cmp(a) == 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// TestContext returns a context with the test's deadline, if available.
+// The returned context is cancelled via tb.Cleanup when the test finishes,
+// so work derived from it does not outlive the test.
+func TestContext(tb testing.TB) context.Context {
+	ctx := context.Background()
+	var cancel func()
+	switch t := tb.(type) {
+	case *testing.T:
+		// Return background context if testing.T not set
+		if t == nil {
+			return ctx
+		}
+		// Only *testing.T exposes Deadline; other TB implementations fall
+		// through to the plain cancellable context below.
+		if d, ok := t.Deadline(); ok {
+			ctx, cancel = context.WithDeadline(ctx, d)
+		}
+	}
+	if cancel == nil {
+		ctx, cancel = context.WithCancel(ctx)
+	}
+	tb.Cleanup(cancel)
+	return ctx
+}
diff --git a/integration-tests/utils/templates/secrets.go b/integration-tests/utils/templates/secrets.go
index f81287e871..45edf0d012 100644
--- a/integration-tests/utils/templates/secrets.go
+++ b/integration-tests/utils/templates/secrets.go
@@ -2,6 +2,7 @@ package templates
 
 import (
 	"github.com/google/uuid"
+	"github.com/smartcontractkit/chainlink-testing-framework/utils/templates"
 )