diff --git a/.github/workflows/goclean.yml b/.github/workflows/goclean.yml
index 7eabb220c4..ab8b5794c3 100644
--- a/.github/workflows/goclean.yml
+++ b/.github/workflows/goclean.yml
@@ -29,7 +29,7 @@ jobs:
     - name: Setup Go
       uses: actions/setup-go@v3
       with:
-        go-version: '1.19.0'
+        go-version: '1.20.0'
 
     - name: go vet
       run: |
diff --git a/.github/workflows/kind-e2e.yaml b/.github/workflows/kind-e2e.yaml
index d73c376fcc..60bf099f4a 100644
--- a/.github/workflows/kind-e2e.yaml
+++ b/.github/workflows/kind-e2e.yaml
@@ -16,11 +16,13 @@ jobs:
     strategy:
       fail-fast: false # Keep running if one leg fails.
       matrix:
+        # Keep in sync with the list of supported releases: https://kubernetes.io/releases/
         k8s-version:
-        - v1.24.x
         - v1.25.x
         - v1.26.x
         - v1.27.x
+        # Needs https://github.com/sigstore/scaffolding/pull/756
+        # - v1.28.x
     uses: ./.github/workflows/reusable-e2e.yaml
     with:
       k8s-version: ${{ matrix.k8s-version }}
diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml
index 548c1b9607..e94cfb7904 100644
--- a/.github/workflows/lint.yaml
+++ b/.github/workflows/lint.yaml
@@ -19,7 +19,7 @@ jobs:
     steps:
       - uses: actions/setup-go@v4
         with:
-          go-version: "1.19"
+          go-version: "1.20"
 
       - uses: actions/checkout@v3
 
diff --git a/.github/workflows/reusable-e2e.yaml b/.github/workflows/reusable-e2e.yaml
index 393be7582c..d9081bfeda 100644
--- a/.github/workflows/reusable-e2e.yaml
+++ b/.github/workflows/reusable-e2e.yaml
@@ -51,7 +51,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.19.x
+        go-version: 1.20.x
 
     - uses: imjasonh/setup-ko@v0.6
       with:
diff --git a/go.mod b/go.mod
index 79073e35dc..f149460c62 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/tektoncd/chains
 
-go 1.19
+go 1.20
 
 require (
 	cloud.google.com/go/compute/metadata v0.2.3
@@ -17,7 +17,7 @@ require (
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/pkg/errors v0.9.1
 	github.com/secure-systems-lab/go-securesystemslib v0.7.0
-	github.com/sigstore/cosign/v2 v2.1.1
+	github.com/sigstore/cosign/v2 v2.2.0
 	github.com/sigstore/rekor v1.2.2
 	github.com/sigstore/sigstore v1.7.2
 	github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.2
@@ -26,8 +26,8 @@ require (
 	github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.2
 	github.com/spiffe/go-spiffe/v2 v2.1.6
 	github.com/stretchr/testify v1.8.4
-	github.com/tektoncd/pipeline v0.50.1
-	github.com/tektoncd/plumbing v0.0.0-20221102182345-5dbcfda657d7
+	github.com/tektoncd/pipeline v0.51.0
+	github.com/tektoncd/plumbing v0.0.0-20230907180608-5625252a2de1
 	go.uber.org/zap v1.25.0
 	gocloud.dev v0.33.0
 	gocloud.dev/docstore/mongodocstore v0.33.0
@@ -38,8 +38,8 @@ require (
 	k8s.io/api v0.27.3
 	k8s.io/apimachinery v0.27.3
 	k8s.io/client-go v0.27.3
-	k8s.io/code-generator v0.25.9
-	knative.dev/pkg v0.0.0-20230518105712-dfb4bf04635d
+	k8s.io/code-generator v0.26.5
+	knative.dev/pkg v0.0.0-20230718152110-aef227e72ead
 	sigs.k8s.io/yaml v1.3.0
 )
 
@@ -50,7 +50,7 @@ require (
 	cloud.google.com/go/compute v1.23.0 // indirect
 	cloud.google.com/go/firestore v1.12.0 // indirect
 	cloud.google.com/go/iam v1.1.1 // indirect
-	cloud.google.com/go/kms v1.15.0 // indirect
+	cloud.google.com/go/kms v1.15.1 // indirect
 	cloud.google.com/go/longrunning v0.5.1 // indirect
 	contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
 	contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect
@@ -61,7 +61,7 @@ require (
 	github.com/Antonboom/errname v0.1.12 // indirect
 	github.com/Antonboom/nilnil v0.1.7 // indirect
 	github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.0 // indirect
@@ -76,12 +76,19 @@ require (
 	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
 	github.com/BurntSushi/toml v1.3.2 // indirect
+	github.com/DataDog/appsec-internal-go v1.0.0 // indirect
+	github.com/DataDog/datadog-agent/pkg/obfuscate v0.45.0-rc.1 // indirect
+	github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.46.0-rc.4 // indirect
+	github.com/DataDog/datadog-go/v5 v5.3.0 // indirect
+	github.com/DataDog/go-libddwaf v1.4.1 // indirect
+	github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect
+	github.com/DataDog/sketches-go v1.2.1 // indirect
 	github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
 	github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 // indirect
 	github.com/Masterminds/semver v1.5.0 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect
-	github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903 // indirect
+	github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect
 	github.com/Shopify/sarama v1.38.1 // indirect
 	github.com/ThalesIgnite/crypto11 v1.2.5 // indirect
 	github.com/alexkohler/nakedret/v2 v2.0.2 // indirect
@@ -101,7 +108,7 @@ require (
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/ashanbrown/forbidigo v1.6.0 // indirect
 	github.com/ashanbrown/makezero v1.1.1 // indirect
-	github.com/aws/aws-sdk-go v1.44.317 // indirect
+	github.com/aws/aws-sdk-go v1.44.318 // indirect
 	github.com/aws/aws-sdk-go-v2 v1.20.0 // indirect
 	github.com/aws/aws-sdk-go-v2/config v1.18.32 // indirect
 	github.com/aws/aws-sdk-go-v2/credentials v1.13.31 // indirect
@@ -129,7 +136,8 @@ require (
 	github.com/bombsimon/wsl/v3 v3.4.0 // indirect
 	github.com/breml/bidichk v0.2.4 // indirect
 	github.com/breml/errchkjson v0.3.1 // indirect
-	github.com/buildkite/agent/v3 v3.49.0 // indirect
+	github.com/buildkite/agent/v3 v3.52.1 // indirect
+	github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 // indirect
 	github.com/butuzov/ireturn v0.2.0 // indirect
 	github.com/butuzov/mirror v1.1.0 // indirect
 	github.com/ccojocar/zxcvbn-go v1.0.1 // indirect
@@ -149,17 +157,20 @@ require (
 	github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect
 	github.com/daixiang0/gci v0.11.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
 	github.com/denis-tingaikin/go-header v0.4.3 // indirect
-	github.com/digitorus/pkcs7 v0.0.0-20221212123742-001c36b64ec3 // indirect
-	github.com/digitorus/timestamp v0.0.0-20221019182153-ef3b63b79b31 // indirect
+	github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
+	github.com/digitorus/timestamp v0.0.0-20230821155606-d1ad5ca9624c // indirect
 	github.com/dimchansky/utfbom v1.1.1 // indirect
 	github.com/docker/cli v24.0.0+incompatible // indirect
 	github.com/docker/distribution v2.8.2+incompatible // indirect
 	github.com/docker/docker v24.0.0+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.7.0 // indirect
+	github.com/dustin/go-humanize v1.0.1 // indirect
 	github.com/eapache/go-resiliency v1.3.0 // indirect
 	github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect
 	github.com/eapache/queue v1.1.0 // indirect
+	github.com/ebitengine/purego v0.4.0-alpha.4.0.20230519103000-ee8dcecc618f // indirect
 	github.com/emicklei/go-restful/v3 v3.10.2 // indirect
 	github.com/emirpasic/gods v1.18.1 // indirect
 	github.com/esimonov/ifshort v1.0.4 // indirect
@@ -191,7 +202,7 @@ require (
 	github.com/go-openapi/validate v0.22.1 // indirect
 	github.com/go-playground/locales v0.14.1 // indirect
 	github.com/go-playground/universal-translator v0.18.1 // indirect
-	github.com/go-playground/validator/v10 v10.14.1 // indirect
+	github.com/go-playground/validator/v10 v10.15.1 // indirect
 	github.com/go-toolsmith/astcast v1.1.0 // indirect
 	github.com/go-toolsmith/astcopy v1.1.0 // indirect
 	github.com/go-toolsmith/astequal v1.1.0 // indirect
@@ -201,6 +212,7 @@ require (
 	github.com/go-toolsmith/typep v1.1.0 // indirect
 	github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect
 	github.com/gobwas/glob v0.2.3 // indirect
+	github.com/goccy/go-json v0.10.2 // indirect
 	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
@@ -219,11 +231,11 @@ require (
 	github.com/google/certificate-transparency-go v1.1.6 // indirect
 	github.com/google/gnostic v0.6.9 // indirect
 	github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa // indirect
-	github.com/google/go-github/v50 v50.2.0 // indirect
+	github.com/google/go-github/v53 v53.2.0 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/licenseclassifier v0.0.0-20210722185704-3043a050f148 // indirect
-	github.com/google/s2a-go v0.1.4 // indirect
+	github.com/google/s2a-go v0.1.5 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/google/wire v0.5.0 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
@@ -233,7 +245,7 @@ require (
 	github.com/gostaticanalysis/comment v1.4.2 // indirect
 	github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
 	github.com/gostaticanalysis/nilerr v0.1.1 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
@@ -243,7 +255,7 @@ require (
 	github.com/hashicorp/go-sockaddr v1.0.2 // indirect
 	github.com/hashicorp/go-uuid v1.0.3 // indirect
 	github.com/hashicorp/go-version v1.6.0 // indirect
-	github.com/hashicorp/golang-lru v0.6.0 // indirect
+	github.com/hashicorp/golang-lru v1.0.2 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/hashicorp/vault/api v1.9.2 // indirect
 	github.com/hexops/gotextdiff v1.0.3 // indirect
@@ -278,6 +290,12 @@ require (
 	github.com/ldez/tagliatelle v0.5.0 // indirect
 	github.com/leodido/go-urn v1.2.4 // indirect
 	github.com/leonklingele/grouper v1.1.1 // indirect
+	github.com/lestrrat-go/blackmagic v1.0.1 // indirect
+	github.com/lestrrat-go/httpcc v1.0.1 // indirect
+	github.com/lestrrat-go/httprc v1.0.4 // indirect
+	github.com/lestrrat-go/iter v1.0.2 // indirect
+	github.com/lestrrat-go/jwx/v2 v2.0.11 // indirect
+	github.com/lestrrat-go/option v1.0.1 // indirect
 	github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect
 	github.com/lufeee/execinquery v1.2.1 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
@@ -286,8 +304,8 @@ require (
 	github.com/maratori/testpackage v1.1.1 // indirect
 	github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.17 // indirect
-	github.com/mattn/go-runewidth v0.0.13 // indirect
+	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/mattn/go-runewidth v0.0.14 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mbilski/exhaustivestruct v1.2.0 // indirect
 	github.com/mgechev/revive v1.3.2 // indirect
@@ -306,21 +324,25 @@ require (
 	github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
 	github.com/nunnatsa/ginkgolinter v0.13.5 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
+	github.com/oleiade/reflections v1.0.1 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
 	github.com/opencontainers/image-spec v1.1.0-rc4 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/openzipkin/zipkin-go v0.3.0 // indirect
+	github.com/outcaste-io/ristretto v0.2.1 // indirect
 	github.com/pborman/uuid v1.2.1 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.8 // indirect
+	github.com/philhofer/fwd v1.1.2 // indirect
 	github.com/pierrec/lz4/v4 v4.1.18 // indirect
 	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/polyfloyd/go-errorlint v1.4.4 // indirect
 	github.com/prometheus/client_golang v1.16.0 // indirect
 	github.com/prometheus/client_model v0.4.0 // indirect
-	github.com/prometheus/common v0.42.0 // indirect
+	github.com/prometheus/common v0.44.0 // indirect
 	github.com/prometheus/procfs v0.10.1 // indirect
 	github.com/prometheus/statsd_exporter v0.21.0 // indirect
+	github.com/puzpuzpuz/xsync/v2 v2.4.1 // indirect
 	github.com/quasilyte/go-ruleguard v0.4.0 // indirect
 	github.com/quasilyte/gogrep v0.5.0 // indirect
 	github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
@@ -335,12 +357,13 @@ require (
 	github.com/sashamelentyev/usestdlibvars v1.24.0 // indirect
 	github.com/sassoftware/relic v7.2.1+incompatible // indirect
 	github.com/securego/gosec/v2 v2.17.0 // indirect
+	github.com/segmentio/asm v1.2.0 // indirect
 	github.com/segmentio/ksuid v1.0.4 // indirect
 	github.com/sergi/go-diff v1.3.1 // indirect
 	github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
 	github.com/shibumi/go-pathspec v1.3.0 // indirect
-	github.com/sigstore/fulcio v1.3.1 // indirect
-	github.com/sigstore/timestamp-authority v1.1.1 // indirect
+	github.com/sigstore/fulcio v1.4.0 // indirect
+	github.com/sigstore/timestamp-authority v1.1.2 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/sivchari/containedctx v1.0.3 // indirect
 	github.com/sivchari/nosnakecase v1.7.0 // indirect
@@ -364,9 +387,10 @@ require (
 	github.com/tdakkota/asciicheck v0.2.0 // indirect
 	github.com/tetafro/godot v1.4.14 // indirect
 	github.com/thales-e-security/pool v0.0.2 // indirect
-	github.com/theupdateframework/go-tuf v0.5.2 // indirect
+	github.com/theupdateframework/go-tuf v0.6.1 // indirect
 	github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect
 	github.com/timonwong/loggercheck v0.9.4 // indirect
+	github.com/tinylib/msgp v1.1.8 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/tjfoc/gmsm v1.3.2 // indirect
 	github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect
@@ -376,7 +400,7 @@ require (
 	github.com/ultraware/whitespace v0.0.5 // indirect
 	github.com/uudashr/gocognit v1.0.7 // indirect
 	github.com/vbatts/tar-split v0.11.3 // indirect
-	github.com/xanzy/go-gitlab v0.86.0 // indirect
+	github.com/xanzy/go-gitlab v0.90.0 // indirect
 	github.com/xanzy/ssh-agent v0.3.3 // indirect
 	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
 	github.com/xdg-go/scram v1.1.2 // indirect
@@ -393,11 +417,13 @@ require (
 	go.opentelemetry.io/otel v1.16.0 // indirect
 	go.opentelemetry.io/otel/metric v1.16.0 // indirect
 	go.opentelemetry.io/otel/trace v1.16.0 // indirect
-	go.step.sm/crypto v0.32.2 // indirect
+	go.step.sm/crypto v0.35.0 // indirect
 	go.tmz.dev/musttag v0.7.2 // indirect
 	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/automaxprocs v1.5.2 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
+	go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
+	go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect
 	golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect
 	golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect
 	golang.org/x/mod v0.12.0 // indirect
@@ -411,11 +437,12 @@ require (
 	golang.org/x/tools v0.12.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
-	google.golang.org/api v0.134.0 // indirect
+	google.golang.org/api v0.138.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20230731193218-e0aa005b6bdf // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230731193218-e0aa005b6bdf // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230731193218-e0aa005b6bdf // indirect
+	google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect
+	gopkg.in/DataDog/dd-trace-go.v1 v1.53.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
@@ -425,7 +452,8 @@ require (
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	honnef.co/go/tools v0.4.5 // indirect
-	k8s.io/apiextensions-apiserver v0.25.4 // indirect
+	inet.af/netaddr v0.0.0-20220811202034-502d2d690317 // indirect
+	k8s.io/apiextensions-apiserver v0.26.5 // indirect
 	k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 // indirect
 	k8s.io/klog/v2 v2.100.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5 // indirect
diff --git a/go.sum b/go.sum
index 3ca045149c..59e314476b 100644
--- a/go.sum
+++ b/go.sum
@@ -2,7 +2,6 @@
 4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs=
 4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc=
 4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU=
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -57,15 +56,14 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB
 cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
 cloud.google.com/go/firestore v1.12.0 h1:aeEA/N7DW7+l2u5jtkO8I0qv0D95YwjggD8kUHrTHO4=
 cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4=
 cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
 cloud.google.com/go/iam v0.4.0/go.mod h1:cbaZxyScUhxl7ZAkNWiALgihfP75wS/fUsVNaa1r3vA=
 cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y=
 cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
-cloud.google.com/go/kms v1.15.0 h1:xYl5WEaSekKYN5gGRyhjvZKM22GVBBCzegGNVPy+aIs=
-cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM=
+cloud.google.com/go/kms v1.15.1 h1:HUC3fAoepH3RpcQXiJhXWWYizjQ5r7YjI7SO9ZbHf9s=
+cloud.google.com/go/kms v1.15.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM=
 cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI=
 cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -87,6 +85,7 @@ contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h
 contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY=
 contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs=
 contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek=
 filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
@@ -101,11 +100,10 @@ github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClD
 github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro=
 github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow=
 github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ=
-github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 h1:/iHxaJhsFr0+xVFfbMr5vxz848jyiWuIEDhYq3y5odY=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
@@ -114,16 +112,11 @@ github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.0 h1:yfJe15a
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.0/go.mod h1:Q28U+75mpCaSCDowNEmhIo/rmgdkqmkmzI7N6TGR4UY=
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4=
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0/go.mod h1:cw4zVQgBby0Z5f2v0itn6se2dDP17nTjbZFXW5uPyHA=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
 github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
 github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
 github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
-github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
 github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
 github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
@@ -135,11 +128,9 @@ github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZy
 github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0=
 github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
 github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
 github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
@@ -151,42 +142,40 @@ github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi
 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DataDog/appsec-internal-go v1.0.0 h1:2u5IkF4DBj3KVeQn5Vg2vjPUtt513zxEYglcqnd500U=
+github.com/DataDog/appsec-internal-go v1.0.0/go.mod h1:+Y+4klVWKPOnZx6XESG7QHydOaUGEXyH2j/vSg9JiNM=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.45.0-rc.1 h1:XyYvstMFpSyZtfJHWJm1Sf1meNyCdfhKJrjB6+rUNOk=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.45.0-rc.1/go.mod h1:e933RWa4kAWuHi5jpzEuOiULlv21HcCFEVIYegmaB5c=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.46.0-rc.4 h1:KE/ntoEPODxVGYXjWXFVVRniprifNhE4OOrylNolUv0=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.46.0-rc.4/go.mod h1:VVMDDibJxYEkwcLdZBT2g8EHKpbMT4JdOhRbQ9GdjbM=
+github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E=
+github.com/DataDog/datadog-go/v5 v5.3.0 h1:2q2qjFOb3RwAZNU+ez27ZVDwErJv5/VpbBPprz7Z+s8=
+github.com/DataDog/datadog-go/v5 v5.3.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q=
+github.com/DataDog/go-libddwaf v1.4.1 h1:dZTypHGyf38vDk5QbbsqaB8w5X213dFOZKT8SnMLmSI=
+github.com/DataDog/go-libddwaf v1.4.1/go.mod h1:qLZEuaF5amEVMP5NTYtr/6m30m73voPL4i7SK7dnnt4=
+github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork h1:yBq5PrAtrM4yVeSzQ+bn050+Ysp++RKF1QmtkL4VqvU=
+github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork/go.mod h1:yA5JwkZsHTLuqq3zaRgUQf35DfDkpOZqgtBqHKpwrBs=
+github.com/DataDog/gostackparse v0.5.0 h1:jb72P6GFHPHz2W0onsN51cS3FkaMDcjb0QzgxxA4gDk=
+github.com/DataDog/sketches-go v1.2.1 h1:qTBzWLnZ3kM2kw39ymh6rMcnN+5VULwFs++lEYUUsro=
+github.com/DataDog/sketches-go v1.2.1/go.mod h1:1xYmPLY1So10AwxV6MJV0J53XVH+WL9Ad1KetxVivVI=
 github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
 github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
 github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 h1:3ZBs7LAezy8gh0uECsA6CGU43FF3zsx5f4eah5FxTMA=
 github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0/go.mod h1:rZLTje5A9kFBe0pzhpe2TdhRniBF++PRHQuRpR8esVc=
 github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
 github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
-github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
-github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
-github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY=
 github.com/OpenPeeDeeP/depguard/v2 v2.1.0/go.mod h1:PUBgk35fX4i7JDmwzlJwJ+GMe6NfO1723wmJMgPThNQ=
-github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903 h1:ZK3C5DtzV2nVAQTx5S5jQvMeDqWtD1By5mOoyY/xJek=
-github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE=
+github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
+github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/Shopify/sarama v1.30.0/go.mod h1:zujlQQx1kzHsh4jfV1USnptCQrHAEZ2Hk8fTKCulPVs=
 github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A=
 github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g=
@@ -203,7 +192,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
 github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
-github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
 github.com/alexkohler/nakedret/v2 v2.0.2 h1:qnXuZNvv3/AxkAb22q/sEsEpcA99YxLFACDtEw9TPxE=
 github.com/alexkohler/nakedret/v2 v2.0.2/go.mod h1:2b8Gkk0GsOrqQv/gPWjNLDSKwG8I5moSXG1K4VIBcTQ=
 github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw=
@@ -251,13 +239,9 @@ github.com/aliyun/credentials-go v1.2.3/go.mod h1:/KowD1cfGSLrLsH28Jr8W+xwoId0yw
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
@@ -265,9 +249,8 @@ github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8ger
 github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
 github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
 github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
-github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/aws/aws-sdk-go v1.44.317 h1:+8XWrLmGMwPPXSRSLPzhgcGnzJ2mYkgkrcB9C/GnSOU=
-github.com/aws/aws-sdk-go v1.44.317/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.318 h1:Yl66rpbQHFUbxe9JBKLcvOvRivhVgP6+zH0b9KzARX8=
+github.com/aws/aws-sdk-go v1.44.318/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
 github.com/aws/aws-sdk-go-v2 v1.20.0 h1:INUDpYLt4oiPOJl0XwZDK2OVAVf0Rzo+MGVTv9f+gy8=
 github.com/aws/aws-sdk-go-v2 v1.20.0/go.mod h1:uWOr0m0jDsiWw8nnXiqZ+YG6LdvAlGYDLLf2NmHZoy4=
@@ -314,17 +297,13 @@ github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-
 github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7/go.mod h1:VVALgT1UESBh91dY0GprHnT1Z7mKd96VDk8qVy+bmu0=
 github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
 github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
 github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY=
 github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
@@ -336,26 +315,22 @@ github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkAp
 github.com/bluekeyes/go-gitdiff v0.7.1 h1:graP4ElLRshr8ecu0UtqfNTCHrtSyZd3DABQm/DWesQ=
 github.com/bmatcuk/doublestar/v4 v4.0.2 h1:X0krlUVAVmtr2cRoTqR8aDMrDqnB36ht8wpWTiQ3jsA=
 github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
-github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU=
 github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo=
 github.com/breml/bidichk v0.2.4 h1:i3yedFWWQ7YzjdZJHnPo9d/xURinSq3OM+gyM43K4/8=
 github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s=
 github.com/breml/errchkjson v0.3.1 h1:hlIeXuspTyt8Y/UmP5qy1JocGNR00KQHgfaNtRAjoxQ=
 github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U=
-github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
-github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
-github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/buildkite/agent/v3 v3.49.0 h1:FSmRQz8YFhaCXg4MfE7JucPcY7mQ/HWM55ir1j3E9qM=
-github.com/buildkite/agent/v3 v3.49.0/go.mod h1:iasSyh3KPjOPCnyvnZB1trkkX7jrdL8PnLBgjdVJxgU=
+github.com/buildkite/agent/v3 v3.52.1 h1:s2pxVsYVt/OSMDlOM+ei5ZuE3+X6WptSwSR+OnQ7Gz8=
+github.com/buildkite/agent/v3 v3.52.1/go.mod h1:MSIR+qpVb1Z663HlSqKEoIc4kkhmUFf4XazdCkxyE8E=
+github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 h1:k6UDF1uPYOs0iy1HPeotNa155qXRWrzKnqAaGXHLZCE=
+github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251/go.mod h1:gbPR1gPu9dB96mucYIR7T3B7p/78hRVSOuzIWLHK2Y4=
 github.com/butuzov/ireturn v0.2.0 h1:kCHi+YzC150GE98WFuZQu9yrTn6GEydO2AuPLbTgnO4=
 github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
 github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI=
 github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE=
-github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
 github.com/ccojocar/zxcvbn-go v1.0.1 h1:+sxrANSCj6CdadkcMnvde/GWU1vZiiXRbqYSCalV4/4=
 github.com/ccojocar/zxcvbn-go v1.0.1/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60=
 github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
@@ -372,22 +347,16 @@ github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iy
 github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
 github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 h1:W9o46d2kbNL06lq7UNDPV0zYLzkrde/bjIqO02eoll0=
 github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo=
-github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4=
 github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
-github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
-github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
-github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/clbanning/mxj/v2 v2.5.6 h1:Jm4VaCI/+Ug5Q57IzEoZbwx4iQFA6wkXv72juUSeK+g=
 github.com/clbanning/mxj/v2 v2.5.6/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cloudevents/sdk-go/v2 v2.14.0 h1:Nrob4FwVgi5L4tV9lhjzZcjYqFVyJzsA56CwPaPfv6s=
 github.com/cloudevents/sdk-go/v2 v2.14.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUEeUfapHMUX1T5To=
-github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
 github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
 github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
@@ -399,116 +368,16 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
 github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ=
 github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w=
-github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
-github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
-github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
-github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
-github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
-github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
-github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
-github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/containerd v1.6.19 h1:F0qgQPrG0P2JPgwpxWxYavrVeXAG0ezUIB9Z/4FTUAU=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
-github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
-github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
-github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
-github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
-github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
-github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
-github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
-github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
-github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
-github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/stargz-snapshotter/estargz v0.7.0/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw=
+github.com/containerd/containerd v1.7.3 h1:cKwYKkP1eTj54bP3wCdXXBymmKRQMrWjkLSWZZJDa8o=
 github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
 github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
-github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
-github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
-github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
-github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
-github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
-github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
-github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-oidc/v3 v3.6.0 h1:AKVxfYw1Gmkn/w96z0DbT/B/xFnzTd3MkZvWLjF4n/o=
 github.com/coreos/go-oidc/v3 v3.6.0/go.mod h1:ZpHUsHBucTUj6WOkrP4E20UPynbLZzhTQ1XKCXkxyPc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -516,11 +385,6 @@ github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDU
 github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
 github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 h1:vU+EP9ZuFUCYE0NYLwTSob+3LNEJATzNfP/DC7SWGWI=
 github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
-github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
-github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
-github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
 github.com/daixiang0/gci v0.11.0 h1:XeQbFKkCRxvVyn06EOuNY6LPGBLVuB/W130c8FrnX6A=
 github.com/daixiang0/gci v0.11.0/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
 github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0=
@@ -528,47 +392,35 @@ github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnG
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
 github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU=
 github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c=
-github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
 github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936 h1:foGzavPWwtoyBvjWyKJYDYsyzy+23iBV7NKTwdk+LRY=
-github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/digitorus/pkcs7 v0.0.0-20221019075359-21b8b40e6bb4/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
-github.com/digitorus/pkcs7 v0.0.0-20221212123742-001c36b64ec3 h1:rjCXeRWazGsbcBlExMcAW8H1LGdgJ9r619y7+aeKgds=
-github.com/digitorus/pkcs7 v0.0.0-20221212123742-001c36b64ec3/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
-github.com/digitorus/timestamp v0.0.0-20221019182153-ef3b63b79b31 h1:3go0tpsBpbs9L/oysk3jDwRprlLRRkpSU7YxKlTfU+o=
-github.com/digitorus/timestamp v0.0.0-20221019182153-ef3b63b79b31/go.mod h1:6V2ND8Yf8TOJ4h+9pmUlx8kXvNLBB2QplToVVZQ3rF0=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/timestamp v0.0.0-20230821155606-d1ad5ca9624c h1:kgG83Hfj3YXkUbrihwBxDc0COzP1ZejiDSr4/fItT0E=
+github.com/digitorus/timestamp v0.0.0-20230821155606-d1ad5ca9624c/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
-github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli v24.0.0+incompatible h1:0+1VshNwBQzQAx9lOl+OYCTCEAD8fKs/qeXMx3O0wqM=
 github.com/docker/cli v24.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
 github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v24.0.0+incompatible h1:z4bf8HvONXX9Tde5lGBMQ7yCJgNahmJumdrStZAbeY4=
 github.com/docker/docker v24.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
 github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
 github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
-github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
 github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
 github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0=
 github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
@@ -577,9 +429,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4A
 github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=
 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/ebitengine/purego v0.4.0-alpha.4.0.20230519103000-ee8dcecc618f h1:v8f0ADMg0RBM0+5rb8qCFj/XlPkjo+xkyCLuUpBnj9s=
+github.com/ebitengine/purego v0.4.0-alpha.4.0.20230519103000-ee8dcecc618f/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ=
 github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE=
 github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
@@ -600,7 +451,6 @@ github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcH
 github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=
 github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
 github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
@@ -616,8 +466,8 @@ github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4
 github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y=
 github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
 github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
+github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
@@ -627,13 +477,10 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
 github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
 github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
 github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
 github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
 github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
-github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
@@ -641,13 +488,12 @@ github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyN
 github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
 github.com/go-critic/go-critic v0.9.0 h1:Pmys9qvU3pSML/3GEQ2Xd9RZ/ip+aXHKILuxczKGV/U=
 github.com/go-critic/go-critic v0.9.0/go.mod h1:5P8tdXL7m/6qnyG6oRAlYLORvoXH0WDypYgAEmagT40=
-github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
 github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4=
-github.com/go-git/go-git/v5 v5.6.1 h1:q4ZRqQl4pR/ZJHc1L5CFjGA1a10u76aV1iC+nh+bHsk=
+github.com/go-git/go-git/v5 v5.8.1 h1:Zo79E4p7TRk0xoRgMq0RShiTHGKcKI4+DI6BfJc/Q+A=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=
 github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -660,7 +506,6 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
 github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -676,13 +521,10 @@ github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpX
 github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
 github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
 github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
 github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
 github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
 github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
 github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
@@ -692,7 +534,6 @@ github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8en
 github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
 github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc=
 github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
 github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
 github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
 github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8=
@@ -702,7 +543,6 @@ github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrC
 github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
 github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k=
 github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
@@ -716,8 +556,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
 github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
 github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
 github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
-github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
+github.com/go-playground/validator/v10 v10.15.1 h1:BSe8uhN+xQ4r5guV/ywQI4gO59C2raYcGffYWZEjZzM=
+github.com/go-playground/validator/v10 v10.15.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
 github.com/go-rod/rod v0.114.2 h1:Qwt+vZHHnb117zc0q+XjhAJCkB01hchWSxH/raCyLb4=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@@ -768,31 +608,19 @@ github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/V
 github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/goccy/kpoward v0.1.0 h1:UcrLMG9rq7NwrMiUc0h+qUyIlvqPzqLiPb+zQEqH8cE=
-github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
 github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
 github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
-github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
 github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -874,16 +702,14 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.6.0/go.mod h1:euCCtNbZ6tKqi1E72vwDj2xZcN5ttKpZLfa/wSo5iLw=
 github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ=
 github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
 github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230625233257-b8504803389b h1:ptt4Cmxx6HsJQUSRp0LRB8nAxMdn9mxnqhc4dxwYlSM=
 github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230625233257-b8504803389b/go.mod h1:Ek+8PQrShkA7aHEj3/zSW33wU0V/Bx3zW/gFh7l21xY=
 github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa h1:+MG+Q2Q7mtW6kCIbUPZ9ZMrj7xOWDKI1hhy1qp0ygI0=
 github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa/go.mod h1:KdL98/Va8Dy1irB6lTxIRIQ7bQj4lbrlvqUzKEQ+ZBU=
-github.com/google/go-github/v50 v50.2.0 h1:j2FyongEHlO9nxXLc+LP3wuBSVU9mVxfpdYUexMpIfk=
-github.com/google/go-github/v50 v50.2.0/go.mod h1:VBY8FB6yPIjrtKhozXv4FQupxKLS6H4m6xFZlT43q8Q=
-github.com/google/go-licenses v0.0.0-20200602185517-f29a4c695c3d/go.mod h1:g1VOUGKZYIqe8lDq2mL7plhAWXqrEaGUs7eIjthN1sk=
+github.com/google/go-github/v53 v53.2.0 h1:wvz3FyF53v4BK+AsnvCmeNhf8AkTaeh2SoYu/XUvTtI=
+github.com/google/go-github/v53 v53.2.0/go.mod h1:XhFRObz+m/l+UCm9b7KSIC3lT3NWSXGt7mOsAWEloao=
 github.com/google/go-licenses v1.6.0 h1:MM+VCXf0slYkpWO0mECvdYDVCxZXIQNal5wqUIXEZ/A=
 github.com/google/go-licenses v1.6.0/go.mod h1:Z8jgz2isEhdenOqd/00pq7I4y4k1xVVQJv415otjclo=
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
@@ -895,7 +721,6 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/licenseclassifier v0.0.0-20190926221455-842c0d70d702/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M=
 github.com/google/licenseclassifier v0.0.0-20210722185704-3043a050f148 h1:TJsAqW6zLRMDTyGmc9TPosfn9OyVlHs8Hrn3pY6ONSY=
 github.com/google/licenseclassifier v0.0.0-20210722185704-3043a050f148/go.mod h1:rq9F0RSpNKlrefnf6ZYMHKUnEJBCNzf6AcCXMYBeYvE=
 github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
@@ -921,17 +746,16 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20230509042627-b1315fad0c5a h1:PEOGDI1kkyW37YqPWHLHc+D20D9+87Wt12TCcfTUo5Q=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
-github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/s2a-go v0.1.5 h1:8IYp3w9nysqv3JH+NJgXJzGbDHzLOTj43BmSkp+O7qg=
+github.com/google/s2a-go v0.1.5/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
 github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
 github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/wire v0.5.0 h1:I7ELFeVBr3yfPIcc8+MWvrjk+3VjbcSzoXm3JVa+jD8=
@@ -949,22 +773,15 @@ github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99
 github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
 github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
 github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
 github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 h1:mrEEilTAUmaAORhssPPkxj84TsHrPMLBGW2Z4SoTxm8=
 github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
-github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
 github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
 github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
 github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
@@ -978,36 +795,22 @@ github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod
 github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
 github.com/grafeas/grafeas v0.2.2 h1:dhn3M/RkBVrEP+gCowny1qoG1Opfa09SwPL1BGT6k0U=
 github.com/grafeas/grafeas v0.2.2/go.mod h1:O+UvNYn4LhdKR59XrxRDWwr2bbheR1KRRNdD8mJpxs4=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
 github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
 github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
 github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
@@ -1015,29 +818,20 @@ github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
 github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
 github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
 github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
 github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
-github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as=
 github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8=
 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
@@ -1048,10 +842,6 @@ github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
 github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
@@ -1060,7 +850,6 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
 github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
 github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
@@ -1091,20 +880,16 @@ github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjz
 github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
 github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
 github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=
 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -1123,8 +908,6 @@ github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa
 github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
 github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
 github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8=
 github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
@@ -1132,10 +915,6 @@ github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8=
 github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg=
-github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
 github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
@@ -1149,7 +928,6 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -1170,17 +948,27 @@ github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
 github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
 github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU=
 github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
+github.com/lestrrat-go/blackmagic v1.0.1 h1:lS5Zts+5HIC/8og6cGHb0uCcNCa3OUt1ygh3Qz2Fe80=
+github.com/lestrrat-go/blackmagic v1.0.1/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU=
+github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=
+github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
+github.com/lestrrat-go/httprc v1.0.4 h1:bAZymwoZQb+Oq8MEbyipag7iSq6YIga8Wj6GOiJGdI8=
+github.com/lestrrat-go/httprc v1.0.4/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo=
+github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI=
+github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4=
+github.com/lestrrat-go/jwx/v2 v2.0.11 h1:ViHMnaMeaO0qV16RZWBHM7GTrAnX2aFLVKofc7FuKLQ=
+github.com/lestrrat-go/jwx/v2 v2.0.11/go.mod h1:ZtPtMFlrfDrH2Y0iwfa3dRFn8VzwBrB+cyrm3IBWdDg=
+github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
+github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU=
+github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
 github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8=
 github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA=
 github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM=
 github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
 github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
 github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
 github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -1190,7 +978,6 @@ github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1r
 github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc=
 github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
 github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
-github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
 github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2srm/LN17lpybq15AryXIRcWYLE=
 github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
 github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE=
@@ -1199,52 +986,33 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
-github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
-github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
+github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=
 github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
 github.com/mgechev/revive v1.3.2 h1:Wb8NQKBaALBJ3xrrj4zpwJwqwNA6nDpyJSEQWcCka6U=
 github.com/mgechev/revive v1.3.2/go.mod h1:UCLtc7o5vg5aXCwdUTU1kEBQ1v+YXPAkYDIDXbrs5I0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
 github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
 github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
 github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
-github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
 github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
-github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -1257,19 +1025,14 @@ github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8
 github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
 github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA=
 github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI=
-github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI=
 github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA=
-github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
 github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=
 github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
-github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8pzda2l0=
 github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4=
@@ -1284,54 +1047,30 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
 github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM=
+github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60=
 github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
 github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
 github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
 github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
-github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
 github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
 github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
 github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
-github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0=
 github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
 github.com/openzipkin/zipkin-go v0.3.0 h1:XtuXmOLIXLjiU2XduuWREDT0LOKtSgos/g7i7RYyoZQ=
@@ -1344,17 +1083,16 @@ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6
 github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
 github.com/otiai10/mint v1.3.2/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/outcaste-io/ristretto v0.2.1 h1:KCItuNIGJZcursqHr3ghO7fc5ddZLEHspL9UR0cQM64=
+github.com/outcaste-io/ristretto v0.2.1/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
 github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
 github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
-github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
 github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
 github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
+github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
 github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
 github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
@@ -1362,64 +1100,47 @@ github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
 github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/polyfloyd/go-errorlint v1.4.4 h1:A9gytp+p6TYqeALTYRoxJESYP8wJRETRX2xzGWFsEBU=
 github.com/polyfloyd/go-errorlint v1.4.4/go.mod h1:ry5NqF7l9Q77V+XqAfUg1zfryrEtyac3G5+WVpIK0xU=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
 github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
-github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
 github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
-github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
 github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
-github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
-github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
-github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
 github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
 github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8=
 github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/puzpuzpuz/xsync/v2 v2.4.1 h1:aGdE1C/HaR/QC6YAFdtZXi60Df8/qBIrs8PKrzkItcM=
+github.com/puzpuzpuz/xsync/v2 v2.4.1/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU=
 github.com/quasilyte/go-ruleguard v0.4.0 h1:DyM6r+TKL+xbKB4Nm7Afd1IQh9kEUKQs2pboWGKtvQo=
 github.com/quasilyte/go-ruleguard v0.4.0/go.mod h1:Eu76Z/R8IXtViWUIHkE3p8gdH3/PKk1eh3YGfaEof10=
 github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo=
@@ -1431,26 +1152,23 @@ github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8
 github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw=
 github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50=
 github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI=
 github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
 github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
-github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
 github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc=
 github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
 github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw=
@@ -1460,13 +1178,13 @@ github.com/sashamelentyev/usestdlibvars v1.24.0/go.mod h1:9cYkq+gYJ+a5W2RPdhfaSC
 github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A=
 github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk=
 github.com/sassoftware/relic/v7 v7.5.5 h1:2ZUM6ovo3STCAp0hZnO9nQY9lOB8OyfneeYIi4YUxMU=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U=
 github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
 github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
 github.com/securego/gosec/v2 v2.17.0 h1:ZpAStTDKY39insEG9OH6kV3IkhQZPTq9a9eGOLOjcdI=
 github.com/securego/gosec/v2 v2.17.0/go.mod h1:lt+mgC91VSmriVoJLentrMkRCYs+HLTBnUFUBuhV2hc=
+github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
+github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
 github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
 github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -1482,10 +1200,10 @@ github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxr
 github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
 github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f h1:tygelZueB1EtXkPI6mQ4o9DQ0+FKW41hTbunoXZCTqk=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sigstore/cosign/v2 v2.1.1 h1:HOI6pWaEie0wLituDWWaqC5U9MaXablKNf6QroVhj6k=
-github.com/sigstore/cosign/v2 v2.1.1/go.mod h1:S9KGmdQ/Dd29TdgUwGCNeXR7scJWZwREh4A9Za2PRPY=
-github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y=
-github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU=
+github.com/sigstore/cosign/v2 v2.2.0 h1:MV/ALD1/e/JgxXXCdCNxlIRk2NB3Irb4MKPozd8SPR8=
+github.com/sigstore/cosign/v2 v2.2.0/go.mod h1:Kcm7lTZbpiEpA3wPCqRygTUdLpX8CNT+36rODTCBr1M=
+github.com/sigstore/fulcio v1.4.0 h1:05+k8BFvwTQzfCkVxESWzCN4b70KIRliGYz0Upmdrs8=
+github.com/sigstore/fulcio v1.4.0/go.mod h1:wcjlktbhoy6+ZTxO3yXpvqUxsLV+JEH4FF3a5Jz4VPI=
 github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
 github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
 github.com/sigstore/sigstore v1.7.2 h1:MY0wSOhKWa8SIWSCO9SzFnUl+b7jbthgXHJpuUg31Qs=
@@ -1498,10 +1216,8 @@ github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.2 h1:/c9AZS8mfOlpjCuBA1a
 github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.2/go.mod h1:aN9UpIdLFf185H1+vmdxT+nty/ka8NPxUUvBigydaLk=
 github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.2 h1:GquAXZRIVJ8e10NpTzW4wNkpRGaDdJdPWz2Xlsdc2OM=
 github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.2/go.mod h1:M7+ABfjRwfpKCTMCrhMweUrqKGoa4S6u53dB321MKdM=
-github.com/sigstore/timestamp-authority v1.1.1 h1:EldrdeBED0edNzDMvYZDf5CyWgtSchtR9DKYyksNR8M=
-github.com/sigstore/timestamp-authority v1.1.1/go.mod h1:cEDLEHl/L3ppqKDaiZ3Cg4ikcaYleuq90I/BFNePzF0=
-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sigstore/timestamp-authority v1.1.2 h1:xgBs9Ct27sEgFb35GL1trKD2XOgYbtk0q2G0HLZHDDY=
+github.com/sigstore/timestamp-authority v1.1.2/go.mod h1:7rGe/e6ZJNMqPiwFiv7w+qNXT8GID9gL7nMcRwZ007I=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
@@ -1518,70 +1234,50 @@ github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt
 github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY=
 github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak=
 github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
-github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0=
+github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM=
 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
 github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00=
 github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo=
 github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0=
 github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
 github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
 github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
 github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
 github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
 github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
 github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
 github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
 github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
 github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
 github.com/spiffe/go-spiffe/v2 v2.1.6 h1:4SdizuQieFyL9eNU+SPiCArH4kynzaKOOj0VvM8R7Xo=
 github.com/spiffe/go-spiffe/v2 v2.1.6/go.mod h1:eVDqm9xFvyqao6C+eQensb9ZPkyNEeaUbqbBpOhBnNk=
-github.com/spiffe/spire-api-sdk v1.7.0 h1:ThqXfS5dt29Noq8Sjr65CPtIk7Pe7LLoLr7S8g671Ic=
+github.com/spiffe/spire-api-sdk v1.7.1 h1:i/X23zWJtk8G6gy1QYXkWBldruAzRdzxH6icgjMJjUs=
 github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
 github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
 github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
 github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
 github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc=
 github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
 github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
-github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -1596,23 +1292,19 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
 github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
 github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
 github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8=
 github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
 github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM=
 github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg=
-github.com/tektoncd/pipeline v0.50.1 h1:Asdfn1MxUSTlfzmr0PV4BPIEH7K4QLGym33nLgQlPCQ=
-github.com/tektoncd/pipeline v0.50.1/go.mod h1:OjhCfhPQbVvK6GUmIseL2ipjaQ8ILcUerMk4P4sCcHA=
-github.com/tektoncd/plumbing v0.0.0-20221102182345-5dbcfda657d7 h1:YsjQ83UBIIq4k/s2PzQ6pqe4tpPtm1hia3oyNBDDrDU=
-github.com/tektoncd/plumbing v0.0.0-20221102182345-5dbcfda657d7/go.mod h1:uJBaI0AL/kjPThiMYZcWRujEz7D401v643d6s/21GAg=
+github.com/tektoncd/pipeline v0.51.0 h1:n2i/AJPXkTwcv9uc9xRJCtMeAHF1c1NkF7cNQ5zEf+0=
+github.com/tektoncd/pipeline v0.51.0/go.mod h1:P9xePA0fqYIhaw4fllmX2LtMneyWqj60EjsZp5qqq9U=
+github.com/tektoncd/plumbing v0.0.0-20230907180608-5625252a2de1 h1:9paprRIBXQgcvdhGq3wKiSspXP0JIFSY52ru3sIMjKM=
+github.com/tektoncd/plumbing v0.0.0-20230907180608-5625252a2de1/go.mod h1:7eWs1XNkmReggow7ggRbRyRuHi7646B8b2XipCZ3VOw=
 github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA=
 github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
 github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
@@ -1621,35 +1313,29 @@ github.com/tetafro/godot v1.4.14 h1:ScO641OHpf9UpHPk8fCknSuXNMpi4iFlwuWoBs3L+1s=
 github.com/tetafro/godot v1.4.14/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
 github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg=
 github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU=
-github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA=
-github.com/theupdateframework/go-tuf v0.5.2/go.mod h1:SyMV5kg5n4uEclsyxXJZI2UxPFJNDc4Y+r7wv+MlvTA=
+github.com/theupdateframework/go-tuf v0.6.1 h1:6J89fGjQf7s0mLmTG7p7pO/MbKOg+bIXhaLyQdmbKuE=
+github.com/theupdateframework/go-tuf v0.6.1/go.mod h1:LAFusuQsFNBnEyYoTuA5zZrF7iaQ4TEgBXm8lb6Vj18=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M=
 github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
 github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4=
 github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg=
+github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
+github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
 github.com/tjfoc/gmsm v1.3.2 h1:7JVkAn5bvUJ7HtU08iW6UiD+UTmJTIToHCfeFzkcCxM=
 github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ=
 github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE=
 github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
 github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
 github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4=
 github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
 github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI=
 github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4=
 github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI=
 github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
 github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
 github.com/uudashr/gocognit v1.0.7 h1:e9aFXgKgUJrQ5+bs61zBigmj7bFJ/5cC6HmMahVzuDo=
@@ -1657,18 +1343,11 @@ github.com/uudashr/gocognit v1.0.7/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Usc
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
 github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
-github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
-github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
-github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
 github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
+github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc=
 github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
-github.com/xanzy/go-gitlab v0.86.0 h1:jR8V9cK9jXRQDb46KOB20NCF3ksY09luaG0IfXE6p7w=
-github.com/xanzy/go-gitlab v0.86.0/go.mod h1:5ryv+MnpZStBH8I/77HuQBsMbBGANtVpLWC15qOjWAw=
+github.com/xanzy/go-gitlab v0.90.0 h1:j8ZUHfLfXdnC+B8njeNaW/kM44c1zw8fiuNj7D+qQN8=
+github.com/xanzy/go-gitlab v0.90.0/go.mod h1:5ryv+MnpZStBH8I/77HuQBsMbBGANtVpLWC15qOjWAw=
 github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
 github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
 github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
@@ -1684,12 +1363,9 @@ github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6
 github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
 github.com/xen0n/gosmopolitan v1.2.1 h1:3pttnTuFumELBRSh+KQs1zcz4fN6Zy7aB0xlnQSn1Iw=
 github.com/xen0n/gosmopolitan v1.2.1/go.mod h1:JsHq/Brs1o050OOdmzHeOr0N7OtlnKRAGAsElF8xBQA=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
 github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
 github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o=
@@ -1712,28 +1388,17 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
 github.com/zalando/go-keyring v0.2.2 h1:f0xmpYiSrHtSNAVgwip93Cg8tuF45HJM6rHq/A5RI/4=
 github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs=
 github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
 gitlab.com/bosi/decorder v0.4.0 h1:HWuxAhSxIvsITcXeP+iIRg9d1cVfvVkmlF7M68GaoDY=
 gitlab.com/bosi/decorder v0.4.0/go.mod h1:xarnteyUoJiOTEldDysquWKTVDCKo2TOIOIibSuWqOg=
 go-simpler.org/assert v0.6.0 h1:QxSrXa4oRuo/1eHMXSBFHKvJIpWABayzKldqZyugG7E=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
 go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
 go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
 go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
 go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE=
 go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -1751,66 +1416,62 @@ go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiM
 go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
 go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.step.sm/crypto v0.32.2 h1:EhJpFRNgU3RaNEO3WZ62Kn2gF9NWNglNG4DvSPeuiTs=
-go.step.sm/crypto v0.32.2/go.mod h1:JwarCq+Sn6N8IbRSKfSJfjUNKfO8c4N1mcNxYXuxXzc=
+go.step.sm/crypto v0.35.0 h1:0N6ks5n1sdv4+biJMUTdqHjpTBKKN9zNqqBdOJIyHe4=
+go.step.sm/crypto v0.35.0/go.mod h1:sBsrpVReoxmiLexbWL+vQRxZd6Gq4YBj/IRSUH+DZe4=
 go.tmz.dev/musttag v0.7.2 h1:1J6S9ipDbalBSODNT5jCep8dhZyMr4ttnjQagmGYR5s=
 go.tmz.dev/musttag v0.7.2/go.mod h1:m6q5NiiSKMnQYokefa2xGoyoXnrswCbJ0AWYzf4Zs28=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
 go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
 go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
 go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
 go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c=
 go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk=
+go4.org/intern v0.0.0-20211027215823-ae77deb06f29 h1:UXLjNohABv4S58tHmeuIZDO6e3mHpW2Dx33gaNt03LE=
+go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA=
+go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
+go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 h1:FyBZqvoA/jbNzuAWLQE2kG820zMAkcilx6BMjGbL/E4=
+go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
 gocloud.dev v0.33.0 h1:ET5z49jm1+eUhY5BkuGk2d7czfgGeXKd4vtg1Jcg9OQ=
 gocloud.dev v0.33.0/go.mod h1:z6W8qorjrfM09H8t1MDk8KLPj3Xi26aFBzDKAHWIgLU=
 gocloud.dev/docstore/mongodocstore v0.33.0 h1:cM6vEyyBq8O5TgerGDtHj1ZDsvqyWrrvyMCEfWFsteA=
 gocloud.dev/docstore/mongodocstore v0.33.0/go.mod h1:P5kPTTQIYM5sDACG1jt/H7T1Pf+Ms5UrGBBYQck4Kr4=
 gocloud.dev/pubsub/kafkapubsub v0.33.0 h1:cSE9mmkisZ3/s01SIun6mXJJNwxMA/6XvyozlwgQQWo=
 gocloud.dev/pubsub/kafkapubsub v0.33.0/go.mod h1:UfHC/CNXpZTHW3pMJgjaAnm4Hz4vB7N18A4K81rKoV4=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
 golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
 golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
 golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
 golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1865,29 +1526,19 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1902,8 +1553,8 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/
 golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -1931,10 +1582,12 @@ golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
 golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
 golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1948,7 +1601,6 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
@@ -1983,10 +1635,7 @@ golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1996,38 +1645,22 @@ golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191119060738-e882bf8e40c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -2036,23 +1669,16 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -2062,9 +1688,7 @@ golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -2081,7 +1705,6 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -2098,6 +1721,7 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -2107,18 +1731,22 @@ golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
 golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
 golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
 golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2132,21 +1760,19 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
 golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
 golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -2163,9 +1789,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
 golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -2173,10 +1797,8 @@ golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191118222007-07fc4c7f2b98/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -2231,6 +1853,7 @@ golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
 golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
 golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
@@ -2246,7 +1869,6 @@ golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3j
 golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
 gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
 gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
-google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
 google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -2269,7 +1891,6 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q
 google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
 google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
 google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
 google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=
 google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
 google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
@@ -2290,8 +1911,8 @@ google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6r
 google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
 google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
 google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
-google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw=
-google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk=
+google.golang.org/api v0.138.0 h1:K/tVp05MxNVbHShRw9m7e9VJGdagNeTdMzqPH7AUqr0=
+google.golang.org/api v0.138.0/go.mod h1:4xyob8CxC+0GChNBvEUAk8VBKNvYOTWM9T3v3UfRxuY=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -2300,13 +1921,11 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
 google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
 google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
 google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
 google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
@@ -2315,7 +1934,6 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx
 google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
 google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
 google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
 google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
 google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
 google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -2336,7 +1954,6 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -2391,20 +2008,16 @@ google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljW
 google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
 google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
 google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
-google.golang.org/genproto v0.0.0-20230731193218-e0aa005b6bdf h1:v5Cf4E9+6tawYrs/grq1q1hFpGtzlGFzgWHqwt6NFiU=
-google.golang.org/genproto v0.0.0-20230731193218-e0aa005b6bdf/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
-google.golang.org/genproto/googleapis/api v0.0.0-20230731193218-e0aa005b6bdf h1:xkVZ5FdZJF4U82Q/JS+DcZA83s/GRVL+QrFMlexk9Yo=
-google.golang.org/genproto/googleapis/api v0.0.0-20230731193218-e0aa005b6bdf/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230731193218-e0aa005b6bdf h1:guOdSPaeFgN+jEJwTo1dQ71hdBm+yKSCCKuTRkJzcVo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230731193218-e0aa005b6bdf/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I=
-google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
+google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
+google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
+google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -2454,31 +2067,23 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw
 google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
 google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/DataDog/dd-trace-go.v1 v1.53.0 h1:Rc2Z3tspHI+PsspsfO4wZsSqL8l658yAUo7lFUSnPD0=
+gopkg.in/DataDog/dd-trace-go.v1 v1.53.0/go.mod h1:m0tVxCbhcuHsiHhhaw749KeQcVjRZOWBPMa6wHL6LBQ=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
 gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
 gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
 gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
 gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg=
@@ -2491,7 +2096,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
 gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -2507,11 +2111,7 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
-gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk=
+gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -2521,53 +2121,30 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.4.5 h1:YGD4H+SuIOOqsyoLOpZDWcieM28W47/zRO7f+9V3nvo=
 honnef.co/go/tools v0.4.5/go.mod h1:GUV+uIBCLpdf0/v6UhHHG/yzI/z6qPskBeQCjcNB96k=
-k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
-k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
-k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+inet.af/netaddr v0.0.0-20220811202034-502d2d690317 h1:U2fwK6P2EqmopP/hFLTOAjWTki0qgd4GMJn5X8wOleU=
+inet.af/netaddr v0.0.0-20220811202034-502d2d690317/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k=
 k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y=
 k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg=
-k8s.io/apiextensions-apiserver v0.25.4 h1:7hu9pF+xikxQuQZ7/30z/qxIPZc2J1lFElPtr7f+B6U=
-k8s.io/apiextensions-apiserver v0.25.4/go.mod h1:bkSGki5YBoZWdn5pWtNIdGvDrrsRWlmnvl9a+tAw5vQ=
-k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apiextensions-apiserver v0.26.5 h1:VJ946z9RjyCPn3qiz4Kus/UYjCRrdn1xUvEsJFvN5Yo=
+k8s.io/apiextensions-apiserver v0.26.5/go.mod h1:Olsde7ZNWnyz9rsL13iXYXmL1h7kWujtKeC3yWVCDPo=
 k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM=
 k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
-k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
-k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
-k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
-k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
-k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
 k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8=
 k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48=
-k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w=
-k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI=
-k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
-k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
-k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
-k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
-k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/code-generator v0.26.5 h1:0p350mqxkbs29h8/yF4AMilApLVUhnRx3EAfhTWR5fY=
+k8s.io/code-generator v0.26.5/go.mod h1:iWTVFxfBX+RYe0bXjKqSM83KJF8eimor/izQInvq/60=
 k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 h1:iu3o/SxaHVI7tKPtkGzD3M9IzrE21j+CUKH98NQJ8Ms=
 k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
 k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
 k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5 h1:azYPdzztXxPSa8wb+hksEKayiz0o+PPisO/d+QhWnoo=
 k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5/go.mod h1:kzo02I3kQ4BTtEfVLaPbjvCkX97YqGve33wzlb3fofQ=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU=
 k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-knative.dev/pkg v0.0.0-20230518105712-dfb4bf04635d h1:jGvqcYwyh7O1V2PZfd0poheaOOvy6x0z+KQxHXOgskc=
-knative.dev/pkg v0.0.0-20230518105712-dfb4bf04635d/go.mod h1:WLTHHHc/dhrDmhx03nm5F8AeqpNx3RQGggMI2N0/oks=
+knative.dev/pkg v0.0.0-20230718152110-aef227e72ead h1:2dDzorpKuVZW3Qp7TbirMMq16FbId8f6bacQFX8jXLw=
+knative.dev/pkg v0.0.0-20230718152110-aef227e72ead/go.mod h1:WmrwRV/P+hGHoMraAEfwg6ec+fBTf+Obu41v354Iabc=
 mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E=
 mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js=
 mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
@@ -2579,17 +2156,12 @@ mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d/go.mod h1:IeHQjmn6TOD+e4Z3RF
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/release-utils v0.7.4 h1:17LmJrydpUloTCtaoWj95uKlcrUp4h2A9Sa+ZL+lV9w=
 sigs.k8s.io/release-utils v0.7.4/go.mod h1:JEt2QPHItd5Pg2UKLAU8PEaSlF4bUjCZimpxFDgymVU=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
 sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
 sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
 sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
 sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go
index 1594af87e0..cac7139c47 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go
@@ -254,11 +254,11 @@ func (KeyOperationAttestation_AttestationFormat) EnumDescriptor() ([]byte, []int
 // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
 // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT].
 //
-// Algorithms beginning with "RSA_SIGN_" are usable with
+// Algorithms beginning with `RSA_SIGN_` are usable with
 // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
 // [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN].
 //
-// The fields in the name after "RSA_SIGN_" correspond to the following
+// The fields in the name after `RSA_SIGN_` correspond to the following
 // parameters: padding algorithm, modulus bit length, and digest algorithm.
 //
 // For PSS, the salt length used is equal to the length of digest
@@ -266,25 +266,25 @@ func (KeyOperationAttestation_AttestationFormat) EnumDescriptor() ([]byte, []int
 // [RSA_SIGN_PSS_2048_SHA256][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm.RSA_SIGN_PSS_2048_SHA256]
 // will use PSS with a salt length of 256 bits or 32 bytes.
 //
-// Algorithms beginning with "RSA_DECRYPT_" are usable with
+// Algorithms beginning with `RSA_DECRYPT_` are usable with
 // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
 // [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT].
 //
-// The fields in the name after "RSA_DECRYPT_" correspond to the following
+// The fields in the name after `RSA_DECRYPT_` correspond to the following
 // parameters: padding algorithm, modulus bit length, and digest algorithm.
 //
-// Algorithms beginning with "EC_SIGN_" are usable with
+// Algorithms beginning with `EC_SIGN_` are usable with
 // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
 // [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN].
 //
-// The fields in the name after "EC_SIGN_" correspond to the following
+// The fields in the name after `EC_SIGN_` correspond to the following
 // parameters: elliptic curve, digest algorithm.
 //
-// Algorithms beginning with "HMAC_" are usable with
+// Algorithms beginning with `HMAC_` are usable with
 // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
 // [MAC][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.MAC].
 //
-// The suffix following "HMAC_" corresponds to the hash algorithm being used
+// The suffix following `HMAC_` corresponds to the hash algorithm being used
 // (e.g. SHA256).
 //
 // For more information, see [Key purposes and algorithms]
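
The generated comment above documents the naming convention for KMS algorithm identifiers. As a minimal sketch (parseRSASignAlgorithm is a hypothetical helper, not part of the vendored client), an RSA_SIGN_* name such as RSA_SIGN_PSS_2048_SHA256 decomposes into padding, modulus length, and digest:

```go
// Hypothetical helper, for illustration only: split an RSA_SIGN_* algorithm
// name into the padding scheme, modulus bit length, and digest documented in
// the generated comment above. Not part of the KMS client API.
package main

import (
	"fmt"
	"strings"
)

func parseRSASignAlgorithm(name string) (padding, modulusBits, digest string, ok bool) {
	rest, found := strings.CutPrefix(name, "RSA_SIGN_")
	if !found {
		return "", "", "", false
	}
	parts := strings.Split(rest, "_")
	if len(parts) != 3 {
		// RAW variants and other shapes are out of scope for this sketch.
		return "", "", "", false
	}
	return parts[0], parts[1], parts[2], true
}

func main() {
	// Prints: PSS 2048 SHA256 true
	fmt.Println(parseRSASignAlgorithm("RSA_SIGN_PSS_2048_SHA256"))
}
```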
diff --git a/vendor/cloud.google.com/go/kms/internal/version.go b/vendor/cloud.google.com/go/kms/internal/version.go
index 43a2a84ef8..7dd6a0aa86 100644
--- a/vendor/cloud.google.com/go/kms/internal/version.go
+++ b/vendor/cloud.google.com/go/kms/internal/version.go
@@ -15,4 +15,4 @@
 package internal
 
 // Version is the current tagged release of the library.
-const Version = "1.15.0"
+const Version = "1.15.1"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
index 8206a57c77..cd7c0da31d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
@@ -1,5 +1,11 @@
 # Release History
 
+## 1.7.1 (2023-08-14)
+
+### Bugs Fixed
+
+* Enable TLS renegotiation in the default transport policy.
+
 ## 1.7.0 (2023-07-12)
 
 ### Features Added
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
index 53c8d353ad..577435a49d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
@@ -32,5 +32,5 @@ const (
 	Module = "azcore"
 
 	// Version is the semantic version (see http://semver.org) of this module.
-	Version = "v1.7.0"
+	Version = "v1.7.1"
 )
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
index 869bed5118..dbb9fa7f86 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
@@ -28,7 +28,8 @@ func init() {
 		TLSHandshakeTimeout:   10 * time.Second,
 		ExpectContinueTimeout: 1 * time.Second,
 		TLSClientConfig: &tls.Config{
-			MinVersion: tls.VersionTLS12,
+			MinVersion:    tls.VersionTLS12,
+			Renegotiation: tls.RenegotiateFreelyAsClient,
 		},
 	}
 	defaultHTTPClient = &http.Client{
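
The change above enables TLS renegotiation in azcore's default transport. A hedged sketch of the same settings applied to a caller-built http.Client (standard library only; newClient is an illustrative helper, and the timeouts mirror the values shown in the hunk):

```go
// Sketch only: mirror the TLS settings the azcore default transport now uses
// (TLS 1.2 minimum plus client-side renegotiation) on a hand-built client.
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func newClient() *http.Client {
	transport := &http.Transport{
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		TLSClientConfig: &tls.Config{
			MinVersion: tls.VersionTLS12,
			// Some services request renegotiation during the connection;
			// allowing it as a client avoids handshake failures there.
			Renegotiation: tls.RenegotiateFreelyAsClient,
		},
	}
	return &http.Client{Transport: transport}
}

func main() {
	fmt.Printf("transport configured: %T\n", newClient().Transport)
}
```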
diff --git a/vendor/github.com/DataDog/appsec-internal-go/LICENSE b/vendor/github.com/DataDog/appsec-internal-go/LICENSE
new file mode 100644
index 0000000000..9301dd7ab9
--- /dev/null
+++ b/vendor/github.com/DataDog/appsec-internal-go/LICENSE
@@ -0,0 +1,200 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016-present Datadog, Inc.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/DataDog/appsec-internal-go/httpsec/client_ip.go b/vendor/github.com/DataDog/appsec-internal-go/httpsec/client_ip.go
new file mode 100644
index 0000000000..3761a77399
--- /dev/null
+++ b/vendor/github.com/DataDog/appsec-internal-go/httpsec/client_ip.go
@@ -0,0 +1,126 @@
+package httpsec
+
+import (
+	"net"
+	"net/textproto"
+	"strings"
+
+	"github.com/DataDog/appsec-internal-go/netip"
+)
+
+const (
+	// RemoteIPTag is the tag name used for the remote HTTP request IP address.
+	RemoteIPTag = "network.client.ip"
+	// ClientIPTag is the tag name used for the client IP deduced from the HTTP
+	// request headers with ClientIP().
+	ClientIPTag = "http.client_ip"
+)
+
+// ClientIPTags returns the resulting Datadog span tags `http.client_ip`
+// containing the client IP and `network.client.ip` containing the remote IP.
+// The tags are present only if a valid ip address has been returned by
+// ClientIP().
+func ClientIPTags(remoteIP, clientIP netip.Addr) (tags map[string]string) {
+	remoteIPValid := remoteIP.IsValid()
+	clientIPValid := clientIP.IsValid()
+	if !remoteIPValid && !clientIPValid {
+		return nil
+	}
+
+	tags = make(map[string]string, 2)
+	if remoteIPValid {
+		tags[RemoteIPTag] = remoteIP.String()
+	}
+	if clientIPValid {
+		tags[ClientIPTag] = clientIP.String()
+	}
+	return tags
+}
+
+// ClientIP returns the first public IP address found in the given headers. If
+// none is present, it returns the first valid IP address present, possibly
+// being a local IP address. The remote address, when valid, is used as fallback
+// when no IP address has been found at all.
+func ClientIP(hdrs map[string][]string, hasCanonicalHeaders bool, remoteAddr string, monitoredHeaders []string) (remoteIP, clientIP netip.Addr) {
+	// Walk IP-related headers
+	var foundIP netip.Addr
+headersLoop:
+	for _, headerName := range monitoredHeaders {
+		if hasCanonicalHeaders {
+			headerName = textproto.CanonicalMIMEHeaderKey(headerName)
+		}
+
+		headerValues, exists := hdrs[headerName]
+		if !exists {
+			continue // this monitored header is not present
+		}
+
+		// Assuming a list of comma-separated IP addresses, split them and build
+		// the list of values to try to parse as IP addresses
+		var ips []string
+		for _, ip := range headerValues {
+			ips = append(ips, strings.Split(ip, ",")...)
+		}
+
+		// Look for the first valid or global IP address in the comma-separated list
+		for _, ipstr := range ips {
+			ip := parseIP(strings.TrimSpace(ipstr))
+			if !ip.IsValid() {
+				continue
+			}
+			// Replace foundIP if still not valid in order to keep the oldest
+			if !foundIP.IsValid() {
+				foundIP = ip
+			}
+			if isGlobal(ip) {
+				foundIP = ip
+				break headersLoop
+			}
+		}
+	}
+
+	// Decide which IP address is the client one by starting with the remote IP
+	if ip := parseIP(remoteAddr); ip.IsValid() {
+		remoteIP = ip
+		clientIP = ip
+	}
+
+	// The IP address found in the headers supersedes a private remote IP address.
+	if foundIP.IsValid() && !isGlobal(remoteIP) || isGlobal(foundIP) {
+		clientIP = foundIP
+	}
+
+	return remoteIP, clientIP
+}
+
+func parseIP(s string) netip.Addr {
+	if ip, err := netip.ParseAddr(s); err == nil {
+		return ip
+	}
+	if h, _, err := net.SplitHostPort(s); err == nil {
+		if ip, err := netip.ParseAddr(h); err == nil {
+			return ip
+		}
+	}
+	return netip.Addr{}
+}
+
+var ipv6SpecialNetworks = [...]netip.Prefix{
+	netip.MustParsePrefix("fec0::/10"), // site local
+}
+
+func isGlobal(ip netip.Addr) bool {
+	// IsPrivate also checks for ipv6 ULA.
+	// We make sure these addresses are not considered public, hence not global.
+	// See https://www.rfc-editor.org/rfc/rfc4193.txt for more details.
+	isGlobal := ip.IsValid() && !ip.IsPrivate() && !ip.IsLoopback() && !ip.IsLinkLocalUnicast()
+	if !isGlobal || !ip.Is6() {
+		return isGlobal
+	}
+	for _, n := range ipv6SpecialNetworks {
+		if n.Contains(ip) {
+			return false
+		}
+	}
+	return isGlobal
+}
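
A short, hypothetical usage sketch of the exported ClientIP and ClientIPTags helpers above (the monitored header list and addresses are example values, not library defaults):

```go
package main

import (
	"fmt"

	"github.com/DataDog/appsec-internal-go/httpsec"
)

func main() {
	hdrs := map[string][]string{
		// Already in canonical MIME form, so hasCanonicalHeaders is true below.
		"X-Forwarded-For": {"10.0.0.1, 203.0.113.7"},
	}
	// 203.0.113.7 is the first global address in the header, so it is chosen
	// as the client IP; the (global) remote address stays the remote IP.
	remoteIP, clientIP := httpsec.ClientIP(hdrs, true, "192.0.2.10:4321", []string{"x-forwarded-for"})
	fmt.Println(httpsec.ClientIPTags(remoteIP, clientIP))
	// map[http.client_ip:203.0.113.7 network.client.ip:192.0.2.10]
}
```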
diff --git a/vendor/github.com/DataDog/appsec-internal-go/netip/ip_default.go b/vendor/github.com/DataDog/appsec-internal-go/netip/ip_default.go
new file mode 100644
index 0000000000..f2906a0cb1
--- /dev/null
+++ b/vendor/github.com/DataDog/appsec-internal-go/netip/ip_default.go
@@ -0,0 +1,32 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+//go:build !go1.19
+// +build !go1.19
+
+package netip
+
+import "inet.af/netaddr"
+
+// Addr wraps an netaddr.IP value
+type Addr = netaddr.IP
+
+// Prefix wraps an netaddr.IPPrefix value
+type Prefix = netaddr.IPPrefix
+
+var (
+	// ParseAddr wraps the netaddr.ParseIP function
+	ParseAddr = netaddr.ParseIP
+	// ParsePrefix wraps the netaddr.ParseIPPrefix function
+	ParsePrefix = netaddr.ParseIPPrefix
+	// MustParsePrefix wraps the netaddr.MustParseIPPrefix function
+	MustParsePrefix = netaddr.MustParseIPPrefix
+	// MustParseAddr wraps the netaddr.MustParseIP function
+	MustParseAddr = netaddr.MustParseIP
+	// IPv4 wraps the netaddr.IPv4 function
+	IPv4 = netaddr.IPv4
+	// AddrFrom16 wraps the netaddr.IPv6Raw function
+	AddrFrom16 = netaddr.IPv6Raw
+)
diff --git a/vendor/github.com/DataDog/appsec-internal-go/netip/ip_go119.go b/vendor/github.com/DataDog/appsec-internal-go/netip/ip_go119.go
new file mode 100644
index 0000000000..2c185de6f6
--- /dev/null
+++ b/vendor/github.com/DataDog/appsec-internal-go/netip/ip_go119.go
@@ -0,0 +1,34 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+//go:build go1.19
+// +build go1.19
+
+package netip
+
+import "net/netip"
+
+// Addr wraps a netip.Addr value
+type Addr = netip.Addr
+
+// Prefix wraps a netip.Prefix value
+type Prefix = netip.Prefix
+
+var (
+	// ParseAddr wraps the netip.ParseAddr function
+	ParseAddr = netip.ParseAddr
+	// MustParsePrefix wraps the netip.MustParsePrefix function
+	MustParsePrefix = netip.MustParsePrefix
+	// MustParseAddr wraps the netip.MustParseAddr function
+	MustParseAddr = netip.MustParseAddr
+	// AddrFrom16 wraps the netIP.AddrFrom16 function
+	AddrFrom16 = netip.AddrFrom16
+)
+
+// IPv4 wraps the netip.AddrFrom4 function
+func IPv4(a, b, c, d byte) Addr {
+	e := [4]byte{a, b, c, d}
+	return netip.AddrFrom4(e)
+}
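
An illustrative consumer of the wrapper package above; assuming the aliases shown in both build variants, the same source compiles against net/netip on go1.19+ and inet.af/netaddr otherwise:

```go
package main

import (
	"fmt"

	"github.com/DataDog/appsec-internal-go/netip"
)

func main() {
	addr, err := netip.ParseAddr("2001:db8::1")
	if err != nil {
		panic(err)
	}
	// Both backends expose Is6 on the address type and Contains on the prefix type.
	fmt.Println(addr.Is6(), netip.MustParsePrefix("2001:db8::/32").Contains(addr))
}
```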
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/LICENSE
@@ -0,0 +1,200 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016-present Datadog, Inc.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/cache.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/cache.go
new file mode 100644
index 0000000000..3993390d2e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/cache.go
@@ -0,0 +1,90 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package obfuscate
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/outcaste-io/ristretto"
+)
+
+// measuredCache is a wrapper on top of *ristretto.Cache which additionally
+// sends metrics (hits and misses) every 10 seconds.
+type measuredCache struct {
+	*ristretto.Cache
+
+	// close allows sending shutdown notification.
+	close  chan struct{}
+	statsd StatsClient
+}
+
+// Close gracefully closes the cache when active.
+func (c *measuredCache) Close() {
+	if c.Cache == nil {
+		return
+	}
+	c.close <- struct{}{}
+	<-c.close
+}
+
+func (c *measuredCache) statsLoop() {
+	defer func() {
+		c.close <- struct{}{}
+	}()
+	tick := time.NewTicker(10 * time.Second)
+	defer tick.Stop()
+	mx := c.Cache.Metrics
+	for {
+		select {
+		case <-tick.C:
+			c.statsd.Gauge("datadog.trace_agent.ofuscation.sql_cache.hits", float64(mx.Hits()), nil, 1)     //nolint:errcheck
+			c.statsd.Gauge("datadog.trace_agent.ofuscation.sql_cache.misses", float64(mx.Misses()), nil, 1) //nolint:errcheck
+		case <-c.close:
+			c.Cache.Close()
+			return
+		}
+	}
+}
+
+type cacheOptions struct {
+	On     bool
+	Statsd StatsClient
+}
+
+// newMeasuredCache returns a new measuredCache.
+func newMeasuredCache(opts cacheOptions) *measuredCache {
+	if !opts.On {
+		// a nil *ristretto.Cache is a no-op cache
+		return &measuredCache{}
+	}
+	cfg := &ristretto.Config{
+		// We know that the maximum allowed resource length is 5K. This means that
+		// in 5MB we can store a minimum of 1000 queries.
+		MaxCost: 5000000,
+
+		// In an approximated worst-case scenario where the cache is filled with small
+		// queries of an average length of 11 ("LOCK TABLES"), we would be able
+		// to fit 476K of them into 5MB of cost.
+		//
+		// We average it to 500K and multiply 10x as the documentation recommends.
+		NumCounters: 500000 * 10,
+
+		BufferItems: 64,   // default recommended value
+		Metrics:     true, // enable hit/miss counters
+	}
+	cache, err := ristretto.NewCache(cfg)
+	if err != nil {
+		panic(fmt.Errorf("Error starting obfuscator query cache: %v", err))
+	}
+	c := measuredCache{
+		close:  make(chan struct{}),
+		statsd: opts.Statsd,
+		Cache:  cache,
+	}
+	go c.statsLoop()
+	return &c
+}
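The cache itself is unexported, but it is switched on through the exported configuration defined further down in obfuscate.go. A hedged sketch of that wiring; the logStats type is a stand-in StatsClient for illustration, not part of this package:

package main

import (
	"log"

	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

// logStats is a stand-in StatsClient that simply logs gauge values.
type logStats struct{}

func (logStats) Gauge(name string, value float64, tags []string, rate float64) error {
	log.Printf("gauge %s=%f tags=%v rate=%f", name, value, tags, rate)
	return nil
}

func main() {
	// Enabling SQL.Cache turns on the measured ristretto cache above; its
	// hit/miss gauges are then reported through the provided StatsClient.
	o := obfuscate.NewObfuscator(obfuscate.Config{
		SQL:    obfuscate.SQLConfig{Cache: true},
		Statsd: logStats{},
	})
	defer o.Stop()
}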
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/credit_cards.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/credit_cards.go
new file mode 100644
index 0000000000..03adf1544a
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/credit_cards.go
@@ -0,0 +1,211 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package obfuscate
+
+// IsCardNumber checks if b could be a credit card number by checking the digit count and IIN prefix.
+// If validateLuhn is true, the Luhn checksum is also applied to potential candidates.
+func IsCardNumber(b string, validateLuhn bool) (ok bool) {
+	//
+	// Just credit card numbers for now, based on:
+	// • https://baymard.com/checkout-usability/credit-card-patterns
+	// • https://www.regular-expressions.info/creditcard.html
+	//
+	if len(b) == 0 {
+		return false
+	}
+	if len(b) < 12 {
+		// fast path: can not be a credit card
+		return false
+	}
+	if b[0] != ' ' && b[0] != '-' && (b[0] < '0' || b[0] > '9') {
+		// fast path: only valid characters are 0-9, space (" ") and dash("-")
+		return false
+	}
+	prefix := 0                 // holds up to b[:6] digits as a numeric value (for example []byte("523") becomes int(523)) for checking prefixes
+	count := 0                  // counts digits encountered
+	foundPrefix := false        // reports whether we've detected a valid prefix
+	recdigit := func(_ byte) {} // callback on each found digit; no-op by default (we only need this for Luhn)
+	if validateLuhn {
+		// we need Luhn checksum validation, so we have to take additional action
+		// and record all digits found
+		buf := make([]byte, 0, len(b))
+		recdigit = func(b byte) { buf = append(buf, b) }
+		defer func() {
+			if !ok {
+				// if IsCardNumber returned false, it means that b cannot be
+				// a credit card number
+				return
+			}
+			// potentially a credit card number, run the Luhn checksum
+			ok = luhnValid(buf)
+		}()
+	}
+loop:
+	for i := range b {
+		// We traverse and search b for a valid IIN credit card prefix based
+		// on the digits found, ignoring spaces and dashes.
+		// Source: https://www.regular-expressions.info/creditcard.html
+		switch b[i] {
+		case ' ', '-':
+			// ignore space (' ') and dash ('-')
+			continue loop
+		}
+		if b[i] < '0' || b[i] > '9' {
+			// not a 0 to 9 digit; can not be a credit card number; abort
+			return false
+		}
+		count++
+		recdigit(b[i])
+		if !foundPrefix {
+			// we have not yet found a valid prefix so we convert the digits
+			// that we have so far into a numeric value:
+			prefix = prefix*10 + (int(b[i]) - '0')
+			maybe, yes := validCardPrefix(prefix)
+			if yes {
+				// we've found a valid prefix; continue counting
+				foundPrefix = true
+			} else if !maybe {
+				// this is not a valid prefix and we should not continue looking
+				return false
+			}
+		}
+		if count > 16 {
+			// too many digits
+			return false
+		}
+	}
+	if count < 12 {
+		// too few digits
+		return false
+	}
+	return foundPrefix
+}
+
+// luhnValid reports whether the number represented by the given bytes satisfies the Luhn checksum.
+// str is expected to contain exclusively digits at all positions.
+//
+// See:
+// • https://en.wikipedia.org/wiki/Luhn_algorithm
+// • https://dev.to/shiraazm/goluhn-a-simple-library-for-generating-calculating-and-verifying-luhn-numbers-588j
+func luhnValid(str []byte) bool {
+	var (
+		sum int
+		alt bool
+	)
+	n := len(str)
+	for i := n - 1; i > -1; i-- {
+		if str[i] < '0' || str[i] > '9' {
+			return false // not a number!
+		}
+		mod := int(str[i] - 0x30) // convert byte to int
+		if alt {
+			mod *= 2
+			if mod > 9 {
+				mod = (mod % 10) + 1
+			}
+		}
+		alt = !alt
+		sum += mod
+	}
+	return sum%10 == 0
+}
+
+// validCardPrefix validates whether b is a valid card prefix. Maybe returns true if
+// the prefix could be an IIN once more digits are revealed and yes reports whether
+// b is a fully valid IIN.
+//
+// If yes is false and maybe is false, there is no reason to continue searching. The
+// prefix is invalid.
+//
+// IMPORTANT: If adding new prefixes to this algorithm, make sure that you update
+// the "maybe" clauses above, i.e. the ones covering prefixes shorter than the one
+// you are adding. These are the cases which return (true, false).
+//
+// TODO(x): this whole code could be code generated from a prettier data structure.
+// Ultimately, it could even be user-configurable.
+func validCardPrefix(n int) (maybe, yes bool) {
+	// Validates IIN prefix possibilities
+	// Source: https://www.regular-expressions.info/creditcard.html
+	if n > 699999 {
+		// too long for any known prefix; stop looking
+		return false, false
+	}
+	if n < 10 {
+		switch n {
+		case 1, 4:
+			// 1 & 4 are valid IIN
+			return false, true
+		case 2, 3, 5, 6:
+			// 2, 3, 5, 6 could be the start of valid IIN
+			return true, false
+		default:
+			// invalid IIN
+			return false, false
+		}
+	}
+	if n < 100 {
+		if (n >= 34 && n <= 39) ||
+			(n >= 51 && n <= 55) ||
+			n == 62 ||
+			n == 65 {
+			// 34-39, 51-55, 62, 65 are valid IIN
+			return false, true
+		}
+		if n == 30 || n == 63 || n == 64 || n == 50 || n == 60 ||
+			(n >= 22 && n <= 27) || (n >= 56 && n <= 58) || (n >= 60 && n <= 69) {
+			// 30, 63, 64, 50, 60, 22-27, 56-58, 60-69 may end up as valid IIN
+			return true, false
+		}
+	}
+	if n < 1000 {
+		if (n >= 300 && n <= 305) ||
+			(n >= 644 && n <= 649) ||
+			n == 309 ||
+			n == 636 {
+			// 300-305, 309, 636, 644-649 are valid IIN
+			return false, true
+		}
+		if (n >= 352 && n <= 358) || n == 501 || n == 601 ||
+			(n >= 222 && n <= 272) || (n >= 500 && n <= 509) ||
+			(n >= 560 && n <= 589) || (n >= 600 && n <= 699) {
+			// 352-358, 501, 601, 222-272, 500-509, 560-589, 600-699 may be a 4 or 6 digit IIN prefix
+			return true, false
+		}
+	}
+	if n < 10000 {
+		if (n >= 3528 && n <= 3589) ||
+			n == 5019 ||
+			n == 6011 {
+			// 3528-3589, 5019, 6011 are valid IINs
+			return false, true
+		}
+		if (n >= 2221 && n <= 2720) || (n >= 5000 && n <= 5099) ||
+			(n >= 5600 && n <= 5899) || (n >= 6000 && n <= 6999) {
+			// maybe a 6-digit IIN
+			return true, false
+		}
+	}
+	if n < 100000 {
+		if (n >= 22210 && n <= 27209) ||
+			(n >= 50000 && n <= 50999) ||
+			(n >= 56000 && n <= 58999) ||
+			(n >= 60000 && n <= 69999) {
+			// maybe a 6-digit IIN
+			return true, false
+		}
+	}
+	if n < 1000000 {
+		if (n >= 222100 && n <= 272099) ||
+			(n >= 500000 && n <= 509999) ||
+			(n >= 560000 && n <= 589999) ||
+			(n >= 600000 && n <= 699999) {
+			// 222100-272099, 500000-509999, 560000-589999, 600000-699999 are valid IIN
+			return false, true
+		}
+	}
+	// unknown IIN
+	return false, false
+}
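IsCardNumber can be exercised directly; a short sketch using the well-known Visa test number (the inputs are illustrative):

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

func main() {
	// Valid IIN prefix (4) and a passing Luhn checksum; separators are tolerated.
	fmt.Println(obfuscate.IsCardNumber("4111 1111 1111 1111", true)) // true
	// Flipping the last digit keeps the prefix valid but breaks the checksum.
	fmt.Println(obfuscate.IsCardNumber("4111 1111 1111 1112", true)) // false
	// Fewer than 12 digits is rejected on the fast path.
	fmt.Println(obfuscate.IsCardNumber("4111", true)) // false
}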
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/http.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/http.go
new file mode 100644
index 0000000000..d9a00084f0
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/http.go
@@ -0,0 +1,60 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package obfuscate
+
+import (
+	"net/url"
+	"strings"
+)
+
+// obfuscateUserInfo returns a URL string that obfuscates any userinfo by setting url.User to nil.
+func obfuscateUserInfo(val string) string {
+	u, err := url.Parse(val)
+	if err != nil {
+		return val
+	}
+	u.User = nil
+	return u.String()
+}
+
+// ObfuscateURLString obfuscates the given URL. It must be a valid URL.
+func (o *Obfuscator) ObfuscateURLString(val string) string {
+	if !o.opts.HTTP.RemoveQueryString && !o.opts.HTTP.RemovePathDigits {
+		// nothing to do
+		return obfuscateUserInfo(val)
+	}
+	u, err := url.Parse(val)
+	if err != nil {
+		// should not happen for valid URLs, but it is better to obfuscate everything
+		// rather than expose sensitive information when this option is on.
+		return "?"
+	}
+	u.User = nil
+	if o.opts.HTTP.RemoveQueryString && u.RawQuery != "" {
+		u.ForceQuery = true // add the '?'
+		u.RawQuery = ""
+	}
+	if o.opts.HTTP.RemovePathDigits {
+		segs := strings.Split(u.Path, "/")
+		var changed bool
+		for i, seg := range segs {
+			for _, ch := range []byte(seg) {
+				if ch >= '0' && ch <= '9' {
+					// we can not set the question mark directly here because the url
+					// package will escape it into %3F, so we use this placeholder and
+					// replace it further down.
+					segs[i] = "/REDACTED/"
+					changed = true
+					break
+				}
+			}
+		}
+		if changed {
+			u.Path = strings.Join(segs, "/")
+		}
+	}
+	return strings.Replace(u.String(), "/REDACTED/", "?", -1)
+}
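A sketch of ObfuscateURLString with both HTTP options enabled; the URL is illustrative. Userinfo is always dropped, the query string is reduced to a bare "?", and any path segment containing a digit is redacted:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

func main() {
	o := obfuscate.NewObfuscator(obfuscate.Config{
		HTTP: obfuscate.HTTPConfig{
			RemoveQueryString: true,
			RemovePathDigits:  true,
		},
	})
	defer o.Stop()

	// Credentials and the token are removed; "/1234" loses its digits.
	fmt.Println(o.ObfuscateURLString("https://alice:hunter2@api.example.com/users/1234/orders?token=secret"))
}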
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json.go
new file mode 100644
index 0000000000..aef236f312
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json.go
@@ -0,0 +1,201 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package obfuscate
+
+import (
+	"strconv"
+	"strings"
+)
+
+// ObfuscateMongoDBString obfuscates the given MongoDB JSON query.
+func (o *Obfuscator) ObfuscateMongoDBString(cmd string) string {
+	return obfuscateJSONString(cmd, o.mongo)
+}
+
+// ObfuscateElasticSearchString obfuscates the given ElasticSearch JSON query.
+func (o *Obfuscator) ObfuscateElasticSearchString(cmd string) string {
+	return obfuscateJSONString(cmd, o.es)
+}
+
+// obfuscateJSONString obfuscates the given JSON string (typically a span tag value) using the given
+// obfuscator. If the obfuscator is nil it is considered disabled.
+func obfuscateJSONString(cmd string, obfuscator *jsonObfuscator) string {
+	if obfuscator == nil || cmd == "" {
+		// obfuscator is disabled or string is empty
+		return cmd
+	}
+	out, _ := obfuscator.obfuscate([]byte(cmd))
+	// we should accept whatever the obfuscator returns, even if it's an error: a parsing
+	// error simply means that the JSON was invalid, meaning that we've only obfuscated
+	// as much of it as we could. It is safe to accept the output, even if partial.
+	return out
+}
+
+type jsonObfuscator struct {
+	keepKeys      map[string]bool // the values for these keys will not be obfuscated
+	transformKeys map[string]bool // the values for these keys pass through the transformer
+	transformer   func(string) string
+
+	scan     *scanner // scanner
+	closures []bool   // closure stack, true if object (e.g. {[{ => []bool{true, false, true})
+	key      bool     // true if scanning a key
+
+	wiped             bool // true if obfuscation string (`"?"`) was already written for current value
+	keeping           bool // true if not obfuscating
+	transformingValue bool // true if collecting the next literal for transformation
+	keepDepth         int  // the depth at which we've stopped obfuscating
+}
+
+func newJSONObfuscator(cfg *JSONConfig, o *Obfuscator) *jsonObfuscator {
+	keepValue := make(map[string]bool, len(cfg.KeepValues))
+	for _, v := range cfg.KeepValues {
+		keepValue[v] = true
+	}
+	var (
+		transformKeys map[string]bool
+		transformer   func(string) string
+	)
+	if len(cfg.ObfuscateSQLValues) > 0 {
+		transformer = sqlObfuscationTransformer(o)
+		transformKeys = make(map[string]bool, len(cfg.ObfuscateSQLValues))
+		for _, v := range cfg.ObfuscateSQLValues {
+			transformKeys[v] = true
+		}
+	}
+	return &jsonObfuscator{
+		closures:      []bool{},
+		keepKeys:      keepValue,
+		transformKeys: transformKeys,
+		transformer:   transformer,
+		scan:          &scanner{},
+	}
+}
+
+func sqlObfuscationTransformer(o *Obfuscator) func(string) string {
+	return func(s string) string {
+		result, err := o.ObfuscateSQLString(s)
+		if err != nil {
+			o.log.Debugf("Failed to obfuscate SQL string '%s': %s", s, err.Error())
+			// instead of returning an empty string we explicitly return an error string here within the result in order
+			// to surface the problem clearly to the user
+			return "Datadog-agent failed to obfuscate SQL string. Enable agent debug logs for more info."
+		}
+		return result.Query
+	}
+}
+
+// setKey verifies if we are currently scanning a key based on the current state
+// and updates the state accordingly. It must be called only after a closure or a
+// value scan has ended.
+func (p *jsonObfuscator) setKey() {
+	n := len(p.closures)
+	p.key = n == 0 || p.closures[n-1] // true if we are at top level or in an object
+	p.wiped = false
+}
+
+func (p *jsonObfuscator) obfuscate(data []byte) (string, error) {
+	var out strings.Builder
+
+	keyBuf := make([]byte, 0, 10) // recording key token
+	valBuf := make([]byte, 0, 10) // recording value
+
+	p.scan.reset()
+	for _, c := range data {
+		p.scan.bytes++
+		op := p.scan.step(p.scan, c)
+		depth := len(p.closures)
+		switch op {
+		case scanBeginObject:
+			// object begins: {
+			p.closures = append(p.closures, true)
+			p.setKey()
+			p.transformingValue = false
+
+		case scanBeginArray:
+			// array begins: [
+			p.closures = append(p.closures, false)
+			p.setKey()
+			p.transformingValue = false
+
+		case scanEndArray, scanEndObject:
+			// array or object closing
+			if n := len(p.closures) - 1; n > 0 {
+				p.closures = p.closures[:n]
+			}
+			fallthrough
+
+		case scanObjectValue, scanArrayValue:
+			// done scanning value
+			p.setKey()
+			if p.transformingValue && p.transformer != nil {
+				v, err := strconv.Unquote(string(valBuf))
+				if err != nil {
+					v = string(valBuf)
+				}
+				result := p.transformer(v)
+				out.WriteByte('"')
+				out.WriteString(result)
+				out.WriteByte('"')
+				p.transformingValue = false
+				valBuf = valBuf[:0]
+			} else if p.keeping && depth < p.keepDepth {
+				p.keeping = false
+			}
+
+		case scanBeginLiteral, scanContinue:
+			// starting or continuing a literal
+			if p.transformingValue {
+				valBuf = append(valBuf, c)
+				continue
+			} else if p.key {
+				// it's a key
+				keyBuf = append(keyBuf, c)
+			} else if !p.keeping {
+				// it's a value we're not keeping
+				if !p.wiped {
+					out.Write([]byte(`"?"`))
+					p.wiped = true
+				}
+				continue
+			}
+
+		case scanObjectKey:
+			// done scanning key
+			k := strings.Trim(string(keyBuf), `"`)
+			if !p.keeping && p.keepKeys[k] {
+				// we should not obfuscate values of this key
+				p.keeping = true
+				p.keepDepth = depth + 1
+			} else if !p.transformingValue && p.transformer != nil && p.transformKeys[k] {
+				// the string value immediately following this key will be passed through the value transformer
+				// if anything other than a literal is found then sql obfuscation is stopped and json obfuscation
+				// proceeds as usual
+				p.transformingValue = true
+			}
+
+			keyBuf = keyBuf[:0]
+			p.key = false
+
+		case scanSkipSpace:
+			continue
+
+		case scanError:
+			// we've encountered an error, mark that there might be more JSON
+			// using the ellipsis and return whatever we've managed to obfuscate
+			// thus far.
+			out.Write([]byte("..."))
+			return out.String(), p.scan.err
+		}
+		out.WriteByte(c)
+	}
+	if p.scan.eof() == scanError {
+		// if an error occurred it's fine, simply add the ellipsis to indicate
+		// that the input has been truncated.
+		out.Write([]byte("..."))
+		return out.String(), p.scan.err
+	}
+	return out.String(), nil
+}
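A sketch of the JSON obfuscator through the exported MongoDB entry point; values are replaced with "?" unless their key is listed in KeepValues (the query below is illustrative):

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

func main() {
	o := obfuscate.NewObfuscator(obfuscate.Config{
		Mongo: obfuscate.JSONConfig{
			Enabled:    true,
			KeepValues: []string{"find"},
		},
	})
	defer o.Stop()

	// The value of "find" is kept; the nested "ssn" value is reduced to "?".
	fmt.Println(o.ObfuscateMongoDBString(`{"find": "users", "filter": {"ssn": "123-45-6789"}}`))
}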
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json_scanner.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json_scanner.go
new file mode 100644
index 0000000000..9398632d81
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json_scanner.go
@@ -0,0 +1,565 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//
+// The code that follows is copied from go/src/encoding/json/scanner.go
+// It may contain minor edits, such as allowing multiple JSON objects within
+// the same input string (see stateEndTop)
+//
+
+package obfuscate
+
+import "strconv"
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+	msg    string // description of error
+	Offset int64  // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// A scanner is a JSON scanning state machine.
+// Callers call scan.reset() and then pass bytes in one at a time
+// by calling scan.step(&scan, c) for each byte.
+// The return value, referred to as an opcode, tells the
+// caller about significant parsing events like beginning
+// and ending literals, objects, and arrays, so that the
+// caller can follow along if it wishes.
+// The return value scanEnd indicates that a single top-level
+// JSON value has been completed, *before* the byte that
+// just got passed in.  (The indication must be delayed in order
+// to recognize the end of numbers: is 123 a whole value or
+// the beginning of 12345e+6?).
+type scanner struct {
+	// The step is a func to be called to execute the next transition.
+	// Also tried using an integer constant and a single func
+	// with a switch, but using the func directly was 10% faster
+	// on a 64-bit Mac Mini, and it's nicer to read.
+	step func(*scanner, byte) int
+
+	// Reached end of top-level value.
+	endTop bool
+
+	// Stack of what we're in the middle of - array values, object keys, object values.
+	parseState []int
+
+	// Error that happened, if any.
+	err error
+
+	// 1-byte redo (see undo method)
+	redo bool
+
+	// total bytes consumed, updated by decoder.Decode
+	bytes int64
+}
+
+// These values are returned by the state transition functions
+// assigned to scanner.state and the method scanner.eof.
+// They give details about the current state of the scan that
+// callers might be interested to know about.
+// It is okay to ignore the return value of any particular
+// call to scanner.state: if one call returns scanError,
+// every subsequent call will return scanError too.
+const (
+	// Continue.
+	scanContinue     = iota // uninteresting byte
+	scanBeginLiteral        // end implied by next result != scanContinue
+	scanBeginObject         // begin object
+	scanObjectKey           // just finished object key (string)
+	scanObjectValue         // just finished non-last object value
+	scanEndObject           // end object (implies scanObjectValue if possible)
+	scanBeginArray          // begin array
+	scanArrayValue          // just finished array value
+	scanEndArray            // end array (implies scanArrayValue if possible)
+	scanSkipSpace           // space byte; can skip; known to be last "continue" result
+
+	// Stop.
+	scanEnd   // top-level value ended *before* this byte; known to be first "stop" result
+	scanError // hit an error, scanner.err.
+)
+
+// These values are stored in the parseState stack.
+// They give the current state of a composite value
+// being scanned. If the parser is inside a nested value
+// the parseState describes the nested state, outermost at entry 0.
+const (
+	parseObjectKey   = iota // parsing object key (before colon)
+	parseObjectValue        // parsing object value (after colon)
+	parseArrayValue         // parsing array value
+)
+
+// reset prepares the scanner for use.
+// It must be called before calling s.step.
+func (s *scanner) reset() {
+	s.step = stateBeginValue
+	s.parseState = s.parseState[0:0]
+	s.err = nil
+	s.redo = false
+	s.endTop = false
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() int {
+	if s.err != nil {
+		return scanError
+	}
+	if s.endTop {
+		return scanEnd
+	}
+	s.step(s, ' ')
+	if s.endTop {
+		return scanEnd
+	}
+	if s.err == nil {
+		s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
+	}
+	return scanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+func (s *scanner) pushParseState(p int) {
+	s.parseState = append(s.parseState, p)
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+	n := len(s.parseState) - 1
+	s.parseState = s.parseState[0:n]
+	s.redo = false
+	if n == 0 {
+		s.step = stateEndTop
+		s.endTop = true
+	} else {
+		s.step = stateEndValue
+	}
+}
+
+func isSpace(c byte) bool {
+	return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == ']' {
+		return stateEndValue(s, c)
+	}
+	return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	switch c {
+	case '{':
+		s.step = stateBeginStringOrEmpty
+		s.pushParseState(parseObjectKey)
+		return scanBeginObject
+	case '[':
+		s.step = stateBeginValueOrEmpty
+		s.pushParseState(parseArrayValue)
+		return scanBeginArray
+	case '"':
+		s.step = stateInString
+		return scanBeginLiteral
+	case '-':
+		s.step = stateNeg
+		return scanBeginLiteral
+	case '0': // beginning of 0.123
+		s.step = state0
+		return scanBeginLiteral
+	case 't': // beginning of true
+		s.step = stateT
+		return scanBeginLiteral
+	case 'f': // beginning of false
+		s.step = stateF
+		return scanBeginLiteral
+	case 'n': // beginning of null
+		s.step = stateN
+		return scanBeginLiteral
+	}
+	if '1' <= c && c <= '9' { // beginning of 1234.5
+		s.step = state1
+		return scanBeginLiteral
+	}
+	return s.error(c, "looking for beginning of value")
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == '}' {
+		n := len(s.parseState)
+		s.parseState[n-1] = parseObjectValue
+		return stateEndValue(s, c)
+	}
+	return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == '"' {
+		s.step = stateInString
+		return scanBeginLiteral
+	}
+	return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c byte) int {
+	n := len(s.parseState)
+	if n == 0 {
+		// Completed top-level before the current byte.
+		s.step = stateEndTop
+		s.endTop = true
+		return stateEndTop(s, c)
+	}
+	if c <= ' ' && isSpace(c) {
+		s.step = stateEndValue
+		return scanSkipSpace
+	}
+	ps := s.parseState[n-1]
+	switch ps {
+	case parseObjectKey:
+		if c == ':' {
+			s.parseState[n-1] = parseObjectValue
+			s.step = stateBeginValue
+			return scanObjectKey
+		}
+		return s.error(c, "after object key")
+	case parseObjectValue:
+		if c == ',' {
+			s.parseState[n-1] = parseObjectKey
+			s.step = stateBeginString
+			return scanObjectValue
+		}
+		if c == '}' {
+			s.popParseState()
+			return scanEndObject
+		}
+		return s.error(c, "after object key:value pair")
+	case parseArrayValue:
+		if c == ',' {
+			s.step = stateBeginValue
+			return scanArrayValue
+		}
+		if c == ']' {
+			s.popParseState()
+			return scanEndArray
+		}
+		return s.error(c, "after array element")
+	}
+	return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c byte) int {
+	if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+		// The former behaviour has been removed. Now, if anything
+		// other than whitespace follows, we assume a new JSON string
+		// might be starting. This allows us to continue obfuscating
+		// further strings in cases where there are multiple JSON
+		// objects enumerated sequentially within the same input.
+		// This is a common case for ElasticSearch response bodies.
+		s.reset()
+		return s.step(s, c)
+	}
+	return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c byte) int {
+	if c == '"' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	if c == '\\' {
+		s.step = stateInStringEsc
+		return scanContinue
+	}
+	if c < 0x20 {
+		return s.error(c, "in string literal")
+	}
+	return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c byte) int {
+	switch c {
+	case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+		s.step = stateInString
+		return scanContinue
+	case 'u':
+		s.step = stateInStringEscU
+		return scanContinue
+	}
+	return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInStringEscU1
+		return scanContinue
+	}
+	// numbers
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInStringEscU12
+		return scanContinue
+	}
+	// numbers
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInStringEscU123
+		return scanContinue
+	}
+	// numbers
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInString
+		return scanContinue
+	}
+	// numbers
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c byte) int {
+	if c == '0' {
+		s.step = state0
+		return scanContinue
+	}
+	if '1' <= c && c <= '9' {
+		s.step = state1
+		return scanContinue
+	}
+	return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		s.step = state1
+		return scanContinue
+	}
+	return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c byte) int {
+	if c == '.' {
+		s.step = stateDot
+		return scanContinue
+	}
+	if c == 'e' || c == 'E' {
+		s.step = stateE
+		return scanContinue
+	}
+	return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		s.step = stateDot0
+		return scanContinue
+	}
+	return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		return scanContinue
+	}
+	if c == 'e' || c == 'E' {
+		s.step = stateE
+		return scanContinue
+	}
+	return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c byte) int {
+	if c == '+' || c == '-' {
+		s.step = stateESign
+		return scanContinue
+	}
+	return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		s.step = stateE0
+		return scanContinue
+	}
+	return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		return scanContinue
+	}
+	return stateEndValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c byte) int {
+	if c == 'r' {
+		s.step = stateTr
+		return scanContinue
+	}
+	return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c byte) int {
+	if c == 'u' {
+		s.step = stateTru
+		return scanContinue
+	}
+	return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c byte) int {
+	if c == 'e' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c byte) int {
+	if c == 'a' {
+		s.step = stateFa
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c byte) int {
+	if c == 'l' {
+		s.step = stateFal
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c byte) int {
+	if c == 's' {
+		s.step = stateFals
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c byte) int {
+	if c == 'e' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c byte) int {
+	if c == 'u' {
+		s.step = stateNu
+		return scanContinue
+	}
+	return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c byte) int {
+	if c == 'l' {
+		s.step = stateNul
+		return scanContinue
+	}
+	return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c byte) int {
+	if c == 'l' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c byte) int {
+	return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c byte, context string) int {
+	s.step = stateError
+	s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
+	return scanError
+}
+
+// quoteChar formats c as a quoted character literal
+func quoteChar(c byte) string {
+	// special cases - different from quoted strings
+	if c == '\'' {
+		return `'\''`
+	}
+	if c == '"' {
+		return `'"'`
+	}
+
+	// use quoted string with different quotation marks
+	s := strconv.Quote(string(c))
+	return "'" + s[1:len(s)-1] + "'"
+}
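Since the scanner is unexported it can only be driven from inside the package (for example from a test). A minimal in-package sketch of the byte-at-a-time protocol described in the type comment; printOpcodes is a hypothetical helper, not part of the upstream file:

package obfuscate

import "fmt"

// printOpcodes feeds input to a fresh scanner one byte at a time and prints
// the opcode returned for each byte, mirroring how obfuscate() drives it.
func printOpcodes(input string) {
	s := &scanner{}
	s.reset()
	for _, c := range []byte(input) {
		s.bytes++
		op := s.step(s, c)
		if op == scanError {
			fmt.Println("syntax error:", s.err)
			return
		}
		fmt.Printf("%q -> %d\n", c, op)
	}
	fmt.Println("eof ->", s.eof())
}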
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/memcached.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/memcached.go
new file mode 100644
index 0000000000..ce5cfe3fd5
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/memcached.go
@@ -0,0 +1,19 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package obfuscate
+
+import "strings"
+
+// ObfuscateMemcachedString obfuscates the Memcached command cmd.
+func (*Obfuscator) ObfuscateMemcachedString(cmd string) string {
+	// All memcached commands end with new lines [1]. In the case of storage
+	// commands, key values follow after. Knowing this, all we have to do
+	// to obfuscate sensitive information is to remove everything that follows
+	// a new line. For non-storage commands, this will have no effect.
+	// [1]: https://github.com/memcached/memcached/blob/master/doc/protocol.txt
+	out := strings.SplitN(cmd, "\r\n", 2)[0]
+	return strings.TrimSpace(out)
+}
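A sketch of the memcached obfuscation: everything after the first "\r\n" (the payload of storage commands) is dropped, while commands without a payload pass through unchanged:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

func main() {
	o := obfuscate.NewObfuscator(obfuscate.Config{})
	defer o.Stop()

	// The stored value on the second line is removed.
	fmt.Println(o.ObfuscateMemcachedString("set mykey 0 60 5\r\nhello\r\n"))
	// Retrieval commands carry no payload and are left as-is.
	fmt.Println(o.ObfuscateMemcachedString("get mykey\r\n"))
}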
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/obfuscate.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/obfuscate.go
new file mode 100644
index 0000000000..0de91016de
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/obfuscate.go
@@ -0,0 +1,254 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package obfuscate implements quantizing and obfuscating of tags and resources for
+// a set of spans matching certain criteria.
+//
+// This module is used in the Datadog Agent, the Go tracing client (dd-trace-go) and in the
+// OpenTelemetry Collector Datadog exporter. End-user behavior is stable, but there are no
+// stability guarantees on its public Go API. Nonetheless, if editing, try to avoid breaking
+// API changes where possible and double-check the API usage on all module dependents.
+package obfuscate
+
+import (
+	"bytes"
+
+	"github.com/DataDog/datadog-go/v5/statsd"
+	"go.uber.org/atomic"
+)
+
+// Obfuscator quantizes and obfuscates spans. The obfuscator is not safe for
+// concurrent use.
+type Obfuscator struct {
+	opts                 *Config
+	es                   *jsonObfuscator // nil if disabled
+	mongo                *jsonObfuscator // nil if disabled
+	sqlExecPlan          *jsonObfuscator // nil if disabled
+	sqlExecPlanNormalize *jsonObfuscator // nil if disabled
+	// sqlLiteralEscapes reports whether we should treat escape characters literally or as escape characters.
+	// Different SQL engines behave in different ways and the tokenizer needs to be generic.
+	sqlLiteralEscapes *atomic.Bool
+	// queryCache keeps a cache of already obfuscated queries.
+	queryCache *measuredCache
+	log        Logger
+}
+
+// Logger is able to log certain log messages.
+type Logger interface {
+	// Debugf logs the given message using the given format.
+	Debugf(format string, params ...interface{})
+}
+
+type noopLogger struct{}
+
+func (noopLogger) Debugf(_ string, _ ...interface{}) {}
+
+// setSQLLiteralEscapes sets whether or not escape characters should be treated literally by the SQL obfuscator.
+func (o *Obfuscator) setSQLLiteralEscapes(ok bool) {
+	if ok {
+		o.sqlLiteralEscapes.Store(true)
+	} else {
+		o.sqlLiteralEscapes.Store(false)
+	}
+}
+
+// useSQLLiteralEscapes reports whether escape characters will be treated literally by the SQL obfuscator.
+// Some SQL engines require it and others don't. It will be detected as SQL queries are being obfuscated
+// through calls to ObfuscateSQLString and automatically set for future calls.
+func (o *Obfuscator) useSQLLiteralEscapes() bool {
+	return o.sqlLiteralEscapes.Load()
+}
+
+// Config holds the configuration for obfuscating sensitive data for various span types.
+type Config struct {
+	// SQL holds the obfuscation configuration for SQL queries.
+	SQL SQLConfig
+
+	// ES holds the obfuscation configuration for ElasticSearch bodies.
+	ES JSONConfig
+
+	// Mongo holds the obfuscation configuration for MongoDB queries.
+	Mongo JSONConfig
+
+	// SQLExecPlan holds the obfuscation configuration for SQL Exec Plans. This is strictly for safety related obfuscation,
+	// not normalization. Normalization of exec plans is configured in SQLExecPlanNormalize.
+	SQLExecPlan JSONConfig
+
+	// SQLExecPlanNormalize holds the normalization configuration for SQL Exec Plans.
+	SQLExecPlanNormalize JSONConfig
+
+	// HTTP holds the obfuscation settings for HTTP URLs.
+	HTTP HTTPConfig
+
+	// Statsd specifies the statsd client to use for reporting metrics.
+	Statsd StatsClient
+
+	// Logger specifies the logger to use when outputting messages.
+	// If unset, no logs will be outputted.
+	Logger Logger
+}
+
+// StatsClient implementations are able to emit stats.
+type StatsClient interface {
+	// Gauge reports a gauge stat with the given name, value, tags and rate.
+	Gauge(name string, value float64, tags []string, rate float64) error
+}
+
+// SQLConfig holds the config for obfuscating SQL.
+type SQLConfig struct {
+	// DBMS identifies the type of database management system (e.g. MySQL, Postgres, and SQL Server).
+	// Valid values for this can be found at https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md#connection-level-attributes
+	DBMS string `json:"dbms"`
+
+	// TableNames specifies whether the obfuscator should also extract the table names that a query addresses,
+	// in addition to obfuscating.
+	TableNames bool `json:"table_names" yaml:"table_names"`
+
+	// CollectCommands specifies whether the obfuscator should extract and return commands as SQL metadata when obfuscating.
+	CollectCommands bool `json:"collect_commands" yaml:"collect_commands"`
+
+	// CollectComments specifies whether the obfuscator should extract and return comments as SQL metadata when obfuscating.
+	CollectComments bool `json:"collect_comments" yaml:"collect_comments"`
+
+	// ReplaceDigits specifies whether digits in table names and identifiers should be obfuscated.
+	ReplaceDigits bool `json:"replace_digits" yaml:"replace_digits"`
+
+	// KeepSQLAlias reports whether SQL aliases ("AS" clauses) should be kept rather than truncated.
+	KeepSQLAlias bool `json:"keep_sql_alias"`
+
+	// DollarQuotedFunc reports whether to treat "$func$" delimited dollar-quoted strings
+	// differently and not obfuscate them as a string. To read more about dollar quoted
+	// strings see:
+	//
+	// https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-DOLLAR-QUOTING
+	DollarQuotedFunc bool `json:"dollar_quoted_func"`
+
+	// Cache reports whether the obfuscator should use an LRU look-up cache for SQL obfuscations.
+	Cache bool
+}
+
+// SQLMetadata holds metadata collected throughout the obfuscation of an SQL statement. It is only
+// collected when enabled via SQLConfig.
+type SQLMetadata struct {
+	// Size holds the byte size of the metadata collected.
+	Size int64
+	// TablesCSV is a comma-separated list of tables that the query addresses.
+	TablesCSV string `json:"tables_csv"`
+	// Commands holds commands executed in an SQL statement.
+	// e.g. SELECT, UPDATE, INSERT, DELETE, etc.
+	Commands []string `json:"commands"`
+	// Comments holds comments in an SQL statement.
+	Comments []string `json:"comments"`
+}
+
+// HTTPConfig holds the configuration settings for HTTP obfuscation.
+type HTTPConfig struct {
+	// RemoveQueryString specifies whether query strings should be removed from HTTP URLs.
+	RemoveQueryString bool
+
+	// RemovePathDigits specifies whether digits in URL path segments should be obfuscated.
+	RemovePathDigits bool
+}
+
+// JSONConfig holds the obfuscation configuration for sensitive
+// data found in JSON objects.
+type JSONConfig struct {
+	// Enabled specifies whether obfuscation should be enabled.
+	Enabled bool
+
+	// KeepValues specifies a set of keys whose values will
+	// not be obfuscated.
+	KeepValues []string
+
+	// ObfuscateSQLValues specifies a set of keys whose values
+	// will be passed through SQL obfuscation.
+	ObfuscateSQLValues []string
+}
+
+// NewObfuscator creates a new obfuscator
+func NewObfuscator(cfg Config) *Obfuscator {
+	if cfg.Logger == nil {
+		cfg.Logger = noopLogger{}
+	}
+	o := Obfuscator{
+		opts:              &cfg,
+		queryCache:        newMeasuredCache(cacheOptions{On: cfg.SQL.Cache, Statsd: cfg.Statsd}),
+		sqlLiteralEscapes: atomic.NewBool(false),
+		log:               cfg.Logger,
+	}
+	if cfg.ES.Enabled {
+		o.es = newJSONObfuscator(&cfg.ES, &o)
+	}
+	if cfg.Mongo.Enabled {
+		o.mongo = newJSONObfuscator(&cfg.Mongo, &o)
+	}
+	if cfg.SQLExecPlan.Enabled {
+		o.sqlExecPlan = newJSONObfuscator(&cfg.SQLExecPlan, &o)
+	}
+	if cfg.SQLExecPlanNormalize.Enabled {
+		o.sqlExecPlanNormalize = newJSONObfuscator(&cfg.SQLExecPlanNormalize, &o)
+	}
+	if cfg.Statsd == nil {
+		cfg.Statsd = &statsd.NoOpClient{}
+	}
+	return &o
+}
+
+// Stop cleans up after a finished Obfuscator.
+func (o *Obfuscator) Stop() {
+	o.queryCache.Close()
+}
+
+// compactWhitespaces compacts all whitespaces in t.
+func compactWhitespaces(t string) string {
+	n := len(t)
+	r := make([]byte, n)
+	spaceCode := uint8(32)
+	isWhitespace := func(char uint8) bool { return char == spaceCode }
+	nr := 0
+	offset := 0
+	for i := 0; i < n; i++ {
+		if isWhitespace(t[i]) {
+			copy(r[nr:], t[nr+offset:i])
+			r[i-offset] = spaceCode
+			nr = i + 1 - offset
+			for j := i + 1; j < n; j++ {
+				if !isWhitespace(t[j]) {
+					offset += j - i - 1
+					i = j
+					break
+				} else if j == n-1 {
+					offset += j - i
+					i = j
+					break
+				}
+			}
+		}
+	}
+	copy(r[nr:], t[nr+offset:n])
+	r = r[:n-offset]
+	return string(bytes.Trim(r, " "))
+}
+
+// replaceDigits replaces consecutive sequences of digits with '?',
+// example: "jobs_2020_1597876964" --> "jobs_?_?"
+func replaceDigits(buffer []byte) []byte {
+	scanningDigit := false
+	filtered := buffer[:0]
+	for _, b := range buffer {
+		// digits are encoded as 1 byte in utf8
+		if isDigit(rune(b)) {
+			if scanningDigit {
+				continue
+			}
+			scanningDigit = true
+			filtered = append(filtered, byte('?'))
+			continue
+		}
+		scanningDigit = false
+		filtered = append(filtered, b)
+	}
+	return filtered
+}
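Pulling the configuration together, a hedged end-to-end sketch of the SQL path referenced by the JSON transformer above. ObfuscateSQLString (defined elsewhere in this package) returns a result whose Query field holds the obfuscated statement; the exact output depends on the SQL tokenizer, and the query below is illustrative:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

func main() {
	o := obfuscate.NewObfuscator(obfuscate.Config{
		SQL: obfuscate.SQLConfig{
			DBMS:            "postgresql",
			ReplaceDigits:   true,
			CollectCommands: true,
			TableNames:      true,
		},
	})
	defer o.Stop()

	res, err := o.ObfuscateSQLString("SELECT name FROM users_2023 WHERE ssn = '123-45-6789'")
	if err != nil {
		panic(err)
	}
	// Literals (and, with ReplaceDigits, digits in identifiers) become "?".
	fmt.Println(res.Query)
}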
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis.go
new file mode 100644
index 0000000000..13591af68d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis.go
@@ -0,0 +1,262 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package obfuscate
+
+import (
+	"strings"
+)
+
+// redisTruncationMark is used as a suffix by tracing libraries to indicate that a
+// command was truncated.
+const redisTruncationMark = "..."
+
+const maxRedisNbCommands = 3
+
+// Redis commands consisting of two words
+var redisCompoundCommandSet = map[string]bool{
+	"CLIENT": true, "CLUSTER": true, "COMMAND": true, "CONFIG": true, "DEBUG": true, "SCRIPT": true}
+
+// QuantizeRedisString returns a quantized version of a Redis query.
+//
+// TODO(gbbr): Refactor this method to use the tokenizer and
+// remove "compactWhitespaces". This method is buggy when commands
+// contain quoted strings with newlines.
+func (*Obfuscator) QuantizeRedisString(query string) string {
+	query = compactWhitespaces(query)
+
+	var resource strings.Builder
+	truncated := false
+	nbCmds := 0
+
+	for len(query) > 0 && nbCmds < maxRedisNbCommands {
+		var rawLine string
+
+		// Read the next command
+		idx := strings.IndexByte(query, '\n')
+		if idx == -1 {
+			rawLine = query
+			query = ""
+		} else {
+			rawLine = query[:idx]
+			query = query[idx+1:]
+		}
+
+		line := strings.Trim(rawLine, " ")
+		if len(line) == 0 {
+			continue
+		}
+
+		// Parse arguments
+		args := strings.SplitN(line, " ", 3)
+
+		if strings.HasSuffix(args[0], redisTruncationMark) {
+			truncated = true
+			continue
+		}
+
+		command := strings.ToUpper(args[0])
+
+		if redisCompoundCommandSet[command] && len(args) > 1 {
+			if strings.HasSuffix(args[1], redisTruncationMark) {
+				truncated = true
+				continue
+			}
+
+			command += " " + strings.ToUpper(args[1])
+		}
+
+		// Write the command representation
+		resource.WriteByte(' ')
+		resource.WriteString(command)
+
+		nbCmds++
+		truncated = false
+	}
+
+	if nbCmds == maxRedisNbCommands || truncated {
+		resource.WriteString(" ...")
+	}
+
+	return strings.Trim(resource.String(), " ")
+}
+
+// ObfuscateRedisString obfuscates the given Redis command.
+func (*Obfuscator) ObfuscateRedisString(rediscmd string) string {
+	t := newRedisTokenizer([]byte(rediscmd))
+	var (
+		str  strings.Builder
+		cmd  string
+		args []string
+	)
+	for {
+		tok, typ, done := t.scan()
+		switch typ {
+		case redisTokenCommand:
+			// new command starting
+			if cmd != "" {
+				// a previous command was buffered, obfuscate it
+				obfuscateRedisCmd(&str, cmd, args...)
+				str.WriteByte('\n')
+			}
+			cmd = tok
+			args = args[:0]
+		case redisTokenArgument:
+			args = append(args, tok)
+		}
+		if done {
+			// last command
+			obfuscateRedisCmd(&str, cmd, args...)
+			break
+		}
+	}
+	return str.String()
+}
+
+func obfuscateRedisCmd(out *strings.Builder, cmd string, args ...string) {
+	out.WriteString(cmd)
+	if len(args) == 0 {
+		return
+	}
+	out.WriteByte(' ')
+
+	switch strings.ToUpper(cmd) {
+	case "AUTH":
+		// Obfuscate everything after command
+		// • AUTH password
+		if len(args) > 0 {
+			args[0] = "?"
+			args = args[:1]
+		}
+
+	case "APPEND", "GETSET", "LPUSHX", "GEORADIUSBYMEMBER", "RPUSHX",
+		"SET", "SETNX", "SISMEMBER", "ZRANK", "ZREVRANK", "ZSCORE":
+		// Obfuscate 2nd argument:
+		// • APPEND key value
+		// • GETSET key value
+		// • LPUSHX key value
+		// • GEORADIUSBYMEMBER key member radius m|km|ft|mi [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT count] [ASC|DESC] [STORE key] [STOREDIST key]
+		// • RPUSHX key value
+		// • SET key value [expiration EX seconds|PX milliseconds] [NX|XX]
+		// • SETNX key value
+		// • SISMEMBER key member
+		// • ZRANK key member
+		// • ZREVRANK key member
+		// • ZSCORE key member
+		obfuscateRedisArgN(args, 1)
+
+	case "HSET", "HSETNX", "LREM", "LSET", "SETBIT", "SETEX", "PSETEX",
+		"SETRANGE", "ZINCRBY", "SMOVE", "RESTORE":
+		// Obfuscate 3rd argument:
+		// • HSET key field value
+		// • HSETNX key field value
+		// • LREM key count value
+		// • LSET key index value
+		// • SETBIT key offset value
+		// • SETEX key seconds value
+		// • PSETEX key milliseconds value
+		// • SETRANGE key offset value
+		// • ZINCRBY key increment member
+		// • SMOVE source destination member
+		// • RESTORE key ttl serialized-value [REPLACE]
+		obfuscateRedisArgN(args, 2)
+
+	case "LINSERT":
+		// Obfuscate 4th argument:
+		// • LINSERT key BEFORE|AFTER pivot value
+		obfuscateRedisArgN(args, 3)
+
+	case "GEOHASH", "GEOPOS", "GEODIST", "LPUSH", "RPUSH", "SREM",
+		"ZREM", "SADD":
+		// Obfuscate all arguments after the first one.
+		// • GEOHASH key member [member ...]
+		// • GEOPOS key member [member ...]
+		// • GEODIST key member1 member2 [unit]
+		// • LPUSH key value [value ...]
+		// • RPUSH key value [value ...]
+		// • SREM key member [member ...]
+		// • ZREM key member [member ...]
+		// • SADD key member [member ...]
+		if len(args) > 1 {
+			args[1] = "?"
+			args = args[:2]
+		}
+
+	case "GEOADD":
+		// Obfuscating every 3rd argument starting from first
+		// • GEOADD key longitude latitude member [longitude latitude member ...]
+		obfuscateRedisArgsStep(args, 1, 3)
+
+	case "HMSET":
+		// Every 2nd argument starting from first.
+		// • HMSET key field value [field value ...]
+		obfuscateRedisArgsStep(args, 1, 2)
+
+	case "MSET", "MSETNX":
+		// Every 2nd argument starting from command.
+		// • MSET key value [key value ...]
+		// • MSETNX key value [key value ...]
+		obfuscateRedisArgsStep(args, 0, 2)
+
+	case "CONFIG":
+		// Obfuscate 2nd argument to SET sub-command.
+		// • CONFIG SET parameter value
+		if strings.ToUpper(args[0]) == "SET" {
+			obfuscateRedisArgN(args, 2)
+		}
+
+	case "BITFIELD":
+		// Obfuscate 3rd argument to SET sub-command:
+		// • BITFIELD key [GET type offset] [SET type offset value] [INCRBY type offset increment] [OVERFLOW WRAP|SAT|FAIL]
+		var n int
+		for i, arg := range args {
+			if strings.ToUpper(arg) == "SET" {
+				n = i
+			}
+			if n > 0 && i-n == 3 {
+				args[i] = "?"
+				break
+			}
+		}
+
+	case "ZADD":
+		// Obfuscate every 2nd argument after potential optional ones.
+		// • ZADD key [NX|XX] [CH] [INCR] score member [score member ...]
+		var i int
+	loop:
+		for i = range args {
+			if i == 0 {
+				continue // key
+			}
+			switch args[i] {
+			case "NX", "XX", "CH", "INCR":
+				// continue
+			default:
+				break loop
+			}
+		}
+		obfuscateRedisArgsStep(args, i, 2)
+
+	default:
+		// Obfuscate nothing.
+	}
+	out.WriteString(strings.Join(args, " "))
+}
+
+func obfuscateRedisArgN(args []string, n int) {
+	if len(args) > n {
+		args[n] = "?"
+	}
+}
+
+func obfuscateRedisArgsStep(args []string, start, step int) {
+	if start+step-1 >= len(args) {
+		// can't reach target
+		return
+	}
+	for i := start + step - 1; i < len(args); i += step {
+		args[i] = "?"
+	}
+}
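
A brief usage sketch (hypothetical, assuming the import path shown in the diff) of the two exported Redis helpers above:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

func main() {
	o := obfuscate.NewObfuscator(obfuscate.Config{})
	defer o.Stop()

	// The per-command rules above replace sensitive positions with "?".
	fmt.Println(o.ObfuscateRedisString("SET mykey somevalue")) // SET mykey ?
	fmt.Println(o.ObfuscateRedisString("AUTH hunter2"))        // AUTH ?

	// QuantizeRedisString keeps only the command names, up to three commands.
	fmt.Println(o.QuantizeRedisString("SET k v\nGET k")) // SET GET
}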
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis_tokenizer.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis_tokenizer.go
new file mode 100644
index 0000000000..d4ef2dc332
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis_tokenizer.go
@@ -0,0 +1,187 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package obfuscate
+
+import (
+	"bytes"
+	"strings"
+)
+
+// redisTokenType specifies the token type returned by the tokenizer.
+type redisTokenType int
+
+const (
+	// redisTokenCommand is a command token. For compound tokens, it is
+	// only the first part up to a space.
+	redisTokenCommand redisTokenType = iota
+
+	// redisTokenArgument is an argument token.
+	redisTokenArgument
+)
+
+// String implements fmt.Stringer.
+func (t redisTokenType) String() string {
+	return map[redisTokenType]string{
+		redisTokenCommand:  "command",
+		redisTokenArgument: "argument",
+	}[t]
+}
+
+// redisTokenizer tokenizes a Redis command string. The string can be on
+// multiple lines. The tokenizer is capable of parsing quoted strings and escape
+// sequences inside them.
+type redisTokenizer struct {
+	data  []byte
+	ch    byte
+	off   int
+	done  bool
+	state redisParseState
+}
+
+// redisParseState specifies the current state of the tokenizer.
+type redisParseState int
+
+const (
+	// redisStateCommand specifies that we are about to parse a command.
+	// It is usually the state at the beginning of the scan or after a
+	// new line.
+	redisStateCommand redisParseState = iota
+
+	// redisStateArgument specifies that we are about to parse an argument
+	// to a command or the rest of the tokens in a compound command.
+	redisStateArgument
+)
+
+// newRedisTokenizer returns a new tokenizer for the given data.
+func newRedisTokenizer(data []byte) *redisTokenizer {
+	return &redisTokenizer{
+		data:  bytes.TrimSpace(data),
+		off:   -1,
+		state: redisStateCommand,
+	}
+}
+
+// scan returns the next token, its type and a bool. The boolean specifies whether
+// the returned token was the last one.
+func (t *redisTokenizer) scan() (tok string, typ redisTokenType, done bool) {
+	switch t.state {
+	case redisStateCommand:
+		return t.scanCommand()
+	default:
+		return t.scanArg()
+	}
+}
+
+// next advances the scanner to the next character.
+func (t *redisTokenizer) next() {
+	t.off++
+	if t.off <= len(t.data)-1 {
+		t.ch = t.data[t.off]
+		return
+	}
+	t.done = true
+}
+
+// scanCommand scans a command from the buffer.
+func (t *redisTokenizer) scanCommand() (tok string, typ redisTokenType, done bool) {
+	var (
+		str     strings.Builder
+		started bool
+	)
+	for {
+		t.next()
+		if t.done {
+			return str.String(), typ, t.done
+		}
+		switch t.ch {
+		case ' ':
+			if !started {
+				// skip spaces preceding token
+				t.skipSpace()
+				break
+			}
+			// done scanning command
+			t.state = redisStateArgument
+			t.skipSpace()
+			return str.String(), redisTokenCommand, t.done
+		case '\n':
+			return str.String(), redisTokenCommand, t.done
+		default:
+			str.WriteByte(t.ch)
+		}
+		started = true
+	}
+}
+
+// scanArg scans an argument from the buffer.
+func (t *redisTokenizer) scanArg() (tok string, typ redisTokenType, done bool) {
+	var (
+		str    strings.Builder
+		quoted bool // in quoted string
+		escape bool // escape sequence
+	)
+	for {
+		t.next()
+		if t.done {
+			return str.String(), redisTokenArgument, t.done
+		}
+		switch t.ch {
+		case '\\':
+			str.WriteByte('\\')
+			if !escape {
+				// next character could be escaped
+				escape = true
+				continue
+			}
+		case '\n':
+			if !quoted {
+				// last argument, new command follows
+				t.state = redisStateCommand
+				return str.String(), redisTokenArgument, t.done
+			}
+			str.WriteByte('\n')
+		case '"':
+			str.WriteByte('"')
+			if !escape {
+				// this quote wasn't escaped, toggle quoted mode
+				quoted = !quoted
+			}
+		case ' ':
+			if !quoted {
+				t.skipSpace()
+				return str.String(), redisTokenArgument, t.done
+			}
+			str.WriteByte(' ')
+		default:
+			str.WriteByte(t.ch)
+		}
+		escape = false
+	}
+}
+
+// unread is the reverse of next, unreading a character.
+func (t *redisTokenizer) unread() {
+	if t.off < 1 {
+		return
+	}
+	t.off--
+	t.ch = t.data[t.off]
+}
+
+// skipSpace moves the cursor forward until it meets the last space
+// in a sequence of contiguous spaces.
+func (t *redisTokenizer) skipSpace() {
+	for t.ch == ' ' || t.ch == '\t' || t.ch == '\r' && !t.done {
+		t.next()
+	}
+	if t.ch == '\n' {
+		// next token is a command
+		t.state = redisStateCommand
+	} else {
+		// don't steal the first non-space character
+		t.unread()
+	}
+}
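
To make the state machine above concrete, a hypothetical in-package sketch (e.g. in a _test.go file of package obfuscate, since the tokenizer is unexported) that walks the tokens of a command containing a quoted argument:

package obfuscate

import "fmt"

// demoRedisTokenizer is a hypothetical helper illustrating the tokenizer above.
func demoRedisTokenizer() {
	t := newRedisTokenizer([]byte(`GET "my key"`))
	for {
		tok, typ, done := t.scan()
		// Prints: command: "GET", then: argument: "\"my key\""
		fmt.Printf("%s: %q\n", typ, tok)
		if done {
			break
		}
	}
}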
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql.go
new file mode 100644
index 0000000000..2a3bbdee74
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql.go
@@ -0,0 +1,416 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package obfuscate
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+var questionMark = []byte("?")
+
+// metadataFinderFilter is a filter which attempts to collect metadata from a query, such as comments and tables.
+// It is meant to run before all the other filters.
+type metadataFinderFilter struct {
+	collectTableNames bool
+	collectCommands   bool
+	collectComments   bool
+	replaceDigits     bool
+
+	// size holds the byte size of the metadata collected by the filter.
+	size int64
+	// tablesSeen keeps track of unique table names encountered by the filter.
+	tablesSeen map[string]struct{}
+	// tablesCSV specifies a comma-separated list of tables.
+	tablesCSV strings.Builder
+	// commands keeps track of commands encountered by the filter.
+	commands []string
+	// comments keeps track of comments encountered by the filter.
+	comments []string
+}
+
+func (f *metadataFinderFilter) Filter(token, lastToken TokenKind, buffer []byte) (TokenKind, []byte, error) {
+	if f.collectComments && token == Comment {
+		// A comment with line-breaks will be brought to a single line.
+		comment := strings.TrimSpace(strings.Replace(string(buffer), "\n", " ", -1))
+		f.size += int64(len(comment))
+		f.comments = append(f.comments, comment)
+	}
+	if f.collectCommands {
+		switch token {
+		case Select, Update, Insert, Delete, Join, Alter, Drop, Create, Grant, Revoke, Commit, Begin, Truncate:
+			command := strings.ToUpper(token.String())
+			f.size += int64(len(command))
+			f.commands = append(f.commands, command)
+		}
+	}
+	if f.collectTableNames {
+		switch lastToken {
+		case From, Join:
+			// SELECT ... FROM [tableName]
+			// DELETE FROM [tableName]
+			// ... JOIN [tableName]
+			if r, _ := utf8.DecodeRune(buffer); !unicode.IsLetter(r) {
+				// first character in buffer is not a letter; we might have a nested
+				// query like SELECT * FROM (SELECT ...)
+				break
+			}
+			fallthrough
+		case Update, Into:
+			// UPDATE [tableName]
+			// INSERT INTO [tableName]
+			tableName := string(buffer)
+			if f.replaceDigits {
+				tableNameCopy := make([]byte, len(buffer))
+				copy(tableNameCopy, buffer)
+				tableName = string(replaceDigits(tableNameCopy))
+			}
+			f.storeTableName(tableName)
+			return TableName, buffer, nil
+		}
+	}
+	return token, buffer, nil
+}
+
+func (f *metadataFinderFilter) storeTableName(name string) {
+	if _, ok := f.tablesSeen[name]; ok {
+		return
+	}
+	if f.tablesSeen == nil {
+		f.tablesSeen = make(map[string]struct{}, 1)
+	}
+	f.tablesSeen[name] = struct{}{}
+	if f.tablesCSV.Len() > 0 {
+		f.size++
+		f.tablesCSV.WriteByte(',')
+	}
+	f.size += int64(len(name))
+	f.tablesCSV.WriteString(name)
+}
+
+// Results returns metadata collected by the filter for an SQL statement.
+func (f *metadataFinderFilter) Results() SQLMetadata {
+	return SQLMetadata{
+		Size:      f.size,
+		TablesCSV: f.tablesCSV.String(),
+		Commands:  f.commands,
+		Comments:  f.comments,
+	}
+}
+
+// Reset implements tokenFilter.
+func (f *metadataFinderFilter) Reset() {
+	for k := range f.tablesSeen {
+		delete(f.tablesSeen, k)
+	}
+	f.size = 0
+	f.tablesCSV.Reset()
+	f.commands = f.commands[:0]
+	f.comments = f.comments[:0]
+}
+
+// discardFilter is a token filter which discards certain elements from a query, such as
+// comments and AS aliases by returning a nil buffer.
+type discardFilter struct {
+	keepSQLAlias bool
+}
+
+// Filter the given token so that a `nil` slice is returned if the token is in the token filtered list.
+func (f *discardFilter) Filter(token, lastToken TokenKind, buffer []byte) (TokenKind, []byte, error) {
+	// filters based on previous token
+	switch lastToken {
+	case FilteredBracketedIdentifier:
+		if token != ']' {
+			// we haven't found the closing bracket yet, keep going
+			if token != ID {
+				// the token between the brackets *must* be an identifier,
+				// otherwise the query is invalid.
+				return LexError, nil, fmt.Errorf("expected identifier in bracketed filter, got %d", token)
+			}
+			return FilteredBracketedIdentifier, nil, nil
+		}
+		fallthrough
+	case As:
+		if token == '[' {
+			// the identifier followed by AS is an MSSQL bracketed identifier
+			// and will continue to be discarded until we find the corresponding
+			// closing bracket counter-part. See GitHub issue DataDog/datadog-trace-agent#475.
+			return FilteredBracketedIdentifier, nil, nil
+		}
+		if f.keepSQLAlias {
+			return token, buffer, nil
+		}
+		return Filtered, nil, nil
+	}
+
+	// filters based on the current token; if the next token should be ignored,
+	// return the same token value (not FilteredGroupable) and nil
+	switch token {
+	case Comment:
+		return Filtered, nil, nil
+	case ';':
+		return markFilteredGroupable(token), nil, nil
+	case As:
+		if !f.keepSQLAlias {
+			return As, nil, nil
+		}
+		fallthrough
+	default:
+		return token, buffer, nil
+	}
+}
+
+// Reset implements tokenFilter.
+func (f *discardFilter) Reset() {}
+
+// replaceFilter is a token filter which obfuscates strings and numbers in queries by replacing them
+// with the "?" character.
+type replaceFilter struct {
+	replaceDigits bool
+}
+
+// Filter the given token so that it will be replaced if in the token replacement list
+func (f *replaceFilter) Filter(token, lastToken TokenKind, buffer []byte) (tokenType TokenKind, tokenBytes []byte, err error) {
+	switch lastToken {
+	case Savepoint:
+		return markFilteredGroupable(token), questionMark, nil
+	case '=':
+		switch token {
+		case DoubleQuotedString:
+			// double-quoted strings after assignments are eligible for obfuscation
+			return markFilteredGroupable(token), questionMark, nil
+		}
+	}
+	switch token {
+	case DollarQuotedString, String, Number, Null, Variable, PreparedStatement, BooleanLiteral, EscapeSequence:
+		return markFilteredGroupable(token), questionMark, nil
+	case '?':
+		// Cases like 'ARRAY [ ?, ? ]' should be collapsed into 'ARRAY [ ? ]'
+		return markFilteredGroupable(token), questionMark, nil
+	case TableName, ID:
+		if f.replaceDigits {
+			return token, replaceDigits(buffer), nil
+		}
+		fallthrough
+	default:
+		return token, buffer, nil
+	}
+}
+
+// Reset implements tokenFilter.
+func (f *replaceFilter) Reset() {}
+
+// groupingFilter is a token filter which groups together items replaced by the replaceFilter. It is meant
+// to run immediately after it.
+type groupingFilter struct {
+	groupFilter int // counts the number of values, e.g. 3 = ?, ?, ?
+	groupMulti  int // counts the number of groups, e.g. 2 = (?, ?), (?, ?, ?)
+}
+
+// Filter the given token so that it will be discarded if a grouping pattern
+// has been recognized. A grouping is composed by items like:
+//   - '( ?, ?, ? )'
+//   - '( ?, ? ), ( ?, ? )'
+func (f *groupingFilter) Filter(token, lastToken TokenKind, buffer []byte) (tokenType TokenKind, tokenBytes []byte, err error) {
+	// increasing the number of groups means that we're filtering an entire group
+	// because it can be represented with a single '( ? )'
+	if (lastToken == '(' && isFilteredGroupable(token)) || (token == '(' && f.groupMulti > 0) {
+		f.groupMulti++
+	}
+
+	// Potential commands that could indicate the start of a subquery.
+	isStartOfSubquery := token == Select || token == Delete || token == Update || token == ID
+
+	switch {
+	case f.groupMulti > 0 && lastToken == FilteredGroupableParenthesis && isStartOfSubquery:
+		// this is the start of a new group that seems to be a nested query;
+		// cancel grouping.
+		f.Reset()
+		return token, append([]byte("( "), buffer...), nil
+	case isFilteredGroupable(token):
+		// the previous filter has dropped this token so we should start
+		// counting the group filter so that we accept only one '?' for
+		// the same group
+		f.groupFilter++
+
+		if f.groupFilter > 1 {
+			return markFilteredGroupable(token), nil, nil
+		}
+	case f.groupFilter > 0 && (token == ',' || token == '?'):
+		// if we are in a group drop all commas
+		return markFilteredGroupable(token), nil, nil
+	case f.groupMulti > 1:
+		// drop all tokens since we're in a counting group
+		// and they're duplicated
+		return markFilteredGroupable(token), nil, nil
+	case token != ',' && token != '(' && token != ')' && !isFilteredGroupable(token):
+		// when we're out of a group reset the filter state
+		f.Reset()
+	}
+
+	return token, buffer, nil
+}
+
+// isFilteredGroupable reports whether token is to be considered filtered groupable.
+func isFilteredGroupable(token TokenKind) bool {
+	switch token {
+	case FilteredGroupable, FilteredGroupableParenthesis:
+		return true
+	default:
+		return false
+	}
+}
+
+// markFilteredGroupable returns the appropriate TokenKind to mark this token as
+// filtered groupable.
+func markFilteredGroupable(token TokenKind) TokenKind {
+	switch token {
+	case '(':
+		return FilteredGroupableParenthesis
+	default:
+		return FilteredGroupable
+	}
+}
+
+// Reset resets the groupingFilter so that it may be used again.
+func (f *groupingFilter) Reset() {
+	f.groupFilter = 0
+	f.groupMulti = 0
+}
+
+// ObfuscateSQLString quantizes and obfuscates the given input SQL query string. Quantization removes
+// some elements such as comments and aliases and obfuscation attempts to hide sensitive information
+// in strings and numbers by redacting them.
+func (o *Obfuscator) ObfuscateSQLString(in string) (*ObfuscatedQuery, error) {
+	return o.ObfuscateSQLStringWithOptions(in, &o.opts.SQL)
+}
+
+// ObfuscateSQLStringWithOptions accepts an optional SQLOptions to change the behavior of the obfuscator
+// to quantize and obfuscate the given input SQL query string. Quantization removes some elements such as comments
+// and aliases and obfuscation attempts to hide sensitive information in strings and numbers by redacting them.
+func (o *Obfuscator) ObfuscateSQLStringWithOptions(in string, opts *SQLConfig) (*ObfuscatedQuery, error) {
+	if v, ok := o.queryCache.Get(in); ok {
+		return v.(*ObfuscatedQuery), nil
+	}
+	oq, err := o.obfuscateSQLString(in, opts)
+	if err != nil {
+		return oq, err
+	}
+	o.queryCache.Set(in, oq, oq.Cost())
+	return oq, nil
+}
+
+func (o *Obfuscator) obfuscateSQLString(in string, opts *SQLConfig) (*ObfuscatedQuery, error) {
+	lesc := o.useSQLLiteralEscapes()
+	tok := NewSQLTokenizer(in, lesc, opts)
+	out, err := attemptObfuscation(tok)
+	if err != nil && tok.SeenEscape() {
+		// If the tokenizer failed, but saw an escape character in the process,
+		// try again treating escapes differently
+		tok = NewSQLTokenizer(in, !lesc, opts)
+		if out, err2 := attemptObfuscation(tok); err2 == nil {
+			// If the second attempt succeeded, change the default behavior so that
+			// on the next run we get it right in the first run.
+			o.setSQLLiteralEscapes(!lesc)
+			return out, nil
+		}
+	}
+	return out, err
+}
+
+// ObfuscatedQuery specifies information about an obfuscated SQL query.
+type ObfuscatedQuery struct {
+	Query    string      `json:"query"`    // the obfuscated SQL query
+	Metadata SQLMetadata `json:"metadata"` // metadata extracted from the SQL query
+}
+
+// Cost returns the number of bytes needed to store all the fields
+// of this ObfuscatedQuery.
+func (oq *ObfuscatedQuery) Cost() int64 {
+	return int64(len(oq.Query)) + oq.Metadata.Size
+}
+
+// attemptObfuscation attempts to obfuscate the SQL query loaded into the tokenizer, using the given set of filters.
+func attemptObfuscation(tokenizer *SQLTokenizer) (*ObfuscatedQuery, error) {
+	var (
+		out       = bytes.NewBuffer(make([]byte, 0, len(tokenizer.buf)))
+		err       error
+		lastToken TokenKind
+		metadata  = metadataFinderFilter{
+			collectTableNames: tokenizer.cfg.TableNames,
+			collectCommands:   tokenizer.cfg.CollectCommands,
+			collectComments:   tokenizer.cfg.CollectComments,
+			replaceDigits:     tokenizer.cfg.ReplaceDigits,
+		}
+		discard  = discardFilter{keepSQLAlias: tokenizer.cfg.KeepSQLAlias}
+		replace  = replaceFilter{replaceDigits: tokenizer.cfg.ReplaceDigits}
+		grouping groupingFilter
+	)
+	defer metadata.Reset()
+	// Call Scan() repeatedly while tokens are available, stopping if a LexError is raised.
+	// After retrieving a token, send it through the tokenFilter chain so that the token is
+	// discarded or replaced.
+	for {
+		token, buff := tokenizer.Scan()
+		if token == EndChar {
+			break
+		}
+		if token == LexError {
+			return nil, fmt.Errorf("%v", tokenizer.Err())
+		}
+
+		if token, buff, err = metadata.Filter(token, lastToken, buff); err != nil {
+			return nil, err
+		}
+		if token, buff, err = discard.Filter(token, lastToken, buff); err != nil {
+			return nil, err
+		}
+		if token, buff, err = replace.Filter(token, lastToken, buff); err != nil {
+			return nil, err
+		}
+		if token, buff, err = grouping.Filter(token, lastToken, buff); err != nil {
+			return nil, err
+		}
+		if buff != nil {
+			if out.Len() != 0 {
+				switch token {
+				case ',':
+				case '=':
+					if lastToken == ':' {
+						// do not add a space before an equals if a colon was
+						// present before it.
+						break
+					}
+					fallthrough
+				default:
+					out.WriteRune(' ')
+				}
+			}
+			out.Write(buff)
+		}
+		lastToken = token
+	}
+	if out.Len() == 0 {
+		return nil, errors.New("result is empty")
+	}
+	return &ObfuscatedQuery{
+		Query:    out.String(),
+		Metadata: metadata.Results(),
+	}, nil
+}
+
+// ObfuscateSQLExecPlan obfuscates query conditions in the provided JSON-encoded execution plan. If normalize is true,
+// then cost and row estimates are also obfuscated away.
+func (o *Obfuscator) ObfuscateSQLExecPlan(jsonPlan string, normalize bool) (string, error) {
+	if normalize {
+		return o.sqlExecPlanNormalize.obfuscate([]byte(jsonPlan))
+	}
+	return o.sqlExecPlan.obfuscate([]byte(jsonPlan))
+}
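
A hedged usage sketch of ObfuscateSQLString with the default SQLConfig (hypothetical, not part of the vendored code); literals are collapsed to "?" while keywords and identifiers are preserved:

package main

import (
	"fmt"
	"log"

	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

func main() {
	o := obfuscate.NewObfuscator(obfuscate.Config{})
	defer o.Stop()

	oq, err := o.ObfuscateSQLString("SELECT * FROM users WHERE id = 42 AND name = 'Alice'")
	if err != nil {
		log.Fatal(err)
	}
	// Expected along the lines of:
	// SELECT * FROM users WHERE id = ? AND name = ?
	fmt.Println(oq.Query)
}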
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql_tokenizer.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql_tokenizer.go
new file mode 100644
index 0000000000..f9c1e39bc2
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql_tokenizer.go
@@ -0,0 +1,912 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package obfuscate
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// tokenizer.go implements a lexer-like iterator that tokenizes SQL and CQL
+// strings, so that an external component can filter or alter each token of the
+// string. This implementation can't be used as a real SQL lexer (so a parser
+// cannot build the AST) because many rules are ignored to make the tokenizer
+// simpler.
+// This implementation was inspired by https://github.com/youtube/vitess sql parser
+// TODO: add the license to the NOTICE file
+
+// TokenKind specifies the type of the token being scanned. It may be one of the defined
+// constants below or in some cases the actual rune itself.
+type TokenKind uint32
+
+// EndChar is used to signal that the scanner has finished reading the query. This happens when
+// there are no more characters left in the query or when invalid encoding is discovered. EndChar
+// is an invalid rune value that can not be found in any valid string.
+const EndChar = unicode.MaxRune + 1
+
+// list of available tokens; this list has been reduced because we don't
+// need a full-fledged tokenizer to implement a Lexer
+const (
+	LexError = TokenKind(57346) + iota
+
+	ID
+	Limit
+	Null
+	String
+	DoubleQuotedString
+	DollarQuotedString // https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-DOLLAR-QUOTING
+	DollarQuotedFunc   // a dollar-quoted string delimited by the tag "$func$"; gets special treatment when feature "dollar_quoted_func" is set
+	Number
+	BooleanLiteral
+	ValueArg
+	ListArg
+	Comment
+	Variable
+	Savepoint
+	PreparedStatement
+	EscapeSequence
+	NullSafeEqual
+	LE
+	GE
+	NE
+	Not
+	As
+	Alter
+	Drop
+	Create
+	Grant
+	Revoke
+	Commit
+	Begin
+	Truncate
+	Select
+	From
+	Update
+	Delete
+	Insert
+	Into
+	Join
+	TableName
+	ColonCast
+
+	// PostgreSQL specific JSON operators
+	JSONSelect         // ->
+	JSONSelectText     // ->>
+	JSONSelectPath     // #>
+	JSONSelectPathText // #>>
+	JSONContains       // @>
+	JSONContainsLeft   // <@
+	JSONKeyExists      // ?
+	JSONAnyKeysExist   // ?|
+	JSONAllKeysExist   // ?&
+	JSONDelete         // #-
+
+	// FilteredGroupable specifies that the given token has been discarded by one of the
+	// token filters and that it is groupable together with consecutive FilteredGroupable
+	// tokens.
+	FilteredGroupable
+
+	// FilteredGroupableParenthesis is a parenthesis marked as filtered groupable. It is the
+	// beginning of either a group of values ('(') or a nested query. We track is as
+	// a special case for when it may start a nested query as opposed to just another
+	// value group to be obfuscated.
+	FilteredGroupableParenthesis
+
+	// Filtered specifies that the token is a comma and was discarded by one
+	// of the filters.
+	Filtered
+
+	// FilteredBracketedIdentifier specifies that we are currently discarding
+	// a bracketed identifier (MSSQL).
+	// See issue https://github.com/DataDog/datadog-trace-agent/issues/475.
+	FilteredBracketedIdentifier
+)
+
+var tokenKindStrings = map[TokenKind]string{
+	LexError:                     "LexError",
+	ID:                           "ID",
+	Limit:                        "Limit",
+	Null:                         "Null",
+	String:                       "String",
+	DoubleQuotedString:           "DoubleQuotedString",
+	DollarQuotedString:           "DollarQuotedString",
+	DollarQuotedFunc:             "DollarQuotedFunc",
+	Number:                       "Number",
+	BooleanLiteral:               "BooleanLiteral",
+	ValueArg:                     "ValueArg",
+	ListArg:                      "ListArg",
+	Comment:                      "Comment",
+	Variable:                     "Variable",
+	Savepoint:                    "Savepoint",
+	PreparedStatement:            "PreparedStatement",
+	EscapeSequence:               "EscapeSequence",
+	NullSafeEqual:                "NullSafeEqual",
+	LE:                           "LE",
+	GE:                           "GE",
+	NE:                           "NE",
+	Not:                          "NOT",
+	As:                           "As",
+	Alter:                        "Alter",
+	Drop:                         "Drop",
+	Create:                       "Create",
+	Grant:                        "Grant",
+	Revoke:                       "Revoke",
+	Commit:                       "Commit",
+	Begin:                        "Begin",
+	Truncate:                     "Truncate",
+	Select:                       "Select",
+	From:                         "From",
+	Update:                       "Update",
+	Delete:                       "Delete",
+	Insert:                       "Insert",
+	Into:                         "Into",
+	Join:                         "Join",
+	TableName:                    "TableName",
+	ColonCast:                    "ColonCast",
+	FilteredGroupable:            "FilteredGroupable",
+	FilteredGroupableParenthesis: "FilteredGroupableParenthesis",
+	Filtered:                     "Filtered",
+	FilteredBracketedIdentifier:  "FilteredBracketedIdentifier",
+	JSONSelect:                   "JSONSelect",
+	JSONSelectText:               "JSONSelectText",
+	JSONSelectPath:               "JSONSelectPath",
+	JSONSelectPathText:           "JSONSelectPathText",
+	JSONContains:                 "JSONContains",
+	JSONContainsLeft:             "JSONContainsLeft",
+	JSONKeyExists:                "JSONKeyExists",
+	JSONAnyKeysExist:             "JSONAnyKeysExist",
+	JSONAllKeysExist:             "JSONAllKeysExist",
+	JSONDelete:                   "JSONDelete",
+}
+
+func (k TokenKind) String() string {
+	str, ok := tokenKindStrings[k]
+	if !ok {
+		return "<unknown>"
+	}
+	return str
+}
+
+const (
+	// DBMSSQLServer is a MS SQL Server
+	DBMSSQLServer = "mssql"
+	// DBMSPostgres is a PostgreSQL Server
+	DBMSPostgres = "postgresql"
+)
+
+const escapeCharacter = '\\'
+
+// SQLTokenizer is the struct used to generate SQL
+// tokens for the parser.
+type SQLTokenizer struct {
+	pos      int    // byte offset of lastChar
+	lastChar rune   // last read rune
+	buf      []byte // buf holds the query that we are parsing
+	off      int    // off is the index into buf where the unread portion of the query begins.
+	err      error  // any error occurred while reading
+
+	curlys uint32 // number of active open curly braces in top-level SQL escape sequences.
+
+	literalEscapes bool // indicates we should not treat backslashes as escape characters
+	seenEscape     bool // indicates whether this tokenizer has seen an escape character within a string
+
+	cfg *SQLConfig
+}
+
+// NewSQLTokenizer creates a new SQLTokenizer for the given SQL string. The literalEscapes argument specifies
+// whether escape characters should be treated literally or as escape sequences.
+func NewSQLTokenizer(sql string, literalEscapes bool, cfg *SQLConfig) *SQLTokenizer {
+	if cfg == nil {
+		cfg = new(SQLConfig)
+	}
+	return &SQLTokenizer{
+		buf:            []byte(sql),
+		cfg:            cfg,
+		literalEscapes: literalEscapes,
+	}
+}
+
+// Reset the underlying buffer and positions
+func (tkn *SQLTokenizer) Reset(in string) {
+	tkn.pos = 0
+	tkn.lastChar = 0
+	tkn.buf = []byte(in)
+	tkn.off = 0
+	tkn.err = nil
+}
+
+// keywords used to recognize string tokens
+var keywords = map[string]TokenKind{
+	"NULL":      Null,
+	"TRUE":      BooleanLiteral,
+	"FALSE":     BooleanLiteral,
+	"SAVEPOINT": Savepoint,
+	"LIMIT":     Limit,
+	"AS":        As,
+	"ALTER":     Alter,
+	"CREATE":    Create,
+	"GRANT":     Grant,
+	"REVOKE":    Revoke,
+	"COMMIT":    Commit,
+	"BEGIN":     Begin,
+	"TRUNCATE":  Truncate,
+	"DROP":      Drop,
+	"SELECT":    Select,
+	"FROM":      From,
+	"UPDATE":    Update,
+	"DELETE":    Delete,
+	"INSERT":    Insert,
+	"INTO":      Into,
+	"JOIN":      Join,
+}
+
+// Err returns the last error that the tokenizer encountered, or nil.
+func (tkn *SQLTokenizer) Err() error { return tkn.err }
+
+func (tkn *SQLTokenizer) setErr(format string, args ...interface{}) {
+	if tkn.err != nil {
+		return
+	}
+	tkn.err = fmt.Errorf("at position %d: %v", tkn.pos, fmt.Errorf(format, args...))
+}
+
+// SeenEscape returns whether or not this tokenizer has seen an escape character within a scanned string
+func (tkn *SQLTokenizer) SeenEscape() bool { return tkn.seenEscape }
+
+// Scan scans the tokenizer for the next token and returns
+// the token type and the token buffer.
+func (tkn *SQLTokenizer) Scan() (TokenKind, []byte) {
+	if tkn.lastChar == 0 {
+		tkn.advance()
+	}
+	tkn.SkipBlank()
+
+	switch ch := tkn.lastChar; {
+	case isLeadingLetter(ch) &&
+		!(tkn.cfg.DBMS == DBMSPostgres && ch == '@'):
+		// The '@' symbol should not be considered part of an identifier in
+		// postgres, so we skip this in the case where the DBMS is postgres
+		// and ch is '@'.
+		return tkn.scanIdentifier()
+	case isDigit(ch):
+		return tkn.scanNumber(false)
+	default:
+		tkn.advance()
+		if tkn.lastChar == EndChar && tkn.err != nil {
+			// advance discovered an invalid encoding. We should return early.
+			return LexError, nil
+		}
+		switch ch {
+		case EndChar:
+			if tkn.err != nil {
+				return LexError, nil
+			}
+			return EndChar, nil
+		case ':':
+			if tkn.lastChar == ':' {
+				tkn.advance()
+				return ColonCast, []byte("::")
+			}
+			if unicode.IsSpace(tkn.lastChar) {
+				// example scenario: "autovacuum: VACUUM ANALYZE fake.table"
+				return TokenKind(ch), tkn.bytes()
+			}
+			if tkn.lastChar != '=' {
+				return tkn.scanBindVar()
+			}
+			fallthrough
+		case '~':
+			switch tkn.lastChar {
+			case '*':
+				tkn.advance()
+				return TokenKind('~'), []byte("~*")
+			default:
+				return TokenKind(ch), tkn.bytes()
+			}
+		case '?':
+			if tkn.cfg.DBMS == DBMSPostgres {
+				switch tkn.lastChar {
+				case '|':
+					tkn.advance()
+					return JSONAnyKeysExist, []byte("?|")
+				case '&':
+					tkn.advance()
+					return JSONAllKeysExist, []byte("?&")
+				default:
+					return JSONKeyExists, tkn.bytes()
+				}
+			}
+			fallthrough
+		case '=', ',', ';', '(', ')', '+', '*', '&', '|', '^', ']':
+			return TokenKind(ch), tkn.bytes()
+		case '[':
+			if tkn.cfg.DBMS == DBMSSQLServer {
+				return tkn.scanString(']', DoubleQuotedString)
+			}
+			return TokenKind(ch), tkn.bytes()
+		case '.':
+			if isDigit(tkn.lastChar) {
+				return tkn.scanNumber(true)
+			}
+			return TokenKind(ch), tkn.bytes()
+		case '/':
+			switch tkn.lastChar {
+			case '/':
+				tkn.advance()
+				return tkn.scanCommentType1("//")
+			case '*':
+				tkn.advance()
+				return tkn.scanCommentType2()
+			default:
+				return TokenKind(ch), tkn.bytes()
+			}
+		case '-':
+			switch {
+			case tkn.lastChar == '-':
+				tkn.advance()
+				return tkn.scanCommentType1("--")
+			case tkn.lastChar == '>':
+				if tkn.cfg.DBMS == DBMSPostgres {
+					tkn.advance()
+					switch tkn.lastChar {
+					case '>':
+						tkn.advance()
+						return JSONSelectText, []byte("->>")
+					default:
+						return JSONSelect, []byte("->")
+					}
+				}
+				fallthrough
+			case isDigit(tkn.lastChar):
+				return tkn.scanNumber(false)
+			case tkn.lastChar == '.':
+				tkn.advance()
+				if isDigit(tkn.lastChar) {
+					return tkn.scanNumber(true)
+				}
+				tkn.lastChar = '.'
+				tkn.pos--
+				fallthrough
+			default:
+				return TokenKind(ch), tkn.bytes()
+			}
+		case '#':
+			switch tkn.cfg.DBMS {
+			case DBMSSQLServer:
+				return tkn.scanIdentifier()
+			case DBMSPostgres:
+				switch tkn.lastChar {
+				case '>':
+					tkn.advance()
+					switch tkn.lastChar {
+					case '>':
+						tkn.advance()
+						return JSONSelectPathText, []byte("#>>")
+					default:
+						return JSONSelectPath, []byte("#>")
+					}
+				case '-':
+					tkn.advance()
+					return JSONDelete, []byte("#-")
+				default:
+					return TokenKind(ch), tkn.bytes()
+				}
+			default:
+				tkn.advance()
+				return tkn.scanCommentType1("#")
+			}
+		case '<':
+			switch tkn.lastChar {
+			case '>':
+				tkn.advance()
+				return NE, []byte("<>")
+			case '=':
+				tkn.advance()
+				switch tkn.lastChar {
+				case '>':
+					tkn.advance()
+					return NullSafeEqual, []byte("<=>")
+				default:
+					return LE, []byte("<=")
+				}
+			case '@':
+				if tkn.cfg.DBMS == DBMSPostgres {
+					// check for JSONContainsLeft (<@)
+					tkn.advance()
+					return JSONContainsLeft, []byte("<@")
+				}
+				fallthrough
+			default:
+				return TokenKind(ch), tkn.bytes()
+			}
+		case '>':
+			if tkn.lastChar == '=' {
+				tkn.advance()
+				return GE, []byte(">=")
+			}
+			return TokenKind(ch), tkn.bytes()
+		case '!':
+			switch tkn.lastChar {
+			case '=':
+				tkn.advance()
+				return NE, []byte("!=")
+			case '~':
+				tkn.advance()
+				switch tkn.lastChar {
+				case '*':
+					tkn.advance()
+					return NE, []byte("!~*")
+				default:
+					return NE, []byte("!~")
+				}
+			default:
+				if isValidCharAfterOperator(tkn.lastChar) {
+					return Not, tkn.bytes()
+				}
+				tkn.setErr(`unexpected char "%c" (%d) after "!"`, tkn.lastChar, tkn.lastChar)
+				return LexError, tkn.bytes()
+			}
+		case '\'':
+			return tkn.scanString(ch, String)
+		case '"':
+			return tkn.scanString(ch, DoubleQuotedString)
+		case '`':
+			return tkn.scanString(ch, ID)
+		case '%':
+			if tkn.lastChar == '(' {
+				return tkn.scanVariableIdentifier('%')
+			}
+			if isLetter(tkn.lastChar) {
+				// format parameter (e.g. '%s')
+				return tkn.scanFormatParameter('%')
+			}
+			// modulo operator (e.g. 'id % 8')
+			return TokenKind(ch), tkn.bytes()
+		case '$':
+			if isDigit(tkn.lastChar) {
+				// TODO(gbbr): the first digit after $ does not necessarily guarantee
+				// that this isn't a dollar-quoted string constant. We might eventually
+				// want to cover for this use-case too (e.g. $1$some text$1$).
+				return tkn.scanPreparedStatement('$')
+			}
+			kind, tok := tkn.scanDollarQuotedString()
+			if kind == DollarQuotedFunc {
+				// this is considered an embedded query, we should try and
+				// obfuscate it
+				out, err := attemptObfuscation(NewSQLTokenizer(string(tok), tkn.literalEscapes, tkn.cfg))
+				if err != nil {
+					// if we can't obfuscate it, treat it as a regular string
+					return DollarQuotedString, tok
+				}
+				tok = append(append([]byte("$func$"), []byte(out.Query)...), []byte("$func$")...)
+			}
+			return kind, tok
+		case '@':
+			if tkn.cfg.DBMS == DBMSPostgres {
+				// For postgres the @ symbol is reserved as an operator
+				// https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-OPERATORS
+				// And is used as a json operator
+				// https://www.postgresql.org/docs/9.5/functions-json.html
+				switch tkn.lastChar {
+				case '>':
+					tkn.advance()
+					return JSONContains, []byte("@>")
+				default:
+					return TokenKind(ch), tkn.bytes()
+				}
+			}
+			fallthrough
+		case '{':
+			if tkn.pos == 1 || tkn.curlys > 0 {
+				// Do not fully obfuscate top-level SQL escape sequences like {{[?=]call procedure-name[([parameter][,parameter]...)]}.
+				// We want these to display a bit more context than just a plain '?'
+				// See: https://docs.oracle.com/cd/E13157_01/wlevs/docs30/jdbc_drivers/sqlescape.html
+				tkn.curlys++
+				return TokenKind(ch), tkn.bytes()
+			}
+			return tkn.scanEscapeSequence('{')
+		case '}':
+			if tkn.curlys == 0 {
+				// A closing curly brace has no place outside an in-progress top-level SQL escape sequence
+				// started by the '{' switch-case.
+				tkn.setErr(`unexpected byte %d`, ch)
+				return LexError, tkn.bytes()
+			}
+			tkn.curlys--
+			return TokenKind(ch), tkn.bytes()
+		default:
+			tkn.setErr(`unexpected byte %d`, ch)
+			return LexError, tkn.bytes()
+		}
+	}
+}
+
+// SkipBlank moves the tokenizer forward until hitting a non-whitespace character.
+// The whitespace definition used here is the same as unicode.IsSpace
+func (tkn *SQLTokenizer) SkipBlank() {
+	for unicode.IsSpace(tkn.lastChar) {
+		tkn.advance()
+	}
+	tkn.bytes()
+}
+
+// toUpper is a modified version of bytes.ToUpper. It returns an upper-cased version of the byte
+// slice src with all Unicode letters mapped to their upper case. It is modified to also accept a
+// byte slice dst as an argument, the underlying storage of which (up to the capacity of dst)
+// will be used as the destination of the upper-case copy of src, if it fits. As a special case,
+// toUpper will return src if the byte slice is already upper-case. This function is used rather
+// than bytes.ToUpper to improve the memory performance of the obfuscator by saving unnecessary
+// allocations happening in bytes.ToUpper
+func toUpper(src, dst []byte) []byte {
+	dst = dst[:0]
+	isASCII, hasLower := true, false
+	for i := 0; i < len(src); i++ {
+		c := src[i]
+		if c >= utf8.RuneSelf {
+			isASCII = false
+			break
+		}
+		hasLower = hasLower || ('a' <= c && c <= 'z')
+	}
+	if cap(dst) < len(src) {
+		dst = make([]byte, 0, len(src))
+	}
+	if isASCII { // optimize for ASCII-only byte slices.
+		if !hasLower {
+			// Just return src.
+			return src
+		}
+		dst = dst[:len(src)]
+		for i := 0; i < len(src); i++ {
+			c := src[i]
+			if 'a' <= c && c <= 'z' {
+				c -= 'a' - 'A'
+			}
+			dst[i] = c
+		}
+		return dst
+	}
+	// This *could* be optimized, but it's an uncommon case.
+	return bytes.Map(unicode.ToUpper, src)
+}
+
+func (tkn *SQLTokenizer) scanIdentifier() (TokenKind, []byte) {
+	tkn.advance()
+	for isLetter(tkn.lastChar) || isDigit(tkn.lastChar) || strings.ContainsRune(".*$", tkn.lastChar) {
+		tkn.advance()
+	}
+
+	t := tkn.bytes()
+	// Space allows us to upper-case identifiers 256 bytes long or less without allocating heap
+	// storage for them, since space is allocated on the stack. A size of 256 bytes was chosen
+	// based on the allowed length of sql identifiers in various sql implementations.
+	var space [256]byte
+	upper := toUpper(t, space[:0])
+	if keywordID, found := keywords[string(upper)]; found {
+		return keywordID, t
+	}
+	return ID, t
+}
+
+func (tkn *SQLTokenizer) scanVariableIdentifier(prefix rune) (TokenKind, []byte) {
+	for tkn.advance(); tkn.lastChar != ')' && tkn.lastChar != EndChar; tkn.advance() {
+	}
+	tkn.advance()
+	if !isLetter(tkn.lastChar) {
+		tkn.setErr(`invalid character after variable identifier: "%c" (%d)`, tkn.lastChar, tkn.lastChar)
+		return LexError, tkn.bytes()
+	}
+	tkn.advance()
+	return Variable, tkn.bytes()
+}
+
+func (tkn *SQLTokenizer) scanFormatParameter(prefix rune) (TokenKind, []byte) {
+	tkn.advance()
+	return Variable, tkn.bytes()
+}
+
+// scanDollarQuotedString scans a Postgres dollar-quoted string constant.
+// See: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-DOLLAR-QUOTING
+func (tkn *SQLTokenizer) scanDollarQuotedString() (TokenKind, []byte) {
+	kind, tag := tkn.scanString('$', String)
+	if kind == LexError {
+		return kind, tkn.bytes()
+	}
+	var (
+		got int
+		buf bytes.Buffer
+	)
+	delim := tag
+	// on empty strings, tkn.scanString returns the delimiters
+	if string(delim) != "$$" {
+		// on non-empty strings, the delimiter is $tag$
+		delim = append([]byte{'$'}, delim...)
+		delim = append(delim, '$')
+	}
+	for {
+		ch := tkn.lastChar
+		tkn.advance()
+		if ch == EndChar {
+			tkn.setErr("unexpected EOF in dollar-quoted string")
+			return LexError, buf.Bytes()
+		}
+		if byte(ch) == delim[got] {
+			got++
+			if got == len(delim) {
+				break
+			}
+			continue
+		}
+		if got > 0 {
+			_, err := buf.Write(delim[:got])
+			if err != nil {
+				tkn.setErr("error reading dollar-quoted string: %v", err)
+				return LexError, buf.Bytes()
+			}
+			got = 0
+		}
+		buf.WriteRune(ch)
+	}
+	if tkn.cfg.DollarQuotedFunc && string(delim) == "$func$" {
+		return DollarQuotedFunc, buf.Bytes()
+	}
+	return DollarQuotedString, buf.Bytes()
+}
+
+func (tkn *SQLTokenizer) scanPreparedStatement(prefix rune) (TokenKind, []byte) {
+	// a prepared statement expects a digit identifier like $1
+	if !isDigit(tkn.lastChar) {
+		tkn.setErr(`prepared statements must start with digits, got "%c" (%d)`, tkn.lastChar, tkn.lastChar)
+		return LexError, tkn.bytes()
+	}
+
+	// scanNumber keeps the prefix rune intact.
+	// read numbers and return an error if any
+	token, buff := tkn.scanNumber(false)
+	if token == LexError {
+		tkn.setErr("invalid number")
+		return LexError, tkn.bytes()
+	}
+	return PreparedStatement, buff
+}
+
+func (tkn *SQLTokenizer) scanEscapeSequence(braces rune) (TokenKind, []byte) {
+	for tkn.lastChar != '}' && tkn.lastChar != EndChar {
+		tkn.advance()
+	}
+
+	// we've reached the end of the string without finding
+	// the closing curly braces
+	if tkn.lastChar == EndChar {
+		tkn.setErr("unexpected EOF in escape sequence")
+		return LexError, tkn.bytes()
+	}
+
+	tkn.advance()
+	return EscapeSequence, tkn.bytes()
+}
+
+func (tkn *SQLTokenizer) scanBindVar() (TokenKind, []byte) {
+	token := ValueArg
+	if tkn.lastChar == ':' {
+		token = ListArg
+		tkn.advance()
+	}
+	if !isLetter(tkn.lastChar) && !isDigit(tkn.lastChar) {
+		tkn.setErr(`bind variables should start with letters or digits, got "%c" (%d)`, tkn.lastChar, tkn.lastChar)
+		return LexError, tkn.bytes()
+	}
+	for isLetter(tkn.lastChar) || isDigit(tkn.lastChar) || tkn.lastChar == '.' {
+		tkn.advance()
+	}
+	return token, tkn.bytes()
+}
+
+func (tkn *SQLTokenizer) scanMantissa(base int) {
+	for digitVal(tkn.lastChar) < base {
+		tkn.advance()
+	}
+}
+
+func (tkn *SQLTokenizer) scanNumber(seenDecimalPoint bool) (TokenKind, []byte) {
+	if seenDecimalPoint {
+		tkn.scanMantissa(10)
+		goto exponent
+	}
+
+	if tkn.lastChar == '0' {
+		// int or float
+		tkn.advance()
+		if tkn.lastChar == 'x' || tkn.lastChar == 'X' {
+			// hexadecimal int
+			tkn.advance()
+			tkn.scanMantissa(16)
+		} else {
+			// octal int or float
+			tkn.scanMantissa(8)
+			if tkn.lastChar == '8' || tkn.lastChar == '9' {
+				tkn.scanMantissa(10)
+			}
+			if tkn.lastChar == '.' || tkn.lastChar == 'e' || tkn.lastChar == 'E' {
+				goto fraction
+			}
+		}
+		goto exit
+	}
+
+	// decimal int or float
+	tkn.scanMantissa(10)
+
+fraction:
+	if tkn.lastChar == '.' {
+		tkn.advance()
+		tkn.scanMantissa(10)
+	}
+
+exponent:
+	if tkn.lastChar == 'e' || tkn.lastChar == 'E' {
+		tkn.advance()
+		if tkn.lastChar == '+' || tkn.lastChar == '-' {
+			tkn.advance()
+		}
+		tkn.scanMantissa(10)
+	}
+
+exit:
+	t := tkn.bytes()
+	if len(t) == 0 {
+		tkn.setErr("Parse error: ended up with zero-length number.")
+		return LexError, nil
+	}
+	return Number, t
+}
+
+func (tkn *SQLTokenizer) scanString(delim rune, kind TokenKind) (TokenKind, []byte) {
+	buf := bytes.NewBuffer(tkn.buf[:0])
+	for {
+		ch := tkn.lastChar
+		tkn.advance()
+		if ch == delim {
+			if tkn.lastChar == delim {
+				// doubling a delimiter is the default way to embed the delimiter within a string
+				tkn.advance()
+			} else {
+				// a single delimiter denotes the end of the string
+				break
+			}
+		} else if ch == escapeCharacter {
+			tkn.seenEscape = true
+
+			if !tkn.literalEscapes {
+				// treat as an escape character
+				ch = tkn.lastChar
+				tkn.advance()
+			}
+		}
+		if ch == EndChar {
+			tkn.setErr("unexpected EOF in string")
+			return LexError, buf.Bytes()
+		}
+		buf.WriteRune(ch)
+	}
+	if kind == ID && buf.Len() == 0 || bytes.IndexFunc(buf.Bytes(), func(r rune) bool { return !unicode.IsSpace(r) }) == -1 {
+		// This string is an empty or white-space only identifier.
+		// We should keep the start and end delimiters in order to
+		// avoid creating invalid queries.
+		// See: https://github.com/DataDog/datadog-trace-agent/issues/316
+		return kind, append(runeBytes(delim), runeBytes(delim)...)
+	}
+	return kind, buf.Bytes()
+}
+
+func (tkn *SQLTokenizer) scanCommentType1(prefix string) (TokenKind, []byte) {
+	for tkn.lastChar != EndChar {
+		if tkn.lastChar == '\n' {
+			tkn.advance()
+			break
+		}
+		tkn.advance()
+	}
+	return Comment, tkn.bytes()
+}
+
+func (tkn *SQLTokenizer) scanCommentType2() (TokenKind, []byte) {
+	for {
+		if tkn.lastChar == '*' {
+			tkn.advance()
+			if tkn.lastChar == '/' {
+				tkn.advance()
+				break
+			}
+			continue
+		}
+		if tkn.lastChar == EndChar {
+			tkn.setErr("unexpected EOF in comment")
+			return LexError, tkn.bytes()
+		}
+		tkn.advance()
+	}
+	return Comment, tkn.bytes()
+}
+
+// advance advances the tokenizer to the next rune. If the decoder encounters an error decoding, or
+// the end of the buffer is reached, tkn.lastChar will be set to EndChar. In case of a decoding
+// error, tkn.err will also be set.
+func (tkn *SQLTokenizer) advance() {
+	ch, n := utf8.DecodeRune(tkn.buf[tkn.off:])
+	if ch == utf8.RuneError && n < 2 {
+		tkn.pos++
+		tkn.lastChar = EndChar
+		if n == 1 {
+			tkn.setErr("invalid UTF-8 encoding beginning with 0x%x", tkn.buf[tkn.off])
+		}
+		return
+	}
+	if tkn.lastChar != 0 || tkn.pos > 0 {
+		// we are past the first character
+		tkn.pos += n
+	}
+	tkn.off += n
+	tkn.lastChar = ch
+}
+
+// bytes returns all the bytes that were advanced over since its last call.
+// This excludes tkn.lastChar, which will remain in the buffer
+func (tkn *SQLTokenizer) bytes() []byte {
+	if tkn.lastChar == EndChar {
+		ret := tkn.buf[:tkn.off]
+		tkn.buf = tkn.buf[tkn.off:]
+		tkn.off = 0
+		return ret
+	}
+	lastLen := utf8.RuneLen(tkn.lastChar)
+	ret := tkn.buf[:tkn.off-lastLen]
+	tkn.buf = tkn.buf[tkn.off-lastLen:]
+	tkn.off = lastLen
+	return ret
+}
+
+// Position exports the tokenizer's current position in the query
+func (tkn *SQLTokenizer) Position() int {
+	return tkn.pos
+}
+
+func isLeadingLetter(ch rune) bool {
+	return unicode.IsLetter(ch) || ch == '_' || ch == '@'
+}
+
+func isLetter(ch rune) bool {
+	return isLeadingLetter(ch) || ch == '#'
+}
+
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch) - '0'
+	case 'a' <= ch && ch <= 'f':
+		return int(ch) - 'a' + 10
+	case 'A' <= ch && ch <= 'F':
+		return int(ch) - 'A' + 10
+	}
+	return 16 // larger than any legal digit val
+}
+
+func isDigit(ch rune) bool { return '0' <= ch && ch <= '9' }
+
+// runeBytes converts the given rune to a slice of bytes.
+func runeBytes(r rune) []byte {
+	buf := make([]byte, utf8.UTFMax)
+	n := utf8.EncodeRune(buf, r)
+	return buf[:n]
+}
+
+// isValidCharAfterOperator returns true if c is a valid character after an operator
+func isValidCharAfterOperator(c rune) bool {
+	return c == '(' || c == '`' || c == '\'' || c == '"' || c == '+' || c == '-' || unicode.IsSpace(c) || isLetter(c) || isDigit(c)
+}
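
The tokenizer can also be driven directly; a hypothetical sketch of the Scan loop (mirroring how attemptObfuscation consumes it), assuming the import path shown in the diff:

package main

import (
	"fmt"
	"log"

	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

func main() {
	tkn := obfuscate.NewSQLTokenizer("SELECT id FROM users", false, nil)
	for {
		kind, buf := tkn.Scan()
		if kind == obfuscate.EndChar {
			break
		}
		if kind == obfuscate.LexError {
			log.Fatal(tkn.Err())
		}
		// e.g. Select "SELECT", ID "id", From "FROM", ID "users"
		fmt.Printf("%s %q\n", kind, buf)
	}
}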
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/LICENSE
@@ -0,0 +1,200 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016-present Datadog, Inc.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/configs.go b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/configs.go
new file mode 100644
index 0000000000..0df551cc96
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/configs.go
@@ -0,0 +1,479 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022-present Datadog, Inc.
+
+package state
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/DataDog/go-tuf/data"
+)
+
+/*
+	To add support for a new product:
+
+	1. Add the definition of the product to the const() block of products and the `allProducts` list.
+	2. Define the serialized configuration struct as well as a function to parse the config from a []byte.
+	3. Add the product to the `parseConfig` function.
+	4. Add a method on the `Repository` to retrieve typed configs for the product.
+*/
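For illustration, here is a hedged sketch of what those four steps could look like for a hypothetical product; the PRODUCT_EXAMPLE name, ConfigExample type, parseConfigExample helper and ExampleConfigs method are all invented for this sketch and are not part of the vendored file.

// step 1: define the constant (and append ProductExample to allProducts).
const ProductExample = "PRODUCT_EXAMPLE"

// step 2: the serialized configuration struct and its parse function.
type ConfigExample struct {
	Config   []byte
	Metadata Metadata
}

func parseConfigExample(data []byte, metadata Metadata) (ConfigExample, error) {
	return ConfigExample{Config: data, Metadata: metadata}, nil
}

// step 3: add `case ProductExample: c, err = parseConfigExample(raw, metadata)` to parseConfig.

// step 4: a typed accessor on the Repository.
func (r *Repository) ExampleConfigs() map[string]ConfigExample {
	typed := make(map[string]ConfigExample)
	for path, conf := range r.getConfigs(ProductExample) {
		if c, ok := conf.(ConfigExample); ok {
			typed[path] = c
		}
	}
	return typed
}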
+
+var allProducts = []string{ProductAPMSampling, ProductCWSDD, ProductCWSCustom, ProductCWSProfiles, ProductASM, ProductASMFeatures, ProductASMDD, ProductASMData, ProductAPMTracing}
+
+const (
+	// ProductAPMSampling is the apm sampling product
+	ProductAPMSampling = "APM_SAMPLING"
+	// ProductCWSDD is the cloud workload security product managed by datadog employees
+	ProductCWSDD = "CWS_DD"
+	// ProductCWSCustom is the cloud workload security product managed by datadog customers
+	ProductCWSCustom = "CWS_CUSTOM"
+	// ProductCWSProfiles is the cloud workload security profiles product
+	ProductCWSProfiles = "CWS_SECURITY_PROFILES"
+	// ProductASM is the ASM product used by customers to issue rules configurations
+	ProductASM = "ASM"
+	// ProductASMFeatures is the ASM product used for ASM activation through remote config
+	ProductASMFeatures = "ASM_FEATURES"
+	// ProductASMDD is the application security monitoring product managed by datadog employees
+	ProductASMDD = "ASM_DD"
+	// ProductASMData is the ASM product used to configure WAF rules data
+	ProductASMData = "ASM_DATA"
+	// ProductAPMTracing is the apm tracing product
+	ProductAPMTracing = "APM_TRACING"
+)
+
+// ErrNoConfigVersion occurs when a target file's custom meta is missing the config version
+var ErrNoConfigVersion = errors.New("version missing in custom file meta")
+
+func parseConfig(product string, raw []byte, metadata Metadata) (interface{}, error) {
+	var c interface{}
+	var err error
+	switch product {
+	case ProductAPMSampling:
+		c, err = parseConfigAPMSampling(raw, metadata)
+	case ProductASMFeatures:
+		c, err = parseASMFeaturesConfig(raw, metadata)
+	case ProductCWSDD:
+		c, err = parseConfigCWSDD(raw, metadata)
+	case ProductCWSCustom:
+		c, err = parseConfigCWSCustom(raw, metadata)
+	case ProductCWSProfiles:
+		c, err = parseConfigCWSProfiles(raw, metadata)
+	case ProductASM:
+		c, err = parseConfigASM(raw, metadata)
+	case ProductASMDD:
+		c, err = parseConfigASMDD(raw, metadata)
+	case ProductASMData:
+		c, err = parseConfigASMData(raw, metadata)
+	case ProductAPMTracing:
+		c, err = parseConfigAPMTracing(raw, metadata)
+	default:
+		return nil, fmt.Errorf("unknown product - %s", product)
+	}
+
+	return c, err
+}
+
+// APMSamplingConfig is a deserialized APM Sampling configuration file
+// along with its associated remote config metadata.
+type APMSamplingConfig struct {
+	Config   []byte
+	Metadata Metadata
+}
+
+func parseConfigAPMSampling(data []byte, metadata Metadata) (APMSamplingConfig, error) {
+	// We actually don't parse the payload here, we delegate this responsibility to the trace agent
+	return APMSamplingConfig{
+		Config:   data,
+		Metadata: metadata,
+	}, nil
+}
+
+// APMConfigs returns the currently active APM configs
+func (r *Repository) APMConfigs() map[string]APMSamplingConfig {
+	typedConfigs := make(map[string]APMSamplingConfig)
+
+	configs := r.getConfigs(ProductAPMSampling)
+
+	for path, conf := range configs {
+		// We control this, so if this has gone wrong something has gone horribly wrong
+		typed, ok := conf.(APMSamplingConfig)
+		if !ok {
+			panic("unexpected config stored as APMSamplingConfig")
+		}
+
+		typedConfigs[path] = typed
+	}
+
+	return typedConfigs
+}
+
+// ConfigCWSDD is a deserialized CWS DD configuration file along with its
+// associated remote config metadata
+type ConfigCWSDD struct {
+	Config   []byte
+	Metadata Metadata
+}
+
+func parseConfigCWSDD(data []byte, metadata Metadata) (ConfigCWSDD, error) {
+	return ConfigCWSDD{
+		Config:   data,
+		Metadata: metadata,
+	}, nil
+}
+
+// CWSDDConfigs returns the currently active CWSDD config files
+func (r *Repository) CWSDDConfigs() map[string]ConfigCWSDD {
+	typedConfigs := make(map[string]ConfigCWSDD)
+
+	configs := r.getConfigs(ProductCWSDD)
+
+	for path, conf := range configs {
+		// We control this, so if this has gone wrong something has gone horribly wrong
+		typed, ok := conf.(ConfigCWSDD)
+		if !ok {
+			panic("unexpected config stored as CWSDD Config")
+		}
+
+		typedConfigs[path] = typed
+	}
+
+	return typedConfigs
+}
+
+// ConfigCWSCustom is a deserialized CWS Custom configuration file along with its
+// associated remote config metadata
+type ConfigCWSCustom struct {
+	Config   []byte
+	Metadata Metadata
+}
+
+func parseConfigCWSCustom(data []byte, metadata Metadata) (ConfigCWSCustom, error) {
+	return ConfigCWSCustom{
+		Config:   data,
+		Metadata: metadata,
+	}, nil
+}
+
+// CWSCustomConfigs returns the currently active CWSCustom config files
+func (r *Repository) CWSCustomConfigs() map[string]ConfigCWSCustom {
+	typedConfigs := make(map[string]ConfigCWSCustom)
+
+	configs := r.getConfigs(ProductCWSCustom)
+
+	for path, conf := range configs {
+		// We control this, so if this has gone wrong something has gone horribly wrong
+		typed, ok := conf.(ConfigCWSCustom)
+		if !ok {
+			panic("unexpected config stored as CWS_CUSTOM Config")
+		}
+
+		typedConfigs[path] = typed
+	}
+
+	return typedConfigs
+}
+
+// ConfigCWSProfiles is a deserialized CWS Profile configuration file along with its
+// associated remote config metadata
+type ConfigCWSProfiles struct {
+	Config   []byte
+	Metadata Metadata
+}
+
+func parseConfigCWSProfiles(data []byte, metadata Metadata) (ConfigCWSProfiles, error) {
+	return ConfigCWSProfiles{
+		Config:   data,
+		Metadata: metadata,
+	}, nil
+}
+
+// CWSProfilesConfigs returns the currently active CWSProfiles config files
+func (r *Repository) CWSProfilesConfigs() map[string]ConfigCWSProfiles {
+	typedConfigs := make(map[string]ConfigCWSProfiles)
+
+	configs := r.getConfigs(ProductCWSProfiles)
+
+	for path, conf := range configs {
+		// We control this, so if this has gone wrong something has gone horribly wrong
+		typed, ok := conf.(ConfigCWSProfiles)
+		if !ok {
+			panic("unexpected config stored as CWS_SECURITY_PROFILES Config")
+		}
+
+		typedConfigs[path] = typed
+	}
+
+	return typedConfigs
+}
+
+// ConfigASM is a deserialized ASM configuration file along with its
+// associated remote config metadata
+type ConfigASM struct {
+	Config   []byte
+	Metadata Metadata
+}
+
+func parseConfigASM(data []byte, metadata Metadata) (ConfigASM, error) {
+	return ConfigASM{
+		Config:   data,
+		Metadata: metadata,
+	}, nil
+}
+
+// ASMConfigs returns the currently active ASM configs
+func (r *Repository) ASMConfigs() map[string]ConfigASM {
+	typedConfigs := make(map[string]ConfigASM)
+
+	configs := r.getConfigs(ProductASM)
+
+	for path, conf := range configs {
+		// We control this, so if this has gone wrong something has gone horribly wrong
+		typed, ok := conf.(ConfigASM)
+		if !ok {
+			panic("unexpected config stored as ASM Config")
+		}
+
+		typedConfigs[path] = typed
+	}
+
+	return typedConfigs
+}
+
+// ConfigASMDD is a deserialized ASM DD configuration file along with its
+// associated remote config metadata
+type ConfigASMDD struct {
+	Config   []byte
+	Metadata Metadata
+}
+
+func parseConfigASMDD(data []byte, metadata Metadata) (ConfigASMDD, error) {
+	return ConfigASMDD{
+		Config:   data,
+		Metadata: metadata,
+	}, nil
+}
+
+// ASMDDConfigs returns the currently active ASMDD configs
+func (r *Repository) ASMDDConfigs() map[string]ConfigASMDD {
+	typedConfigs := make(map[string]ConfigASMDD)
+
+	configs := r.getConfigs(ProductASMDD)
+
+	for path, conf := range configs {
+		// We control this, so if this has gone wrong something has gone horribly wrong
+		typed, ok := conf.(ConfigASMDD)
+		if !ok {
+			panic("unexpected config stored as ASMDD Config")
+		}
+
+		typedConfigs[path] = typed
+	}
+
+	return typedConfigs
+}
+
+// ASMFeaturesConfig is a deserialized configuration file that indicates whether ASM should be enabled
+// within a tracer, along with its associated remote config metadata.
+type ASMFeaturesConfig struct {
+	Config   ASMFeaturesData
+	Metadata Metadata
+}
+
+// ASMFeaturesData describes the enabled state of ASM features
+type ASMFeaturesData struct {
+	ASM struct {
+		Enabled bool `json:"enabled"`
+	} `json:"asm"`
+}
+
+func parseASMFeaturesConfig(data []byte, metadata Metadata) (ASMFeaturesConfig, error) {
+	var f ASMFeaturesData
+
+	err := json.Unmarshal(data, &f)
+	if err != nil {
+		return ASMFeaturesConfig{}, err
+	}
+
+	return ASMFeaturesConfig{
+		Config:   f,
+		Metadata: metadata,
+	}, nil
+}
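As a rough illustration of the payload this function expects (the shape is inferred from the json tags above; the concrete value is made up), an ASM_FEATURES file that turns ASM on would look like this:

raw := []byte(`{"asm": {"enabled": true}}`)

var f ASMFeaturesData
if err := json.Unmarshal(raw, &f); err == nil {
	fmt.Println(f.ASM.Enabled) // prints: true
}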
+
+// ASMFeaturesConfigs returns the currently active ASMFeatures configs
+func (r *Repository) ASMFeaturesConfigs() map[string]ASMFeaturesConfig {
+	typedConfigs := make(map[string]ASMFeaturesConfig)
+
+	configs := r.getConfigs(ProductASMFeatures)
+
+	for path, conf := range configs {
+		// We control this, so if this has gone wrong something has gone horribly wrong
+		typed, ok := conf.(ASMFeaturesConfig)
+		if !ok {
+			panic("unexpected config stored as ASMFeaturesConfig")
+		}
+
+		typedConfigs[path] = typed
+	}
+
+	return typedConfigs
+}
+
+// ApplyState represents the status of a configuration application by a remote configuration client
+// Clients need to either ack the correct application of received configurations, or communicate that
+// they haven't applied it yet, or communicate any error that may have happened while doing so
+type ApplyState uint64
+
+const (
+	ApplyStateUnknown ApplyState = iota
+	ApplyStateUnacknowledged
+	ApplyStateAcknowledged
+	ApplyStateError
+)
+
+// ApplyStatus is the processing status for a given configuration.
+// It basically represents whether a config was successfully processed and apply, or if an error occurred
+type ApplyStatus struct {
+	State ApplyState
+	Error string
+}
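A minimal sketch of how a downstream consumer might report these states back through the UpdateApplyStatus helper added in repository.go below; repo is assumed to be a *Repository and path a config path previously returned by Update (both names are placeholders).

// Acknowledge a config that was applied successfully.
repo.UpdateApplyStatus(path, ApplyStatus{State: ApplyStateAcknowledged})

// Flag a config that could not be applied.
repo.UpdateApplyStatus(path, ApplyStatus{
	State: ApplyStateError,
	Error: "rule file rejected by the WAF",
})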
+
+// ASMDataConfig is a deserialized configuration file that holds rules data that can be used
+// by the ASM WAF for specific features (example: ip blocking).
+type ASMDataConfig struct {
+	Config   ASMDataRulesData
+	Metadata Metadata
+}
+
+// ASMDataRulesData is a serializable array of rules data entries
+type ASMDataRulesData struct {
+	RulesData []ASMDataRuleData `json:"rules_data"`
+}
+
+// ASMDataRuleData is an entry in the rules data list held by an ASMData configuration
+type ASMDataRuleData struct {
+	ID   string                 `json:"id"`
+	Type string                 `json:"type"`
+	Data []ASMDataRuleDataEntry `json:"data"`
+}
+
+// ASMDataRuleDataEntry represents a data entry in a rule data file
+type ASMDataRuleDataEntry struct {
+	Expiration int64  `json:"expiration,omitempty"`
+	Value      string `json:"value"`
+}
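For illustration, a plausible IP-blocking rules-data payload (the field names come from the json tags above; the values and the type string are invented) and how parseConfigASMData, defined just below, would consume it:

raw := []byte(`{
	"rules_data": [{
		"id":   "blocked_ips",
		"type": "ip_with_expiration",
		"data": [{"value": "192.0.2.1", "expiration": 1700000000}]
	}]
}`)

cfg, err := parseConfigASMData(raw, Metadata{Product: ProductASMData})
if err == nil {
	fmt.Println(cfg.Config.RulesData[0].ID) // prints: blocked_ips
}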
+
+func parseConfigASMData(data []byte, metadata Metadata) (ASMDataConfig, error) {
+	cfg := ASMDataConfig{
+		Metadata: metadata,
+	}
+	err := json.Unmarshal(data, &cfg.Config)
+	return cfg, err
+}
+
+// ASMDataConfigs returns the currently active ASMData configs
+func (r *Repository) ASMDataConfigs() map[string]ASMDataConfig {
+	typedConfigs := make(map[string]ASMDataConfig)
+	configs := r.getConfigs(ProductASMData)
+
+	for path, cfg := range configs {
+		// We control this, so if this has gone wrong something has gone horribly wrong
+		typed, ok := cfg.(ASMDataConfig)
+		if !ok {
+			panic("unexpected config stored as ASMDataConfig")
+		}
+		typedConfigs[path] = typed
+	}
+
+	return typedConfigs
+}
+
+type APMTracingConfig struct {
+	Config   []byte
+	Metadata Metadata
+}
+
+func parseConfigAPMTracing(data []byte, metadata Metadata) (APMTracingConfig, error) {
+	// Delegate the parsing responsibility to the cluster agent
+	return APMTracingConfig{
+		Config:   data,
+		Metadata: metadata,
+	}, nil
+}
+
+// APMTracingConfigs returns the currently active APMTracing configs
+func (r *Repository) APMTracingConfigs() map[string]APMTracingConfig {
+	typedConfigs := make(map[string]APMTracingConfig)
+	configs := r.getConfigs(ProductAPMTracing)
+	for path, conf := range configs {
+		// We control this, so if this has gone wrong something has gone horribly wrong
+		typed, ok := conf.(APMTracingConfig)
+		if !ok {
+			panic("unexpected config stored as APMTracingConfig")
+		}
+		typedConfigs[path] = typed
+	}
+	return typedConfigs
+}
+
+// Metadata stores remote config metadata for a given configuration
+type Metadata struct {
+	Product     string
+	ID          string
+	Name        string
+	Version     uint64
+	RawLength   uint64
+	Hashes      map[string][]byte
+	ApplyStatus ApplyStatus
+}
+
+func newConfigMetadata(parsedPath configPath, tfm data.TargetFileMeta) (Metadata, error) {
+	var m Metadata
+	m.ID = parsedPath.ConfigID
+	m.Product = parsedPath.Product
+	m.Name = parsedPath.Name
+	m.RawLength = uint64(tfm.Length)
+	m.Hashes = make(map[string][]byte)
+	for k, v := range tfm.Hashes {
+		m.Hashes[k] = []byte(v)
+	}
+	v, err := fileMetaVersion(tfm)
+	if err != nil {
+		return Metadata{}, err
+	}
+	m.Version = v
+
+	return m, nil
+}
+
+type fileMetaCustom struct {
+	Version *uint64 `json:"v"`
+}
+
+func fileMetaVersion(fm data.TargetFileMeta) (uint64, error) {
+	if fm.Custom == nil {
+		return 0, ErrNoConfigVersion
+	}
+	fmc, err := parseFileMetaCustom(*fm.Custom)
+	if err != nil {
+		return 0, err
+	}
+
+	return *fmc.Version, nil
+}
+
+func parseFileMetaCustom(rawCustom []byte) (fileMetaCustom, error) {
+	var custom fileMetaCustom
+	err := json.Unmarshal(rawCustom, &custom)
+	if err != nil {
+		return fileMetaCustom{}, err
+	}
+	if custom.Version == nil {
+		return fileMetaCustom{}, ErrNoConfigVersion
+	}
+	return custom, nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/path.go b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/path.go
new file mode 100644
index 0000000000..d1a4d69e21
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/path.go
@@ -0,0 +1,100 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022-present Datadog, Inc.
+
+package state
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// matches datadog/<int>/<string>/<string>/<string> for datadog/<org_id>/<product>/<config_id>/<file>
+	datadogPathRegexp       = regexp.MustCompile(`^datadog/(\d+)/([^/]+)/([^/]+)/([^/]+)$`)
+	datadogPathRegexpGroups = 4
+
+	// matches employee/<string>/<string>/<string> for employee/<product>/<config_id>/<file>
+	employeePathRegexp       = regexp.MustCompile(`^employee/([^/]+)/([^/]+)/([^/]+)$`)
+	employeePathRegexpGroups = 3
+)
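As a quick illustration of the path layout these expressions accept (the concrete path is made up), parseConfigPath, defined below, would split a datadog-sourced path like so:

cp, err := parseConfigPath("datadog/2/ASM_DD/blocked_users/config")
if err == nil {
	fmt.Println(cp.OrgID)    // 2
	fmt.Println(cp.Product)  // ASM_DD
	fmt.Println(cp.ConfigID) // blocked_users
	fmt.Println(cp.Name)     // config
}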
+
+type source uint
+
+const (
+	sourceUnknown source = iota
+	sourceDatadog
+	sourceEmployee
+)
+
+type configPath struct {
+	Source   source
+	OrgID    int64
+	Product  string
+	ConfigID string
+	Name     string
+}
+
+func parseConfigPath(path string) (configPath, error) {
+	configType := parseConfigPathSource(path)
+	switch configType {
+	case sourceDatadog:
+		return parseDatadogConfigPath(path)
+	case sourceEmployee:
+		return parseEmployeeConfigPath(path)
+	}
+	return configPath{}, fmt.Errorf("config path '%s' has unknown source", path)
+}
+
+func parseDatadogConfigPath(path string) (configPath, error) {
+	matchedGroups := datadogPathRegexp.FindStringSubmatch(path)
+	if len(matchedGroups) != datadogPathRegexpGroups+1 {
+		return configPath{}, fmt.Errorf("config file path '%s' has wrong format", path)
+	}
+	rawOrgID := matchedGroups[1]
+	orgID, err := strconv.ParseInt(rawOrgID, 10, 64)
+	if err != nil {
+		return configPath{}, fmt.Errorf("could not parse orgID '%s' in config file path: %v", rawOrgID, err)
+	}
+	rawProduct := matchedGroups[2]
+	if len(rawProduct) == 0 {
+		return configPath{}, fmt.Errorf("product is empty")
+	}
+	return configPath{
+		Source:   sourceDatadog,
+		OrgID:    orgID,
+		Product:  rawProduct,
+		ConfigID: matchedGroups[3],
+		Name:     matchedGroups[4],
+	}, nil
+}
+
+func parseEmployeeConfigPath(path string) (configPath, error) {
+	matchedGroups := employeePathRegexp.FindStringSubmatch(path)
+	if len(matchedGroups) != employeePathRegexpGroups+1 {
+		return configPath{}, fmt.Errorf("config file path '%s' has wrong format", path)
+	}
+	rawProduct := matchedGroups[1]
+	if len(rawProduct) == 0 {
+		return configPath{}, fmt.Errorf("product is empty")
+	}
+	return configPath{
+		Source:   sourceEmployee,
+		Product:  rawProduct,
+		ConfigID: matchedGroups[2],
+		Name:     matchedGroups[3],
+	}, nil
+}
+
+func parseConfigPathSource(path string) source {
+	switch {
+	case strings.HasPrefix(path, "datadog/"):
+		return sourceDatadog
+	case strings.HasPrefix(path, "employee/"):
+		return sourceEmployee
+	}
+	return sourceUnknown
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/repository.go b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/repository.go
new file mode 100644
index 0000000000..06758cc805
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/repository.go
@@ -0,0 +1,442 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022-present Datadog, Inc.
+
+package state
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/DataDog/go-tuf/data"
+)
+
+var (
+	// ErrMalformedEmbeddedRoot occurs when the TUF root provided is invalid
+	ErrMalformedEmbeddedRoot = errors.New("malformed embedded TUF root file provided")
+)
+
+// RepositoryState contains all of the information about the current config files
+// stored by the client to be able to make an update request to an Agent
+type RepositoryState struct {
+	Configs            []ConfigState
+	CachedFiles        []CachedFile
+	TargetsVersion     int64
+	RootsVersion       int64
+	OpaqueBackendState []byte
+}
+
+// ConfigState describes an applied config by the agent client.
+type ConfigState struct {
+	Product     string
+	ID          string
+	Version     uint64
+	ApplyStatus ApplyStatus
+}
+
+// CachedFile describes a cached file stored by the agent client
+//
+// Note: You may be wondering why this exists when `ConfigState` exists
+// as well. The API for requesting updates does not mandate that a client
+// cache config files. This implementation just happens to do so.
+type CachedFile struct {
+	Path   string
+	Length uint64
+	Hashes map[string][]byte
+}
+
+// An Update contains all the data needed to update a client's remote config repository state
+type Update struct {
+	// TUFRoots contains, in order, updated roots that this repository needs to keep up with TUF validation
+	TUFRoots [][]byte
+	// TUFTargets is the latest TUF Targets file and is used to validate raw config files
+	TUFTargets []byte
+	// TargetFiles stores the raw config files by their full TUF path
+	TargetFiles map[string][]byte
+	// ClientConfigs is a list of TUF paths corresponding to config files designated for this repository
+	ClientConfigs []string
+}
+
+// isEmpty returns whether or not all the fields of `Update` are empty
+func (u *Update) isEmpty() bool {
+	return len(u.TUFRoots) == 0 && len(u.TUFTargets) == 0 && (u.TargetFiles == nil || len(u.TargetFiles) == 0) && len(u.ClientConfigs) == 0
+}
+
+// Repository is a remote config client used in a downstream process to retrieve
+// remote config updates from an Agent.
+type Repository struct {
+	// TUF related data
+	latestTargets      *data.Targets
+	tufRootsClient     *tufRootsClient
+	opaqueBackendState []byte
+
+	// Unverified mode
+	tufVerificationEnabled bool
+	latestRootVersion      int64
+
+	// Config file storage
+	metadata map[string]Metadata
+	configs  map[string]map[string]interface{}
+}
+
+// NewRepository creates a new remote config repository that will track
+// both TUF metadata and raw config files for a client.
+func NewRepository(embeddedRoot []byte) (*Repository, error) {
+	if embeddedRoot == nil {
+		return nil, ErrMalformedEmbeddedRoot
+	}
+
+	configs := make(map[string]map[string]interface{})
+	for _, product := range allProducts {
+		configs[product] = make(map[string]interface{})
+	}
+
+	tufRootsClient, err := newTufRootsClient(embeddedRoot)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Repository{
+		latestTargets:          data.NewTargets(),
+		tufRootsClient:         tufRootsClient,
+		metadata:               make(map[string]Metadata),
+		configs:                configs,
+		tufVerificationEnabled: true,
+	}, nil
+}
+
+// NewUnverifiedRepository creates a new remote config repository that will
+// track config files for a client WITHOUT verifying any TUF related metadata.
+//
+// When creating this we pretend we have a root version of 1, as the backend expects
+// to not have to send the initial "embedded" root.
+func NewUnverifiedRepository() (*Repository, error) {
+	configs := make(map[string]map[string]interface{})
+	for _, product := range allProducts {
+		configs[product] = make(map[string]interface{})
+	}
+
+	return &Repository{
+		latestTargets:          data.NewTargets(),
+		metadata:               make(map[string]Metadata),
+		configs:                configs,
+		tufVerificationEnabled: false,
+		latestRootVersion:      1, // The backend expects us to start with a root version of 1.
+	}, nil
+}
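A hedged end-to-end sketch of how a consuming process might drive this type. The config path, targetsJSON and rulesJSON placeholders are invented; in a real client the TUF targets, target files and client-config list all come from the Agent's get-configs response, and Update will reject payloads that are not valid targets metadata.

// import "github.com/DataDog/datadog-agent/pkg/remoteconfig/state"
var targetsJSON, rulesJSON []byte // taken from the Agent response in a real client

repo, err := state.NewUnverifiedRepository()
if err != nil {
	log.Fatal(err)
}

changed, err := repo.Update(state.Update{
	TUFTargets:    targetsJSON,
	TargetFiles:   map[string][]byte{"datadog/2/ASM_DD/rules/config": rulesJSON},
	ClientConfigs: []string{"datadog/2/ASM_DD/rules/config"},
})
if err != nil {
	log.Fatal(err)
}

for _, product := range changed {
	if product == state.ProductASMDD {
		for path, cfg := range repo.ASMDDConfigs() {
			log.Printf("active ASM_DD config %s (version %d)", path, cfg.Metadata.Version)
		}
	}
}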
+
+// Update processes the ClientGetConfigsResponse from the Agent and updates the
+// configuration state
+func (r *Repository) Update(update Update) ([]string, error) {
+	var err error
+	var updatedTargets *data.Targets
+	var tmpRootClient *tufRootsClient
+
+	// If there's literally nothing in the update, it's not an error.
+	if update.isEmpty() {
+		return []string{}, nil
+	}
+
+	// TUF: Update the roots and verify the TUF Targets file (optional)
+	//
+	// We don't want to partially update the state, so we need a temporary client to hold the new root
+	// data until we know it's valid. Since verification is optional, if the repository was configured
+	// to not do TUF verification we only deserialize the TUF targets file.
+	if r.tufVerificationEnabled {
+		tmpRootClient, err = r.tufRootsClient.clone()
+		if err != nil {
+			return nil, err
+		}
+		err = tmpRootClient.updateRoots(update.TUFRoots)
+		if err != nil {
+			return nil, err
+		}
+
+		updatedTargets, err = tmpRootClient.validateTargets(update.TUFTargets)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		updatedTargets, err = unsafeUnmarshalTargets(update.TUFTargets)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	clientConfigsMap := make(map[string]struct{})
+	for _, f := range update.ClientConfigs {
+		clientConfigsMap[f] = struct{}{}
+	}
+
+	result := newUpdateResult()
+
+	// 2: Check the config list and mark any missing configs as "to be removed"
+	for _, configs := range r.configs {
+		for path := range configs {
+			if _, ok := clientConfigsMap[path]; !ok {
+				result.removed = append(result.removed, path)
+				parsedPath, err := parseConfigPath(path)
+				if err != nil {
+					return nil, err
+				}
+				result.productsUpdated[parsedPath.Product] = true
+			}
+		}
+	}
+
+	// 3: For all the files referenced in this update
+	for _, path := range update.ClientConfigs {
+		targetFileMetadata, ok := updatedTargets.Targets[path]
+		if !ok {
+			return nil, fmt.Errorf("missing config file in TUF targets - %s", path)
+		}
+
+		// 3.a: Extract the product and ID from the path
+		parsedPath, err := parseConfigPath(path)
+		if err != nil {
+			return nil, err
+		}
+
+		// 3.b and 3.c: Check if this configuration is either new or has been modified
+		storedMetadata, exists := r.metadata[path]
+		if exists && hashesEqual(targetFileMetadata.Hashes, storedMetadata.Hashes) {
+			continue
+		}
+
+		// 3.d: Ensure that the raw configuration file is present in the
+		// update payload.
+		raw, ok := update.TargetFiles[path]
+		if !ok {
+			return nil, fmt.Errorf("missing update file - %s", path)
+		}
+
+		// TUF: Validate the hash of the raw target file and ensure that it matches
+		// the TUF metadata
+		err = validateTargetFileHash(targetFileMetadata, raw)
+		if err != nil {
+			return nil, fmt.Errorf("error validating %s hash with TUF metadata - %v", path, err)
+		}
+
+		// 3.e: Deserialize the configuration.
+		// 3.f: Store the update details for application later
+		//
+		// Note: We don't have to worry about extra fields as mentioned
+		// in the RFC because the encoding/json library handles that for us.
+		m, err := newConfigMetadata(parsedPath, targetFileMetadata)
+		if err != nil {
+			return nil, err
+		}
+		config, err := parseConfig(parsedPath.Product, raw, m)
+		if err != nil {
+			return nil, err
+		}
+		result.metadata[path] = m
+		result.changed[parsedPath.Product][path] = config
+		result.productsUpdated[parsedPath.Product] = true
+	}
+
+	// 4.a: Store the new targets.signed.custom.opaque_client_state
+	// TUF: Store the updated roots now that everything has validated
+	if r.tufVerificationEnabled {
+		r.tufRootsClient = tmpRootClient
+	} else if update.TUFRoots != nil && len(update.TUFRoots) > 0 {
+		v, err := extractRootVersion(update.TUFRoots[len(update.TUFRoots)-1])
+		if err != nil {
+			return nil, err
+		}
+		r.latestRootVersion = v
+	}
+	r.latestTargets = updatedTargets
+	if r.latestTargets.Custom != nil {
+		r.opaqueBackendState = extractOpaqueBackendState(*r.latestTargets.Custom)
+	}
+
+	// Upstream may not want to take any actions if the update result doesn't
+	// change any configs.
+	if result.isEmpty() {
+		return nil, nil
+	}
+
+	changedProducts := make([]string, 0)
+	for product, updated := range result.productsUpdated {
+		if updated {
+			changedProducts = append(changedProducts, product)
+		}
+	}
+
+	// 4.b/5.b: Save the new state and apply cleanups
+	r.applyUpdateResult(update, result)
+
+	return changedProducts, nil
+}
+
+// UpdateApplyStatus updates the config's metadata to reflect its processing state
+// Can be used after a call to Update() in order to tell the repository which config was acked, which
+// wasn't and which errors occurred while processing.
+// Note: it is the responsibility of the caller to ensure that no new Update() call was made between
+// the first Update() call and the call to UpdateApplyStatus() so as to keep the repository state accurate.
+func (r *Repository) UpdateApplyStatus(cfgPath string, status ApplyStatus) {
+	if m, ok := r.metadata[cfgPath]; ok {
+		m.ApplyStatus = status
+		r.metadata[cfgPath] = m
+	}
+}
+
+func (r *Repository) getConfigs(product string) map[string]interface{} {
+	configs, ok := r.configs[product]
+	if !ok {
+		return nil
+	}
+
+	return configs
+}
+
+// applyUpdateResult changes the state of the client based on the given update.
+//
+// The update is guaranteed to succeed at this point, having been vetted and the details
+// needed to apply the update stored in the `updateResult`.
+func (r *Repository) applyUpdateResult(update Update, result updateResult) {
+	// 4.b Save all the updated and new config files
+	for product, configs := range result.changed {
+		for path, config := range configs {
+			m := r.configs[product]
+			m[path] = config
+		}
+	}
+	for path, metadata := range result.metadata {
+		r.metadata[path] = metadata
+	}
+
+	// 5.b Clean up the cache of any removed configs
+	for _, path := range result.removed {
+		delete(r.metadata, path)
+		for _, configs := range r.configs {
+			delete(configs, path)
+		}
+	}
+}
+
+// CurrentState returns all of the information needed to
+// make an update for new configurations.
+func (r *Repository) CurrentState() (RepositoryState, error) {
+	var configs []ConfigState
+	var cached []CachedFile
+
+	for path, metadata := range r.metadata {
+		configs = append(configs, configStateFromMetadata(metadata))
+		cached = append(cached, cachedFileFromMetadata(path, metadata))
+	}
+
+	var latestRootVersion int64
+	if r.tufVerificationEnabled {
+		root, err := r.tufRootsClient.latestRoot()
+		if err != nil {
+			return RepositoryState{}, err
+		}
+		latestRootVersion = root.Version
+	} else {
+		latestRootVersion = r.latestRootVersion
+	}
+
+	return RepositoryState{
+		Configs:            configs,
+		CachedFiles:        cached,
+		TargetsVersion:     r.latestTargets.Version,
+		RootsVersion:       latestRootVersion,
+		OpaqueBackendState: r.opaqueBackendState,
+	}, nil
+}
+
+// An updateResult allows the client to apply the update as a transaction
+// after validating all required preconditions
+type updateResult struct {
+	removed         []string
+	metadata        map[string]Metadata
+	changed         map[string]map[string]interface{}
+	productsUpdated map[string]bool
+}
+
+func newUpdateResult() updateResult {
+	changed := make(map[string]map[string]interface{})
+
+	for _, p := range allProducts {
+		changed[p] = make(map[string]interface{})
+	}
+
+	return updateResult{
+		removed:         make([]string, 0),
+		metadata:        make(map[string]Metadata),
+		changed:         changed,
+		productsUpdated: map[string]bool{},
+	}
+}
+
+func (ur updateResult) Log() {
+	log.Printf("Removed Configs: %v", ur.removed)
+
+	var b strings.Builder
+	b.WriteString("Changed configs: [")
+	for path := range ur.metadata {
+		b.WriteString(path)
+		b.WriteString(" ")
+	}
+	b.WriteString("]")
+
+	log.Println(b.String())
+}
+
+func (ur updateResult) isEmpty() bool {
+	return len(ur.removed) == 0 && len(ur.metadata) == 0
+}
+
+func configStateFromMetadata(m Metadata) ConfigState {
+	return ConfigState{
+		Product:     m.Product,
+		ID:          m.ID,
+		Version:     m.Version,
+		ApplyStatus: m.ApplyStatus,
+	}
+}
+
+func cachedFileFromMetadata(path string, m Metadata) CachedFile {
+	return CachedFile{
+		Path:   path,
+		Length: m.RawLength,
+		Hashes: m.Hashes,
+	}
+}
+
+// hashesEqual checks if the hash values in the TUF metadata file match the stored
+// hash values for a given config
+func hashesEqual(tufHashes data.Hashes, storedHashes map[string][]byte) bool {
+	for algorithm, value := range tufHashes {
+		v, ok := storedHashes[algorithm]
+		if !ok {
+			continue
+		}
+
+		if !bytes.Equal(value, v) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func extractOpaqueBackendState(targetsCustom []byte) []byte {
+	state := struct {
+		State []byte `json:"opaque_backend_state"`
+	}{nil}
+
+	err := json.Unmarshal(targetsCustom, &state)
+	if err != nil {
+		return []byte{}
+	}
+
+	return state.State
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/tuf.go b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/tuf.go
new file mode 100644
index 0000000000..3cc712d9cc
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/tuf.go
@@ -0,0 +1,233 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022-present Datadog, Inc.
+
+package state
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/DataDog/go-tuf/client"
+	"github.com/DataDog/go-tuf/data"
+	"github.com/DataDog/go-tuf/util"
+	"github.com/DataDog/go-tuf/verify"
+)
+
+type tufRootsClient struct {
+	rootClient      *client.Client
+	rootLocalStore  client.LocalStore
+	rootRemoteStore *rootClientRemoteStore
+}
+
+func newTufRootsClient(root []byte) (*tufRootsClient, error) {
+	rootLocalStore := client.MemoryLocalStore()
+	rootRemoteStore := &rootClientRemoteStore{}
+	rootClient := client.NewClient(rootLocalStore, rootRemoteStore)
+
+	err := rootClient.InitLocal(root)
+	if err != nil {
+		return nil, err
+	}
+
+	return &tufRootsClient{
+		rootClient:      rootClient,
+		rootLocalStore:  rootLocalStore,
+		rootRemoteStore: rootRemoteStore,
+	}, nil
+}
+
+func (trc *tufRootsClient) clone() (*tufRootsClient, error) {
+	root, err := trc.latestRootRaw()
+	if err != nil {
+		return nil, err
+	}
+
+	return newTufRootsClient(root)
+}
+
+func (trc *tufRootsClient) updateRoots(newRoots [][]byte) error {
+	if len(newRoots) == 0 {
+		return nil
+	}
+
+	trc.rootRemoteStore.roots = append(trc.rootRemoteStore.roots, newRoots...)
+
+	return trc.rootClient.UpdateRoots()
+}
+
+func (trc *tufRootsClient) latestRoot() (*data.Root, error) {
+	raw, err := trc.latestRootRaw()
+	if err != nil {
+		return nil, err
+	}
+
+	return unsafeUnmarshalRoot(raw)
+}
+
+func (trc *tufRootsClient) latestRootRaw() ([]byte, error) {
+	metas, err := trc.rootLocalStore.GetMeta()
+	if err != nil {
+		return nil, err
+	}
+	rawRoot := metas["root.json"]
+
+	return rawRoot, nil
+}
+
+func (trc *tufRootsClient) validateTargets(rawTargets []byte) (*data.Targets, error) {
+	root, err := trc.latestRoot()
+	if err != nil {
+		return nil, err
+	}
+
+	db := verify.NewDB()
+	for _, key := range root.Keys {
+		for _, id := range key.IDs() {
+			if err := db.AddKey(id, key); err != nil {
+				return nil, err
+			}
+		}
+	}
+	targetsRole, hasRoleTargets := root.Roles["targets"]
+	if !hasRoleTargets {
+		return nil, fmt.Errorf("root is missing a targets role")
+	}
+	role := &data.Role{Threshold: targetsRole.Threshold, KeyIDs: targetsRole.KeyIDs}
+	if err := db.AddRole("targets", role); err != nil {
+		return nil, fmt.Errorf("could not add targets role to db: %v", err)
+	}
+	var targets data.Targets
+	err = db.Unmarshal(rawTargets, &targets, "targets", 0)
+	if err != nil {
+		return nil, err
+	}
+
+	return &targets, nil
+}
+
+type rootClientRemoteStore struct {
+	roots [][]byte
+}
+
+func (s *rootClientRemoteStore) GetMeta(name string) (stream io.ReadCloser, size int64, err error) {
+	metaPath, err := parseMetaPath(name)
+	if err != nil {
+		return nil, 0, err
+	}
+	if metaPath.role != roleRoot || !metaPath.versionSet {
+		return nil, 0, client.ErrNotFound{File: name}
+	}
+	for _, root := range s.roots {
+		parsedRoot, err := unsafeUnmarshalRoot(root)
+		if err != nil {
+			return nil, 0, err
+		}
+		if parsedRoot.Version == metaPath.version {
+			return io.NopCloser(bytes.NewReader(root)), int64(len(root)), nil
+		}
+	}
+	return nil, 0, client.ErrNotFound{File: name}
+}
+
+func (s *rootClientRemoteStore) GetTarget(path string) (stream io.ReadCloser, size int64, err error) {
+	return nil, 0, client.ErrNotFound{File: path}
+}
+
+type role string
+
+const (
+	roleRoot role = "root"
+)
+
+type metaPath struct {
+	role       role
+	version    int64
+	versionSet bool
+}
+
+func parseMetaPath(rawMetaPath string) (metaPath, error) {
+	splitRawMetaPath := strings.SplitN(rawMetaPath, ".", 3)
+	if len(splitRawMetaPath) != 2 && len(splitRawMetaPath) != 3 {
+		return metaPath{}, fmt.Errorf("invalid metadata path '%s'", rawMetaPath)
+	}
+	suffix := splitRawMetaPath[len(splitRawMetaPath)-1]
+	if suffix != "json" {
+		return metaPath{}, fmt.Errorf("invalid metadata path (suffix) '%s'", rawMetaPath)
+	}
+	rawRole := splitRawMetaPath[len(splitRawMetaPath)-2]
+	if rawRole == "" {
+		return metaPath{}, fmt.Errorf("invalid metadata path (role) '%s'", rawMetaPath)
+	}
+	if len(splitRawMetaPath) == 2 {
+		return metaPath{
+			role: role(rawRole),
+		}, nil
+	}
+	rawVersion, err := strconv.ParseInt(splitRawMetaPath[0], 10, 64)
+	if err != nil {
+		return metaPath{}, fmt.Errorf("invalid metadata path (version) '%s': %w", rawMetaPath, err)
+	}
+	return metaPath{
+		role:       role(rawRole),
+		version:    rawVersion,
+		versionSet: true,
+	}, nil
+}
+
+func validateTargetFileHash(targetMeta data.TargetFileMeta, targetFile []byte) error {
+	if len(targetMeta.HashAlgorithms()) == 0 {
+		return fmt.Errorf("target file has no hash")
+	}
+	generatedMeta, err := util.GenerateFileMeta(bytes.NewBuffer(targetFile), targetMeta.HashAlgorithms()...)
+	if err != nil {
+		return err
+	}
+	err = util.FileMetaEqual(targetMeta.FileMeta, generatedMeta)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func unsafeUnmarshalRoot(raw []byte) (*data.Root, error) {
+	var signedRoot data.Signed
+	err := json.Unmarshal(raw, &signedRoot)
+	if err != nil {
+		return nil, err
+	}
+	var root data.Root
+	err = json.Unmarshal(signedRoot.Signed, &root)
+	if err != nil {
+		return nil, err
+	}
+	return &root, err
+}
+
+func unsafeUnmarshalTargets(raw []byte) (*data.Targets, error) {
+	var signedTargets data.Signed
+	err := json.Unmarshal(raw, &signedTargets)
+	if err != nil {
+		return nil, err
+	}
+	var targets data.Targets
+	err = json.Unmarshal(signedTargets.Signed, &targets)
+	if err != nil {
+		return nil, err
+	}
+	return &targets, err
+}
+
+func extractRootVersion(raw []byte) (int64, error) {
+	root, err := unsafeUnmarshalRoot(raw)
+	if err != nil {
+		return 0, err
+	}
+
+	return root.Version, nil
+}
diff --git a/vendor/github.com/DataDog/datadog-go/v5/LICENSE.txt b/vendor/github.com/DataDog/datadog-go/v5/LICENSE.txt
new file mode 100644
index 0000000000..97cd06d7fb
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Datadog, Inc
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/README.md b/vendor/github.com/DataDog/datadog-go/v5/statsd/README.md
new file mode 100644
index 0000000000..2fc899687e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/README.md
@@ -0,0 +1,4 @@
+## Overview
+
+Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client.  Dogstatsd extends Statsd, adding tags
+and histograms.
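A short usage sketch for the client; the address, metric names and tags below are placeholders rather than recommendations.

client, err := statsd.New("127.0.0.1:8125")
if err != nil {
	log.Fatal(err)
}
defer client.Close()

// One count, one gauge and one histogram sample, each tagged and sent at full sample rate.
client.Incr("example.page.views", []string{"env:dev"}, 1)
client.Gauge("example.queue.depth", 12, nil, 1)
client.Histogram("example.request.duration_ms", 42.5, []string{"handler:index"}, 1)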
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/aggregator.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/aggregator.go
new file mode 100644
index 0000000000..ae4723c42b
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/aggregator.go
@@ -0,0 +1,290 @@
+package statsd
+
+import (
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type (
+	countsMap         map[string]*countMetric
+	gaugesMap         map[string]*gaugeMetric
+	setsMap           map[string]*setMetric
+	bufferedMetricMap map[string]*bufferedMetric
+)
+
+type aggregator struct {
+	nbContextGauge uint64
+	nbContextCount uint64
+	nbContextSet   uint64
+
+	countsM sync.RWMutex
+	gaugesM sync.RWMutex
+	setsM   sync.RWMutex
+
+	gauges        gaugesMap
+	counts        countsMap
+	sets          setsMap
+	histograms    bufferedMetricContexts
+	distributions bufferedMetricContexts
+	timings       bufferedMetricContexts
+
+	closed chan struct{}
+
+	client *Client
+
+	// aggregator implements the channelMode mechanism used to receive histograms,
+	// distributions and timings. Since they need sampling, they need to lock
+	// around the random source. When using both channelMode and
+	// ExtendedAggregation we don't want goroutines to fight over the lock.
+	inputMetrics    chan metric
+	stopChannelMode chan struct{}
+	wg              sync.WaitGroup
+}
+
+func newAggregator(c *Client) *aggregator {
+	return &aggregator{
+		client:          c,
+		counts:          countsMap{},
+		gauges:          gaugesMap{},
+		sets:            setsMap{},
+		histograms:      newBufferedContexts(newHistogramMetric),
+		distributions:   newBufferedContexts(newDistributionMetric),
+		timings:         newBufferedContexts(newTimingMetric),
+		closed:          make(chan struct{}),
+		stopChannelMode: make(chan struct{}),
+	}
+}
+
+func (a *aggregator) start(flushInterval time.Duration) {
+	ticker := time.NewTicker(flushInterval)
+
+	go func() {
+		for {
+			select {
+			case <-ticker.C:
+				a.flush()
+			case <-a.closed:
+				ticker.Stop()
+				return
+			}
+		}
+	}()
+}
+
+func (a *aggregator) startReceivingMetric(bufferSize int, nbWorkers int) {
+	a.inputMetrics = make(chan metric, bufferSize)
+	for i := 0; i < nbWorkers; i++ {
+		a.wg.Add(1)
+		go a.pullMetric()
+	}
+}
+
+func (a *aggregator) stopReceivingMetric() {
+	close(a.stopChannelMode)
+	a.wg.Wait()
+}
+
+func (a *aggregator) stop() {
+	a.closed <- struct{}{}
+}
+
+func (a *aggregator) pullMetric() {
+	for {
+		select {
+		case m := <-a.inputMetrics:
+			switch m.metricType {
+			case histogram:
+				a.histogram(m.name, m.fvalue, m.tags, m.rate)
+			case distribution:
+				a.distribution(m.name, m.fvalue, m.tags, m.rate)
+			case timing:
+				a.timing(m.name, m.fvalue, m.tags, m.rate)
+			}
+		case <-a.stopChannelMode:
+			a.wg.Done()
+			return
+		}
+	}
+}
+
+func (a *aggregator) flush() {
+	for _, m := range a.flushMetrics() {
+		a.client.sendBlocking(m)
+	}
+}
+
+func (a *aggregator) flushTelemetryMetrics(t *Telemetry) {
+	if a == nil {
+		// aggregation is disabled
+		return
+	}
+
+	t.AggregationNbContextGauge = atomic.LoadUint64(&a.nbContextGauge)
+	t.AggregationNbContextCount = atomic.LoadUint64(&a.nbContextCount)
+	t.AggregationNbContextSet = atomic.LoadUint64(&a.nbContextSet)
+	t.AggregationNbContextHistogram = a.histograms.getNbContext()
+	t.AggregationNbContextDistribution = a.distributions.getNbContext()
+	t.AggregationNbContextTiming = a.timings.getNbContext()
+}
+
+func (a *aggregator) flushMetrics() []metric {
+	metrics := []metric{}
+
+	// We reset the values to avoid sending 'zero' values for metrics not
+	// sampled during this flush interval
+
+	a.setsM.Lock()
+	sets := a.sets
+	a.sets = setsMap{}
+	a.setsM.Unlock()
+
+	for _, s := range sets {
+		metrics = append(metrics, s.flushUnsafe()...)
+	}
+
+	a.gaugesM.Lock()
+	gauges := a.gauges
+	a.gauges = gaugesMap{}
+	a.gaugesM.Unlock()
+
+	for _, g := range gauges {
+		metrics = append(metrics, g.flushUnsafe())
+	}
+
+	a.countsM.Lock()
+	counts := a.counts
+	a.counts = countsMap{}
+	a.countsM.Unlock()
+
+	for _, c := range counts {
+		metrics = append(metrics, c.flushUnsafe())
+	}
+
+	metrics = a.histograms.flush(metrics)
+	metrics = a.distributions.flush(metrics)
+	metrics = a.timings.flush(metrics)
+
+	atomic.AddUint64(&a.nbContextCount, uint64(len(counts)))
+	atomic.AddUint64(&a.nbContextGauge, uint64(len(gauges)))
+	atomic.AddUint64(&a.nbContextSet, uint64(len(sets)))
+	return metrics
+}
+
+func getContext(name string, tags []string) string {
+	c, _ := getContextAndTags(name, tags)
+	return c
+}
+
+func getContextAndTags(name string, tags []string) (string, string) {
+	if len(tags) == 0 {
+		return name + nameSeparatorSymbol, ""
+	}
+	n := len(name) + len(nameSeparatorSymbol) + len(tagSeparatorSymbol)*(len(tags)-1)
+	for _, s := range tags {
+		n += len(s)
+	}
+
+	var sb strings.Builder
+	sb.Grow(n)
+	sb.WriteString(name)
+	sb.WriteString(nameSeparatorSymbol)
+	sb.WriteString(tags[0])
+	for _, s := range tags[1:] {
+		sb.WriteString(tagSeparatorSymbol)
+		sb.WriteString(s)
+	}
+
+	s := sb.String()
+
+	return s, s[len(name)+len(nameSeparatorSymbol):]
+}
+
+func (a *aggregator) count(name string, value int64, tags []string) error {
+	context := getContext(name, tags)
+	a.countsM.RLock()
+	if count, found := a.counts[context]; found {
+		count.sample(value)
+		a.countsM.RUnlock()
+		return nil
+	}
+	a.countsM.RUnlock()
+
+	a.countsM.Lock()
+	// Check that another goroutine hasn't created the value between the 'RUnlock' and 'Lock'
+	if count, found := a.counts[context]; found {
+		count.sample(value)
+		a.countsM.Unlock()
+		return nil
+	}
+
+	a.counts[context] = newCountMetric(name, value, tags)
+	a.countsM.Unlock()
+	return nil
+}
+
+func (a *aggregator) gauge(name string, value float64, tags []string) error {
+	context := getContext(name, tags)
+	a.gaugesM.RLock()
+	if gauge, found := a.gauges[context]; found {
+		gauge.sample(value)
+		a.gaugesM.RUnlock()
+		return nil
+	}
+	a.gaugesM.RUnlock()
+
+	gauge := newGaugeMetric(name, value, tags)
+
+	a.gaugesM.Lock()
+	// Check that another goroutine hasn't created the value between the 'RUnlock' and 'Lock'
+	if gauge, found := a.gauges[context]; found {
+		gauge.sample(value)
+		a.gaugesM.Unlock()
+		return nil
+	}
+	a.gauges[context] = gauge
+	a.gaugesM.Unlock()
+	return nil
+}
+
+func (a *aggregator) set(name string, value string, tags []string) error {
+	context := getContext(name, tags)
+	a.setsM.RLock()
+	if set, found := a.sets[context]; found {
+		set.sample(value)
+		a.setsM.RUnlock()
+		return nil
+	}
+	a.setsM.RUnlock()
+
+	a.setsM.Lock()
+	// Check that another goroutine hasn't created the value between the 'RUnlock' and 'Lock'
+	if set, found := a.sets[context]; found {
+		set.sample(value)
+		a.setsM.Unlock()
+		return nil
+	}
+	a.sets[context] = newSetMetric(name, value, tags)
+	a.setsM.Unlock()
+	return nil
+}
+
+// Only histograms, distributions and timings are sampled with a rate since we
+// only pack them in one message instead of aggregating them. Discarding the
+// sample rate would have an impact on the CPU and memory usage of the Agent.
+
+// type alias for Client.sendToAggregator
+type bufferedMetricSampleFunc func(name string, value float64, tags []string, rate float64) error
+
+func (a *aggregator) histogram(name string, value float64, tags []string, rate float64) error {
+	return a.histograms.sample(name, value, tags, rate)
+}
+
+func (a *aggregator) distribution(name string, value float64, tags []string, rate float64) error {
+	return a.distributions.sample(name, value, tags, rate)
+}
+
+func (a *aggregator) timing(name string, value float64, tags []string, rate float64) error {
+	return a.timings.sample(name, value, tags, rate)
+}
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/buffer.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/buffer.go
new file mode 100644
index 0000000000..f7bb8b0aaa
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/buffer.go
@@ -0,0 +1,197 @@
+package statsd
+
+import (
+	"strconv"
+)
+
+// MessageTooLongError is an error returned when a sample, event or service check is too large once serialized. See
+// WithMaxBytesPerPayload option for more details.
+type MessageTooLongError struct{}
+
+func (e MessageTooLongError) Error() string {
+	return "message too long. See 'WithMaxBytesPerPayload' documentation."
+}
+
+var errBufferFull = MessageTooLongError{}
+
+type partialWriteError string
+
+func (e partialWriteError) Error() string { return string(e) }
+
+const errPartialWrite = partialWriteError("value partially written")
+
+const metricOverhead = 512
+
+// statsdBuffer is a buffer containing statsd messages
+// this struct's methods are NOT safe for concurrent use
+type statsdBuffer struct {
+	buffer       []byte
+	maxSize      int
+	maxElements  int
+	elementCount int
+}
+
+func newStatsdBuffer(maxSize, maxElements int) *statsdBuffer {
+	return &statsdBuffer{
+		buffer:      make([]byte, 0, maxSize+metricOverhead), // pre-allocate the needed size + metricOverhead to avoid having Go re-allocate on its own if an element does not fit
+		maxSize:     maxSize,
+		maxElements: maxElements,
+	}
+}
+
+func (b *statsdBuffer) writeGauge(namespace string, globalTags []string, name string, value float64, tags []string, rate float64, timestamp int64) error {
+	if b.elementCount >= b.maxElements {
+		return errBufferFull
+	}
+	originalBuffer := b.buffer
+	b.buffer = appendGauge(b.buffer, namespace, globalTags, name, value, tags, rate)
+	b.buffer = appendTimestamp(b.buffer, timestamp)
+	b.writeSeparator()
+	return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) writeCount(namespace string, globalTags []string, name string, value int64, tags []string, rate float64, timestamp int64) error {
+	if b.elementCount >= b.maxElements {
+		return errBufferFull
+	}
+	originalBuffer := b.buffer
+	b.buffer = appendCount(b.buffer, namespace, globalTags, name, value, tags, rate)
+	b.buffer = appendTimestamp(b.buffer, timestamp)
+	b.writeSeparator()
+	return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) writeHistogram(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error {
+	if b.elementCount >= b.maxElements {
+		return errBufferFull
+	}
+	originalBuffer := b.buffer
+	b.buffer = appendHistogram(b.buffer, namespace, globalTags, name, value, tags, rate)
+	b.writeSeparator()
+	return b.validateNewElement(originalBuffer)
+}
+
+// writeAggregated serializes as many values as possible into the current buffer and returns the position in values where it stopped.
+func (b *statsdBuffer) writeAggregated(metricSymbol []byte, namespace string, globalTags []string, name string, values []float64, tags string, tagSize int, precision int) (int, error) {
+	if b.elementCount >= b.maxElements {
+		return 0, errBufferFull
+	}
+
+	originalBuffer := b.buffer
+	b.buffer = appendHeader(b.buffer, namespace, name)
+
+	// buffer already full
+	if len(b.buffer)+tagSize > b.maxSize {
+		b.buffer = originalBuffer
+		return 0, errBufferFull
+	}
+
+	// We add as many values as possible
+	var position int
+	for idx, v := range values {
+		previousBuffer := b.buffer
+		if idx != 0 {
+			b.buffer = append(b.buffer, ':')
+		}
+
+		b.buffer = strconv.AppendFloat(b.buffer, v, 'f', precision, 64)
+
+		// Should we stop serializing and switch to another buffer?
+		if len(b.buffer)+tagSize > b.maxSize {
+			b.buffer = previousBuffer
+			break
+		}
+		position = idx + 1
+	}
+
+	// we could not add a single value
+	if position == 0 {
+		b.buffer = originalBuffer
+		return 0, errBufferFull
+	}
+
+	b.buffer = append(b.buffer, '|')
+	b.buffer = append(b.buffer, metricSymbol...)
+	b.buffer = appendTagsAggregated(b.buffer, globalTags, tags)
+	b.buffer = appendContainerID(b.buffer)
+	b.writeSeparator()
+	b.elementCount++
+
+	if position != len(values) {
+		return position, errPartialWrite
+	}
+	return position, nil
+
+}
+
+func (b *statsdBuffer) writeDistribution(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error {
+	if b.elementCount >= b.maxElements {
+		return errBufferFull
+	}
+	originalBuffer := b.buffer
+	b.buffer = appendDistribution(b.buffer, namespace, globalTags, name, value, tags, rate)
+	b.writeSeparator()
+	return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) writeSet(namespace string, globalTags []string, name string, value string, tags []string, rate float64) error {
+	if b.elementCount >= b.maxElements {
+		return errBufferFull
+	}
+	originalBuffer := b.buffer
+	b.buffer = appendSet(b.buffer, namespace, globalTags, name, value, tags, rate)
+	b.writeSeparator()
+	return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) writeTiming(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error {
+	if b.elementCount >= b.maxElements {
+		return errBufferFull
+	}
+	originalBuffer := b.buffer
+	b.buffer = appendTiming(b.buffer, namespace, globalTags, name, value, tags, rate)
+	b.writeSeparator()
+	return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) writeEvent(event *Event, globalTags []string) error {
+	if b.elementCount >= b.maxElements {
+		return errBufferFull
+	}
+	originalBuffer := b.buffer
+	b.buffer = appendEvent(b.buffer, event, globalTags)
+	b.writeSeparator()
+	return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) writeServiceCheck(serviceCheck *ServiceCheck, globalTags []string) error {
+	if b.elementCount >= b.maxElements {
+		return errBufferFull
+	}
+	originalBuffer := b.buffer
+	b.buffer = appendServiceCheck(b.buffer, serviceCheck, globalTags)
+	b.writeSeparator()
+	return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) validateNewElement(originalBuffer []byte) error {
+	if len(b.buffer) > b.maxSize {
+		b.buffer = originalBuffer
+		return errBufferFull
+	}
+	b.elementCount++
+	return nil
+}
+
+func (b *statsdBuffer) writeSeparator() {
+	b.buffer = append(b.buffer, '\n')
+}
+
+func (b *statsdBuffer) reset() {
+	b.buffer = b.buffer[:0]
+	b.elementCount = 0
+}
+
+func (b *statsdBuffer) bytes() []byte {
+	return b.buffer
+}
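
To make the errPartialWrite/errBufferFull contract of writeAggregated concrete, here is a minimal, hypothetical sketch of how a package-internal caller could drain a slice of values across several buffers. The function name flushAggregatedSketch, the namespace "app.", the metric name "latency", and the buffer sizes are illustrative only; this is not how the library's own worker is implemented.

func flushAggregatedSketch(values []float64) []*statsdBuffer {
	buffers := []*statsdBuffer{newStatsdBuffer(OptimalUDPPayloadSize, 64)}
	for len(values) > 0 {
		current := buffers[len(buffers)-1]
		// Pack as many histogram values as fit into the current buffer.
		n, err := current.writeAggregated(histogramSymbol, "app.", nil, "latency", values, "", 0, -1)
		values = values[n:]
		if err == nil {
			break // every remaining value fit into this buffer
		}
		if n == 0 && current.elementCount == 0 {
			break // a single value does not fit even in an empty buffer; give up
		}
		// errPartialWrite or errBufferFull: continue with a fresh buffer.
		buffers = append(buffers, newStatsdBuffer(OptimalUDPPayloadSize, 64))
	}
	return buffers
}
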
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/buffer_pool.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/buffer_pool.go
new file mode 100644
index 0000000000..7a3e3c9d22
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/buffer_pool.go
@@ -0,0 +1,40 @@
+package statsd
+
+type bufferPool struct {
+	pool              chan *statsdBuffer
+	bufferMaxSize     int
+	bufferMaxElements int
+}
+
+func newBufferPool(poolSize, bufferMaxSize, bufferMaxElements int) *bufferPool {
+	p := &bufferPool{
+		pool:              make(chan *statsdBuffer, poolSize),
+		bufferMaxSize:     bufferMaxSize,
+		bufferMaxElements: bufferMaxElements,
+	}
+	for i := 0; i < poolSize; i++ {
+		p.addNewBuffer()
+	}
+	return p
+}
+
+func (p *bufferPool) addNewBuffer() {
+	p.pool <- newStatsdBuffer(p.bufferMaxSize, p.bufferMaxElements)
+}
+
+func (p *bufferPool) borrowBuffer() *statsdBuffer {
+	select {
+	case b := <-p.pool:
+		return b
+	default:
+		return newStatsdBuffer(p.bufferMaxSize, p.bufferMaxElements)
+	}
+}
+
+func (p *bufferPool) returnBuffer(buffer *statsdBuffer) {
+	buffer.reset()
+	select {
+	case p.pool <- buffer:
+	default:
+	}
+}
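
A small illustration of the non-blocking pool semantics above: borrowing from an empty pool allocates a fresh buffer, and returning to a full pool drops the buffer. The function name and the sizes passed to newBufferPool are illustrative.

func bufferPoolSketch() {
	pool := newBufferPool(2, 1432, 64) // 2 pooled buffers of 1432 bytes / 64 elements each
	a := pool.borrowBuffer()
	b := pool.borrowBuffer()
	c := pool.borrowBuffer() // the pool channel is empty here, so c is freshly allocated
	pool.returnBuffer(a)
	pool.returnBuffer(b)
	pool.returnBuffer(c) // the channel already holds two buffers again, so c is discarded
}
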
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/buffered_metric_context.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/buffered_metric_context.go
new file mode 100644
index 0000000000..41404d98e2
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/buffered_metric_context.go
@@ -0,0 +1,82 @@
+package statsd
+
+import (
+	"math/rand"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// bufferedMetricContexts represents the contexts for Histograms, Distributions
+// and Timings. Since those 3 metric types behave the same way and are sampled
+// with the same logic, they're represented by the same type.
+type bufferedMetricContexts struct {
+	nbContext uint64
+	mutex     sync.RWMutex
+	values    bufferedMetricMap
+	newMetric func(string, float64, string) *bufferedMetric
+
+	// Each bufferedMetricContexts uses its own random source and random
+	// lock to prevent goroutines from contending for the lock on the
+	// "math/rand" package-global random source (e.g. calls like
+	// "rand.Float64()" must acquire a shared lock to get the next
+	// pseudorandom number).
+	random     *rand.Rand
+	randomLock sync.Mutex
+}
+
+func newBufferedContexts(newMetric func(string, float64, string) *bufferedMetric) bufferedMetricContexts {
+	return bufferedMetricContexts{
+		values:    bufferedMetricMap{},
+		newMetric: newMetric,
+		// Note that calling "time.Now().UnixNano()" repeatedly quickly may return
+		// very similar values. That's fine for seeding the worker-specific random
+		// source because we just need an evenly distributed stream of float values.
+		// Do not use this random source for cryptographic randomness.
+		random: rand.New(rand.NewSource(time.Now().UnixNano())),
+	}
+}
+
+func (bc *bufferedMetricContexts) flush(metrics []metric) []metric {
+	bc.mutex.Lock()
+	values := bc.values
+	bc.values = bufferedMetricMap{}
+	bc.mutex.Unlock()
+
+	for _, d := range values {
+		metrics = append(metrics, d.flushUnsafe())
+	}
+	atomic.AddUint64(&bc.nbContext, uint64(len(values)))
+	return metrics
+}
+
+func (bc *bufferedMetricContexts) sample(name string, value float64, tags []string, rate float64) error {
+	if !shouldSample(rate, bc.random, &bc.randomLock) {
+		return nil
+	}
+
+	context, stringTags := getContextAndTags(name, tags)
+
+	bc.mutex.RLock()
+	if v, found := bc.values[context]; found {
+		v.sample(value)
+		bc.mutex.RUnlock()
+		return nil
+	}
+	bc.mutex.RUnlock()
+
+	bc.mutex.Lock()
+	// Check if another goroutine hasn't created the value between the 'RUnlock' and 'Lock'
+	if v, found := bc.values[context]; found {
+		v.sample(value)
+		bc.mutex.Unlock()
+		return nil
+	}
+	bc.values[context] = bc.newMetric(name, value, stringTags)
+	bc.mutex.Unlock()
+	return nil
+}
+
+func (bc *bufferedMetricContexts) getNbContext() uint64 {
+	return atomic.LoadUint64(&bc.nbContext)
+}
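
For orientation, a package-internal sketch of how one of these context maps could be wired and flushed. It assumes the aggregator builds its histogram contexts with newBufferedContexts(newHistogramMetric) (consistent with the aggregator methods earlier in this diff, though that wiring is not shown here) and that shouldSample treats a rate of 1 as "always sample"; the metric name and tag are illustrative.

func histogramContextsSketch() []metric {
	contexts := newBufferedContexts(newHistogramMetric)
	_ = contexts.sample("request.duration", 12.5, []string{"route:/health"}, 1)
	_ = contexts.sample("request.duration", 14.0, []string{"route:/health"}, 1)
	// Both samples land in the same context (same name and tags), so the flush
	// returns a single histogramAggregated metric carrying both values.
	return contexts.flush(nil)
}
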
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/container.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/container.go
new file mode 100644
index 0000000000..b2331e8290
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/container.go
@@ -0,0 +1,82 @@
+package statsd
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"sync"
+)
+
+const (
+	// cgroupPath is the path to the cgroup file where we can find the container id if one exists.
+	cgroupPath = "/proc/self/cgroup"
+)
+
+const (
+	uuidSource      = "[0-9a-f]{8}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{12}"
+	containerSource = "[0-9a-f]{64}"
+	taskSource      = "[0-9a-f]{32}-\\d+"
+)
+
+var (
+	// expLine matches a line in the /proc/self/cgroup file. It has a submatch for the last element (path), which contains the container ID.
+	expLine = regexp.MustCompile(`^\d+:[^:]*:(.+)$`)
+
+	// expContainerID matches container IDs and sources. Source: https://github.com/Qard/container-info/blob/master/index.js
+	expContainerID = regexp.MustCompile(fmt.Sprintf(`(%s|%s|%s)(?:.scope)?$`, uuidSource, containerSource, taskSource))
+
+	// containerID holds the container ID.
+	containerID = ""
+)
+
+// parseContainerID finds the first container ID reading from r and returns it.
+func parseContainerID(r io.Reader) string {
+	scn := bufio.NewScanner(r)
+	for scn.Scan() {
+		path := expLine.FindStringSubmatch(scn.Text())
+		if len(path) != 2 {
+			// invalid entry, continue
+			continue
+		}
+		if parts := expContainerID.FindStringSubmatch(path[1]); len(parts) == 2 {
+			return parts[1]
+		}
+	}
+	return ""
+}
+
+// readContainerID attempts to return the container ID from the provided file path or empty on failure.
+func readContainerID(fpath string) string {
+	f, err := os.Open(fpath)
+	if err != nil {
+		return ""
+	}
+	defer f.Close()
+	return parseContainerID(f)
+}
+
+// getContainerID returns the container ID configured at client creation.
+// It can either be auto-discovered with origin detection or provided by the user.
+// User-defined container ID is prioritized.
+func getContainerID() string {
+	return containerID
+}
+
+var initOnce sync.Once
+
+// initContainerID initializes the container ID.
+// It can either be provided by the user or read from cgroups.
+func initContainerID(userProvidedID string, cgroupFallback bool) {
+	initOnce.Do(func() {
+		if userProvidedID != "" {
+			containerID = userProvidedID
+			return
+		}
+
+		if cgroupFallback {
+			containerID = readContainerID(cgroupPath)
+		}
+	})
+}
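
A quick, hypothetical sketch of what parseContainerID extracts from a typical Docker cgroup v1 line. The line and its 64-hex-character ID are fabricated, and the sketch assumes a "strings" import that this file does not otherwise need.

func containerIDSketch() string {
	// One line in the style of /proc/self/cgroup on a Docker host.
	line := "12:memory:/docker/0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	// expLine keeps the path element; expContainerID then pulls out the 64-char hex ID.
	return parseContainerID(strings.NewReader(line))
}
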
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/event.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/event.go
new file mode 100644
index 0000000000..a2ca4faf7f
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/event.go
@@ -0,0 +1,75 @@
+package statsd
+
+import (
+	"fmt"
+	"time"
+)
+
+// Events support
+// EventAlertType and EventAlertPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41
+// The reason why they got exported is so that client code can directly use the types.
+
+// EventAlertType is the alert type for events
+type EventAlertType string
+
+const (
+	// Info is the "info" AlertType for events
+	Info EventAlertType = "info"
+	// Error is the "error" AlertType for events
+	Error EventAlertType = "error"
+	// Warning is the "warning" AlertType for events
+	Warning EventAlertType = "warning"
+	// Success is the "success" AlertType for events
+	Success EventAlertType = "success"
+)
+
+// EventPriority is the event priority for events
+type EventPriority string
+
+const (
+	// Normal is the "normal" Priority for events
+	Normal EventPriority = "normal"
+	// Low is the "low" Priority for events
+	Low EventPriority = "low"
+)
+
+// An Event is an object that can be posted to your DataDog event stream.
+type Event struct {
+	// Title of the event.  Required.
+	Title string
+	// Text is the description of the event.
+	Text string
+	// Timestamp is a timestamp for the event.  If not provided, the dogstatsd
+	// server will set this to the current time.
+	Timestamp time.Time
+	// Hostname for the event.
+	Hostname string
+	// AggregationKey groups this event with others of the same key.
+	AggregationKey string
+	// Priority of the event.  Can be statsd.Low or statsd.Normal.
+	Priority EventPriority
+	// SourceTypeName is a source type for the event.
+	SourceTypeName string
+	// AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success.
+	// If absent, the default value applied by the dogstatsd server is Info.
+	AlertType EventAlertType
+	// Tags for the event.
+	Tags []string
+}
+
+// NewEvent creates a new event with the given title and text.  Error checking
+// against these values is done at send-time, or upon running e.Check.
+func NewEvent(title, text string) *Event {
+	return &Event{
+		Title: title,
+		Text:  text,
+	}
+}
+
+// Check verifies that an event is valid.
+func (e *Event) Check() error {
+	if len(e.Title) == 0 {
+		return fmt.Errorf("statsd.Event title is required")
+	}
+	return nil
+}
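
Roughly how a consumer of this vendored package might use the Event API; the event title, text and tag values are illustrative, and client can be any statsd.ClientInterface created elsewhere.

package example

import "github.com/DataDog/datadog-go/v5/statsd"

func sendDeployEvent(client statsd.ClientInterface) error {
	evt := statsd.NewEvent("deploy finished", "chains controller rolled out")
	evt.AlertType = statsd.Success
	evt.Tags = []string{"service:tekton-chains"}
	if err := evt.Check(); err != nil { // only the title is validated client-side
		return err
	}
	return client.Event(evt)
}
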
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/fnv1a.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/fnv1a.go
new file mode 100644
index 0000000000..03dc8a07c7
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/fnv1a.go
@@ -0,0 +1,39 @@
+package statsd
+
+const (
+	// FNV-1a
+	offset32 = uint32(2166136261)
+	prime32  = uint32(16777619)
+
+	// init32 is what 32 bits hash values should be initialized with.
+	init32 = offset32
+)
+
+// hashString32 returns the hash of s.
+func hashString32(s string) uint32 {
+	return addString32(init32, s)
+}
+
+// addString32 adds the hash of s to the precomputed hash value h.
+func addString32(h uint32, s string) uint32 {
+	i := 0
+	n := (len(s) / 8) * 8
+
+	for i != n {
+		h = (h ^ uint32(s[i])) * prime32
+		h = (h ^ uint32(s[i+1])) * prime32
+		h = (h ^ uint32(s[i+2])) * prime32
+		h = (h ^ uint32(s[i+3])) * prime32
+		h = (h ^ uint32(s[i+4])) * prime32
+		h = (h ^ uint32(s[i+5])) * prime32
+		h = (h ^ uint32(s[i+6])) * prime32
+		h = (h ^ uint32(s[i+7])) * prime32
+		i += 8
+	}
+
+	for _, c := range s[i:] {
+		h = (h ^ uint32(c)) * prime32
+	}
+
+	return h
+}
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/format.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/format.go
new file mode 100644
index 0000000000..f3ab9231ff
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/format.go
@@ -0,0 +1,280 @@
+package statsd
+
+import (
+	"strconv"
+	"strings"
+)
+
+var (
+	gaugeSymbol         = []byte("g")
+	countSymbol         = []byte("c")
+	histogramSymbol     = []byte("h")
+	distributionSymbol  = []byte("d")
+	setSymbol           = []byte("s")
+	timingSymbol        = []byte("ms")
+	tagSeparatorSymbol  = ","
+	nameSeparatorSymbol = ":"
+)
+
+func appendHeader(buffer []byte, namespace string, name string) []byte {
+	if namespace != "" {
+		buffer = append(buffer, namespace...)
+	}
+	buffer = append(buffer, name...)
+	buffer = append(buffer, ':')
+	return buffer
+}
+
+func appendRate(buffer []byte, rate float64) []byte {
+	if rate < 1 {
+		buffer = append(buffer, "|@"...)
+		buffer = strconv.AppendFloat(buffer, rate, 'f', -1, 64)
+	}
+	return buffer
+}
+
+func appendWithoutNewlines(buffer []byte, s string) []byte {
+	// fastpath for strings without newlines
+	if strings.IndexByte(s, '\n') == -1 {
+		return append(buffer, s...)
+	}
+
+	for _, b := range []byte(s) {
+		if b != '\n' {
+			buffer = append(buffer, b)
+		}
+	}
+	return buffer
+}
+
+func appendTags(buffer []byte, globalTags []string, tags []string) []byte {
+	if len(globalTags) == 0 && len(tags) == 0 {
+		return buffer
+	}
+	buffer = append(buffer, "|#"...)
+	firstTag := true
+
+	for _, tag := range globalTags {
+		if !firstTag {
+			buffer = append(buffer, tagSeparatorSymbol...)
+		}
+		buffer = appendWithoutNewlines(buffer, tag)
+		firstTag = false
+	}
+	for _, tag := range tags {
+		if !firstTag {
+			buffer = append(buffer, tagSeparatorSymbol...)
+		}
+		buffer = appendWithoutNewlines(buffer, tag)
+		firstTag = false
+	}
+	return buffer
+}
+
+func appendTagsAggregated(buffer []byte, globalTags []string, tags string) []byte {
+	if len(globalTags) == 0 && tags == "" {
+		return buffer
+	}
+
+	buffer = append(buffer, "|#"...)
+	firstTag := true
+
+	for _, tag := range globalTags {
+		if !firstTag {
+			buffer = append(buffer, tagSeparatorSymbol...)
+		}
+		buffer = appendWithoutNewlines(buffer, tag)
+		firstTag = false
+	}
+	if tags != "" {
+		if !firstTag {
+			buffer = append(buffer, tagSeparatorSymbol...)
+		}
+		buffer = appendWithoutNewlines(buffer, tags)
+	}
+	return buffer
+}
+
+func appendFloatMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64, precision int) []byte {
+	buffer = appendHeader(buffer, namespace, name)
+	buffer = strconv.AppendFloat(buffer, value, 'f', precision, 64)
+	buffer = append(buffer, '|')
+	buffer = append(buffer, typeSymbol...)
+	buffer = appendRate(buffer, rate)
+	buffer = appendTags(buffer, globalTags, tags)
+	buffer = appendContainerID(buffer)
+	return buffer
+}
+
+func appendIntegerMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value int64, tags []string, rate float64) []byte {
+	buffer = appendHeader(buffer, namespace, name)
+	buffer = strconv.AppendInt(buffer, value, 10)
+	buffer = append(buffer, '|')
+	buffer = append(buffer, typeSymbol...)
+	buffer = appendRate(buffer, rate)
+	buffer = appendTags(buffer, globalTags, tags)
+	buffer = appendContainerID(buffer)
+	return buffer
+}
+
+func appendStringMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value string, tags []string, rate float64) []byte {
+	buffer = appendHeader(buffer, namespace, name)
+	buffer = append(buffer, value...)
+	buffer = append(buffer, '|')
+	buffer = append(buffer, typeSymbol...)
+	buffer = appendRate(buffer, rate)
+	buffer = appendTags(buffer, globalTags, tags)
+	buffer = appendContainerID(buffer)
+	return buffer
+}
+
+func appendGauge(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
+	return appendFloatMetric(buffer, gaugeSymbol, namespace, globalTags, name, value, tags, rate, -1)
+}
+
+func appendCount(buffer []byte, namespace string, globalTags []string, name string, value int64, tags []string, rate float64) []byte {
+	return appendIntegerMetric(buffer, countSymbol, namespace, globalTags, name, value, tags, rate)
+}
+
+func appendHistogram(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
+	return appendFloatMetric(buffer, histogramSymbol, namespace, globalTags, name, value, tags, rate, -1)
+}
+
+func appendDistribution(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
+	return appendFloatMetric(buffer, distributionSymbol, namespace, globalTags, name, value, tags, rate, -1)
+}
+
+func appendSet(buffer []byte, namespace string, globalTags []string, name string, value string, tags []string, rate float64) []byte {
+	return appendStringMetric(buffer, setSymbol, namespace, globalTags, name, value, tags, rate)
+}
+
+func appendTiming(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
+	return appendFloatMetric(buffer, timingSymbol, namespace, globalTags, name, value, tags, rate, 6)
+}
+
+func escapedEventTextLen(text string) int {
+	return len(text) + strings.Count(text, "\n")
+}
+
+func appendEscapedEventText(buffer []byte, text string) []byte {
+	for _, b := range []byte(text) {
+		if b != '\n' {
+			buffer = append(buffer, b)
+		} else {
+			buffer = append(buffer, "\\n"...)
+		}
+	}
+	return buffer
+}
+
+func appendEvent(buffer []byte, event *Event, globalTags []string) []byte {
+	escapedTextLen := escapedEventTextLen(event.Text)
+
+	buffer = append(buffer, "_e{"...)
+	buffer = strconv.AppendInt(buffer, int64(len(event.Title)), 10)
+	buffer = append(buffer, tagSeparatorSymbol...)
+	buffer = strconv.AppendInt(buffer, int64(escapedTextLen), 10)
+	buffer = append(buffer, "}:"...)
+	buffer = append(buffer, event.Title...)
+	buffer = append(buffer, '|')
+	if escapedTextLen != len(event.Text) {
+		buffer = appendEscapedEventText(buffer, event.Text)
+	} else {
+		buffer = append(buffer, event.Text...)
+	}
+
+	if !event.Timestamp.IsZero() {
+		buffer = append(buffer, "|d:"...)
+		buffer = strconv.AppendInt(buffer, int64(event.Timestamp.Unix()), 10)
+	}
+
+	if len(event.Hostname) != 0 {
+		buffer = append(buffer, "|h:"...)
+		buffer = append(buffer, event.Hostname...)
+	}
+
+	if len(event.AggregationKey) != 0 {
+		buffer = append(buffer, "|k:"...)
+		buffer = append(buffer, event.AggregationKey...)
+	}
+
+	if len(event.Priority) != 0 {
+		buffer = append(buffer, "|p:"...)
+		buffer = append(buffer, event.Priority...)
+	}
+
+	if len(event.SourceTypeName) != 0 {
+		buffer = append(buffer, "|s:"...)
+		buffer = append(buffer, event.SourceTypeName...)
+	}
+
+	if len(event.AlertType) != 0 {
+		buffer = append(buffer, "|t:"...)
+		buffer = append(buffer, string(event.AlertType)...)
+	}
+
+	buffer = appendTags(buffer, globalTags, event.Tags)
+	buffer = appendContainerID(buffer)
+	return buffer
+}
+
+func appendEscapedServiceCheckText(buffer []byte, text string) []byte {
+	for i := 0; i < len(text); i++ {
+		if text[i] == '\n' {
+			buffer = append(buffer, "\\n"...)
+		} else if text[i] == 'm' && i+1 < len(text) && text[i+1] == ':' {
+			buffer = append(buffer, "m\\:"...)
+			i++
+		} else {
+			buffer = append(buffer, text[i])
+		}
+	}
+	return buffer
+}
+
+func appendServiceCheck(buffer []byte, serviceCheck *ServiceCheck, globalTags []string) []byte {
+	buffer = append(buffer, "_sc|"...)
+	buffer = append(buffer, serviceCheck.Name...)
+	buffer = append(buffer, '|')
+	buffer = strconv.AppendInt(buffer, int64(serviceCheck.Status), 10)
+
+	if !serviceCheck.Timestamp.IsZero() {
+		buffer = append(buffer, "|d:"...)
+		buffer = strconv.AppendInt(buffer, int64(serviceCheck.Timestamp.Unix()), 10)
+	}
+
+	if len(serviceCheck.Hostname) != 0 {
+		buffer = append(buffer, "|h:"...)
+		buffer = append(buffer, serviceCheck.Hostname...)
+	}
+
+	buffer = appendTags(buffer, globalTags, serviceCheck.Tags)
+
+	if len(serviceCheck.Message) != 0 {
+		buffer = append(buffer, "|m:"...)
+		buffer = appendEscapedServiceCheckText(buffer, serviceCheck.Message)
+	}
+
+	buffer = appendContainerID(buffer)
+	return buffer
+}
+
+func appendSeparator(buffer []byte) []byte {
+	return append(buffer, '\n')
+}
+
+func appendContainerID(buffer []byte) []byte {
+	if containerID := getContainerID(); len(containerID) > 0 {
+		buffer = append(buffer, "|c:"...)
+		buffer = append(buffer, containerID...)
+	}
+	return buffer
+}
+
+func appendTimestamp(buffer []byte, timestamp int64) []byte {
+	if timestamp > noTimestamp {
+		buffer = append(buffer, "|T"...)
+		buffer = strconv.AppendInt(buffer, timestamp, 10)
+	}
+	return buffer
+}
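
To show what these append helpers emit on the wire, a package-internal sketch of a serialized gauge. The namespace, metric name and tags are made up; the trailing "|c:<container-id>" field is only appended when origin detection resolved a container ID.

func gaugeWireSketch() string {
	// Produces: app.queue.depth:12|g|@0.5|#env:prod,queue:signing
	line := appendGauge(nil, "app.", []string{"env:prod"}, "queue.depth", 12, []string{"queue:signing"}, 0.5)
	return string(line)
}
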
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/metrics.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/metrics.go
new file mode 100644
index 0000000000..82f11ac18f
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/metrics.go
@@ -0,0 +1,181 @@
+package statsd
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+/*
+These are the metric types that can be aggregated on the client side:
+  - Gauge
+  - Count
+  - Set
+*/
+
+type countMetric struct {
+	value int64
+	name  string
+	tags  []string
+}
+
+func newCountMetric(name string, value int64, tags []string) *countMetric {
+	return &countMetric{
+		value: value,
+		name:  name,
+		tags:  copySlice(tags),
+	}
+}
+
+func (c *countMetric) sample(v int64) {
+	atomic.AddInt64(&c.value, v)
+}
+
+func (c *countMetric) flushUnsafe() metric {
+	return metric{
+		metricType: count,
+		name:       c.name,
+		tags:       c.tags,
+		rate:       1,
+		ivalue:     c.value,
+	}
+}
+
+// Gauge
+
+type gaugeMetric struct {
+	value uint64
+	name  string
+	tags  []string
+}
+
+func newGaugeMetric(name string, value float64, tags []string) *gaugeMetric {
+	return &gaugeMetric{
+		value: math.Float64bits(value),
+		name:  name,
+		tags:  copySlice(tags),
+	}
+}
+
+func (g *gaugeMetric) sample(v float64) {
+	atomic.StoreUint64(&g.value, math.Float64bits(v))
+}
+
+func (g *gaugeMetric) flushUnsafe() metric {
+	return metric{
+		metricType: gauge,
+		name:       g.name,
+		tags:       g.tags,
+		rate:       1,
+		fvalue:     math.Float64frombits(g.value),
+	}
+}
+
+// Set
+
+type setMetric struct {
+	data map[string]struct{}
+	name string
+	tags []string
+	sync.Mutex
+}
+
+func newSetMetric(name string, value string, tags []string) *setMetric {
+	set := &setMetric{
+		data: map[string]struct{}{},
+		name: name,
+		tags: copySlice(tags),
+	}
+	set.data[value] = struct{}{}
+	return set
+}
+
+func (s *setMetric) sample(v string) {
+	s.Lock()
+	defer s.Unlock()
+	s.data[v] = struct{}{}
+}
+
+// Sets are aggregated on the agent side too. We flush the keys so a set from
+// multiple applications can be correctly aggregated on the agent side.
+func (s *setMetric) flushUnsafe() []metric {
+	if len(s.data) == 0 {
+		return nil
+	}
+
+	metrics := make([]metric, len(s.data))
+	i := 0
+	for value := range s.data {
+		metrics[i] = metric{
+			metricType: set,
+			name:       s.name,
+			tags:       s.tags,
+			rate:       1,
+			svalue:     value,
+		}
+		i++
+	}
+	return metrics
+}
+
+// Histograms, Distributions and Timings
+
+type bufferedMetric struct {
+	sync.Mutex
+
+	data []float64
+	name string
+	// Histograms and Distributions store tags as one string since we need
+	// to compute their size multiple times when serializing.
+	tags  string
+	mtype metricType
+}
+
+func (s *bufferedMetric) sample(v float64) {
+	s.Lock()
+	defer s.Unlock()
+	s.data = append(s.data, v)
+}
+
+func (s *bufferedMetric) flushUnsafe() metric {
+	return metric{
+		metricType: s.mtype,
+		name:       s.name,
+		stags:      s.tags,
+		rate:       1,
+		fvalues:    s.data,
+	}
+}
+
+type histogramMetric = bufferedMetric
+
+func newHistogramMetric(name string, value float64, stringTags string) *histogramMetric {
+	return &histogramMetric{
+		data:  []float64{value},
+		name:  name,
+		tags:  stringTags,
+		mtype: histogramAggregated,
+	}
+}
+
+type distributionMetric = bufferedMetric
+
+func newDistributionMetric(name string, value float64, stringTags string) *distributionMetric {
+	return &distributionMetric{
+		data:  []float64{value},
+		name:  name,
+		tags:  stringTags,
+		mtype: distributionAggregated,
+	}
+}
+
+type timingMetric = bufferedMetric
+
+func newTimingMetric(name string, value float64, stringTags string) *timingMetric {
+	return &timingMetric{
+		data:  []float64{value},
+		name:  name,
+		tags:  stringTags,
+		mtype: timingAggregated,
+	}
+}
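
A short package-internal sketch of the client-side aggregation semantics of these three types, using only the constructors and methods defined above; the metric names and values are illustrative.

func aggregationSemanticsSketch() {
	c := newCountMetric("requests", 1, nil)
	c.sample(2) // counts add up: flushes as 3

	g := newGaugeMetric("queue.depth", 10, nil)
	g.sample(4) // gauges keep the last value: flushes as 4

	s := newSetMetric("users", "alice", nil)
	s.sample("bob")
	_ = s.flushUnsafe() // sets flush one metric per unique value: two metrics here
}
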
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/noop.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/noop.go
new file mode 100644
index 0000000000..e92744f407
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/noop.go
@@ -0,0 +1,106 @@
+package statsd
+
+import "time"
+
+// NoOpClient is a statsd client that does nothing. Can be useful in testing
+// situations for library users.
+type NoOpClient struct{}
+
+// Gauge does nothing and returns nil
+func (n *NoOpClient) Gauge(name string, value float64, tags []string, rate float64) error {
+	return nil
+}
+
+// GaugeWithTimestamp does nothing and returns nil
+func (n *NoOpClient) GaugeWithTimestamp(name string, value float64, tags []string, rate float64, timestamp time.Time) error {
+	return nil
+}
+
+// Count does nothing and returns nil
+func (n *NoOpClient) Count(name string, value int64, tags []string, rate float64) error {
+	return nil
+}
+
+// CountWithTimestamp does nothing and returns nil
+func (n *NoOpClient) CountWithTimestamp(name string, value int64, tags []string, rate float64, timestamp time.Time) error {
+	return nil
+}
+
+// Histogram does nothing and returns nil
+func (n *NoOpClient) Histogram(name string, value float64, tags []string, rate float64) error {
+	return nil
+}
+
+// Distribution does nothing and returns nil
+func (n *NoOpClient) Distribution(name string, value float64, tags []string, rate float64) error {
+	return nil
+}
+
+// Decr does nothing and returns nil
+func (n *NoOpClient) Decr(name string, tags []string, rate float64) error {
+	return nil
+}
+
+// Incr does nothing and returns nil
+func (n *NoOpClient) Incr(name string, tags []string, rate float64) error {
+	return nil
+}
+
+// Set does nothing and returns nil
+func (n *NoOpClient) Set(name string, value string, tags []string, rate float64) error {
+	return nil
+}
+
+// Timing does nothing and returns nil
+func (n *NoOpClient) Timing(name string, value time.Duration, tags []string, rate float64) error {
+	return nil
+}
+
+// TimeInMilliseconds does nothing and returns nil
+func (n *NoOpClient) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
+	return nil
+}
+
+// Event does nothing and returns nil
+func (n *NoOpClient) Event(e *Event) error {
+	return nil
+}
+
+// SimpleEvent does nothing and returns nil
+func (n *NoOpClient) SimpleEvent(title, text string) error {
+	return nil
+}
+
+// ServiceCheck does nothing and returns nil
+func (n *NoOpClient) ServiceCheck(sc *ServiceCheck) error {
+	return nil
+}
+
+// SimpleServiceCheck does nothing and returns nil
+func (n *NoOpClient) SimpleServiceCheck(name string, status ServiceCheckStatus) error {
+	return nil
+}
+
+// Close does nothing and returns nil
+func (n *NoOpClient) Close() error {
+	return nil
+}
+
+// Flush does nothing and returns nil
+func (n *NoOpClient) Flush() error {
+	return nil
+}
+
+// IsClosed does nothing and returns false
+func (n *NoOpClient) IsClosed() bool {
+	return false
+}
+
+// GetTelemetry does nothing and returns an empty Telemetry
+func (n *NoOpClient) GetTelemetry() Telemetry {
+	return Telemetry{}
+}
+
+// Verify that NoOpClient implements the ClientInterface.
+// https://golang.org/doc/faq#guarantee_satisfies_interface
+var _ ClientInterface = &NoOpClient{}
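
Roughly how NoOpClient is meant to be used: code under test depends on ClientInterface and is handed the no-op implementation, so no agent needs to run. The reporter type and metric name below are illustrative, not part of the library or of Chains.

package main

import "github.com/DataDog/datadog-go/v5/statsd"

type reporter struct {
	stats statsd.ClientInterface
}

func (r *reporter) observe() {
	_ = r.stats.Incr("signatures.created", []string{"backend:example"}, 1)
}

func main() {
	r := &reporter{stats: &statsd.NoOpClient{}} // every call is accepted and dropped
	r.observe()
}
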
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/options.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/options.go
new file mode 100644
index 0000000000..0728a976b4
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/options.go
@@ -0,0 +1,348 @@
+package statsd
+
+import (
+	"fmt"
+	"math"
+	"strings"
+	"time"
+)
+
+var (
+	defaultNamespace                = ""
+	defaultTags                     = []string{}
+	defaultMaxBytesPerPayload       = 0
+	defaultMaxMessagesPerPayload    = math.MaxInt32
+	defaultBufferPoolSize           = 0
+	defaultBufferFlushInterval      = 100 * time.Millisecond
+	defaultWorkerCount              = 32
+	defaultSenderQueueSize          = 0
+	defaultWriteTimeout             = 100 * time.Millisecond
+	defaultTelemetry                = true
+	defaultReceivingMode            = mutexMode
+	defaultChannelModeBufferSize    = 4096
+	defaultAggregationFlushInterval = 2 * time.Second
+	defaultAggregation              = true
+	defaultExtendedAggregation      = false
+	defaultOriginDetection          = true
+)
+
+// Options contains the configuration options for a client.
+type Options struct {
+	namespace                string
+	tags                     []string
+	maxBytesPerPayload       int
+	maxMessagesPerPayload    int
+	bufferPoolSize           int
+	bufferFlushInterval      time.Duration
+	workersCount             int
+	senderQueueSize          int
+	writeTimeout             time.Duration
+	telemetry                bool
+	receiveMode              receivingMode
+	channelModeBufferSize    int
+	aggregationFlushInterval time.Duration
+	aggregation              bool
+	extendedAggregation      bool
+	telemetryAddr            string
+	originDetection          bool
+	containerID              string
+}
+
+func resolveOptions(options []Option) (*Options, error) {
+	o := &Options{
+		namespace:                defaultNamespace,
+		tags:                     defaultTags,
+		maxBytesPerPayload:       defaultMaxBytesPerPayload,
+		maxMessagesPerPayload:    defaultMaxMessagesPerPayload,
+		bufferPoolSize:           defaultBufferPoolSize,
+		bufferFlushInterval:      defaultBufferFlushInterval,
+		workersCount:             defaultWorkerCount,
+		senderQueueSize:          defaultSenderQueueSize,
+		writeTimeout:             defaultWriteTimeout,
+		telemetry:                defaultTelemetry,
+		receiveMode:              defaultReceivingMode,
+		channelModeBufferSize:    defaultChannelModeBufferSize,
+		aggregationFlushInterval: defaultAggregationFlushInterval,
+		aggregation:              defaultAggregation,
+		extendedAggregation:      defaultExtendedAggregation,
+		originDetection:          defaultOriginDetection,
+	}
+
+	for _, option := range options {
+		err := option(o)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return o, nil
+}
+
+// Option is a client option. Can return an error if validation fails.
+type Option func(*Options) error
+
+// WithNamespace sets a string to be prepended to all metric, event and service check names.
+//
+// A '.' will automatically be added after the namespace if needed. For example a metric 'test' with a namespace 'prod'
+// will produce a final metric named 'prod.test'.
+func WithNamespace(namespace string) Option {
+	return func(o *Options) error {
+		if strings.HasSuffix(namespace, ".") {
+			o.namespace = namespace
+		} else {
+			o.namespace = namespace + "."
+		}
+		return nil
+	}
+}
+
+// WithTags sets global tags to be applied to every metric, event and service check.
+func WithTags(tags []string) Option {
+	return func(o *Options) error {
+		o.tags = tags
+		return nil
+	}
+}
+
+// WithMaxMessagesPerPayload sets the maximum number of metrics, events and/or service checks that a single payload can
+// contain.
+//
+// The default is 'math.MaxInt32' which will most likely let the WithMaxBytesPerPayload option take precedence. This
+// option can be set to `1` to create an unbuffered client (each metric/event/service check will be sent in its own
+// payload to the agent).
+func WithMaxMessagesPerPayload(maxMessagesPerPayload int) Option {
+	return func(o *Options) error {
+		o.maxMessagesPerPayload = maxMessagesPerPayload
+		return nil
+	}
+}
+
+// WithMaxBytesPerPayload sets the maximum number of bytes a single payload can contain. Each sample, event and service
+// check must be smaller than this value once serialized or a `MessageTooLongError` is returned.
+//
+// The default value is 0, which sets the option to the optimal size for the transport protocol used: 1432 for UDP and
+// named pipe and 8192 for UDS. Those values offer the best performance.
+// Be careful when changing this option, see
+// https://docs.datadoghq.com/developers/dogstatsd/high_throughput/#ensure-proper-packet-sizes.
+func WithMaxBytesPerPayload(MaxBytesPerPayload int) Option {
+	return func(o *Options) error {
+		o.maxBytesPerPayload = MaxBytesPerPayload
+		return nil
+	}
+}
+
+// WithBufferPoolSize sets the size of the pool of buffers used to serialize metrics, events and service checks.
+//
+// The default, 0, will set the option to the optimal size for the transport protocol used: 2048 for UDP and named pipe
+// and 512 for UDS.
+func WithBufferPoolSize(bufferPoolSize int) Option {
+	return func(o *Options) error {
+		o.bufferPoolSize = bufferPoolSize
+		return nil
+	}
+}
+
+// WithBufferFlushInterval sets the interval after which the current buffer is flushed.
+//
+// Buffers are used to serialize data; they're flushed either when full (see WithMaxBytesPerPayload) or when they've
+// been open for longer than this interval.
+//
+// With apps sending a high number of metrics/events/service checks the interval rarely times out. But for apps that
+// send slowly, increasing this value will reduce the number of payloads sent on the wire as more data is serialized
+// into the same payload.
+//
+// Default is 100ms.
+func WithBufferFlushInterval(bufferFlushInterval time.Duration) Option {
+	return func(o *Options) error {
+		o.bufferFlushInterval = bufferFlushInterval
+		return nil
+	}
+}
+
+// WithWorkersCount sets the number of workers that will be used to serialize data.
+//
+// Those workers allow the use of multiple buffers at the same time (see WithBufferPoolSize) to reduce lock contention.
+//
+// Default is 32.
+func WithWorkersCount(workersCount int) Option {
+	return func(o *Options) error {
+		if workersCount < 1 {
+			return fmt.Errorf("workersCount must be a positive integer")
+		}
+		o.workersCount = workersCount
+		return nil
+	}
+}
+
+// WithSenderQueueSize sets the size of the sender queue in number of buffers.
+//
+// After data has been serialized into a buffer, the buffer is pushed to a queue that the sender consumes, sending
+// each one to the agent.
+//
+// The default value 0 will set the option to the optimal size for the transport protocol used: 2048 for UDP and named
+// pipe and 512 for UDS.
+func WithSenderQueueSize(senderQueueSize int) Option {
+	return func(o *Options) error {
+		o.senderQueueSize = senderQueueSize
+		return nil
+	}
+}
+
+// WithWriteTimeout sets the timeout for network communication with the Agent, after this interval a payload is
+// dropped. This is only used for UDS and named pipes connection.
+func WithWriteTimeout(writeTimeout time.Duration) Option {
+	return func(o *Options) error {
+		o.writeTimeout = writeTimeout
+		return nil
+	}
+}
+
+// WithChannelMode makes the client use channels to receive metrics.
+//
+// This determines how the client receives metrics from the app (for example when calling the `Gauge()` method).
+// The client will either drop the metrics if its buffers are full (WithChannelMode option) or block the caller until the
+// metric can be handled (WithMutexMode option). By default the client uses mutexes.
+//
+// WithChannelMode uses a channel (see WithChannelModeBufferSize to configure its size) to receive metrics and drops metrics if
+// the channel is full. Sending metrics in this mode is much slower than WithMutexMode (because of the channel), but will not
+// block the application. This mode is made for applications using many goroutines, sending the same metrics, at a very
+// high volume. The goal is to not slow down the application at the cost of dropping metrics and having a lower max
+// throughput.
+func WithChannelMode() Option {
+	return func(o *Options) error {
+		o.receiveMode = channelMode
+		return nil
+	}
+}
+
+// WithMutexMode will use mutexes to receive metrics from the app through the API.
+//
+// This determines how the client receives metrics from the app (for example when calling the `Gauge()` method).
+// The client will either drop the metrics if its buffers are full (WithChannelMode option) or block the caller until the
+// metric can be handled (WithMutexMode option). By default the client uses mutexes.
+//
+// WithMutexMode uses mutexes to receive metrics which is much faster than channels but can cause some lock contention
+// when used with a high number of goroutines sending the same metrics. Mutexes are sharded based on the metric name,
+// which limits mutex contention when multiple goroutines send different metrics (see WithWorkersCount). This is the
+// default behavior and will produce the best throughput.
+func WithMutexMode() Option {
+	return func(o *Options) error {
+		o.receiveMode = mutexMode
+		return nil
+	}
+}
+
+// WithChannelModeBufferSize sets the size of the channel holding incoming metrics when WithChannelMode is used.
+func WithChannelModeBufferSize(bufferSize int) Option {
+	return func(o *Options) error {
+		o.channelModeBufferSize = bufferSize
+		return nil
+	}
+}
+
+// WithAggregationInterval sets the interval at which aggregated metrics are flushed. See WithClientSideAggregation and
+// WithExtendedClientSideAggregation for more.
+//
+// The default interval is 2s. The interval must divide the Agent reporting period (default=10s) evenly to reduce "aliasing"
+// that can cause values to appear irregular/spiky.
+//
+// For example a 3s aggregation interval will create spikes in the final graph: an application sending a count metric
+// that increments at a constant 1000 times per second will appear noisy with an interval of 3s. This is because
+// client-side aggregation would report every 3 seconds, while the agent is reporting every 10 seconds. This means in
+// each agent bucket, the values are: 9000, 9000, 12000.
+func WithAggregationInterval(interval time.Duration) Option {
+	return func(o *Options) error {
+		o.aggregationFlushInterval = interval
+		return nil
+	}
+}
+
+// WithClientSideAggregation enables client side aggregation for Gauges, Counts and Sets.
+func WithClientSideAggregation() Option {
+	return func(o *Options) error {
+		o.aggregation = true
+		return nil
+	}
+}
+
+// WithoutClientSideAggregation disables client side aggregation.
+func WithoutClientSideAggregation() Option {
+	return func(o *Options) error {
+		o.aggregation = false
+		o.extendedAggregation = false
+		return nil
+	}
+}
+
+// WithExtendedClientSideAggregation enables client side aggregation for all types. This feature is only compatible with
+// Agent's version >=6.25.0 && <7.0.0 or Agent's versions >=7.25.0.
+func WithExtendedClientSideAggregation() Option {
+	return func(o *Options) error {
+		o.aggregation = true
+		o.extendedAggregation = true
+		return nil
+	}
+}
+
+// WithoutTelemetry disables the client telemetry.
+//
+// More on this here: https://docs.datadoghq.com/developers/dogstatsd/high_throughput/#client-side-telemetry
+func WithoutTelemetry() Option {
+	return func(o *Options) error {
+		o.telemetry = false
+		return nil
+	}
+}
+
+// WithTelemetryAddr sets a different address for telemetry metrics. By default the same address as the client is used
+// for telemetry.
+//
+// More on this here: https://docs.datadoghq.com/developers/dogstatsd/high_throughput/#client-side-telemetry
+func WithTelemetryAddr(addr string) Option {
+	return func(o *Options) error {
+		o.telemetryAddr = addr
+		return nil
+	}
+}
+
+// WithoutOriginDetection disables the client origin detection.
+// When enabled, the client tries to discover its container ID and sends it to the Agent
+// to enrich the metrics with container tags.
+// Origin detection can also be disabled by configuring the environment variable DD_ORIGIN_DETECTION_ENABLED=false.
+// The client tries to read the container ID by parsing the file /proc/self/cgroup; this is not supported on Windows.
+// The client prioritizes the value passed via DD_ENTITY_ID (if set) over the container ID.
+//
+// More on this here: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp
+func WithoutOriginDetection() Option {
+	return func(o *Options) error {
+		o.originDetection = false
+		return nil
+	}
+}
+
+// WithOriginDetection enables the client origin detection.
+// This feature requires Datadog Agent version >=6.35.0 && <7.0.0 or Agent versions >=7.35.0.
+// When enabled, the client tries to discover its container ID and sends it to the Agent
+// to enrich the metrics with container tags.
+// Origin detection can be disabled by configuring the environment variable DD_ORIGIN_DETECTION_ENABLED=false.
+// The client tries to read the container ID by parsing the file /proc/self/cgroup; this is not supported on Windows.
+// The client prioritizes the value passed via DD_ENTITY_ID (if set) over the container ID.
+//
+// More on this here: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp
+func WithOriginDetection() Option {
+	return func(o *Options) error {
+		o.originDetection = true
+		return nil
+	}
+}
+
+// WithContainerID allows passing the container ID, this will be used by the Agent to enrich metrics with container tags.
+// This feature requires Datadog Agent version >=6.35.0 && <7.0.0 or Agent versions >=7.35.0.
+// When configured, the provided container ID is prioritized over the container ID discovered via Origin Detection.
+// The client prioritizes the value passed via DD_ENTITY_ID (if set) over the container ID.
+func WithContainerID(id string) Option {
+	return func(o *Options) error {
+		o.containerID = id
+		return nil
+	}
+}
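
Putting a few of these options together, a sketch of how a downstream consumer might construct a client. It assumes the package's New constructor (defined in statsd.go, outside the part of this diff shown here); the address, namespace and tag values are illustrative.

package main

import (
	"log"
	"time"

	"github.com/DataDog/datadog-go/v5/statsd"
)

func main() {
	client, err := statsd.New("127.0.0.1:8125",
		statsd.WithNamespace("chains"),                // metric "x" is sent as "chains.x"
		statsd.WithTags([]string{"env:prod"}),         // added to every metric, event and service check
		statsd.WithExtendedClientSideAggregation(),    // also aggregate histograms, distributions and timings
		statsd.WithAggregationInterval(2*time.Second), // must divide the agent's 10s reporting period
	)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	_ = client.Gauge("queue.depth", 3, nil, 1)
}
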
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/pipe.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/pipe.go
new file mode 100644
index 0000000000..84c38e966d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/pipe.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package statsd
+
+import (
+	"errors"
+	"io"
+	"time"
+)
+
+func newWindowsPipeWriter(pipepath string, writeTimeout time.Duration) (io.WriteCloser, error) {
+	return nil, errors.New("Windows Named Pipes are only supported on Windows")
+}
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/pipe_windows.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/pipe_windows.go
new file mode 100644
index 0000000000..5ab60f00c2
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/pipe_windows.go
@@ -0,0 +1,75 @@
+// +build windows
+
+package statsd
+
+import (
+	"net"
+	"sync"
+	"time"
+
+	"github.com/Microsoft/go-winio"
+)
+
+type pipeWriter struct {
+	mu       sync.RWMutex
+	conn     net.Conn
+	timeout  time.Duration
+	pipepath string
+}
+
+func (p *pipeWriter) Write(data []byte) (n int, err error) {
+	conn, err := p.ensureConnection()
+	if err != nil {
+		return 0, err
+	}
+
+	p.mu.RLock()
+	conn.SetWriteDeadline(time.Now().Add(p.timeout))
+	p.mu.RUnlock()
+
+	n, err = conn.Write(data)
+	if err != nil {
+		if e, ok := err.(net.Error); !ok || !e.Temporary() {
+			// disconnected; retry again on next attempt
+			p.mu.Lock()
+			p.conn = nil
+			p.mu.Unlock()
+		}
+	}
+	return n, err
+}
+
+func (p *pipeWriter) ensureConnection() (net.Conn, error) {
+	p.mu.RLock()
+	conn := p.conn
+	p.mu.RUnlock()
+	if conn != nil {
+		return conn, nil
+	}
+
+	// looks like we might need to connect - try again with write locking.
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.conn != nil {
+		return p.conn, nil
+	}
+	newconn, err := winio.DialPipe(p.pipepath, nil)
+	if err != nil {
+		return nil, err
+	}
+	p.conn = newconn
+	return newconn, nil
+}
+
+func (p *pipeWriter) Close() error {
+	return p.conn.Close()
+}
+
+func newWindowsPipeWriter(pipepath string, writeTimeout time.Duration) (*pipeWriter, error) {
+	// Defer connection establishment to first write
+	return &pipeWriter{
+		conn:     nil,
+		timeout:  writeTimeout,
+		pipepath: pipepath,
+	}, nil
+}
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/sender.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/sender.go
new file mode 100644
index 0000000000..500d53c408
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/sender.go
@@ -0,0 +1,111 @@
+package statsd
+
+import (
+	"io"
+	"sync/atomic"
+)
+
+// senderTelemetry contains telemetry about the health of the sender
+type senderTelemetry struct {
+	totalPayloadsSent             uint64
+	totalPayloadsDroppedQueueFull uint64
+	totalPayloadsDroppedWriter    uint64
+	totalBytesSent                uint64
+	totalBytesDroppedQueueFull    uint64
+	totalBytesDroppedWriter       uint64
+}
+
+type sender struct {
+	transport   io.WriteCloser
+	pool        *bufferPool
+	queue       chan *statsdBuffer
+	telemetry   *senderTelemetry
+	stop        chan struct{}
+	flushSignal chan struct{}
+}
+
+func newSender(transport io.WriteCloser, queueSize int, pool *bufferPool) *sender {
+	sender := &sender{
+		transport:   transport,
+		pool:        pool,
+		queue:       make(chan *statsdBuffer, queueSize),
+		telemetry:   &senderTelemetry{},
+		stop:        make(chan struct{}),
+		flushSignal: make(chan struct{}),
+	}
+
+	go sender.sendLoop()
+	return sender
+}
+
+func (s *sender) send(buffer *statsdBuffer) {
+	select {
+	case s.queue <- buffer:
+	default:
+		atomic.AddUint64(&s.telemetry.totalPayloadsDroppedQueueFull, 1)
+		atomic.AddUint64(&s.telemetry.totalBytesDroppedQueueFull, uint64(len(buffer.bytes())))
+		s.pool.returnBuffer(buffer)
+	}
+}
+
+func (s *sender) write(buffer *statsdBuffer) {
+	_, err := s.transport.Write(buffer.bytes())
+	if err != nil {
+		atomic.AddUint64(&s.telemetry.totalPayloadsDroppedWriter, 1)
+		atomic.AddUint64(&s.telemetry.totalBytesDroppedWriter, uint64(len(buffer.bytes())))
+	} else {
+		atomic.AddUint64(&s.telemetry.totalPayloadsSent, 1)
+		atomic.AddUint64(&s.telemetry.totalBytesSent, uint64(len(buffer.bytes())))
+	}
+	s.pool.returnBuffer(buffer)
+}
+
+func (s *sender) flushTelemetryMetrics(t *Telemetry) {
+	t.TotalPayloadsSent = atomic.LoadUint64(&s.telemetry.totalPayloadsSent)
+	t.TotalPayloadsDroppedQueueFull = atomic.LoadUint64(&s.telemetry.totalPayloadsDroppedQueueFull)
+	t.TotalPayloadsDroppedWriter = atomic.LoadUint64(&s.telemetry.totalPayloadsDroppedWriter)
+
+	t.TotalBytesSent = atomic.LoadUint64(&s.telemetry.totalBytesSent)
+	t.TotalBytesDroppedQueueFull = atomic.LoadUint64(&s.telemetry.totalBytesDroppedQueueFull)
+	t.TotalBytesDroppedWriter = atomic.LoadUint64(&s.telemetry.totalBytesDroppedWriter)
+}
+
+func (s *sender) sendLoop() {
+	defer close(s.stop)
+	for {
+		select {
+		case buffer := <-s.queue:
+			s.write(buffer)
+		case <-s.stop:
+			return
+		case <-s.flushSignal:
+			// At that point we know that the workers are paused (the statsd client
+			// will pause them before calling sender.flush()).
+			// So we can fully flush the input queue
+			s.flushInputQueue()
+			s.flushSignal <- struct{}{}
+		}
+	}
+}
+
+func (s *sender) flushInputQueue() {
+	for {
+		select {
+		case buffer := <-s.queue:
+			s.write(buffer)
+		default:
+			return
+		}
+	}
+}
+func (s *sender) flush() {
+	s.flushSignal <- struct{}{}
+	<-s.flushSignal
+}
+
+func (s *sender) close() error {
+	s.stop <- struct{}{}
+	<-s.stop
+	s.flushInputQueue()
+	return s.transport.Close()
+}
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/service_check.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/service_check.go
new file mode 100644
index 0000000000..e2850465c2
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/service_check.go
@@ -0,0 +1,57 @@
+package statsd
+
+import (
+	"fmt"
+	"time"
+)
+
+// ServiceCheckStatus support
+type ServiceCheckStatus byte
+
+const (
+	// Ok is the "ok" ServiceCheck status
+	Ok ServiceCheckStatus = 0
+	// Warn is the "warning" ServiceCheck status
+	Warn ServiceCheckStatus = 1
+	// Critical is the "critical" ServiceCheck status
+	Critical ServiceCheckStatus = 2
+	// Unknown is the "unknown" ServiceCheck status
+	Unknown ServiceCheckStatus = 3
+)
+
+// A ServiceCheck is an object that contains the status of a DataDog service check.
+type ServiceCheck struct {
+	// Name of the service check.  Required.
+	Name string
+	// Status of service check.  Required.
+	Status ServiceCheckStatus
+	// Timestamp is a timestamp for the serviceCheck.  If not provided, the dogstatsd
+	// server will set this to the current time.
+	Timestamp time.Time
+	// Hostname for the serviceCheck.
+	Hostname string
+	// A message describing the current state of the serviceCheck.
+	Message string
+	// Tags for the serviceCheck.
+	Tags []string
+}
+
+// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking
+// against these values is done at send-time, or upon running sc.Check.
+func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck {
+	return &ServiceCheck{
+		Name:   name,
+		Status: status,
+	}
+}
+
+// Check verifies that a service check is valid.
+func (sc *ServiceCheck) Check() error {
+	if len(sc.Name) == 0 {
+		return fmt.Errorf("statsd.ServiceCheck name is required")
+	}
+	if byte(sc.Status) < 0 || byte(sc.Status) > 3 {
+		return fmt.Errorf("statsd.ServiceCheck status has invalid value")
+	}
+	return nil
+}
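
Roughly how a consumer of this package might report a service check; the check name, message and the reportControllerHealth helper are illustrative.

package example

import "github.com/DataDog/datadog-go/v5/statsd"

func reportControllerHealth(client statsd.ClientInterface, healthy bool) error {
	sc := statsd.NewServiceCheck("chains.controller.up", statsd.Ok)
	if !healthy {
		sc.Status = statsd.Critical
		sc.Message = "controller not responding"
	}
	if err := sc.Check(); err != nil { // only the name and status range are validated client-side
		return err
	}
	return client.ServiceCheck(sc)
}
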
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go
new file mode 100644
index 0000000000..378581b9be
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go
@@ -0,0 +1,838 @@
+// Copyright 2013 Ooyala, Inc.
+
+/*
+Package statsd provides a Go dogstatsd client. Dogstatsd extends the popular statsd,
+adding tags and histograms and pushing upstream to Datadog.
+
+Refer to http://docs.datadoghq.com/guides/dogstatsd/ for information about DogStatsD.
+
+statsd is based on go-statsd-client.
+*/
+package statsd
+
+//go:generate mockgen -source=statsd.go -destination=mocks/statsd.go
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+/*
+OptimalUDPPayloadSize defines the optimal payload size for a UDP datagram, 1432 bytes
+is optimal for regular networks with an MTU of 1500 so datagrams don't get
+fragmented. It's generally recommended not to fragment UDP datagrams as losing
+a single fragment will cause the entire datagram to be lost.
+*/
+const OptimalUDPPayloadSize = 1432
+
+/*
+MaxUDPPayloadSize defines the maximum payload size for a UDP datagram.
+Its value comes from the calculation: 65535 bytes max UDP datagram size -
+8-byte UDP header - 60-byte max IP headers.
+Any number greater than that will see frames being cut out.
+*/
+const MaxUDPPayloadSize = 65467
+
+// DefaultUDPBufferPoolSize is the default size of the buffer pool for UDP clients.
+const DefaultUDPBufferPoolSize = 2048
+
+// DefaultUDSBufferPoolSize is the default size of the buffer pool for UDS clients.
+const DefaultUDSBufferPoolSize = 512
+
+/*
+DefaultMaxAgentPayloadSize is the default maximum payload size the agent
+can receive. This can be adjusted by changing dogstatsd_buffer_size in the
+agent configuration file datadog.yaml. This is also used as the optimal payload size
+for UDS datagrams.
+*/
+const DefaultMaxAgentPayloadSize = 8192
+
+/*
+UnixAddressPrefix holds the prefix to use to enable Unix Domain Socket
+traffic instead of UDP.
+*/
+const UnixAddressPrefix = "unix://"
+
+/*
+WindowsPipeAddressPrefix holds the prefix to use to enable Windows Named Pipes
+traffic instead of UDP.
+*/
+const WindowsPipeAddressPrefix = `\\.\pipe\`
+
+const (
+	agentHostEnvVarName = "DD_AGENT_HOST"
+	agentPortEnvVarName = "DD_DOGSTATSD_PORT"
+	agentURLEnvVarName  = "DD_DOGSTATSD_URL"
+	defaultUDPPort      = "8125"
+)
+
+const (
+	// ddEntityID specifies client-side user-specified entity ID injection.
+	// This env var can be set to the Pod UID on Kubernetes via the downward API.
+	// Docs: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp
+	ddEntityID = "DD_ENTITY_ID"
+
+	// ddEntityIDTag specifies the tag name for the client-side entity ID injection
+	// The Agent expects this tag to contain a non-prefixed Kubernetes Pod UID.
+	ddEntityIDTag = "dd.internal.entity_id"
+
+	// originDetectionEnabled specifies the env var to enable/disable sending the container ID field.
+	originDetectionEnabled = "DD_ORIGIN_DETECTION_ENABLED"
+)
+
+/*
+ddEnvTagsMapping is a mapping of each "DD_" prefixed environment variable
+to a specific tag name. We use a slice to keep the order and simplify tests.
+*/
+var ddEnvTagsMapping = []struct{ envName, tagName string }{
+	{ddEntityID, ddEntityIDTag}, // Client-side entity ID injection for container tagging.
+	{"DD_ENV", "env"},           // The name of the env in which the service runs.
+	{"DD_SERVICE", "service"},   // The name of the running service.
+	{"DD_VERSION", "version"},   // The current version of the running service.
+}
+
+type metricType int
+
+const (
+	gauge metricType = iota
+	count
+	histogram
+	histogramAggregated
+	distribution
+	distributionAggregated
+	set
+	timing
+	timingAggregated
+	event
+	serviceCheck
+)
+
+type receivingMode int
+
+const (
+	mutexMode receivingMode = iota
+	channelMode
+)
+
+const (
+	writerNameUDP     string = "udp"
+	writerNameUDS     string = "uds"
+	writerWindowsPipe string = "pipe"
+)
+
+// noTimestamp is used as a value for metrics without a given timestamp.
+const noTimestamp = int64(0)
+
+type metric struct {
+	metricType metricType
+	namespace  string
+	globalTags []string
+	name       string
+	fvalue     float64
+	fvalues    []float64
+	ivalue     int64
+	svalue     string
+	evalue     *Event
+	scvalue    *ServiceCheck
+	tags       []string
+	stags      string
+	rate       float64
+	timestamp  int64
+}
+
+type noClientErr string
+
+// ErrNoClient is returned if statsd reporting methods are invoked on
+// a nil client.
+const ErrNoClient = noClientErr("statsd client is nil")
+
+func (e noClientErr) Error() string {
+	return string(e)
+}
+
+type invalidTimestampErr string
+
+// InvalidTimestamp is returned if a provided timestamp is invalid.
+const InvalidTimestamp = invalidTimestampErr("invalid timestamp")
+
+func (e invalidTimestampErr) Error() string {
+	return string(e)
+}
+
+// ClientInterface is an interface that exposes the common client functions for the
+// purpose of being able to provide a no-op client or even mocking. This can aid
+// downstream users with their testing.
+type ClientInterface interface {
+	// Gauge measures the value of a metric at a particular time.
+	Gauge(name string, value float64, tags []string, rate float64) error
+
+	// GaugeWithTimestamp measures the value of a metric at a given time.
+	// BETA - Please contact our support team for more information to use this feature: https://www.datadoghq.com/support/
+	// The value will bypass any aggregation on the client side and agent side, this is
+	// useful when sending points in the past.
+	//
+	// Minimum Datadog Agent version: 7.40.0
+	GaugeWithTimestamp(name string, value float64, tags []string, rate float64, timestamp time.Time) error
+
+	// Count tracks how many times something happened per second.
+	Count(name string, value int64, tags []string, rate float64) error
+
+	// CountWithTimestamp tracks how many times something happened at the given second.
+	// BETA - Please contact our support team for more information to use this feature: https://www.datadoghq.com/support/
+	// The value will bypass any aggregation on the client side and agent side, this is
+	// useful when sending points in the past.
+	//
+	// Minimum Datadog Agent version: 7.40.0
+	CountWithTimestamp(name string, value int64, tags []string, rate float64, timestamp time.Time) error
+
+	// Histogram tracks the statistical distribution of a set of values on each host.
+	Histogram(name string, value float64, tags []string, rate float64) error
+
+	// Distribution tracks the statistical distribution of a set of values across your infrastructure.
+	Distribution(name string, value float64, tags []string, rate float64) error
+
+	// Decr is just Count of -1
+	Decr(name string, tags []string, rate float64) error
+
+	// Incr is just Count of 1
+	Incr(name string, tags []string, rate float64) error
+
+	// Set counts the number of unique elements in a group.
+	Set(name string, value string, tags []string, rate float64) error
+
+	// Timing sends timing information, it is an alias for TimeInMilliseconds
+	Timing(name string, value time.Duration, tags []string, rate float64) error
+
+	// TimeInMilliseconds sends timing information in milliseconds.
+	// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing)
+	TimeInMilliseconds(name string, value float64, tags []string, rate float64) error
+
+	// Event sends the provided Event.
+	Event(e *Event) error
+
+	// SimpleEvent sends an event with the provided title and text.
+	SimpleEvent(title, text string) error
+
+	// ServiceCheck sends the provided ServiceCheck.
+	ServiceCheck(sc *ServiceCheck) error
+
+	// SimpleServiceCheck sends a ServiceCheck with the provided name and status.
+	SimpleServiceCheck(name string, status ServiceCheckStatus) error
+
+	// Close the client connection.
+	Close() error
+
+	// Flush forces a flush of all the queued dogstatsd payloads.
+	Flush() error
+
+	// IsClosed returns if the client has been closed.
+	IsClosed() bool
+
+	// GetTelemetry returns the telemetry metrics for the client since it started.
+	GetTelemetry() Telemetry
+}
+
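+// Illustrative sketch (not part of the upstream file): user code can depend on
+// ClientInterface rather than *Client so that a no-op or mock client can be
+// injected in tests. The names below (httpStats, recordHit) are hypothetical.
+//
+//	type httpStats struct{ client statsd.ClientInterface }
+//
+//	func (s *httpStats) recordHit(route string) {
+//		_ = s.client.Incr("http.requests", []string{"route:" + route}, 1)
+//	}
+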
+// A Client is a handle for sending messages to dogstatsd.  It is safe to
+// use one Client from multiple goroutines simultaneously.
+type Client struct {
+	// Sender handles the underlying networking protocol
+	sender *sender
+	// namespace to prepend to all statsd calls
+	namespace string
+	// tags are global tags to be added to every statsd call
+	tags            []string
+	flushTime       time.Duration
+	telemetry       *statsdTelemetry
+	telemetryClient *telemetryClient
+	stop            chan struct{}
+	wg              sync.WaitGroup
+	workers         []*worker
+	closerLock      sync.Mutex
+	workersMode     receivingMode
+	aggregatorMode  receivingMode
+	agg             *aggregator
+	aggExtended     *aggregator
+	options         []Option
+	addrOption      string
+	isClosed        bool
+}
+
+// statsdTelemetry contains telemetry metrics about the client
+type statsdTelemetry struct {
+	totalMetricsGauge        uint64
+	totalMetricsCount        uint64
+	totalMetricsHistogram    uint64
+	totalMetricsDistribution uint64
+	totalMetricsSet          uint64
+	totalMetricsTiming       uint64
+	totalEvents              uint64
+	totalServiceChecks       uint64
+	totalDroppedOnReceive    uint64
+}
+
+// Verify that Client implements the ClientInterface.
+// https://golang.org/doc/faq#guarantee_satisfies_interface
+var _ ClientInterface = &Client{}
+
+func resolveAddr(addr string) string {
+	envPort := ""
+
+	if addr == "" {
+		addr = os.Getenv(agentHostEnvVarName)
+		envPort = os.Getenv(agentPortEnvVarName)
+		agentURL, _ := os.LookupEnv(agentURLEnvVarName)
+		agentURL = parseAgentURL(agentURL)
+
+		// agentURLEnvVarName has priority over agentHostEnvVarName
+		if agentURL != "" {
+			return agentURL
+		}
+	}
+
+	if addr == "" {
+		return ""
+	}
+
+	if !strings.HasPrefix(addr, WindowsPipeAddressPrefix) && !strings.HasPrefix(addr, UnixAddressPrefix) {
+		if !strings.Contains(addr, ":") {
+			if envPort != "" {
+				addr = fmt.Sprintf("%s:%s", addr, envPort)
+			} else {
+				addr = fmt.Sprintf("%s:%s", addr, defaultUDPPort)
+			}
+		}
+	}
+	return addr
+}
+
+func parseAgentURL(agentURL string) string {
+	if agentURL != "" {
+		if strings.HasPrefix(agentURL, WindowsPipeAddressPrefix) {
+			return agentURL
+		}
+
+		parsedURL, err := url.Parse(agentURL)
+		if err != nil {
+			return ""
+		}
+
+		if parsedURL.Scheme == "udp" {
+			if strings.Contains(parsedURL.Host, ":") {
+				return parsedURL.Host
+			}
+			return fmt.Sprintf("%s:%s", parsedURL.Host, defaultUDPPort)
+		}
+
+		if parsedURL.Scheme == "unix" {
+			return agentURL
+		}
+	}
+	return ""
+}
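+
+// For illustration only (not upstream code), address resolution behaves roughly as:
+//
+//	resolveAddr("statsd-host")          // -> "statsd-host:8125" (default UDP port appended)
+//	resolveAddr("")                     // -> DD_DOGSTATSD_URL if set, else DD_AGENT_HOST[:DD_DOGSTATSD_PORT]
+//	parseAgentURL("udp://agent:9125")   // -> "agent:9125"
+//	parseAgentURL("unix:///var/run/datadog/dsd.socket") // -> returned unchanged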
+
+func createWriter(addr string, writeTimeout time.Duration) (io.WriteCloser, string, error) {
+	addr = resolveAddr(addr)
+	if addr == "" {
+		return nil, "", errors.New("No address passed and autodetection from environment failed")
+	}
+
+	switch {
+	case strings.HasPrefix(addr, WindowsPipeAddressPrefix):
+		w, err := newWindowsPipeWriter(addr, writeTimeout)
+		return w, writerWindowsPipe, err
+	case strings.HasPrefix(addr, UnixAddressPrefix):
+		w, err := newUDSWriter(addr[len(UnixAddressPrefix):], writeTimeout)
+		return w, writerNameUDS, err
+	default:
+		w, err := newUDPWriter(addr, writeTimeout)
+		return w, writerNameUDP, err
+	}
+}
+
+// New returns a pointer to a new Client given an addr in the format "hostname:port" for UDP,
+// "unix:///path/to/socket" for UDS or "\\.\pipe\path\to\pipe" for Windows Named Pipes.
+func New(addr string, options ...Option) (*Client, error) {
+	o, err := resolveOptions(options)
+	if err != nil {
+		return nil, err
+	}
+
+	w, writerType, err := createWriter(addr, o.writeTimeout)
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := newWithWriter(w, o, writerType)
+	if err == nil {
+		client.options = append(client.options, options...)
+		client.addrOption = addr
+	}
+	return client, err
+}
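+
+// Usage sketch (illustrative, not part of the upstream file): create a client
+// against a local agent on the default UDP port. WithNamespace is assumed to be
+// one of this package's options; the metric name and tag are hypothetical.
+//
+//	client, err := statsd.New("127.0.0.1:8125", statsd.WithNamespace("myapp."))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer client.Close()
+//	_ = client.Incr("requests.total", []string{"env:dev"}, 1)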
+
+// NewWithWriter creates a new Client with the given writer. The writer is an
+// io.WriteCloser.
+func NewWithWriter(w io.WriteCloser, options ...Option) (*Client, error) {
+	o, err := resolveOptions(options)
+	if err != nil {
+		return nil, err
+	}
+	return newWithWriter(w, o, "custom")
+}
+
+// CloneWithExtraOptions creates a new Client with extra options
+func CloneWithExtraOptions(c *Client, options ...Option) (*Client, error) {
+	if c == nil {
+		return nil, ErrNoClient
+	}
+
+	if c.addrOption == "" {
+		return nil, fmt.Errorf("can't clone client with no addrOption")
+	}
+	opt := append(c.options, options...)
+	return New(c.addrOption, opt...)
+}
+
+func newWithWriter(w io.WriteCloser, o *Options, writerName string) (*Client, error) {
+	c := Client{
+		namespace: o.namespace,
+		tags:      o.tags,
+		telemetry: &statsdTelemetry{},
+	}
+
+	hasEntityID := false
+	// Inject values of DD_* environment variables as global tags.
+	for _, mapping := range ddEnvTagsMapping {
+		if value := os.Getenv(mapping.envName); value != "" {
+			if mapping.envName == ddEntityID {
+				hasEntityID = true
+			}
+			c.tags = append(c.tags, fmt.Sprintf("%s:%s", mapping.tagName, value))
+		}
+	}
+
+	if !hasEntityID {
+		initContainerID(o.containerID, isOriginDetectionEnabled(o, hasEntityID))
+	}
+
+	if o.maxBytesPerPayload == 0 {
+		if writerName == writerNameUDS {
+			o.maxBytesPerPayload = DefaultMaxAgentPayloadSize
+		} else {
+			o.maxBytesPerPayload = OptimalUDPPayloadSize
+		}
+	}
+	if o.bufferPoolSize == 0 {
+		if writerName == writerNameUDS {
+			o.bufferPoolSize = DefaultUDSBufferPoolSize
+		} else {
+			o.bufferPoolSize = DefaultUDPBufferPoolSize
+		}
+	}
+	if o.senderQueueSize == 0 {
+		if writerName == writerNameUDS {
+			o.senderQueueSize = DefaultUDSBufferPoolSize
+		} else {
+			o.senderQueueSize = DefaultUDPBufferPoolSize
+		}
+	}
+
+	bufferPool := newBufferPool(o.bufferPoolSize, o.maxBytesPerPayload, o.maxMessagesPerPayload)
+	c.sender = newSender(w, o.senderQueueSize, bufferPool)
+	c.aggregatorMode = o.receiveMode
+
+	c.workersMode = o.receiveMode
+	// channelMode at the worker level is not enabled when
+	// ExtendedAggregation is used, since the user app will not directly
+	// use the workers (the aggregator sits between the app and the
+	// workers).
+	if o.extendedAggregation {
+		c.workersMode = mutexMode
+	}
+
+	if o.aggregation || o.extendedAggregation {
+		c.agg = newAggregator(&c)
+		c.agg.start(o.aggregationFlushInterval)
+
+		if o.extendedAggregation {
+			c.aggExtended = c.agg
+
+			if c.aggregatorMode == channelMode {
+				c.agg.startReceivingMetric(o.channelModeBufferSize, o.workersCount)
+			}
+		}
+	}
+
+	for i := 0; i < o.workersCount; i++ {
+		w := newWorker(bufferPool, c.sender)
+		c.workers = append(c.workers, w)
+
+		if c.workersMode == channelMode {
+			w.startReceivingMetric(o.channelModeBufferSize)
+		}
+	}
+
+	c.flushTime = o.bufferFlushInterval
+	c.stop = make(chan struct{}, 1)
+
+	c.wg.Add(1)
+	go func() {
+		defer c.wg.Done()
+		c.watch()
+	}()
+
+	if o.telemetry {
+		if o.telemetryAddr == "" {
+			c.telemetryClient = newTelemetryClient(&c, writerName, c.agg != nil)
+		} else {
+			var err error
+			c.telemetryClient, err = newTelemetryClientWithCustomAddr(&c, writerName, o.telemetryAddr, c.agg != nil, bufferPool, o.writeTimeout)
+			if err != nil {
+				return nil, err
+			}
+		}
+		c.telemetryClient.run(&c.wg, c.stop)
+	}
+
+	return &c, nil
+}
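+
+// Illustrative override of the defaults chosen above (not upstream code);
+// WithMaxBytesPerPayload and WithSenderQueueSize are assumed to be options
+// provided by this package:
+//
+//	client, err := statsd.New("unix:///var/run/datadog/dsd.socket",
+//		statsd.WithMaxBytesPerPayload(4096),
+//		statsd.WithSenderQueueSize(1024),
+//	)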
+
+func (c *Client) watch() {
+	ticker := time.NewTicker(c.flushTime)
+
+	for {
+		select {
+		case <-ticker.C:
+			for _, w := range c.workers {
+				w.flush()
+			}
+		case <-c.stop:
+			ticker.Stop()
+			return
+		}
+	}
+}
+
+// Flush forces a flush of all the queued dogstatsd payloads. This method is
+// blocking and will not return until everything is sent through the network.
+// In mutexMode, this will also block sampling new data to the client while the
+// workers and sender are flushed.
+func (c *Client) Flush() error {
+	if c == nil {
+		return ErrNoClient
+	}
+	if c.agg != nil {
+		c.agg.flush()
+	}
+	for _, w := range c.workers {
+		w.pause()
+		defer w.unpause()
+		w.flushUnsafe()
+	}
+	// Now that the workers are paused, the sender can flush the queue between
+	// the workers and the sender.
+	c.sender.flush()
+	return nil
+}
+
+// IsClosed returns if the client has been closed.
+func (c *Client) IsClosed() bool {
+	c.closerLock.Lock()
+	defer c.closerLock.Unlock()
+	return c.isClosed
+}
+
+func (c *Client) flushTelemetryMetrics(t *Telemetry) {
+	t.TotalMetricsGauge = atomic.LoadUint64(&c.telemetry.totalMetricsGauge)
+	t.TotalMetricsCount = atomic.LoadUint64(&c.telemetry.totalMetricsCount)
+	t.TotalMetricsSet = atomic.LoadUint64(&c.telemetry.totalMetricsSet)
+	t.TotalMetricsHistogram = atomic.LoadUint64(&c.telemetry.totalMetricsHistogram)
+	t.TotalMetricsDistribution = atomic.LoadUint64(&c.telemetry.totalMetricsDistribution)
+	t.TotalMetricsTiming = atomic.LoadUint64(&c.telemetry.totalMetricsTiming)
+	t.TotalEvents = atomic.LoadUint64(&c.telemetry.totalEvents)
+	t.TotalServiceChecks = atomic.LoadUint64(&c.telemetry.totalServiceChecks)
+	t.TotalDroppedOnReceive = atomic.LoadUint64(&c.telemetry.totalDroppedOnReceive)
+}
+
+// GetTelemetry returns the telemetry metrics for the client since it started.
+func (c *Client) GetTelemetry() Telemetry {
+	return c.telemetryClient.getTelemetry()
+}
+
+func (c *Client) send(m metric) error {
+	h := hashString32(m.name)
+	worker := c.workers[h%uint32(len(c.workers))]
+
+	if c.workersMode == channelMode {
+		select {
+		case worker.inputMetrics <- m:
+		default:
+			atomic.AddUint64(&c.telemetry.totalDroppedOnReceive, 1)
+		}
+		return nil
+	}
+	return worker.processMetric(m)
+}
+
+// sendBlocking is used by the aggregator to inject aggregated metrics.
+func (c *Client) sendBlocking(m metric) error {
+	m.globalTags = c.tags
+	m.namespace = c.namespace
+
+	h := hashString32(m.name)
+	worker := c.workers[h%uint32(len(c.workers))]
+	return worker.processMetric(m)
+}
+
+func (c *Client) sendToAggregator(mType metricType, name string, value float64, tags []string, rate float64, f bufferedMetricSampleFunc) error {
+	if c.aggregatorMode == channelMode {
+		select {
+		case c.aggExtended.inputMetrics <- metric{metricType: mType, name: name, fvalue: value, tags: tags, rate: rate}:
+		default:
+			atomic.AddUint64(&c.telemetry.totalDroppedOnReceive, 1)
+		}
+		return nil
+	}
+	return f(name, value, tags, rate)
+}
+
+// Gauge measures the value of a metric at a particular time.
+func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error {
+	if c == nil {
+		return ErrNoClient
+	}
+	atomic.AddUint64(&c.telemetry.totalMetricsGauge, 1)
+	if c.agg != nil {
+		return c.agg.gauge(name, value, tags)
+	}
+	return c.send(metric{metricType: gauge, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace})
+}
+
+// GaugeWithTimestamp measures the value of a metric at a given time.
+// BETA - Please contact our support team for more information to use this feature: https://www.datadoghq.com/support/
+// The value will bypass any aggregation on the client side and agent side, this is
+// useful when sending points in the past.
+//
+// Minimum Datadog Agent version: 7.40.0
+func (c *Client) GaugeWithTimestamp(name string, value float64, tags []string, rate float64, timestamp time.Time) error {
+	if c == nil {
+		return ErrNoClient
+	}
+
+	if timestamp.IsZero() || timestamp.Unix() <= noTimestamp {
+		return InvalidTimestamp
+	}
+
+	atomic.AddUint64(&c.telemetry.totalMetricsGauge, 1)
+	return c.send(metric{metricType: gauge, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace, timestamp: timestamp.Unix()})
+}
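+
+// Illustrative call (not upstream code): submit a gauge point one minute in the
+// past; the metric name and tag are hypothetical.
+//
+//	_ = client.GaugeWithTimestamp("queue.depth", 42, []string{"queue:jobs"}, 1,
+//		time.Now().Add(-time.Minute))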
+
+// Count tracks how many times something happened per second.
+func (c *Client) Count(name string, value int64, tags []string, rate float64) error {
+	if c == nil {
+		return ErrNoClient
+	}
+	atomic.AddUint64(&c.telemetry.totalMetricsCount, 1)
+	if c.agg != nil {
+		return c.agg.count(name, value, tags)
+	}
+	return c.send(metric{metricType: count, name: name, ivalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace})
+}
+
+// CountWithTimestamp tracks how many times something happened at the given second.
+// BETA - Please contact our support team for more information to use this feature: https://www.datadoghq.com/support/
+// The value will bypass any aggregation on the client side and agent side, this is
+// useful when sending points in the past.
+//
+// Minimum Datadog Agent version: 7.40.0
+func (c *Client) CountWithTimestamp(name string, value int64, tags []string, rate float64, timestamp time.Time) error {
+	if c == nil {
+		return ErrNoClient
+	}
+
+	if timestamp.IsZero() || timestamp.Unix() <= noTimestamp {
+		return InvalidTimestamp
+	}
+
+	atomic.AddUint64(&c.telemetry.totalMetricsCount, 1)
+	return c.send(metric{metricType: count, name: name, ivalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace, timestamp: timestamp.Unix()})
+}
+
+// Histogram tracks the statistical distribution of a set of values on each host.
+func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error {
+	if c == nil {
+		return ErrNoClient
+	}
+	atomic.AddUint64(&c.telemetry.totalMetricsHistogram, 1)
+	if c.aggExtended != nil {
+		return c.sendToAggregator(histogram, name, value, tags, rate, c.aggExtended.histogram)
+	}
+	return c.send(metric{metricType: histogram, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace})
+}
+
+// Distribution tracks the statistical distribution of a set of values across your infrastructure.
+func (c *Client) Distribution(name string, value float64, tags []string, rate float64) error {
+	if c == nil {
+		return ErrNoClient
+	}
+	atomic.AddUint64(&c.telemetry.totalMetricsDistribution, 1)
+	if c.aggExtended != nil {
+		return c.sendToAggregator(distribution, name, value, tags, rate, c.aggExtended.distribution)
+	}
+	return c.send(metric{metricType: distribution, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace})
+}
+
+// Decr is just Count of -1
+func (c *Client) Decr(name string, tags []string, rate float64) error {
+	return c.Count(name, -1, tags, rate)
+}
+
+// Incr is just Count of 1
+func (c *Client) Incr(name string, tags []string, rate float64) error {
+	return c.Count(name, 1, tags, rate)
+}
+
+// Set counts the number of unique elements in a group.
+func (c *Client) Set(name string, value string, tags []string, rate float64) error {
+	if c == nil {
+		return ErrNoClient
+	}
+	atomic.AddUint64(&c.telemetry.totalMetricsSet, 1)
+	if c.agg != nil {
+		return c.agg.set(name, value, tags)
+	}
+	return c.send(metric{metricType: set, name: name, svalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace})
+}
+
+// Timing sends timing information, it is an alias for TimeInMilliseconds
+func (c *Client) Timing(name string, value time.Duration, tags []string, rate float64) error {
+	return c.TimeInMilliseconds(name, value.Seconds()*1000, tags, rate)
+}
+
+// TimeInMilliseconds sends timing information in milliseconds.
+// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing)
+func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
+	if c == nil {
+		return ErrNoClient
+	}
+	atomic.AddUint64(&c.telemetry.totalMetricsTiming, 1)
+	if c.aggExtended != nil {
+		return c.sendToAggregator(timing, name, value, tags, rate, c.aggExtended.timing)
+	}
+	return c.send(metric{metricType: timing, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace})
+}
+
+// Event sends the provided Event.
+func (c *Client) Event(e *Event) error {
+	if c == nil {
+		return ErrNoClient
+	}
+	atomic.AddUint64(&c.telemetry.totalEvents, 1)
+	return c.send(metric{metricType: event, evalue: e, rate: 1, globalTags: c.tags, namespace: c.namespace})
+}
+
+// SimpleEvent sends an event with the provided title and text.
+func (c *Client) SimpleEvent(title, text string) error {
+	e := NewEvent(title, text)
+	return c.Event(e)
+}
+
+// ServiceCheck sends the provided ServiceCheck.
+func (c *Client) ServiceCheck(sc *ServiceCheck) error {
+	if c == nil {
+		return ErrNoClient
+	}
+	atomic.AddUint64(&c.telemetry.totalServiceChecks, 1)
+	return c.send(metric{metricType: serviceCheck, scvalue: sc, rate: 1, globalTags: c.tags, namespace: c.namespace})
+}
+
+// SimpleServiceCheck sends a ServiceCheck with the provided name and status.
+func (c *Client) SimpleServiceCheck(name string, status ServiceCheckStatus) error {
+	sc := NewServiceCheck(name, status)
+	return c.ServiceCheck(sc)
+}
+
+// Close the client connection.
+func (c *Client) Close() error {
+	if c == nil {
+		return ErrNoClient
+	}
+
+	// Acquire closer lock to ensure only one thread can close the stop channel
+	c.closerLock.Lock()
+	defer c.closerLock.Unlock()
+
+	if c.isClosed {
+		return nil
+	}
+
+	// Notify all other threads that they should stop
+	select {
+	case <-c.stop:
+		return nil
+	default:
+	}
+	close(c.stop)
+
+	if c.workersMode == channelMode {
+		for _, w := range c.workers {
+			w.stopReceivingMetric()
+		}
+	}
+
+	// flush the aggregator first
+	if c.agg != nil {
+		if c.aggExtended != nil && c.aggregatorMode == channelMode {
+			c.agg.stopReceivingMetric()
+		}
+		c.agg.stop()
+	}
+
+	// Wait for the threads to stop
+	c.wg.Wait()
+
+	c.Flush()
+
+	c.isClosed = true
+	return c.sender.close()
+}
+
+// isOriginDetectionEnabled returns whether the client should fill the container field.
+//
+// If DD_ENTITY_ID is set, we don't send the container ID, as dd.internal.entity_id
+// is prioritized over the container field for backward compatibility.
+// If a user-defined container ID is provided, we don't use origin detection either.
+// If DD_ENTITY_ID is not set, we try to fill the container field automatically unless
+// DD_ORIGIN_DETECTION_ENABLED is explicitly set to false.
+func isOriginDetectionEnabled(o *Options, hasEntityID bool) bool {
+	if !o.originDetection || hasEntityID || o.containerID != "" {
+		// originDetection is explicitly disabled
+		// or DD_ENTITY_ID was found
+		// or a user-defined container ID was provided
+		return false
+	}
+
+	envVarValue := os.Getenv(originDetectionEnabled)
+	if envVarValue == "" {
+		// DD_ORIGIN_DETECTION_ENABLED is not set
+		// default to true
+		return true
+	}
+
+	enabled, err := strconv.ParseBool(envVarValue)
+	if err != nil {
+		// Error due to an unsupported DD_ORIGIN_DETECTION_ENABLED value
+		// default to true
+		return true
+	}
+
+	return enabled
+}
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/telemetry.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/telemetry.go
new file mode 100644
index 0000000000..1e2bc0a3f8
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/telemetry.go
@@ -0,0 +1,274 @@
+package statsd
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+/*
+telemetryInterval is the interval at which telemetry will be sent by the client.
+*/
+const telemetryInterval = 10 * time.Second
+
+/*
+clientTelemetryTag is a tag identifying this specific client.
+*/
+var clientTelemetryTag = "client:go"
+
+/*
+clientVersionTelemetryTag is a tag identifying this specific client version.
+*/
+var clientVersionTelemetryTag = "client_version:5.3.0"
+
+// Telemetry represents internal metrics about the client behavior since it started.
+type Telemetry struct {
+	//
+	// Those are produced by the 'Client'
+	//
+
+	// TotalMetrics is the total number of metrics sent by the client before aggregation and sampling.
+	TotalMetrics uint64
+	// TotalMetricsGauge is the total number of gauges sent by the client before aggregation and sampling.
+	TotalMetricsGauge uint64
+	// TotalMetricsCount is the total number of counts sent by the client before aggregation and sampling.
+	TotalMetricsCount uint64
+	// TotalMetricsHistogram is the total number of histograms sent by the client before aggregation and sampling.
+	TotalMetricsHistogram uint64
+	// TotalMetricsDistribution is the total number of distributions sent by the client before aggregation and
+	// sampling.
+	TotalMetricsDistribution uint64
+	// TotalMetricsSet is the total number of sets sent by the client before aggregation and sampling.
+	TotalMetricsSet uint64
+	// TotalMetricsTiming is the total number of timings sent by the client before aggregation and sampling.
+	TotalMetricsTiming uint64
+	// TotalEvents is the total number of events sent by the client before aggregation and sampling.
+	TotalEvents uint64
+	// TotalServiceChecks is the total number of service_checks sent by the client before aggregation and sampling.
+	TotalServiceChecks uint64
+
+	// TotalDroppedOnReceive is the total number of metrics/events/service_checks dropped when using ChannelMode (see
+	// WithChannelMode option).
+	TotalDroppedOnReceive uint64
+
+	//
+	// Those are produced by the 'sender'
+	//
+
+	// TotalPayloadsSent is the total number of payloads (packets on the network) successfully sent by the client.
+	// When using UDP we don't know whether packets were dropped or not, so all packets are considered successfully sent.
+	TotalPayloadsSent uint64
+	// TotalPayloadsDropped is the total number of payloads dropped by the client. This includes all causes of drops
+	// (TotalPayloadsDroppedQueueFull and TotalPayloadsDroppedWriter). When using UDP this won't include packets
+	// dropped by the network.
+	TotalPayloadsDropped uint64
+	// TotalPayloadsDroppedWriter is the total number of payloads dropped by the writer (when using UDS or named
+	// pipes) due to a network timeout or error.
+	TotalPayloadsDroppedWriter uint64
+	// TotalPayloadsDroppedQueueFull is the total number of payloads dropped internally because the queue of payloads
+	// waiting to be sent on the wire is full. This means the client is generating more metrics than can be sent on
+	// the wire. If your app sends metrics in batches, look at the WithSenderQueueSize option to increase the queue size.
+	TotalPayloadsDroppedQueueFull uint64
+
+	// TotalBytesSent is the total number of bytes successfully sent by the client. When using UDP we don't know
+	// whether packets were dropped or not, so all packets are considered successfully sent.
+	TotalBytesSent uint64
+	// TotalBytesDropped is the total number of bytes dropped by the client. This includes all causes of drops
+	// (TotalBytesDroppedQueueFull and TotalBytesDroppedWriter). When using UDP this won't include bytes
+	// dropped by the network.
+	TotalBytesDropped uint64
+	// TotalBytesDroppedWriter is the total number of bytes dropped by the writer (when using UDS or named pipes) due
+	// to a network timeout or error.
+	TotalBytesDroppedWriter uint64
+	// TotalBytesDroppedQueueFull is the total number of bytes dropped internally because the queue of payloads
+	// waiting to be sent on the wire is full. This means the client is generating more metrics than can be sent on
+	// the wire. If your app sends metrics in batches, look at the WithSenderQueueSize option to increase the queue size.
+	TotalBytesDroppedQueueFull uint64
+
+	//
+	// Those are produced by the 'aggregator'
+	//
+
+	// AggregationNbContext is the total number of contexts flushed by the aggregator when either
+	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+	AggregationNbContext uint64
+	// AggregationNbContextGauge is the total number of contexts for gauges flushed by the aggregator when either
+	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+	AggregationNbContextGauge uint64
+	// AggregationNbContextCount is the total number of contexts for counts flushed by the aggregator when either
+	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+	AggregationNbContextCount uint64
+	// AggregationNbContextSet is the total number of contexts for sets flushed by the aggregator when either
+	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+	AggregationNbContextSet uint64
+	// AggregationNbContextHistogram is the total number of contexts for histograms flushed by the aggregator when either
+	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+	AggregationNbContextHistogram uint64
+	// AggregationNbContextDistribution is the total number of contexts for distributions flushed by the aggregator when either
+	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+	AggregationNbContextDistribution uint64
+	// AggregationNbContextTiming is the total number of contexts for timings flushed by the aggregator when either
+	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+	AggregationNbContextTiming uint64
+}
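+
+// Illustrative read of this snapshot (not upstream code); "client" is any
+// *Client with telemetry enabled (the default):
+//
+//	t := client.GetTelemetry()
+//	log.Printf("metrics=%d payloads_sent=%d bytes_sent=%d payloads_dropped=%d",
+//		t.TotalMetrics, t.TotalPayloadsSent, t.TotalBytesSent, t.TotalPayloadsDropped)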
+
+type telemetryClient struct {
+	c          *Client
+	tags       []string
+	aggEnabled bool // is aggregation enabled and should we send aggregation telemetry.
+	tagsByType map[metricType][]string
+	sender     *sender
+	worker     *worker
+	lastSample Telemetry // The previous sample of telemetry sent
+}
+
+func newTelemetryClient(c *Client, transport string, aggregationEnabled bool) *telemetryClient {
+	t := &telemetryClient{
+		c:          c,
+		tags:       append(c.tags, clientTelemetryTag, clientVersionTelemetryTag, "client_transport:"+transport),
+		aggEnabled: aggregationEnabled,
+		tagsByType: map[metricType][]string{},
+	}
+
+	t.tagsByType[gauge] = append(append([]string{}, t.tags...), "metrics_type:gauge")
+	t.tagsByType[count] = append(append([]string{}, t.tags...), "metrics_type:count")
+	t.tagsByType[set] = append(append([]string{}, t.tags...), "metrics_type:set")
+	t.tagsByType[timing] = append(append([]string{}, t.tags...), "metrics_type:timing")
+	t.tagsByType[histogram] = append(append([]string{}, t.tags...), "metrics_type:histogram")
+	t.tagsByType[distribution] = append(append([]string{}, t.tags...), "metrics_type:distribution")
+	return t
+}
+
+func newTelemetryClientWithCustomAddr(c *Client, transport string, telemetryAddr string, aggregationEnabled bool, pool *bufferPool, writeTimeout time.Duration) (*telemetryClient, error) {
+	telemetryWriter, _, err := createWriter(telemetryAddr, writeTimeout)
+	if err != nil {
+		return nil, fmt.Errorf("Could not resolve telemetry address: %v", err)
+	}
+
+	t := newTelemetryClient(c, transport, aggregationEnabled)
+
+	// Creating a custom sender/worker with 1 worker in mutex mode for the
+	// telemetry that shares the same bufferPool.
+	// FIXME: due to a performance pitfall, we're always using UDP defaults
+	// even for UDS.
+	t.sender = newSender(telemetryWriter, DefaultUDPBufferPoolSize, pool)
+	t.worker = newWorker(pool, t.sender)
+	return t, nil
+}
+
+func (t *telemetryClient) run(wg *sync.WaitGroup, stop chan struct{}) {
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		ticker := time.NewTicker(telemetryInterval)
+		for {
+			select {
+			case <-ticker.C:
+				t.sendTelemetry()
+			case <-stop:
+				ticker.Stop()
+				if t.sender != nil {
+					t.sender.close()
+				}
+				return
+			}
+		}
+	}()
+}
+
+func (t *telemetryClient) sendTelemetry() {
+	for _, m := range t.flush() {
+		if t.worker != nil {
+			t.worker.processMetric(m)
+		} else {
+			t.c.send(m)
+		}
+	}
+
+	if t.worker != nil {
+		t.worker.flush()
+	}
+}
+
+func (t *telemetryClient) getTelemetry() Telemetry {
+	if t == nil {
+		// telemetry was disabled through the WithoutTelemetry option
+		return Telemetry{}
+	}
+
+	tlm := Telemetry{}
+	t.c.flushTelemetryMetrics(&tlm)
+	t.c.sender.flushTelemetryMetrics(&tlm)
+	t.c.agg.flushTelemetryMetrics(&tlm)
+
+	tlm.TotalMetrics = tlm.TotalMetricsGauge +
+		tlm.TotalMetricsCount +
+		tlm.TotalMetricsSet +
+		tlm.TotalMetricsHistogram +
+		tlm.TotalMetricsDistribution +
+		tlm.TotalMetricsTiming
+
+	tlm.TotalPayloadsDropped = tlm.TotalPayloadsDroppedQueueFull + tlm.TotalPayloadsDroppedWriter
+	tlm.TotalBytesDropped = tlm.TotalBytesDroppedQueueFull + tlm.TotalBytesDroppedWriter
+
+	if t.aggEnabled {
+		tlm.AggregationNbContext = tlm.AggregationNbContextGauge +
+			tlm.AggregationNbContextCount +
+			tlm.AggregationNbContextSet +
+			tlm.AggregationNbContextHistogram +
+			tlm.AggregationNbContextDistribution +
+			tlm.AggregationNbContextTiming
+	}
+	return tlm
+}
+
+// flush returns the Telemetry metrics to be flushed. It's its own function to ease testing.
+func (t *telemetryClient) flush() []metric {
+	m := []metric{}
+
+	// same as Count but without global namespace
+	telemetryCount := func(name string, value int64, tags []string) {
+		m = append(m, metric{metricType: count, name: name, ivalue: value, tags: tags, rate: 1})
+	}
+
+	tlm := t.getTelemetry()
+
+	// We send the diff between now and the previous telemetry flush. This keeps the same telemetry behavior as V4
+	// so users' dashboards aren't broken when upgrading to V5. It also allows graphing a mix of V4 and V5 apps on
+	// the same dashboard.
+	telemetryCount("datadog.dogstatsd.client.metrics", int64(tlm.TotalMetrics-t.lastSample.TotalMetrics), t.tags)
+	telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsGauge-t.lastSample.TotalMetricsGauge), t.tagsByType[gauge])
+	telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsCount-t.lastSample.TotalMetricsCount), t.tagsByType[count])
+	telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsHistogram-t.lastSample.TotalMetricsHistogram), t.tagsByType[histogram])
+	telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsDistribution-t.lastSample.TotalMetricsDistribution), t.tagsByType[distribution])
+	telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsSet-t.lastSample.TotalMetricsSet), t.tagsByType[set])
+	telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsTiming-t.lastSample.TotalMetricsTiming), t.tagsByType[timing])
+	telemetryCount("datadog.dogstatsd.client.events", int64(tlm.TotalEvents-t.lastSample.TotalEvents), t.tags)
+	telemetryCount("datadog.dogstatsd.client.service_checks", int64(tlm.TotalServiceChecks-t.lastSample.TotalServiceChecks), t.tags)
+
+	telemetryCount("datadog.dogstatsd.client.metric_dropped_on_receive", int64(tlm.TotalDroppedOnReceive-t.lastSample.TotalDroppedOnReceive), t.tags)
+
+	telemetryCount("datadog.dogstatsd.client.packets_sent", int64(tlm.TotalPayloadsSent-t.lastSample.TotalPayloadsSent), t.tags)
+	telemetryCount("datadog.dogstatsd.client.packets_dropped", int64(tlm.TotalPayloadsDropped-t.lastSample.TotalPayloadsDropped), t.tags)
+	telemetryCount("datadog.dogstatsd.client.packets_dropped_queue", int64(tlm.TotalPayloadsDroppedQueueFull-t.lastSample.TotalPayloadsDroppedQueueFull), t.tags)
+	telemetryCount("datadog.dogstatsd.client.packets_dropped_writer", int64(tlm.TotalPayloadsDroppedWriter-t.lastSample.TotalPayloadsDroppedWriter), t.tags)
+
+	telemetryCount("datadog.dogstatsd.client.bytes_dropped", int64(tlm.TotalBytesDropped-t.lastSample.TotalBytesDropped), t.tags)
+	telemetryCount("datadog.dogstatsd.client.bytes_sent", int64(tlm.TotalBytesSent-t.lastSample.TotalBytesSent), t.tags)
+	telemetryCount("datadog.dogstatsd.client.bytes_dropped_queue", int64(tlm.TotalBytesDroppedQueueFull-t.lastSample.TotalBytesDroppedQueueFull), t.tags)
+	telemetryCount("datadog.dogstatsd.client.bytes_dropped_writer", int64(tlm.TotalBytesDroppedWriter-t.lastSample.TotalBytesDroppedWriter), t.tags)
+
+	if t.aggEnabled {
+		telemetryCount("datadog.dogstatsd.client.aggregated_context", int64(tlm.AggregationNbContext-t.lastSample.AggregationNbContext), t.tags)
+		telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextGauge-t.lastSample.AggregationNbContextGauge), t.tagsByType[gauge])
+		telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextSet-t.lastSample.AggregationNbContextSet), t.tagsByType[set])
+		telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextCount-t.lastSample.AggregationNbContextCount), t.tagsByType[count])
+		telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextHistogram-t.lastSample.AggregationNbContextHistogram), t.tagsByType[histogram])
+		telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextDistribution-t.lastSample.AggregationNbContextDistribution), t.tagsByType[distribution])
+		telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextTiming-t.lastSample.AggregationNbContextTiming), t.tagsByType[timing])
+	}
+
+	t.lastSample = tlm
+
+	return m
+}
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/udp.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/udp.go
new file mode 100644
index 0000000000..e2922a9117
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/udp.go
@@ -0,0 +1,34 @@
+package statsd
+
+import (
+	"net"
+	"time"
+)
+
+// udpWriter is an internal type wrapping management of the UDP connection
+type udpWriter struct {
+	conn net.Conn
+}
+
+// newUDPWriter returns a pointer to a new udpWriter given an addr in the format "hostname:port".
+func newUDPWriter(addr string, _ time.Duration) (*udpWriter, error) {
+	udpAddr, err := net.ResolveUDPAddr("udp", addr)
+	if err != nil {
+		return nil, err
+	}
+	conn, err := net.DialUDP("udp", nil, udpAddr)
+	if err != nil {
+		return nil, err
+	}
+	writer := &udpWriter{conn: conn}
+	return writer, nil
+}
+
+// Write data to the UDP connection with no error handling
+func (w *udpWriter) Write(data []byte) (int, error) {
+	return w.conn.Write(data)
+}
+
+func (w *udpWriter) Close() error {
+	return w.conn.Close()
+}
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/uds.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/uds.go
new file mode 100644
index 0000000000..fa5f5917fe
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/uds.go
@@ -0,0 +1,88 @@
+// +build !windows
+
+package statsd
+
+import (
+	"net"
+	"sync"
+	"time"
+)
+
+// udsWriter is an internal type wrapping management of the UDS connection
+type udsWriter struct {
+	// Address to send metrics to, needed to allow reconnection on error
+	addr net.Addr
+	// Established connection object, or nil if not connected yet
+	conn net.Conn
+	// write timeout
+	writeTimeout time.Duration
+	sync.RWMutex // guards conn so the writer can replace it
+}
+
+// newUDSWriter returns a pointer to a new udsWriter given a socket file path as addr.
+func newUDSWriter(addr string, writeTimeout time.Duration) (*udsWriter, error) {
+	udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
+	if err != nil {
+		return nil, err
+	}
+	// Defer connection to first Write
+	writer := &udsWriter{addr: udsAddr, conn: nil, writeTimeout: writeTimeout}
+	return writer, nil
+}
+
+// Write data to the UDS connection with write timeout and minimal error handling:
+// create the connection if nil, and destroy it if the statsd server has disconnected
+func (w *udsWriter) Write(data []byte) (int, error) {
+	conn, err := w.ensureConnection()
+	if err != nil {
+		return 0, err
+	}
+
+	conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
+	n, e := conn.Write(data)
+
+	if err, isNetworkErr := e.(net.Error); err != nil && (!isNetworkErr || !err.Temporary()) {
+		// Statsd server disconnected, retry connecting at next packet
+		w.unsetConnection()
+		return 0, e
+	}
+	return n, e
+}
+
+func (w *udsWriter) Close() error {
+	if w.conn != nil {
+		return w.conn.Close()
+	}
+	return nil
+}
+
+func (w *udsWriter) ensureConnection() (net.Conn, error) {
+	// Check if we've already got a socket we can use
+	w.RLock()
+	currentConn := w.conn
+	w.RUnlock()
+
+	if currentConn != nil {
+		return currentConn, nil
+	}
+
+	// Looks like we might need to connect - try again with write locking.
+	w.Lock()
+	defer w.Unlock()
+	if w.conn != nil {
+		return w.conn, nil
+	}
+
+	newConn, err := net.Dial(w.addr.Network(), w.addr.String())
+	if err != nil {
+		return nil, err
+	}
+	w.conn = newConn
+	return newConn, nil
+}
+
+func (w *udsWriter) unsetConnection() {
+	w.Lock()
+	defer w.Unlock()
+	w.conn = nil
+}
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/uds_windows.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/uds_windows.go
new file mode 100644
index 0000000000..077894a33c
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/uds_windows.go
@@ -0,0 +1,14 @@
+// +build windows
+
+package statsd
+
+import (
+	"fmt"
+	"io"
+	"time"
+)
+
+// newUDSWriter is disabled on Windows as Unix sockets are not available.
+func newUDSWriter(_ string, _ time.Duration) (io.WriteCloser, error) {
+	return nil, fmt.Errorf("Unix socket is not available on Windows")
+}
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/utils.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/utils.go
new file mode 100644
index 0000000000..8c3ac84268
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/utils.go
@@ -0,0 +1,32 @@
+package statsd
+
+import (
+	"math/rand"
+	"sync"
+)
+
+func shouldSample(rate float64, r *rand.Rand, lock *sync.Mutex) bool {
+	if rate >= 1 {
+		return true
+	}
+	// sources created by rand.NewSource() (i.e. w.random) are not thread safe.
+	// TODO: use defer once the lowest Go version we support is 1.14 (defer
+	// has an overhead before that).
+	lock.Lock()
+	if r.Float64() > rate {
+		lock.Unlock()
+		return false
+	}
+	lock.Unlock()
+	return true
+}
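+
+// For illustration (not upstream code): a rate below 1 drops a corresponding
+// share of calls client-side, and the agent scales counts back up based on the
+// |@<rate> suffix in the payload. The metric name is hypothetical.
+//
+//	_ = client.Incr("cache.miss", nil, 0.25) // sent for roughly 25% of calls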
+
+func copySlice(src []string) []string {
+	if src == nil {
+		return nil
+	}
+
+	c := make([]string, len(src))
+	copy(c, src)
+	return c
+}
diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/worker.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/worker.go
new file mode 100644
index 0000000000..952a9fe365
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/worker.go
@@ -0,0 +1,150 @@
+package statsd
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+type worker struct {
+	pool       *bufferPool
+	buffer     *statsdBuffer
+	sender     *sender
+	random     *rand.Rand
+	randomLock sync.Mutex
+	sync.Mutex
+
+	inputMetrics chan metric
+	stop         chan struct{}
+}
+
+func newWorker(pool *bufferPool, sender *sender) *worker {
+	// Each worker uses its own random source and random lock to prevent
+	// workers in separate goroutines from contending for the lock on the
+	// "math/rand" package-global random source (e.g. calls like
+	// "rand.Float64()" must acquire a shared lock to get the next
+	// pseudorandom number).
+	// Note that calling "time.Now().UnixNano()" repeatedly quickly may return
+	// very similar values. That's fine for seeding the worker-specific random
+	// source because we just need an evenly distributed stream of float values.
+	// Do not use this random source for cryptographic randomness.
+	random := rand.New(rand.NewSource(time.Now().UnixNano()))
+	return &worker{
+		pool:   pool,
+		sender: sender,
+		buffer: pool.borrowBuffer(),
+		random: random,
+		stop:   make(chan struct{}),
+	}
+}
+
+func (w *worker) startReceivingMetric(bufferSize int) {
+	w.inputMetrics = make(chan metric, bufferSize)
+	go w.pullMetric()
+}
+
+func (w *worker) stopReceivingMetric() {
+	w.stop <- struct{}{}
+}
+
+func (w *worker) pullMetric() {
+	for {
+		select {
+		case m := <-w.inputMetrics:
+			w.processMetric(m)
+		case <-w.stop:
+			return
+		}
+	}
+}
+
+func (w *worker) processMetric(m metric) error {
+	if !shouldSample(m.rate, w.random, &w.randomLock) {
+		return nil
+	}
+	w.Lock()
+	var err error
+	if err = w.writeMetricUnsafe(m); err == errBufferFull {
+		w.flushUnsafe()
+		err = w.writeMetricUnsafe(m)
+	}
+	w.Unlock()
+	return err
+}
+
+func (w *worker) writeAggregatedMetricUnsafe(m metric, metricSymbol []byte, precision int) error {
+	globalPos := 0
+
+	// first check how much data we can write to the buffer:
+	//   +3 + len(metricSymbol) because the message will include '|<metricSymbol>|#' before the tags
+	//   +1 for the potential line break at the start of the metric
+	tagsSize := len(m.stags) + 4 + len(metricSymbol)
+	for _, t := range m.globalTags {
+		tagsSize += len(t) + 1
+	}
+
+	for {
+		pos, err := w.buffer.writeAggregated(metricSymbol, m.namespace, m.globalTags, m.name, m.fvalues[globalPos:], m.stags, tagsSize, precision)
+		if err == errPartialWrite {
+			// We successfully wrote part of the histogram metrics.
+			// We flush the current buffer and finish the histogram
+			// in a new one.
+			w.flushUnsafe()
+			globalPos += pos
+		} else {
+			return err
+		}
+	}
+}
+
+func (w *worker) writeMetricUnsafe(m metric) error {
+	switch m.metricType {
+	case gauge:
+		return w.buffer.writeGauge(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate, m.timestamp)
+	case count:
+		return w.buffer.writeCount(m.namespace, m.globalTags, m.name, m.ivalue, m.tags, m.rate, m.timestamp)
+	case histogram:
+		return w.buffer.writeHistogram(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
+	case distribution:
+		return w.buffer.writeDistribution(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
+	case set:
+		return w.buffer.writeSet(m.namespace, m.globalTags, m.name, m.svalue, m.tags, m.rate)
+	case timing:
+		return w.buffer.writeTiming(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
+	case event:
+		return w.buffer.writeEvent(m.evalue, m.globalTags)
+	case serviceCheck:
+		return w.buffer.writeServiceCheck(m.scvalue, m.globalTags)
+	case histogramAggregated:
+		return w.writeAggregatedMetricUnsafe(m, histogramSymbol, -1)
+	case distributionAggregated:
+		return w.writeAggregatedMetricUnsafe(m, distributionSymbol, -1)
+	case timingAggregated:
+		return w.writeAggregatedMetricUnsafe(m, timingSymbol, 6)
+	default:
+		return nil
+	}
+}
+
+func (w *worker) flush() {
+	w.Lock()
+	w.flushUnsafe()
+	w.Unlock()
+}
+
+func (w *worker) pause() {
+	w.Lock()
+}
+
+func (w *worker) unpause() {
+	w.Unlock()
+}
+
+// flushUnsafe flushes the current buffer. The lock must be held by the caller.
+// The flushed buffer is written to the network asynchronously.
+func (w *worker) flushUnsafe() {
+	if len(w.buffer.bytes()) > 0 {
+		w.sender.send(w.buffer)
+		w.buffer = w.pool.borrowBuffer()
+	}
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/.gitattributes b/vendor/github.com/DataDog/go-libddwaf/.gitattributes
new file mode 100644
index 0000000000..003a800797
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/.gitattributes
@@ -0,0 +1,3 @@
+*.dylib -diff
+*.so -diff
+*.a -diff
diff --git a/vendor/github.com/DataDog/go-libddwaf/.gitignore b/vendor/github.com/DataDog/go-libddwaf/.gitignore
new file mode 100644
index 0000000000..95311a6686
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+.vscode/
+.idea/
diff --git a/vendor/github.com/DataDog/go-libddwaf/LICENSE b/vendor/github.com/DataDog/go-libddwaf/LICENSE
new file mode 100644
index 0000000000..9301dd7ab9
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/LICENSE
@@ -0,0 +1,200 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016-present Datadog, Inc.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/DataDog/go-libddwaf/README.md b/vendor/github.com/DataDog/go-libddwaf/README.md
new file mode 100644
index 0000000000..84cb1f89d4
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/README.md
@@ -0,0 +1,120 @@
+# go-libddwaf
+
+This project's goal is to produce a higher-level API for the Go bindings to [libddwaf](https://github.com/DataDog/libddwaf): DataDog's in-app WAF.
+It consists of two separate entities: the bindings for the calls to libddwaf, and the encoder whose job is to convert _any_ Go value to its libddwaf object representation.
+
+An example usage would be:
+
+```go
+package main
+
+import (
+    _ "embed"
+    "encoding/json"
+    "log"
+    "time"
+
+    waf "github.com/DataDog/go-libddwaf"
+)
+
+// ruleset.json is an illustrative placeholder for your embedded WAF ruleset file.
+//go:embed ruleset.json
+var ruleset []byte
+
+func main() {
+    var parsedRuleset any
+
+    if err := json.Unmarshal(ruleset, &parsedRuleset); err != nil {
+        log.Fatalln(err)
+    }
+
+    wafHandle, err := waf.NewHandle(parsedRuleset, "", "")
+    if err != nil {
+        log.Fatalln(err)
+    }
+    defer wafHandle.Close()
+
+    wafCtx := waf.NewContext(wafHandle)
+    defer wafCtx.Close()
+
+    matches, actions, err := wafCtx.Run(map[string]any{
+        "server.request.path_params": "/rfiinc.txt",
+    }, time.Minute)
+    _, _, _ = matches, actions, err
+}
+```
+
+The API documentation details can be found on [pkg.go.dev](https://pkg.go.dev/github.com/DataDog/go-libddwaf).
+
+Originally, this project only provided CGO wrappers for the calls to libddwaf.
+But with the appearance of the `ddwaf_object` tree-like structure, and with the intention to build CGO-less bindings,
+the project has grown into a fully integrated part of the DataDog tracer,
+which in turn made it necessary to document it and maintain it in an orderly fashion.
+
+## Design
+
+The WAF bindings have multiple moving parts that are necessary to understand:
+
+- Handle: an object wrapper over the pointer to the C WAF handle
+- Context: an object wrapper over a pointer to the C WAF context
+- Encoder: constructs the tree of WAF objects sent to the WAF
+- Allocator: performs all writing and allocation operations needed to build WAF objects
+- Decoder: transforms WAF objects returned by the WAF into regular Go values (e.g. maps, arrays, ...)
+- Library: wraps all calls to the C code
+
+```mermaid
+flowchart LR
+
+    START:::hidden -->|NewHandle| Handle -->|NewContext| Context
+
+    Context -->|Encode Inputs| Encoder
+
+    Handle -->|Encode Ruleset| Encoder
+    Handle -->|Init WAF| Library
+    Context -->|Decode Result| Decoder
+
+    Handle -->|Decode Init Errors| Decoder
+
+    Context -->|Run| Library
+    Context -->|Store Go References| ContextAllocator
+
+    Encoder -->|Allocate Waf Objects| EncoderAllocator
+
+    EncoderAllocator -->|Copy after each encoding| ContextAllocator
+
+    Library -->|Call C code| libddwaf
+
+    classDef hidden display: none;
+```
+
+### Allocator
+
+The `cgoRefPool` type is a pure Go pool of references backing the `ddwaf_object` C values allocated on the Go memory heap.
+It is how we make sure Go-allocated data can safely be sent to the C side of the WAF.
+The main issue is the following: the `wafObject` type uses a C union to store the tree structure of the full object;
+the closest Go equivalent of a union is an interface, which is not compatible with C unions. The only way to be 100% sure
+that the Go `wafObject` struct has the same layout as the C one is to only use primitive types, so the only way to
+store a raw pointer is the `uintptr` type. But since `uintptr` does not have pointer semantics (it is
+basically just an integer), we need another structure to hold the value as a Go pointer, because the GC is lurking. That's
+where the `cgoRefPool` type comes into play: all new `wafObject` elements are created via this API, which is specially
+built to make sure there is no gap for the Garbage Collector to exploit. From there, since the underlying values of a
+`wafObject` are either arrays (for maps, structs and arrays) or strings (for all ints, booleans and strings),
+we can store 2 slices of arrays and use `runtime.KeepAlive` in each code path to protect them from the GC.
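+
+A minimal sketch of this pattern, with hypothetical names (`refPool`, `allocCString` and `use` are illustrative, not part of the package API):
+
+```go
+package sketch
+
+import (
+    "runtime"
+    "unsafe"
+)
+
+// refPool mimics the cgoRefPool idea: it owns the Go-allocated backing arrays
+// whose addresses are handed to C as raw uintptr values.
+type refPool struct {
+    pinned [][]byte
+}
+
+// allocCString copies s into a null-terminated buffer owned by the pool and
+// returns its address as a uintptr usable by the C side.
+func (p *refPool) allocCString(s string) uintptr {
+    buf := make([]byte, len(s)+1) // trailing zero byte terminates the C string
+    copy(buf, s)
+    p.pinned = append(p.pinned, buf) // the pool reference keeps the buffer reachable
+    return uintptr(unsafe.Pointer(&buf[0]))
+}
+
+func use(callC func(uintptr)) {
+    var pool refPool
+    callC(pool.allocCString("server.request.query"))
+    runtime.KeepAlive(pool) // pin the pool (and its buffers) until after the C call returns
+}
+```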
+
+### Typical call to Run()
+
+Here is the flow of operations in a simple call to Run() (a usage sketch follows this list):
+
+- Encode input data into Waf Objects
+- Lock the context mutex until the end of the call
+- Call `ddwaf_run`
+- Decode the matches and actions
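+
+A usage sketch of that flow (error handling elided; `wafHandle` is assumed to come from the example at the top of this README). The same context can be run several times as new addresses become available:
+
+```go
+wafCtx := waf.NewContext(wafHandle)
+defer wafCtx.Close()
+
+// First run: only the request path is known at this point.
+matches, actions, err := wafCtx.Run(map[string]any{
+    "server.request.path_params": "/rfiinc.txt",
+}, time.Minute)
+
+// Later run on the same context: only the newly available addresses are sent;
+// addresses sent in earlier runs are remembered by the WAF context.
+matches, actions, err = wafCtx.Run(map[string]any{
+    "server.request.query": map[string][]string{"q": {"<script>"}},
+}, time.Minute)
+_, _, _ = matches, actions, err
+```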
+
+### CGO-less C Bindings
+
+The main component used to build C bindings without CGO is called [purego](https://github.com/ebitengine/purego). On our side, the flow of execution is to embed the C shared library using `go:embed`, dump it into a file, load it using `dlopen`, resolve the symbols using `dlsym`, and finally call them.
+
+⚠️ Keep in mind that **purego only works on linux/darwin for amd64/arm64 and so does go-libddwaf.**
+
+Another requirement of `libddwaf` is an FHS-compliant filesystem on your machine and, on Linux, the availability of `libc.so.6`, `libpthread.so.0`, `libm.so.6` and `libdl.so.2` as dynamic libraries.
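+
+A reduced sketch of that flow with purego (assumptions: `libddwaf` holds the embedded library bytes and only the `ddwaf_get_version` symbol is resolved; the real loader in `lib_dl.go`/`waf_dl.go` resolves symbols through `dlsym:` struct tags):
+
+```go
+package sketch
+
+import (
+    "os"
+
+    "github.com/ebitengine/purego"
+)
+
+// loadEmbeddedLib dumps the embedded shared library to a temporary file,
+// dlopen()s it and resolves one symbol, mirroring the flow described above.
+func loadEmbeddedLib(libddwaf []byte) (getVersion uintptr, err error) {
+    file, err := os.CreateTemp("", "libddwaf-*.so")
+    if err != nil {
+        return 0, err
+    }
+    defer os.Remove(file.Name()) // the dlopen'd handle stays usable after the file is removed
+
+    if err := os.WriteFile(file.Name(), libddwaf, 0o400); err != nil {
+        return 0, err
+    }
+
+    handle, err := purego.Dlopen(file.Name(), purego.RTLD_GLOBAL|purego.RTLD_NOW)
+    if err != nil {
+        return 0, err
+    }
+
+    // Calls then go through purego.SyscallN(getVersion, ...).
+    return purego.Dlsym(handle, "ddwaf_get_version")
+}
+```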
+
+## Usual pitfalls when contributing
+
+- `dlopen` cannot be called twice in the app lifetime on macOS
+- `runtime.KeepAlive()` calls are here to prevent the GC from destroying objects too early
+- Since there is a stack switch between the Go code and the C code, usually the only C stacktrace you will ever get is from gdb
+- If a segfault happens during a call to the C code, the stacktrace of the goroutine that made the call is the one annotated with `[syscall]`
+- [GoLand](https://www.jetbrains.com/go/) does not support `CGO_ENABLED=0` (as of June 2023)
+- Keep in mind that we fully escape the type system: if you send the wrong data it will segfault in the best case, but not always!
+- The structs in `ctypes.go` are here to reproduce the memory layout of the structs in `include/ddwaf.h`, because pointers to these structs are passed directly
+- Do not use `uintptr` values coming from `unsafe.Pointer` casts of Go values as function argument or result types: they escape the pointer analysis, which can produce wrongly optimized code and crash. Pointer arithmetic is of course necessary in such a library, but it must be kept within the same function scope (see the sketch below)
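+
+As an illustration of that last point (a sketch with stand-in names; `wafObject`, `doSyscall` and `addrOf` here are illustrative, not the package's definitions):
+
+```go
+package sketch
+
+import (
+    "runtime"
+    "unsafe"
+)
+
+type wafObject struct{ value uintptr } // stand-in for the real struct
+
+func doSyscall(args ...uintptr) {} // stand-in for purego.SyscallN
+
+// Risky: the uintptr result outlives the cast, so the pointer escapes the
+// compiler's analysis and the GC may move or collect obj before the address is used.
+func addrOf(obj *wafObject) uintptr {
+    return uintptr(unsafe.Pointer(obj))
+}
+
+// Safer: perform the cast right where it is consumed and pin obj until the
+// call has returned, which is why such casts are paired with keepAlive() in this package.
+func callC(obj *wafObject) {
+    doSyscall(uintptr(unsafe.Pointer(obj)))
+    runtime.KeepAlive(obj)
+}
+```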
diff --git a/vendor/github.com/DataDog/go-libddwaf/cgo_ref_pool.go b/vendor/github.com/DataDog/go-libddwaf/cgo_ref_pool.go
new file mode 100644
index 0000000000..fccd41c9e3
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/cgo_ref_pool.go
@@ -0,0 +1,90 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package waf
+
+import (
+	"strconv"
+)
+
+// cgoRefPool is a way to make sure we can safely send Go-allocated data to the C side of the WAF.
+// The main issue is the following: the wafObject type uses a C union to store the tree structure of the full object;
+// the closest Go equivalent of a union is an interface, which is not compatible with C unions. The only way to be 100% sure
+// that the Go wafObject struct has the same layout as the C one is to only use primitive types, so the only way to
+// store a raw pointer is the uintptr type. But since uintptr does not have pointer semantics (it is
+// basically just an integer), we need another structure to hold the value as a Go pointer, because the GC is lurking. That's
+// where the cgoRefPool type comes into play: all new wafObject elements are created via this API, which is specially
+// built to make sure there is no gap for the Garbage Collector to exploit. From there, since the underlying values of a
+// wafObject are either arrays (for maps, structs and arrays) or strings (for all ints, booleans and strings),
+// we can store 2 slices of arrays and use runtime.KeepAlive in each code path to protect them from the GC.
+type cgoRefPool struct {
+	stringRefs [][]byte
+	arrayRefs  [][]wafObject
+}
+
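+// append merges the string and array references held by newRefs into refPool so that they share the pool's lifetime.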
+func (refPool *cgoRefPool) append(newRefs cgoRefPool) {
+	refPool.stringRefs = append(refPool.stringRefs, newRefs.stringRefs...)
+	refPool.arrayRefs = append(refPool.arrayRefs, newRefs.arrayRefs...)
+}
+
+func (refPool *cgoRefPool) AllocCString(str string) uintptr {
+	goArray := make([]byte, len(str)+1)
+	copy(goArray, str)
+	refPool.stringRefs = append(refPool.stringRefs, goArray)
+	goArray[len(str)] = 0 // Null termination byte for C strings
+
+	return sliceToUintptr(goArray)
+}
+
+func (refPool *cgoRefPool) AllocWafString(obj *wafObject, str string) {
+	obj._type = wafStringType
+
+	if len(str) == 0 {
+		obj.nbEntries = 0
+		obj.value = 0
+		return
+	}
+
+	goArray := make([]byte, len(str))
+	copy(goArray, str)
+	refPool.stringRefs = append(refPool.stringRefs, goArray)
+
+	obj.value = sliceToUintptr(goArray)
+	obj.nbEntries = uint64(len(goArray))
+}
+
+func (refPool *cgoRefPool) AllocWafArray(obj *wafObject, typ wafObjectType, size uint64) []wafObject {
+	if typ != wafMapType && typ != wafArrayType {
+		panic("Cannot allocate this waf object data type as an array: " + strconv.Itoa(int(typ)))
+	}
+
+	obj._type = typ
+	obj.nbEntries = size
+
+	// If the array size is zero no need to allocate anything
+	if size == 0 {
+		obj.value = 0
+		return nil
+	}
+
+	goArray := make([]wafObject, size)
+	refPool.arrayRefs = append(refPool.arrayRefs, goArray)
+
+	obj.value = sliceToUintptr(goArray)
+	return goArray
+}
+
+func (refPool *cgoRefPool) AllocWafMapKey(obj *wafObject, str string) {
+	if len(str) == 0 {
+		return
+	}
+
+	goArray := make([]byte, len(str))
+	copy(goArray, str)
+	refPool.stringRefs = append(refPool.stringRefs, goArray)
+
+	obj.parameterName = sliceToUintptr(goArray)
+	obj.parameterNameLength = uint64(len(goArray))
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/context.go b/vendor/github.com/DataDog/go-libddwaf/context.go
new file mode 100644
index 0000000000..8fc632d1ce
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/context.go
@@ -0,0 +1,155 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package waf
+
+import (
+	"sync"
+	"time"
+
+	"go.uber.org/atomic"
+)
+
+// Context is a WAF execution context. It allows running the WAF incrementally
+// when calling it multiple times to run its rules every time new addresses
+// become available. Each request must have its own Context.
+type Context struct {
+	// Instance of the WAF
+	handle   *Handle
+	cContext wafContext
+	// cgoRefs is used to retain Go references to WAF objects until the context is destroyed.
+	// As per libddwaf documentation, WAF objects must stay alive for the whole lifetime of the context.
+	cgoRefs cgoRefPool
+	// Mutex protecting the use of cContext (which is not thread-safe) and of cgoRefs.
+	mutex sync.Mutex
+
+	// Stats
+	// Cumulated internal WAF run time - in nanoseconds - for this context.
+	totalRuntimeNs atomic.Uint64
+	// Cumulated overall run time - in nanoseconds - for this context.
+	totalOverallRuntimeNs atomic.Uint64
+	// Cumulated timeout count for this context.
+	timeoutCount atomic.Uint64
+}
+
+// NewContext returns a new WAF context for the given WAF handle.
+// A nil value is returned when the WAF handle can no longer be used
+// or the WAF context couldn't be created.
+func NewContext(handle *Handle) *Context {
+	// Handle has been released
+	if handle.addRefCounter(1) == 0 {
+		return nil
+	}
+
+	cContext := wafLib.wafContextInit(handle.cHandle)
+	if cContext == 0 {
+		handle.addRefCounter(-1)
+		return nil
+	}
+
+	return &Context{handle: handle, cContext: cContext}
+}
+
+// Run encodes the given addressesToData values and runs them against the WAF rules within the given
+// timeout value. It returns the matches as a JSON string (usually used opaquely), along with the corresponding
+// actions, if any. In case of an error, matches and actions can still be returned, for instance in the case of a
+// timeout error. Errors can be tested against the RunError type.
+func (context *Context) Run(addressesToData map[string]any, timeout time.Duration) (matches []byte, actions []string, err error) {
+	if len(addressesToData) == 0 {
+		return
+	}
+
+	now := time.Now()
+	defer func() {
+		dt := time.Since(now)
+		context.totalOverallRuntimeNs.Add(uint64(dt.Nanoseconds()))
+	}()
+
+	encoder := encoder{
+		stringMaxSize:    wafMaxStringLength,
+		containerMaxSize: wafMaxContainerSize,
+		objectMaxDepth:   wafMaxContainerDepth,
+	}
+	obj, err := encoder.Encode(addressesToData)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// ddwaf_run cannot run concurrently, and the deferred cgoRefs append below writes to the context state, so we need a mutex
+	context.mutex.Lock()
+	defer context.mutex.Unlock()
+
+	// Save the Go pointer references to addressesToData that were referenced by the encoder
+	// into C ddwaf_objects. libddwaf's API requires keeping this data alive for the lifetime of the ddwaf_context.
+	defer context.cgoRefs.append(encoder.cgoRefs)
+
+	return context.run(obj, timeout, &encoder.cgoRefs)
+}
+
+func (context *Context) run(obj *wafObject, timeout time.Duration, cgoRefs *cgoRefPool) ([]byte, []string, error) {
+	// RLock the handle to safely get read access to the WAF handle and prevent concurrent changes of it
+	// such as a rules-data update.
+	context.handle.mutex.RLock()
+	defer context.handle.mutex.RUnlock()
+
+	result := new(wafResult)
+	defer wafLib.wafResultFree(result)
+
+	ret := wafLib.wafRun(context.cContext, obj, result, uint64(timeout/time.Microsecond))
+
+	context.totalRuntimeNs.Add(result.total_runtime)
+	matches, actions, err := unwrapWafResult(ret, result)
+	if err == ErrTimeout {
+		context.timeoutCount.Inc()
+	}
+
+	return matches, actions, err
+}
+
+func unwrapWafResult(ret wafReturnCode, result *wafResult) (matches []byte, actions []string, err error) {
+	if result.timeout > 0 {
+		err = ErrTimeout
+	}
+
+	if ret == wafOK {
+		return nil, nil, err
+	}
+
+	if ret != wafMatch {
+		return nil, nil, goRunError(ret)
+	}
+
+	if result.data != 0 {
+		matches = []byte(gostring(cast[byte](result.data)))
+	}
+
+	if size := result.actions.size; size > 0 {
+		actions = decodeActions(result.actions.array, uint64(size))
+	}
+
+	return matches, actions, err
+}
+
+// Close calls handle.closeContext, which calls ddwaf_context_destroy and may also destroy the handle if it is in termination state.
+func (context *Context) Close() {
+	defer context.handle.closeContext(context)
+	// Keep the Go pointer references until the end of the context
+	keepAlive(context.cgoRefs)
+	// The context is no longer used so we can try releasing the Go pointer references asap by nulling them
+	context.cgoRefs = cgoRefPool{}
+}
+
+// TotalRuntime returns the cumulated WAF runtime across various run calls within the same WAF context.
+// Returned time is in nanoseconds.
+func (context *Context) TotalRuntime() (overallRuntimeNs, internalRuntimeNs uint64) {
+	return context.totalOverallRuntimeNs.Load(), context.totalRuntimeNs.Load()
+}
+
+// TotalTimeouts returns the cumulated amount of WAF timeouts across various run calls within the same WAF context.
+func (context *Context) TotalTimeouts() uint64 {
+	return context.timeoutCount.Load()
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/ctypes.go b/vendor/github.com/DataDog/go-libddwaf/ctypes.go
new file mode 100644
index 0000000000..f5ab1ea263
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/ctypes.go
@@ -0,0 +1,176 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package waf
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const (
+	wafMaxStringLength   = 4096
+	wafMaxContainerDepth = 20
+	wafMaxContainerSize  = 256
+	wafRunTimeout        = 5000
+)
+
+type wafReturnCode int32
+
+const (
+	wafErrInternal        wafReturnCode = -3
+	wafErrInvalidObject                 = -2
+	wafErrInvalidArgument               = -1
+	wafOK                               = 0
+	wafMatch                            = 1
+)
+
+// wafObjectType is an enum in C which has the size of a DWORD.
+// DWORD is 4 bytes on amd64 and arm64, so uint32 it is.
+type wafObjectType uint32
+
+const (
+	wafInvalidType wafObjectType = 0
+	wafIntType                   = 1 << 0
+	wafUintType                  = 1 << 1
+	wafStringType                = 1 << 2
+	wafArrayType                 = 1 << 3
+	wafMapType                   = 1 << 4
+)
+
+type wafObject struct {
+	parameterName       uintptr
+	parameterNameLength uint64
+	value               uintptr
+	nbEntries           uint64
+	_type               wafObjectType
+	// Forced padding:
+	// We only support two archs, and cgo generates the same padding for both.
+	// We don't want the C struct to be packed because Go would apply the same padding itself;
+	// we add it explicitly just so we don't take any chances.
+	// Besides, we cannot pack a struct in Go, so things would get tricky if the C struct were
+	// packed (apart from breaking all tracers, of course).
+	_ [4]byte
+}
+
+type wafConfig struct {
+	limits     wafConfigLimits
+	obfuscator wafConfigObfuscator
+	freeFn     uintptr
+}
+
+type wafConfigLimits struct {
+	maxContainerSize  uint32
+	maxContainerDepth uint32
+	maxStringLength   uint32
+}
+
+type wafConfigObfuscator struct {
+	keyRegex   uintptr // char *
+	valueRegex uintptr // char *
+}
+
+type wafResult struct {
+	timeout       byte
+	data          uintptr
+	actions       wafResultActions
+	total_runtime uint64
+}
+
+type wafResultActions struct {
+	array uintptr // char **
+	size  uint32
+	_     [4]byte // Forced padding
+}
+
+type wafRulesetInfo struct {
+	loaded  uint16
+	failed  uint16
+	errors  wafObject
+	version uintptr // char *
+}
+
+// wafHandle is a forward declaration in ddwaf.h header
+// We basically don't need to modify it, only to give it to the waf
+type wafHandle uintptr
+
+// wafContext is a forward declaration in ddwaf.h header
+// We basically don't need to modify it, only to give it to the waf
+type wafContext uintptr
+
+// gostring copies a char* to a Go string.
+func gostring(ptr *byte) string {
+	if ptr == nil {
+		return ""
+	}
+	var length int
+	for {
+		if *(*byte)(unsafe.Add(unsafe.Pointer(ptr), uintptr(length))) == '\x00' {
+			break
+		}
+		length++
+	}
+	//string builtin copies the slice
+	return string(unsafe.Slice(ptr, length))
+}
+
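+// gostringSized copies size bytes of C memory starting at ptr into a Go string.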
+func gostringSized(ptr *byte, size uint64) string {
+	if ptr == nil {
+		return ""
+	}
+	return string(unsafe.Slice(ptr, size))
+}
+
+// cstring converts a go string to *byte that can be passed to C code.
+func cstring(name string) *byte {
+	var b = make([]byte, len(name)+1)
+	copy(b, name)
+	return &b[0]
+}
+
+// cast is used to centralize the unsafe casting of C-allocated pointers.
+// We take the address and then dereference it to keep go vet from flagging a possible misuse of unsafe.Pointer.
+func cast[T any](ptr uintptr) *T {
+	return (*T)(*(*unsafe.Pointer)(unsafe.Pointer(&ptr)))
+}
+
+// castWithOffset is the same as cast but adding an offset to the pointer by a multiple of the size
+// of the type pointed.
+func castWithOffset[T any](ptr uintptr, offset uint64) *T {
+	return (*T)(unsafe.Add(*(*unsafe.Pointer)(unsafe.Pointer(&ptr)), offset*uint64(unsafe.Sizeof(*new(T)))))
+}
+
+// ptrToUintptr is a helper to centralize the usage of unsafe.Pointer.
+// Do not use this function to cast interfaces.
+func ptrToUintptr[T any](arg *T) uintptr {
+	return uintptr(unsafe.Pointer(arg))
+}
+
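+// sliceToUintptr returns the address of the slice's backing array as a uintptr.
+// The caller must keep the slice alive for as long as the C side uses the pointer.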
+func sliceToUintptr[T any](arg []T) uintptr {
+	return (*reflect.SliceHeader)(unsafe.Pointer(&arg)).Data
+}
+
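+// stringToUintptr returns the address of the string's backing data as a uintptr.
+// The caller must keep the string alive for as long as the C side uses the pointer.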
+func stringToUintptr(arg string) uintptr {
+	return (*reflect.StringHeader)(unsafe.Pointer(&arg)).Data
+}
+
+// keepAlive() globals
+var (
+	alwaysFalse bool
+	escapeSink  any
+)
+
+// keepAlive is a copy of runtime.KeepAlive
+// keepAlive has 2 usages:
+// - It forces the deallocation of the memory to take place later than expected (just like runtime.KeepAlive)
+// - It forces the given argument x to be escaped on the heap by saving it into a global value (Go doesn't provide a standard way to do it as of today)
+// It is implemented so that the compiler cannot optimize it.
+//
+//go:noinline
+func keepAlive[T any](x T) {
+	if alwaysFalse {
+		escapeSink = x
+	}
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/decoder.go b/vendor/github.com/DataDog/go-libddwaf/decoder.go
new file mode 100644
index 0000000000..b877025398
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/decoder.go
@@ -0,0 +1,73 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package waf
+
+// decodeErrors transforms the wafObject found in the wafRulesetInfo returned by the call to wafDl.wafInit into a map where
+// keys are the error messages and values are arrays of all the rule ids which triggered that specific error
+func decodeErrors(obj *wafObject) (map[string][]string, error) {
+	if obj._type != wafMapType {
+		return nil, errInvalidObjectType
+	}
+
+	if obj.value == 0 && obj.nbEntries > 0 {
+		return nil, errNilObjectPtr
+	}
+
+	wafErrors := map[string][]string{}
+	for i := uint64(0); i < obj.nbEntries; i++ {
+		objElem := castWithOffset[wafObject](obj.value, i)
+		if objElem._type != wafArrayType {
+			return nil, errInvalidObjectType
+		}
+
+		errorMessage := gostringSized(cast[byte](objElem.parameterName), objElem.parameterNameLength)
+		ruleIds, err := decodeRuleIdArray(objElem)
+		if err != nil {
+			return nil, err
+		}
+
+		wafErrors[errorMessage] = ruleIds
+	}
+
+	return wafErrors, nil
+}
+
+func decodeRuleIdArray(obj *wafObject) ([]string, error) {
+	if obj._type != wafArrayType {
+		return nil, errInvalidObjectType
+	}
+
+	if obj.value == 0 && obj.nbEntries > 0 {
+		return nil, errNilObjectPtr
+	}
+
+	var ruleIds []string
+	for i := uint64(0); i < obj.nbEntries; i++ {
+		objElem := castWithOffset[wafObject](obj.value, i)
+		if objElem._type != wafStringType {
+			return nil, errInvalidObjectType
+		}
+
+		ruleIds = append(ruleIds, gostringSized(cast[byte](objElem.value), objElem.nbEntries))
+	}
+
+	return ruleIds, nil
+}
+
+func decodeActions(cActions uintptr, size uint64) []string {
+	if size == 0 {
+		return nil
+	}
+
+	actions := make([]string, size)
+	for i := uint64(0); i < size; i++ {
+		// This line does the following operation without casts:
+		// gostring(*(cActions + i * sizeof(ptr)))
+		actions[i] = gostring(*castWithOffset[*byte](cActions, i))
+	}
+
+	return actions
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/embed_darwin_amd64.go b/vendor/github.com/DataDog/go-libddwaf/embed_darwin_amd64.go
new file mode 100644
index 0000000000..952d4130e4
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/embed_darwin_amd64.go
@@ -0,0 +1,13 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build darwin && amd64 && !go1.21
+
+package waf
+
+import _ "embed" // Needed for go:embed
+
+//go:embed lib/darwin-amd64/_libddwaf.dylib
+var libddwaf []byte
diff --git a/vendor/github.com/DataDog/go-libddwaf/embed_darwin_arm64.go b/vendor/github.com/DataDog/go-libddwaf/embed_darwin_arm64.go
new file mode 100644
index 0000000000..0a3bc72ac0
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/embed_darwin_arm64.go
@@ -0,0 +1,13 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build darwin && arm64 && !go1.21
+
+package waf
+
+import _ "embed" // Needed for go:embed
+
+//go:embed lib/darwin-arm64/_libddwaf.dylib
+var libddwaf []byte
diff --git a/vendor/github.com/DataDog/go-libddwaf/embed_linux_amd64.go b/vendor/github.com/DataDog/go-libddwaf/embed_linux_amd64.go
new file mode 100644
index 0000000000..55d6693b32
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/embed_linux_amd64.go
@@ -0,0 +1,13 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux && amd64 && !go1.21
+
+package waf
+
+import _ "embed" // Needed for go:embed
+
+//go:embed lib/linux-amd64/libddwaf.so
+var libddwaf []byte
diff --git a/vendor/github.com/DataDog/go-libddwaf/embed_linux_arm64.go b/vendor/github.com/DataDog/go-libddwaf/embed_linux_arm64.go
new file mode 100644
index 0000000000..fe55f81ebc
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/embed_linux_arm64.go
@@ -0,0 +1,13 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux && arm64 && !go1.21
+
+package waf
+
+import _ "embed" // Needed for go:embed
+
+//go:embed lib/linux-arm64/libddwaf.so
+var libddwaf []byte
diff --git a/vendor/github.com/DataDog/go-libddwaf/encoder.go b/vendor/github.com/DataDog/go-libddwaf/encoder.go
new file mode 100644
index 0000000000..153c124504
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/encoder.go
@@ -0,0 +1,256 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package waf
+
+import (
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+// encoder encodes Go values into wafObjects. Only the subset of Go types representable as wafObjects
+// is encoded; the rest is ignored.
+// The encoder allocates the memory required for new wafObjects on the Go heap, which must be kept
+// referenced for their lifetime in the C world. This lifetime depends on the ddwaf function used with
+// the encoded result. The Go references of the allocated wafObjects, along with every Go pointer they may
+// reference now or in the future, are stored and referenced in the `cgoRefs` field. The user MUST leverage
+// `keepAlive()` with it according to its ddwaf use-case.
+type encoder struct {
+	containerMaxSize int
+	stringMaxSize    int
+	objectMaxDepth   int
+	cgoRefs          cgoRefPool
+}
+
+func newMaxEncoder() *encoder {
+	return &encoder{
+		containerMaxSize: math.MaxInt,
+		stringMaxSize:    math.MaxInt,
+		objectMaxDepth:   math.MaxInt,
+	}
+}
+
+func (encoder *encoder) Encode(data any) (*wafObject, error) {
+	value := reflect.ValueOf(data)
+	wo := &wafObject{}
+
+	if err := encoder.encode(value, wo, encoder.objectMaxDepth); err != nil {
+		return nil, err
+	}
+
+	return wo, nil
+}
+
+func (encoder *encoder) encode(value reflect.Value, obj *wafObject, depth int) error {
+	switch kind := value.Kind(); {
+	// Terminal cases (leafs of the tree)
+	case kind == reflect.Invalid:
+		return errUnsupportedValue
+
+	// 		Booleans
+	case kind == reflect.Bool && value.Bool(): // true
+		return encoder.encodeString("true", wafStringType, obj)
+	case kind == reflect.Bool && !value.Bool(): // false
+		return encoder.encodeString("false", wafStringType, obj)
+
+	// 		Numbers
+	case value.CanInt(): // any int type or alias
+		return encoder.encodeString(strconv.FormatInt(value.Int(), 10), wafStringType, obj)
+	case value.CanUint(): // any Uint type or alias
+		return encoder.encodeString(strconv.FormatUint(value.Uint(), 10), wafStringType, obj)
+	case value.CanFloat(): // any float type or alias
+		return encoder.encodeString(strconv.FormatInt(int64(math.Round(value.Float())), 10), wafStringType, obj)
+
+	//		Strings
+	case kind == reflect.String: // string type
+		return encoder.encodeString(value.String(), wafStringType, obj)
+	case value.Type() == reflect.TypeOf([]byte(nil)): // byte array -> string
+		return encoder.encodeString(string(value.Bytes()), wafStringType, obj)
+
+	// Recursive cases (internal nodes of the tree)
+	case kind == reflect.Interface || kind == reflect.Pointer: // Pointers and interfaces are dereferenced, not encoded themselves
+		return encoder.encode(value.Elem(), obj, depth)
+	case kind == reflect.Array || kind == reflect.Slice: // either an array or a slice
+		return encoder.encodeArray(value, obj, depth)
+	case kind == reflect.Map:
+		return encoder.encodeMap(value, obj, depth)
+	case kind == reflect.Struct:
+		return encoder.encodeStruct(value, obj, depth)
+
+	default:
+		return errUnsupportedValue
+	}
+}
+
+func (encoder *encoder) encodeString(str string, typ wafObjectType, obj *wafObject) error {
+	if len(str) > encoder.stringMaxSize {
+		str = str[:encoder.stringMaxSize]
+	}
+
+	encoder.cgoRefs.AllocWafString(obj, str)
+	return nil
+}
+
+func getFieldNameFromType(field reflect.StructField) (string, bool) {
+	fieldName := field.Name
+
+	// Private and synthetics fields
+	if len(fieldName) < 1 || unicode.IsLower(rune(fieldName[0])) {
+		return "", false
+	}
+
+	// Use the json tag name as field name if present
+	if tag, ok := field.Tag.Lookup("json"); ok {
+		if i := strings.IndexByte(tag, byte(',')); i > 0 {
+			tag = tag[:i]
+		}
+		if len(tag) > 0 {
+			fieldName = tag
+		}
+	}
+
+	return fieldName, true
+}
+
+// encodeStruct takes a reflect.Value and a wafObject pointer and iterates over the struct fields to build
+// a wafObject map of type wafMapType. The specificities are the following:
+// - It will only take the first encoder.containerMaxSize elements of the struct
+// - If the field has a json tag, the tag becomes the field name
+// - Private fields and values whose encoding fails are skipped
+func (encoder *encoder) encodeStruct(value reflect.Value, obj *wafObject, depth int) error {
+	if depth < 0 {
+		return errMaxDepth
+	}
+
+	typ := value.Type()
+	nbFields := typ.NumField()
+	capacity := nbFields
+	length := 0
+	if capacity > encoder.containerMaxSize {
+		capacity = encoder.containerMaxSize
+	}
+
+	objArray := encoder.cgoRefs.AllocWafArray(obj, wafMapType, uint64(capacity))
+	for i := 0; length < capacity && i < nbFields; i++ {
+		fieldType := typ.Field(i)
+		fieldName, usable := getFieldNameFromType(fieldType)
+		if !usable {
+			continue
+		}
+
+		objElem := &objArray[length]
+		if encoder.encodeMapKey(reflect.ValueOf(fieldName), objElem) != nil {
+			continue
+		}
+
+		if encoder.encode(value.Field(i), objElem, depth-1) != nil {
+			continue
+		}
+
+		length++
+	}
+
+	// Set the length to the final number of successfully encoded elements
+	obj.nbEntries = uint64(length)
+	return nil
+}
+
+// encodeMap takes a reflect.Value and a wafObject pointer and iterates on the map elements and returns
+// a wafObject map of type wafMapType. The specificities are the following:
+// - It will only take the first encoder.containerMaxSize elements of the map
+// - Values and keys producing an error at encoding will be skipped
+func (encoder *encoder) encodeMap(value reflect.Value, obj *wafObject, depth int) error {
+	if depth < 0 {
+		return errMaxDepth
+	}
+
+	capacity := value.Len()
+	length := 0
+	if capacity > encoder.containerMaxSize {
+		capacity = encoder.containerMaxSize
+	}
+
+	objArray := encoder.cgoRefs.AllocWafArray(obj, wafMapType, uint64(capacity))
+	for iter := value.MapRange(); iter.Next(); {
+		if length == capacity {
+			break
+		}
+
+		objElem := &objArray[length]
+		if encoder.encodeMapKey(iter.Key(), objElem) != nil {
+			continue
+		}
+
+		if encoder.encode(iter.Value(), objElem, depth-1) != nil {
+			continue
+		}
+
+		length++
+	}
+
+	// Fix the size because we skipped map entries
+	obj.nbEntries = uint64(length)
+	return nil
+}
+
+// encodeMapKey takes a reflect.Value and a wafObject and fills the wafObject so that it can be used as a map key.
+// We use the function cgoRefPool.AllocWafMapKey to store the key in the wafObject. But first we need
+// to grab the real underlying value by recursing through the pointer and interface values.
+func (encoder *encoder) encodeMapKey(value reflect.Value, obj *wafObject) error {
+	kind := value.Kind()
+	for ; kind == reflect.Pointer || kind == reflect.Interface; value, kind = value.Elem(), value.Elem().Kind() {
+		if value.IsNil() {
+			return errInvalidMapKey
+		}
+	}
+
+	if kind != reflect.String && value.Type() != reflect.TypeOf([]byte(nil)) {
+		return errInvalidMapKey
+	}
+
+	if value.Type() == reflect.TypeOf([]byte(nil)) {
+		encoder.cgoRefs.AllocWafMapKey(obj, string(value.Bytes()))
+	}
+
+	if reflect.String == kind {
+		encoder.cgoRefs.AllocWafMapKey(obj, value.String())
+	}
+
+	return nil
+}
+
+// encodeArray takes a reflect.Value and a wafObject pointer and iterates on the elements and returns
+// a wafObject array of type wafArrayType. The specificities are the following:
+// - It will only take the first encoder.containerMaxSize elements of the array
+// - Values producing an error at encoding will be skipped
+func (encoder *encoder) encodeArray(value reflect.Value, obj *wafObject, depth int) error {
+	if depth < 0 {
+		return errMaxDepth
+	}
+
+	length := value.Len()
+	capacity := length
+	if capacity > encoder.containerMaxSize {
+		capacity = encoder.containerMaxSize
+	}
+
+	currIndex := 0
+	objArray := encoder.cgoRefs.AllocWafArray(obj, wafArrayType, uint64(capacity))
+	for i := 0; currIndex < capacity && i < length; i++ {
+		objElem := &objArray[currIndex]
+		if encoder.encode(value.Index(i), objElem, depth-1) != nil {
+			continue
+		}
+
+		currIndex++
+	}
+
+	// Fix the size because we may have skipped elements
+	obj.nbEntries = uint64(currIndex)
+	return nil
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/handle.go b/vendor/github.com/DataDog/go-libddwaf/handle.go
new file mode 100644
index 0000000000..c7652a81ff
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/handle.go
@@ -0,0 +1,169 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package waf
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/DataDog/go-libddwaf/internal/noopfree"
+	"go.uber.org/atomic"
+)
+
+// Handle represents an instance of the WAF for a given ruleset.
+type Handle struct {
+	// Instance of the WAF
+	cHandle wafHandle
+
+	// Lock-less reference counter avoiding blocking calls to the Close() method
+	// while WAF contexts are still using the WAF handle. Instead, we let the
+	// release actually happen only when the reference counter reaches 0.
+	// This can happen either from a request handler calling its WAF context's
+	// Close() method, or either from the appsec instance calling the WAF
+	// handle's Close() method when creating a new WAF handle with new rules.
+	// Note that this means several instances of the WAF can exist at the same
+	// time with their own set of rules. This choice was done to be able to
+	// efficiently update the security rules concurrently, without having to
+	// block the request handlers for the time of the security rules update.
+	refCounter *atomic.Int32
+
+	// RWMutex protecting the R/W accesses to the internal rules data (stored
+	// in the handle).
+	mutex sync.RWMutex
+
+	// rulesetInfo holds information about rules initialization
+	rulesetInfo RulesetInfo
+}
+
+// NewHandle creates and returns a new instance of the WAF with the given security rules and configuration
+// of the sensitive data obfuscator. The returned handle is nil in case of an error.
+// Rules-related metrics, including errors, are accessible with the `RulesetInfo()` method.
+func NewHandle(rules any, keyObfuscatorRegex string, valueObfuscatorRegex string) (*Handle, error) {
+	// The order of operations is the following:
+	// - Open the ddwaf C library
+	// - Encode the security rules as a ddwaf_object
+	// - Create a ddwaf_config object and fill the values
+	// - Run ddwaf_init to create a new handle based on the given rules and config
+	// - Check for errors and streamline the ddwaf_ruleset_info returned
+
+	if ok, err := Load(); !ok {
+		return nil, err
+		// The case where ok == true && err != nil is ignored on purpose, as
+		// this is out of the scope of NewHandle which only requires a properly
+		// loaded libddwaf in order to use it
+	}
+
+	encoder := newMaxEncoder()
+	obj, err := encoder.Encode(rules)
+	if err != nil {
+		return nil, fmt.Errorf("could not encode the WAF ruleset into a WAF object: %w", err)
+	}
+
+	config := newConfig(&encoder.cgoRefs, keyObfuscatorRegex, valueObfuscatorRegex)
+	cRulesetInfo := new(wafRulesetInfo)
+
+	cHandle := wafLib.wafInit(obj, config, cRulesetInfo)
+	keepAlive(encoder.cgoRefs)
+	// Note that the encoded obj was copied by libddwaf, so we don't need to keep them alive
+	// for the lifetime of the handle (ddwaf API guarantee).
+	if cHandle == 0 {
+		return nil, errors.New("could not instantiate the WAF")
+	}
+
+	defer wafLib.wafRulesetInfoFree(cRulesetInfo)
+
+	errorsMap, err := decodeErrors(&cRulesetInfo.errors)
+	if err != nil { // Something is very wrong
+		return nil, fmt.Errorf("could not decode the WAF ruleset errors: %w", err)
+	}
+
+	return &Handle{
+		cHandle:    cHandle,
+		refCounter: atomic.NewInt32(1), // We count the handle itself in the counter
+		rulesetInfo: RulesetInfo{
+			Loaded:  cRulesetInfo.loaded,
+			Failed:  cRulesetInfo.failed,
+			Errors:  errorsMap,
+			Version: gostring(cast[byte](cRulesetInfo.version)),
+		},
+	}, nil
+}
+
+// RulesetInfo returns the rules initialization metrics for the current WAF handle
+func (handle *Handle) RulesetInfo() RulesetInfo {
+	return handle.rulesetInfo
+}
+
+// Addresses returns the list of addresses the WAF rule is expecting.
+func (handle *Handle) Addresses() []string {
+	return wafLib.wafRequiredAddresses(handle.cHandle)
+}
+
+// closeContext calls ddwaf_context_destroy and possibly ddwaf_destroy on the handle
+func (handle *Handle) closeContext(context *Context) {
+	wafLib.wafContextDestroy(context.cContext)
+	if handle.addRefCounter(-1) == 0 {
+		wafLib.wafDestroy(handle.cHandle)
+	}
+}
+
+// Close puts the handle in termination state; once all the contexts are closed, the handle will be destroyed
+func (handle *Handle) Close() {
+	if handle.addRefCounter(-1) > 0 {
+		// There are still Contexts that are not closed
+		return
+	}
+
+	wafLib.wafDestroy(handle.cHandle)
+}
+
+// addRefCounter adds x to Handle.refCounter.
+// It relies on a CAS spin-loop implementation in order to avoid changing the
+// counter when 0 has been reached.
+func (handle *Handle) addRefCounter(x int32) int32 {
+	for {
+		current := handle.refCounter.Load()
+		if current == 0 {
+			// The object was released
+			return 0
+		}
+		if swapped := handle.refCounter.CompareAndSwap(current, current+x); swapped {
+			return current + x
+		}
+	}
+}
+
+func newConfig(cgoRefs *cgoRefPool, keyObfuscatorRegex string, valueObfuscatorRegex string) *wafConfig {
+	config := new(wafConfig)
+	*config = wafConfig{
+		limits: wafConfigLimits{
+			maxContainerDepth: wafMaxContainerDepth,
+			maxContainerSize:  wafMaxContainerSize,
+			maxStringLength:   wafMaxStringLength,
+		},
+		obfuscator: wafConfigObfuscator{
+			keyRegex:   cgoRefs.AllocCString(keyObfuscatorRegex),
+			valueRegex: cgoRefs.AllocCString(valueObfuscatorRegex),
+		},
+		// Prevent libddwaf from freeing our Go-memory-allocated ddwaf_objects
+		freeFn: noopfree.NoopFreeFn,
+	}
+	return config
+}
+
+func goRunError(rc wafReturnCode) error {
+	switch rc {
+	case wafErrInternal:
+		return ErrInternal
+	case wafErrInvalidObject:
+		return ErrInvalidObject
+	case wafErrInvalidArgument:
+		return ErrInvalidArgument
+	default:
+		return fmt.Errorf("unknown waf return code %d", int(rc))
+	}
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/internal/noopfree/noopfree.go b/vendor/github.com/DataDog/go-libddwaf/internal/noopfree/noopfree.go
new file mode 100644
index 0000000000..3c6b7eac7c
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/internal/noopfree/noopfree.go
@@ -0,0 +1,15 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package noopfree provides a noop-ed free function. A separate package is
+// needed to avoid the special go-build case with CGO enabled where it compiles
+// .s files with CC instead of the Go assembler that we want here.
+package noopfree
+
+import "unsafe"
+
+//go:linkname _noop_free _noop_free
+var _noop_free byte
+var NoopFreeFn uintptr = uintptr(unsafe.Pointer(&_noop_free))
diff --git a/vendor/github.com/DataDog/go-libddwaf/internal/noopfree/noopfree.s b/vendor/github.com/DataDog/go-libddwaf/internal/noopfree/noopfree.s
new file mode 100644
index 0000000000..afabadb340
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/internal/noopfree/noopfree.s
@@ -0,0 +1,10 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+#include "textflag.h"
+
+TEXT _noop_free(SB), NOSPLIT, $0-0
+	RET
+
diff --git a/vendor/github.com/DataDog/go-libddwaf/lib_dl.go b/vendor/github.com/DataDog/go-libddwaf/lib_dl.go
new file mode 100644
index 0000000000..e68f043b69
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/lib_dl.go
@@ -0,0 +1,88 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Purego only works on linux/macOS with amd64 and arm64 from now
+//go:build (linux || darwin) && (amd64 || arm64) && !go1.21
+
+package waf
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+
+	"github.com/ebitengine/purego"
+)
+
+// libDl wraps all interactions with the purego library
+type libDl struct {
+	handle uintptr
+}
+
+// dlOpen opens a handle to the shared library `name` and fills `lib` with the symbols loaded from it.
+// The `lib` parameter must point to a struct whose fields carry tags of the form `dlsym:<symbol_name>`;
+// dlOpen fills those fields with the corresponding symbols loaded from the library.
+// The struct must also have a field of type `libDl` so that the handle can be closed later.
+func dlOpen(name string, lib any) error {
+	handle, err := purego.Dlopen(name, purego.RTLD_GLOBAL|purego.RTLD_NOW)
+	if err != nil {
+		return fmt.Errorf("error opening shared library '%s'. Reason: %w", name, err)
+	}
+
+	return dlOpenFromHandle(handle, lib)
+}
+
+func dlOpenFromHandle(handle uintptr, lib any) error {
+	foundHandle := false
+
+	libValue := reflect.ValueOf(lib).Elem()
+	libType := reflect.TypeOf(lib).Elem()
+	dl := libDl{handle: handle}
+
+	for i := 0; i < libValue.NumField(); i++ {
+		fieldType := libType.Field(i)
+
+		symbolName, ok := fieldType.Tag.Lookup("dlsym")
+		if ok {
+			symbol, err := purego.Dlsym(handle, symbolName)
+			if err != nil {
+				return fmt.Errorf("cannot load symbol '%s'. Reason: %w", symbolName, err)
+			}
+
+			libValue.Field(i).Set(reflect.ValueOf(symbol))
+			continue
+		}
+
+		if fieldType.Type == reflect.TypeOf(dl) {
+			// Bypass the fact the reflect package doesn't allow writing to private struct fields by directly writing to the field's memory address ourselves
+			reflect.NewAt(reflect.TypeOf(dl), unsafe.Pointer(libValue.Field(i).UnsafeAddr())).Elem().Set(reflect.ValueOf(dl))
+			foundHandle = true
+		}
+	}
+
+	if !foundHandle {
+		return fmt.Errorf("could not find `libLoader` embedding to set the library handle, cowardly refusing the handle to be lost")
+	}
+
+	return nil
+}
+
+// syscall is the only way to make C calls with this interface.
+// The purego implementation limits the number of arguments to 9 and will panic if more are provided.
+// Note: `purego.SyscallN` has 3 return values:
+//
+//	1st - The return value, a pointer or an int of any type
+//	2nd - The return value, if it is a float
+//	3rd - The value of `errno` at the end of the call
+func (lib *libDl) syscall(fn uintptr, args ...uintptr) uintptr {
+	ret, _, _ := purego.SyscallN(fn, args...)
+	return ret
+}
+
+func (lib *libDl) Close() error {
+	return purego.Dlclose(lib.handle)
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/safe.go b/vendor/github.com/DataDog/go-libddwaf/safe.go
new file mode 100644
index 0000000000..de24bb51f4
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/safe.go
@@ -0,0 +1,68 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package waf
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+
+	"github.com/pkg/errors"
+)
+
+// PanicError is an error type wrapping a recovered panic value that happened
+// during a function call. Such error must be considered unrecoverable and be
+// used to try to gracefully abort. Keeping using this package after such an
+// error is unreliable and the caller must rather stop using the library.
+// Examples include safety checks errors.
+type PanicError struct {
+	// The function symbol name that was given to `tryCall()`.
+	in string
+	// The recovered panic error while executing the function `in`.
+	Err error
+}
+
+func newPanicError(in func() error, err error) *PanicError {
+	return &PanicError{
+		in:  runtime.FuncForPC(reflect.ValueOf(in).Pointer()).Name(),
+		Err: err,
+	}
+}
+
+// Unwrap the error and return it.
+// Required by errors.Is and errors.As functions.
+func (e *PanicError) Unwrap() error {
+	return e.Err
+}
+
+// Error returns the error string representation.
+func (e *PanicError) Error() string {
+	return fmt.Sprintf("panic while executing %s: %#+v", e.in, e.Err)
+}
+
+// tryCall calls function `f` and recovers from any panic occurring while it
+// executes, returning it in a `PanicError` object type.
+func tryCall(f func() error) (err error) {
+	defer func() {
+		r := recover()
+		if r == nil {
+			// Note that panic(nil) matches this case and cannot be really tested for.
+			return
+		}
+
+		switch actual := r.(type) {
+		case error:
+			err = errors.WithStack(actual)
+		case string:
+			err = errors.New(actual)
+		default:
+			err = errors.Errorf("%v", r)
+		}
+
+		err = newPanicError(f, err)
+	}()
+	return f()
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/symbols_linux_cgo.go b/vendor/github.com/DataDog/go-libddwaf/symbols_linux_cgo.go
new file mode 100644
index 0000000000..86c57d010b
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/symbols_linux_cgo.go
@@ -0,0 +1,21 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build cgo && linux && !go1.21
+
+package waf
+
+/*
+// Needed, otherwise the ceilf call is optimized away or the builtin version is used
+#cgo CFLAGS: -O0
+#cgo LDFLAGS: -lm
+float __attribute__((__noinline__)) ceilf(float arg);
+*/
+import "C"
+
+// Required because libddwaf uses ceilf, located in libm.
+// This forces CGO to link against libm; since libm is then
+// loaded, we can dlopen the WAF without issues.
+var _ = C.ceilf(2.3)
diff --git a/vendor/github.com/DataDog/go-libddwaf/symbols_linux_purego.go b/vendor/github.com/DataDog/go-libddwaf/symbols_linux_purego.go
new file mode 100644
index 0000000000..840bd7bf70
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/symbols_linux_purego.go
@@ -0,0 +1,15 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !cgo && linux && !go1.21
+
+package waf
+
+// Adds a dynamic import for libm.so because libddwaf needs the ceilf symbol.
+// This mechanism only works when CGO is not enabled.
+//
+//go:cgo_import_dynamic purego_ceilf ceilf "libm.so.6"
+//go:cgo_import_dynamic _ _ "libm.so.6"
+var purego_ceilf uintptr
diff --git a/vendor/github.com/DataDog/go-libddwaf/waf.go b/vendor/github.com/DataDog/go-libddwaf/waf.go
new file mode 100644
index 0000000000..d7dd53dcb0
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/waf.go
@@ -0,0 +1,130 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package waf
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+)
+
+// UnsupportedTargetError is a wrapper error type helping to handle the error
+// case of trying to execute this package on an unsupported target environment.
+type UnsupportedTargetError struct {
+	error
+}
+
+// Unwrap the error and return it.
+// Required by errors.Is and errors.As functions.
+func (e *UnsupportedTargetError) Unwrap() error {
+	return e.error
+}
+
+// RulesetInfo stores the information - provided by the WAF - about WAF rules initialization.
+type RulesetInfo struct {
+	// Number of rules successfully loaded
+	Loaded uint16
+	// Number of rules which failed to parse
+	Failed uint16
+	// Map from an error string to an array of all the rule ids for which
+	// that error was raised. {error: [rule_ids]}
+	Errors map[string][]string
+	// Ruleset version
+	Version string
+}
+
+// Encoder/Decoder errors
+var (
+	errMaxDepth          = errors.New("max depth reached")
+	errUnsupportedValue  = errors.New("unsupported Go value")
+	errInvalidMapKey     = errors.New("invalid WAF object map key")
+	errNilObjectPtr      = errors.New("nil WAF object pointer")
+	errInvalidObjectType = errors.New("invalid type encountered when decoding")
+)
+
+// RunError is the type of errors the WAF can return while running.
+type RunError int
+
+// Errors the WAF can return when running it.
+const (
+	ErrInternal RunError = iota + 1
+	ErrInvalidObject
+	ErrInvalidArgument
+	ErrTimeout
+	ErrOutOfMemory
+	ErrEmptyRuleAddresses
+)
+
+// Error returns the string representation of the RunError.
+func (e RunError) Error() string {
+	switch e {
+	case ErrInternal:
+		return "internal waf error"
+	case ErrTimeout:
+		return "waf timeout"
+	case ErrInvalidObject:
+		return "invalid waf object"
+	case ErrInvalidArgument:
+		return "invalid waf argument"
+	case ErrOutOfMemory:
+		return "out of memory"
+	case ErrEmptyRuleAddresses:
+		return "empty rule addresses"
+	default:
+		return fmt.Sprintf("unknown waf error %d", e)
+	}
+}
+
+// Globally dlopen() libddwaf only once because several dlopens (eg. in tests)
+// aren't supported by macOS.
+var (
+	// libddwaf's dynamic library handle and entrypoints
+	wafLib *wafDl
+	// libddwaf's dlopen error if any
+	wafErr      error
+	openWafOnce sync.Once
+)
+
+// Load loads libddwaf's dynamic library. The dynamic library is opened only
+// once by the first call to this function and internally stored globally, and
+// no function is currently provided in this API to close the opened handle.
+// Calling this function is not mandatory and is automatically performed by
+// calls to NewHandle, the entrypoint of libddwaf, but Load is useful in order
+// to explicitly check libddwaf's general health where calling NewHandle doesn't
+// necessarily apply nor is doable.
+// The function returns ok when libddwaf was successfully loaded, along with a
+// non-nil error if any. Note that both ok and err can be set, meaning that
+// libddwaf is usable but some non-critical errors happened, such as failures
+// to remove temporary files. It is safe to continue using libddwaf in such
+// case.
+func Load() (ok bool, err error) {
+	openWafOnce.Do(func() {
+		wafLib, wafErr = newWafDl()
+		if wafErr != nil {
+			return
+		}
+		wafVersion = wafLib.wafGetVersion()
+	})
+
+	return wafLib != nil, wafErr
+}
+
+// SupportsTarget returns true and a nil error when the target host environment
+// is supported by this package and can be further used.
+// Otherwise, it returns false along with an error detailing why.
+func SupportsTarget() (bool, error) {
+	return supportsTarget()
+}
+
+var wafVersion string
+
+// Version returns the version reported by libddwaf.
+// It relies on the dynamic loading of the library, which can fail, in which
+// case an empty string or a previously loaded version, if any, is returned.
+func Version() string {
+	Load()
+	return wafVersion
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/waf_dl.go b/vendor/github.com/DataDog/go-libddwaf/waf_dl.go
new file mode 100644
index 0000000000..0867a38a54
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/waf_dl.go
@@ -0,0 +1,168 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build (linux || darwin) && (amd64 || arm64) && !go1.21
+
+package waf
+
+import (
+	"fmt"
+	"os"
+)
+
+// wafDl is the type wrapper for all C calls to the WAF.
+// It uses libDl to perform the C calls.
+// All calls must go through these one-liner wrappers to be type-safe,
+// since purego calls are not type-safe.
+type wafDl struct {
+	libDl
+
+	Ddwaf_ruleset_info_free  uintptr `dlsym:"ddwaf_ruleset_info_free"`
+	Ddwaf_init               uintptr `dlsym:"ddwaf_init"`
+	Ddwaf_destroy            uintptr `dlsym:"ddwaf_destroy"`
+	Ddwaf_required_addresses uintptr `dlsym:"ddwaf_required_addresses"`
+	Ddwaf_get_version        uintptr `dlsym:"ddwaf_get_version"`
+	Ddwaf_context_init       uintptr `dlsym:"ddwaf_context_init"`
+	Ddwaf_context_destroy    uintptr `dlsym:"ddwaf_context_destroy"`
+	Ddwaf_result_free        uintptr `dlsym:"ddwaf_result_free"`
+	Ddwaf_run                uintptr `dlsym:"ddwaf_run"`
+}
+
+func dumpWafLibrary() (*os.File, error) {
+	file, err := os.CreateTemp("", "libddwaf-*.so")
+	if err != nil {
+		return nil, fmt.Errorf("Error creating temp file: %w", err)
+	}
+
+	if err := os.WriteFile(file.Name(), libddwaf, 0400); err != nil {
+		return nil, fmt.Errorf("Error writing file: %w", err)
+	}
+
+	return file, nil
+}
+
+// newWafDl loads the libddwaf shared library along with all the needed symbols.
+// The returned dynamic library handle dl can be non-nil even with a returned
+// error, meaning that the dynamic library handle can be used but some errors
+// happened in the last internal steps following the successful call to
+// dlopen().
+func newWafDl() (dl *wafDl, err error) {
+	file, err := dumpWafLibrary()
+	if err != nil {
+		return nil, err
+	}
+	fName := file.Name()
+	defer func() {
+		rmErr := os.Remove(fName)
+		if rmErr != nil {
+			if err == nil {
+				err = rmErr
+			} else {
+				// TODO: rely on errors.Join() once go1.20 is our min supported Go version
+				err = fmt.Errorf("%w; along with an error while removing %s: %v", err, fName, rmErr)
+			}
+		}
+	}()
+
+	var waf wafDl
+	if err := dlOpen(fName, &waf); err != nil {
+		return nil, fmt.Errorf("error while opening libddwaf library at %s: %w", fName, err)
+	}
+	defer func() {
+		closeErr := file.Close()
+		if closeErr != nil {
+			if err == nil {
+				err = closeErr
+			} else {
+				// TODO: rely on errors.Join() once go1.20 is our min supported Go version
+				err = fmt.Errorf("%w; along with an error while closing the shared libddwaf library file: %v", err, closeErr)
+			}
+		}
+	}()
+
+	// Try calling the waf to make sure everything is fine
+	err = tryCall(func() error {
+		waf.wafGetVersion()
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return &waf, nil
+}
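The two TODOs in the deferred blocks above point at errors.Join. A hypothetical helper sketching that shape once go1.20 is the minimum supported version; removeAndJoin is not part of the vendored file:

	package waf

	import (
		"errors"
		"fmt"
		"os"
	)

	// removeAndJoin removes path and joins any removal failure with err,
	// keeping both errors instead of flattening one into a string.
	func removeAndJoin(err error, path string) error {
		if rmErr := os.Remove(path); rmErr != nil {
			return errors.Join(err, fmt.Errorf("error while removing %s: %w", path, rmErr))
		}
		return err
	}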
+
+// wafGetVersion returned string is a static string so we do not need to free it
+func (waf *wafDl) wafGetVersion() string {
+	return gostring(cast[byte](waf.syscall(waf.Ddwaf_get_version)))
+}
+
+func (waf *wafDl) wafInit(obj *wafObject, config *wafConfig, info *wafRulesetInfo) wafHandle {
+	handle := wafHandle(waf.syscall(waf.Ddwaf_init, ptrToUintptr(obj), ptrToUintptr(config), ptrToUintptr(info)))
+	keepAlive(obj)
+	keepAlive(config)
+	keepAlive(info)
+	return handle
+}
+
+func (waf *wafDl) wafRulesetInfoFree(info *wafRulesetInfo) {
+	waf.syscall(waf.Ddwaf_ruleset_info_free, ptrToUintptr(info))
+	keepAlive(info)
+}
+
+func (waf *wafDl) wafDestroy(handle wafHandle) {
+	waf.syscall(waf.Ddwaf_destroy, uintptr(handle))
+	keepAlive(handle)
+}
+
+// wafRequiredAddresses returns static strings so we do not need to free them
+func (waf *wafDl) wafRequiredAddresses(handle wafHandle) []string {
+	var nbAddresses uint32
+
+	arrayVoidC := waf.syscall(waf.Ddwaf_required_addresses, uintptr(handle), ptrToUintptr(&nbAddresses))
+	if arrayVoidC == 0 {
+		return nil
+	}
+
+	addresses := make([]string, int(nbAddresses))
+	for i := 0; i < int(nbAddresses); i++ {
+		addresses[i] = gostring(*castWithOffset[*byte](arrayVoidC, uint64(i)))
+	}
+
+	keepAlive(&nbAddresses)
+	keepAlive(handle)
+
+	return addresses
+}
+
+func (waf *wafDl) wafContextInit(handle wafHandle) wafContext {
+	ctx := wafContext(waf.syscall(waf.Ddwaf_context_init, uintptr(handle)))
+	keepAlive(handle)
+	return ctx
+}
+
+func (waf *wafDl) wafContextDestroy(context wafContext) {
+	waf.syscall(waf.Ddwaf_context_destroy, uintptr(context))
+	keepAlive(context)
+}
+
+func (waf *wafDl) wafResultFree(result *wafResult) {
+	waf.syscall(waf.Ddwaf_result_free, ptrToUintptr(result))
+	keepAlive(result)
+}
+
+func (waf *wafDl) wafRun(context wafContext, obj *wafObject, result *wafResult, timeout uint64) wafReturnCode {
+	rc := wafReturnCode(waf.syscall(waf.Ddwaf_run, uintptr(context), ptrToUintptr(obj), ptrToUintptr(result), uintptr(timeout)))
+	keepAlive(context)
+	keepAlive(obj)
+	keepAlive(result)
+	keepAlive(timeout)
+	return rc
+}
+
+// Implement SupportsTarget()
+func supportsTarget() (bool, error) {
+	return true, nil
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/waf_dl_unsupported.go b/vendor/github.com/DataDog/go-libddwaf/waf_dl_unsupported.go
new file mode 100644
index 0000000000..f603ec5f9a
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/waf_dl_unsupported.go
@@ -0,0 +1,54 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Build when the target OS or architecture is not supported, or when the Go version is too recent (go1.21+)
+//go:build (!linux && !darwin) || (!amd64 && !arm64) || go1.21
+
+package waf
+
+type wafDl struct{}
+
+func newWafDl() (dl *wafDl, err error) {
+	return nil, unsupportedTargetErr
+}
+
+func (waf *wafDl) wafGetVersion() string {
+	return ""
+}
+
+func (waf *wafDl) wafInit(obj *wafObject, config *wafConfig, info *wafRulesetInfo) wafHandle {
+	return 0
+}
+
+func (waf *wafDl) wafRulesetInfoFree(info *wafRulesetInfo) {
+}
+
+func (waf *wafDl) wafDestroy(handle wafHandle) {
+}
+
+func (waf *wafDl) wafRequiredAddresses(handle wafHandle) []string {
+	return nil
+}
+
+func (waf *wafDl) wafContextInit(handle wafHandle) wafContext {
+	return 0
+}
+
+func (waf *wafDl) wafContextDestroy(context wafContext) {
+}
+
+func (waf *wafDl) wafResultFree(result *wafResult) {
+}
+
+func (waf *wafDl) wafRun(context wafContext, obj *wafObject, result *wafResult, timeout uint64) wafReturnCode {
+	return wafErrInternal
+}
+
+// Implement SupportsTarget()
+func supportsTarget() (bool, error) {
+	// TODO: provide finer-grained unsupported target error message giving the
+	//    exact reason why
+	return false, unsupportedTargetErr
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/waf_unsupported_go.go b/vendor/github.com/DataDog/go-libddwaf/waf_unsupported_go.go
new file mode 100644
index 0000000000..e47dd61498
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/waf_unsupported_go.go
@@ -0,0 +1,17 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Supported OS/Arch but unsupported Go version
+//           Supported OS        Supported Arch     Bad Go Version
+//go:build (linux || darwin) && (amd64 || arm64) && go1.21
+
+package waf
+
+import (
+	"fmt"
+	"runtime"
+)
+
+var unsupportedTargetErr = &UnsupportedTargetError{fmt.Errorf("the Go version %s is not supported", runtime.Version())}
diff --git a/vendor/github.com/DataDog/go-libddwaf/waf_unsupported_target.go b/vendor/github.com/DataDog/go-libddwaf/waf_unsupported_target.go
new file mode 100644
index 0000000000..bd653e7661
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/waf_unsupported_target.go
@@ -0,0 +1,17 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Unsupported target OS or architecture on a supported Go version
+//            Unsupported OS        Unsupported Arch      Good Go Version
+//go:build ((!linux && !darwin) || (!amd64 && !arm64)) && !go1.21
+
+package waf
+
+import (
+	"fmt"
+	"runtime"
+)
+
+var unsupportedTargetErr = &UnsupportedTargetError{fmt.Errorf("the target operating-system %s or architecture %s are not supported", runtime.GOOS, runtime.GOARCH)}
diff --git a/vendor/github.com/DataDog/go-tuf/LICENSE b/vendor/github.com/DataDog/go-tuf/LICENSE
new file mode 100644
index 0000000000..38163dd4bd
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Prime Directive, Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/DataDog/go-tuf/client/client.go b/vendor/github.com/DataDog/go-tuf/client/client.go
new file mode 100644
index 0000000000..8715e0f285
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/client/client.go
@@ -0,0 +1,997 @@
+package client
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+
+	"github.com/DataDog/go-tuf/data"
+	"github.com/DataDog/go-tuf/internal/roles"
+	"github.com/DataDog/go-tuf/util"
+	"github.com/DataDog/go-tuf/verify"
+)
+
+const (
+	// This is the upper limit in bytes we will use to limit the download
+	// size of the root/timestamp roles, since we might not know how
+	// big they are.
+	defaultRootDownloadLimit      = 512000
+	defaultTimestampDownloadLimit = 16384
+	defaultMaxDelegations         = 32
+	defaultMaxRootRotations       = 1e3
+)
+
+// LocalStore is local storage for downloaded top-level metadata.
+type LocalStore interface {
+	io.Closer
+
+	// GetMeta returns top-level metadata from local storage. The keys are
+	// in the form `ROLE.json`, with ROLE being a valid top-level role.
+	GetMeta() (map[string]json.RawMessage, error)
+
+	// SetMeta persists the given top-level metadata in local storage, the
+	// name taking the same format as the keys returned by GetMeta.
+	SetMeta(name string, meta json.RawMessage) error
+
+	// DeleteMeta deletes the given metadata from local storage.
+	DeleteMeta(name string) error
+}
+
+// RemoteStore downloads top-level metadata and target files from a remote
+// repository.
+type RemoteStore interface {
+	// GetMeta downloads the given metadata from remote storage.
+	//
+	// `name` is the filename of the metadata (e.g. "root.json")
+	//
+	// `err` is ErrNotFound if the given file does not exist.
+	//
+	// `size` is the size of the stream, -1 indicating an unknown length.
+	GetMeta(name string) (stream io.ReadCloser, size int64, err error)
+
+	// GetTarget downloads the given target file from remote storage.
+	//
+	// `path` is the path of the file relative to the root of the remote
+	//        targets directory (e.g. "/path/to/file.txt").
+	//
+	// `err` is ErrNotFound if the given file does not exist.
+	//
+	// `size` is the size of the stream, -1 indicating an unknown length.
+	GetTarget(path string) (stream io.ReadCloser, size int64, err error)
+}
+
+// Client provides methods for fetching updates from a remote repository and
+// downloading remote target files.
+type Client struct {
+	local  LocalStore
+	remote RemoteStore
+
+	// The following four fields represent the versions of metadata either
+	// from local storage or from recently downloaded metadata
+	rootVer      int64
+	targetsVer   int64
+	snapshotVer  int64
+	timestampVer int64
+
+	// targets is the list of available targets, either from local storage
+	// or from recently downloaded targets metadata
+	targets data.TargetFiles
+
+	// localMeta is the raw metadata from local storage and is used to
+	// check whether remote metadata is present locally
+	localMeta map[string]json.RawMessage
+
+	// db is a key DB used for verifying metadata
+	db *verify.DB
+
+	// consistentSnapshot indicates whether the remote storage is using
+	// consistent snapshots (as specified in root.json)
+	consistentSnapshot bool
+
+	// MaxDelegations limits the number of delegations visited for any
+	// target; it defaults to defaultMaxDelegations.
+	MaxDelegations int
+
+	// MaxRootRotations limits the number of root metadata files downloaded
+	// during the 1.0.19 root update process.
+	MaxRootRotations int
+}
+
+func NewClient(local LocalStore, remote RemoteStore) *Client {
+	return &Client{
+		local:            local,
+		remote:           remote,
+		MaxDelegations:   defaultMaxDelegations,
+		MaxRootRotations: defaultMaxRootRotations,
+	}
+}
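A short sketch of constructing a Client from the stores defined later in this diff (MemoryLocalStore and HTTPRemoteStore); the repository URL is a placeholder and the snippet assumes it runs inside a function returning an error:

	remote, err := client.HTTPRemoteStore("https://tuf.example.com/repo", nil, nil)
	if err != nil {
		return err
	}
	c := client.NewClient(client.MemoryLocalStore(), remote)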
+
+// Init initializes a local repository.
+//
+// The latest root.json is fetched from remote storage, verified using rootKeys
+// and threshold, and then saved in local storage. It is expected that rootKeys
+// were securely distributed with the software being updated.
+//
+// Deprecated: Use c.InitLocal and c.Update to initialize a local repository.
+func (c *Client) Init(rootKeys []*data.PublicKey, threshold int) error {
+	if len(rootKeys) < threshold {
+		return ErrInsufficientKeys
+	}
+	rootJSON, err := c.downloadMetaUnsafe("root.json", defaultRootDownloadLimit)
+	if err != nil {
+		return err
+	}
+
+	// create a new key database, and add all the public `rootKeys` to it.
+	c.db = verify.NewDB()
+	rootKeyIDs := make([]string, 0, len(rootKeys))
+	for _, key := range rootKeys {
+		for _, id := range key.IDs() {
+			rootKeyIDs = append(rootKeyIDs, id)
+			if err := c.db.AddKey(id, key); err != nil {
+				return err
+			}
+		}
+	}
+
+	// add a mock "root" role that trusts the passed in key ids. These keys
+	// will be used to verify the `root.json` we just fetched.
+	role := &data.Role{Threshold: threshold, KeyIDs: rootKeyIDs}
+	if err := c.db.AddRole("root", role); err != nil {
+		return err
+	}
+
+	// verify that the new root is valid.
+	if err := c.decodeRoot(rootJSON); err != nil {
+		return err
+	}
+
+	return c.local.SetMeta("root.json", rootJSON)
+}
+
+// InitLocal initializes a local repository from root metadata.
+//
+// The root's keys are extracted from the root and saved in local storage.
+// Root expiration is not checked.
+// It is expected that rootJSON was securely distributed with the software
+// being updated.
+func (c *Client) InitLocal(rootJSON []byte) error {
+	err := c.loadAndVerifyRootMeta(rootJSON, true /*ignoreExpiredCheck*/)
+	if err != nil {
+		return err
+	}
+	return c.local.SetMeta("root.json", rootJSON)
+}
+
+// Update downloads and verifies remote metadata and returns updated targets.
+// It always performs the root update (sections 5.2 and 5.3 of the v1.0.19 spec).
+//
+// https://DataDog.github.io/specification/v1.0.19/index.html#load-trusted-root
+func (c *Client) Update() (data.TargetFiles, error) {
+	if err := c.UpdateRoots(); err != nil {
+		if _, ok := err.(verify.ErrExpired); ok {
+			// For backward compatibility, we wrap the ErrExpired inside
+			// ErrDecodeFailed.
+			return nil, ErrDecodeFailed{"root.json", err}
+		}
+		return nil, err
+	}
+
+	// Load trusted metadata files, if any, and verify them against the latest root
+	c.getLocalMeta()
+
+	// 5.4.1 - Download the timestamp metadata
+	timestampJSON, err := c.downloadMetaUnsafe("timestamp.json", defaultTimestampDownloadLimit)
+	if err != nil {
+		return nil, err
+	}
+	// 5.4.(2,3 and 4) - Verify timestamp against various attacks
+	// Returns the extracted snapshot metadata
+	snapshotMeta, err := c.decodeTimestamp(timestampJSON)
+	if err != nil {
+		return nil, err
+	}
+	// 5.4.5 - Persist the timestamp metadata
+	if err := c.local.SetMeta("timestamp.json", timestampJSON); err != nil {
+		return nil, err
+	}
+
+	// 5.5.1 - Download snapshot metadata
+	// 5.5.2 and 5.5.4 - Check against timestamp role's snapshot hash and version
+	snapshotJSON, err := c.downloadMetaFromTimestamp("snapshot.json", snapshotMeta)
+	if err != nil {
+		return nil, err
+	}
+	// 5.5.(3,5 and 6) - Verify snapshot against various attacks
+	// Returns the extracted metadata files
+	snapshotMetas, err := c.decodeSnapshot(snapshotJSON)
+	if err != nil {
+		return nil, err
+	}
+	// 5.5.7 - Persist snapshot metadata
+	if err := c.local.SetMeta("snapshot.json", snapshotJSON); err != nil {
+		return nil, err
+	}
+
+	// If we don't have the targets.json, download it, determine updated
+	// targets and save targets.json in local storage
+	var updatedTargets data.TargetFiles
+	targetsMeta := snapshotMetas["targets.json"]
+	if !c.hasMetaFromSnapshot("targets.json", targetsMeta) {
+		// 5.6.1 - Download the top-level targets metadata file
+		// 5.6.2 and 5.6.4 - Check against snapshot role's targets hash and version
+		targetsJSON, err := c.downloadMetaFromSnapshot("targets.json", targetsMeta)
+		if err != nil {
+			return nil, err
+		}
+		// 5.6.(3 and 5) - Verify signatures and check against freeze attack
+		updatedTargets, err = c.decodeTargets(targetsJSON)
+		if err != nil {
+			return nil, err
+		}
+		// 5.6.6 - Persist targets metadata
+		if err := c.local.SetMeta("targets.json", targetsJSON); err != nil {
+			return nil, err
+		}
+	}
+
+	return updatedTargets, nil
+}
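A hedged sketch of the typical flow around Update: establish trust from a root.json distributed out of band, then fetch, verify, and enumerate the updated targets. The variables c and rootJSON are assumed from the caller's context and error handling is trimmed:

	if err := c.InitLocal(rootJSON); err != nil { // rootJSON shipped with the software
		return err
	}
	updated, err := c.Update()
	if err != nil {
		return err
	}
	for path, meta := range updated {
		fmt.Printf("updated target %s (%d bytes)\n", path, meta.Length)
	}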
+
+func (c *Client) UpdateRoots() error {
+	// https://DataDog.github.io/specification/v1.0.19/index.html#load-trusted-root
+	// 5.2 Load the trusted root metadata file. We assume that a good,
+	// trusted copy of this file was shipped with the package manager
+	// or software updater using an out-of-band process.
+	if err := c.loadAndVerifyLocalRootMeta( /*ignoreExpiredCheck=*/ true); err != nil {
+		return err
+	}
+	m, err := c.local.GetMeta()
+	if err != nil {
+		return err
+	}
+
+	type KeyInfo struct {
+		KeyIDs    map[string]bool
+		Threshold int
+	}
+
+	// Prepare for 5.3.11: If the timestamp and / or snapshot keys have been rotated,
+	// then delete the trusted timestamp and snapshot metadata files.
+	getKeyInfo := func(role string) KeyInfo {
+		keyIDs := make(map[string]bool)
+		for k := range c.db.GetRole(role).KeyIDs {
+			keyIDs[k] = true
+		}
+		return KeyInfo{keyIDs, c.db.GetRole(role).Threshold}
+	}
+
+	// The nonRootKeyInfo looks like this:
+	// {
+	//	"timestamp": {KeyIDs={"KEYID1": true, "KEYID2": true}, Threshold=2},
+	//	"snapshot": {KeyIDs={"KEYID3": true}, Threshold=1},
+	//	"targets": {KeyIDs={"KEYID4": true, "KEYID5": true, "KEYID6": true}, Threshold=1}
+	// }
+
+	nonRootKeyInfo := map[string]KeyInfo{"timestamp": {}, "snapshot": {}, "targets": {}}
+	for k := range nonRootKeyInfo {
+		nonRootKeyInfo[k] = getKeyInfo(k)
+	}
+
+	// 5.3.1 Temporarily turn on consistent snapshots in order to download
+	// versioned root metadata files as described next.
+	consistentSnapshot := c.consistentSnapshot
+	c.consistentSnapshot = true
+
+	nRootMetadata := m["root.json"]
+
+	// https://DataDog.github.io/specification/v1.0.19/index.html#update-root
+
+	// 5.3.1 Since it may now be signed using entirely different keys,
+	// the client MUST somehow be able to establish a trusted line of
+	// continuity to the latest set of keys (see § 6.1 Key
+	// management and migration). To do so, the client MUST
+	// download intermediate root metadata files, until the
+	// latest available one is reached. Therefore, it MUST
+	// temporarily turn on consistent snapshots in order to
+	// download versioned root metadata files as described next.
+
+	// This loop returns on error or breaks after downloading the latest root metadata.
+	// 5.3.2 Let N denote the version number of the trusted root metadata file.
+	for i := 0; i < c.MaxRootRotations; i++ {
+		// 5.3.3 Try downloading version nPlusOne of the root metadata file.
+		// NOTE: as a side effect, we do update c.rootVer to nPlusOne between iterations.
+		nPlusOne := c.rootVer + 1
+		nPlusOneRootPath := util.VersionedPath("root.json", nPlusOne)
+		nPlusOneRootMetadata, err := c.downloadMetaUnsafe(nPlusOneRootPath, defaultRootDownloadLimit)
+
+		if err != nil {
+			if _, ok := err.(ErrMissingRemoteMetadata); ok {
+				// stop when the next root can't be downloaded
+				break
+			}
+			return err
+		}
+
+		// 5.3.4 Check for an arbitrary software attack.
+		// 5.3.4.1 Check that N signed N+1
+		nPlusOneRootMetadataSigned, err := c.verifyRoot(nRootMetadata, nPlusOneRootMetadata)
+		if err != nil {
+			return err
+		}
+
+		// 5.3.4.2 check that N+1 signed itself.
+		if _, err := c.verifyRoot(nPlusOneRootMetadata, nPlusOneRootMetadata); err != nil {
+			// 5.3.6 Note that the expiration of the new (intermediate) root
+			// metadata file does not matter yet, because we will check for
+			// it in step 5.3.10.
+			return err
+		}
+
+		// 5.3.5 Check for a rollback attack. Here, we check that nPlusOneRootMetadataSigned.version == nPlusOne.
+		if nPlusOneRootMetadataSigned.Version != nPlusOne {
+			return verify.ErrWrongVersion{
+				Given:    nPlusOneRootMetadataSigned.Version,
+				Expected: nPlusOne,
+			}
+		}
+
+		// 5.3.7 Set the trusted root metadata file to the new root metadata file.
+		c.rootVer = nPlusOneRootMetadataSigned.Version
+		// NOTE: following up on 5.3.1, we want to always have consistent snapshots on for the duration
+		// of root rotation. AFTER the rotation is over, we will set it to the value of the last root.
+		consistentSnapshot = nPlusOneRootMetadataSigned.ConsistentSnapshot
+		// 5.3.8 Persist root metadata. The client MUST write the file to non-volatile storage as FILENAME.EXT (e.g. root.json).
+		// NOTE: Internally, setMeta stores metadata in LevelDB in a persistent manner.
+		if err := c.local.SetMeta("root.json", nPlusOneRootMetadata); err != nil {
+			return err
+		}
+		nRootMetadata = nPlusOneRootMetadata
+		// 5.3.9 Repeat steps 5.3.2 to 5.3.9
+
+	} // End of the for loop.
+
+	// 5.3.10 Check for a freeze attack.
+	// NOTE: This will check for any, including freeze, attack.
+	if err := c.loadAndVerifyLocalRootMeta( /*ignoreExpiredCheck=*/ false); err != nil {
+		return err
+	}
+
+	countDeleted := func(s1 map[string]bool, s2 map[string]bool) int {
+		c := 0
+		for k := range s1 {
+			if _, ok := s2[k]; !ok {
+				c++
+			}
+		}
+		return c
+	}
+
+	// 5.3.11 To recover from fast-forward attack, certain metadata files need
+	// to be deleted if a threshold of keys are revoked.
+	// List of metadata that should be deleted per role if a threshold of keys
+	// are revoked:
+	// (based on the ongoing PR: https://github.com/mnm678/specification/tree/e50151d9df632299ddea364c4f44fe8ca9c10184)
+	// timestamp -> delete timestamp.json
+	// snapshot ->  delete timestamp.json and snapshot.json
+	// targets ->   delete snapshot.json and targets.json
+	//
+	// nonRootKeyInfo contains the keys and thresholds from root.json
+	// that were on disk before the root update process begins.
+	for topLevelRolename := range nonRootKeyInfo {
+		// ki contains the keys and thresholds from the latest downloaded root.json.
+		ki := getKeyInfo(topLevelRolename)
+		if countDeleted(nonRootKeyInfo[topLevelRolename].KeyIDs, ki.KeyIDs) >= nonRootKeyInfo[topLevelRolename].Threshold {
+			deleteMeta := map[string][]string{
+				"timestamp": {"timestamp.json"},
+				"snapshot":  {"timestamp.json", "snapshot.json"},
+				"targets":   {"snapshot.json", "targets.json"},
+			}
+
+			for _, r := range deleteMeta[topLevelRolename] {
+				c.local.DeleteMeta(r)
+			}
+		}
+	}
+
+	// 5.3.12 Set whether consistent snapshots are used as per the trusted root metadata file.
+	c.consistentSnapshot = consistentSnapshot
+	return nil
+}
+
+// getLocalMeta decodes and verifies metadata from local storage.
+// The verification of local files is purely for consistency, if an attacker
+// has compromised the local storage, there is no guarantee it can be trusted.
+// Before trying to load the metadata files, it clears the in-memory copy of the local metadata.
+// This is to ensure that all of the loaded metadata files at the end are indeed verified by the latest root.
+// If some of the metadata files fail to load, it proceeds with trying to load the rest,
+// but still returns an error at the end if any failure occurred. Otherwise it returns nil.
+func (c *Client) getLocalMeta() error {
+	var retErr error
+	loadFailed := false
+	// Clear the in-memory copy of the local metadata. The goal is to reload and take into account
+	// only the metadata files that are verified by the latest root. Otherwise, their content should
+	// be ignored.
+	c.localMeta = make(map[string]json.RawMessage)
+
+	// Load the latest root meta
+	if err := c.loadAndVerifyLocalRootMeta( /*ignoreExpiredCheck=*/ false); err != nil {
+		return err
+	}
+
+	// Load into memory the existing meta, if any, from the local storage
+	meta, err := c.local.GetMeta()
+	if err != nil {
+		return nil
+	}
+
+	// Verify the top-level metadata (timestamp, snapshot and targets) against the latest root and load it, if okay
+	if timestampJSON, ok := meta["timestamp.json"]; ok {
+		timestamp := &data.Timestamp{}
+		if err := c.db.UnmarshalTrusted(timestampJSON, timestamp, "timestamp"); err != nil {
+			loadFailed = true
+			retErr = err
+		} else {
+			c.localMeta["timestamp.json"] = meta["timestamp.json"]
+			c.timestampVer = timestamp.Version
+		}
+	}
+
+	if snapshotJSON, ok := meta["snapshot.json"]; ok {
+		snapshot := &data.Snapshot{}
+		if err := c.db.UnmarshalTrusted(snapshotJSON, snapshot, "snapshot"); err != nil {
+			loadFailed = true
+			retErr = err
+		} else {
+			c.localMeta["snapshot.json"] = meta["snapshot.json"]
+			c.snapshotVer = snapshot.Version
+		}
+	}
+
+	if targetsJSON, ok := meta["targets.json"]; ok {
+		targets := &data.Targets{}
+		if err := c.db.UnmarshalTrusted(targetsJSON, targets, "targets"); err != nil {
+			loadFailed = true
+			retErr = err
+		} else {
+			c.localMeta["targets.json"] = meta["targets.json"]
+			c.targetsVer = targets.Version
+			// FIXME(TUF-0.9) temporarily support files with leading path separators.
+			// c.targets = targets.Targets
+			c.loadTargets(targets.Targets)
+		}
+	}
+
+	for fileName := range meta {
+		if roles.IsDelegatedTargetsManifest(fileName) {
+			c.localMeta[fileName] = meta[fileName]
+		}
+	}
+
+	if loadFailed {
+		// If any of the metadata failed to be verified, return the reason for that failure
+		return retErr
+	}
+	return nil
+}
+
+// loadAndVerifyLocalRootMeta decodes and verifies root metadata from
+// local storage and loads the top-level keys. This method first clears
+// the DB for top-level keys and then loads the new keys.
+func (c *Client) loadAndVerifyLocalRootMeta(ignoreExpiredCheck bool) error {
+	meta, err := c.local.GetMeta()
+	if err != nil {
+		return err
+	}
+	rootJSON, ok := meta["root.json"]
+	if !ok {
+		return ErrNoRootKeys
+	}
+	return c.loadAndVerifyRootMeta(rootJSON, ignoreExpiredCheck)
+}
+
+// loadAndVerifyRootMeta decodes and verifies root metadata and loads the top-level keys.
+// This method first clears the DB for top-level keys and then loads the new keys.
+func (c *Client) loadAndVerifyRootMeta(rootJSON []byte, ignoreExpiredCheck bool) error {
+	// unmarshal root.json without verifying as we need the root
+	// keys first
+	s := &data.Signed{}
+	if err := json.Unmarshal(rootJSON, s); err != nil {
+		return err
+	}
+	root := &data.Root{}
+	if err := json.Unmarshal(s.Signed, root); err != nil {
+		return err
+	}
+	ndb := verify.NewDB()
+	for id, k := range root.Keys {
+		if err := ndb.AddKey(id, k); err != nil {
+			// TUF is considering in TAP-12 removing the
+			// requirement that the keyid hash algorithm be derived
+			// from the public key. So to be forwards compatible,
+			// we ignore `ErrWrongID` errors.
+			//
+			// TAP-12: https://github.com/DataDog/taps/blob/master/tap12.md
+			if _, ok := err.(verify.ErrWrongID); !ok {
+				return err
+			}
+		}
+	}
+	for name, role := range root.Roles {
+		if err := ndb.AddRole(name, role); err != nil {
+			return err
+		}
+	}
+	// Any trusted local root metadata version must be greater than 0.
+	if ignoreExpiredCheck {
+		if err := ndb.VerifyIgnoreExpiredCheck(s, "root", 0); err != nil {
+			return err
+		}
+	} else {
+		if err := ndb.Verify(s, "root", 0); err != nil {
+			return err
+		}
+	}
+	c.consistentSnapshot = root.ConsistentSnapshot
+	c.rootVer = root.Version
+	c.db = ndb
+	return nil
+}
+
+// verifyRoot verifies Signed section of the bJSON
+// using verification keys in aJSON.
+func (c *Client) verifyRoot(aJSON []byte, bJSON []byte) (*data.Root, error) {
+	aSigned := &data.Signed{}
+	if err := json.Unmarshal(aJSON, aSigned); err != nil {
+		return nil, err
+	}
+	aRoot := &data.Root{}
+	if err := json.Unmarshal(aSigned.Signed, aRoot); err != nil {
+		return nil, err
+	}
+
+	bSigned := &data.Signed{}
+	if err := json.Unmarshal(bJSON, bSigned); err != nil {
+		return nil, err
+	}
+	bRoot := &data.Root{}
+	if err := json.Unmarshal(bSigned.Signed, bRoot); err != nil {
+		return nil, err
+	}
+
+	ndb := verify.NewDB()
+	for id, k := range aRoot.Keys {
+		if err := ndb.AddKey(id, k); err != nil {
+			// TUF is considering in TAP-12 removing the
+			// requirement that the keyid hash algorithm be derived
+			// from the public key. So to be forwards compatible,
+			// we ignore `ErrWrongID` errors.
+			//
+			// TAP-12: https://github.com/DataDog/taps/blob/master/tap12.md
+			if _, ok := err.(verify.ErrWrongID); !ok {
+				return nil, err
+			}
+		}
+	}
+	for name, role := range aRoot.Roles {
+		if err := ndb.AddRole(name, role); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := ndb.VerifySignatures(bSigned, "root"); err != nil {
+		return nil, err
+	}
+	return bRoot, nil
+}
+
+// FIXME(TUF-0.9) TUF is considering removing support for target files starting
+// with a leading path separator. In order to be backwards compatible, we'll
+// just remove leading separators for now.
+func (c *Client) loadTargets(targets data.TargetFiles) {
+	c.targets = make(data.TargetFiles)
+	for name, meta := range targets {
+		c.targets[name] = meta
+		c.targets[util.NormalizeTarget(name)] = meta
+	}
+}
+
+// downloadMetaUnsafe downloads top-level metadata from remote storage without
+// verifying its length and hashes (used for example to download timestamp.json,
+// which has an unknown size). It will download at most maxMetaSize bytes.
+func (c *Client) downloadMetaUnsafe(name string, maxMetaSize int64) ([]byte, error) {
+	r, size, err := c.remote.GetMeta(name)
+	if err != nil {
+		if IsNotFound(err) {
+			return nil, ErrMissingRemoteMetadata{name}
+		}
+		return nil, ErrDownloadFailed{name, err}
+	}
+	defer r.Close()
+
+	// return ErrMetaTooLarge if the reported size is greater than maxMetaSize
+	if size > maxMetaSize {
+		return nil, ErrMetaTooLarge{name, size, maxMetaSize}
+	}
+
+	// although the size has been checked above, use a LimitReader in case
+	// the reported size is inaccurate, or size is -1 which indicates an
+	// unknown length
+	return ioutil.ReadAll(io.LimitReader(r, maxMetaSize))
+}
+
+// remoteGetFunc is the type of function the download method uses to download
+// remote files
+type remoteGetFunc func(string) (io.ReadCloser, int64, error)
+
+// downloadHashed tries to download the hashed prefixed version of the file.
+func (c *Client) downloadHashed(file string, get remoteGetFunc, hashes data.Hashes) (io.ReadCloser, int64, error) {
+	// try each hashed path in turn, and either return the contents,
+	// try the next one if a 404 is returned, or return an error
+	for _, path := range util.HashedPaths(file, hashes) {
+		r, size, err := get(path)
+		if err != nil {
+			if IsNotFound(err) {
+				continue
+			}
+			return nil, 0, err
+		}
+		return r, size, nil
+	}
+	return nil, 0, ErrNotFound{file}
+}
+
+// download downloads the given target file from remote storage using the get
+// function, adding hashes to the path if consistent snapshots are in use
+func (c *Client) downloadTarget(file string, get remoteGetFunc, hashes data.Hashes) (io.ReadCloser, int64, error) {
+	if c.consistentSnapshot {
+		return c.downloadHashed(file, get, hashes)
+	} else {
+		return get(file)
+	}
+}
+
+// downloadMeta downloads top-level metadata from remote storage and
+// verifies it using the given file metadata.
+func (c *Client) downloadMeta(name string, version int64, m data.FileMeta) ([]byte, error) {
+	r, size, err := func() (io.ReadCloser, int64, error) {
+		if c.consistentSnapshot {
+			path := util.VersionedPath(name, version)
+			r, size, err := c.remote.GetMeta(path)
+			if err == nil {
+				return r, size, nil
+			}
+
+			return nil, 0, err
+		} else {
+			return c.remote.GetMeta(name)
+		}
+	}()
+	if err != nil {
+		if IsNotFound(err) {
+			return nil, ErrMissingRemoteMetadata{name}
+		}
+		return nil, err
+	}
+	defer r.Close()
+
+	// return ErrWrongSize if the reported size is known and incorrect
+	var stream io.Reader
+	if m.Length != 0 {
+		if size >= 0 && size != m.Length {
+			return nil, ErrWrongSize{name, size, m.Length}
+		}
+
+		// wrap the data in a LimitReader so we download at most m.Length bytes
+		stream = io.LimitReader(r, m.Length)
+	} else {
+		stream = r
+	}
+
+	return ioutil.ReadAll(stream)
+}
+
+func (c *Client) downloadMetaFromSnapshot(name string, m data.SnapshotFileMeta) ([]byte, error) {
+	b, err := c.downloadMeta(name, m.Version, m.FileMeta)
+	if err != nil {
+		return nil, err
+	}
+
+	meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.HashAlgorithms()...)
+	if err != nil {
+		return nil, err
+	}
+	// 5.6.2 and 5.6.4 - Check against snapshot role's targets hash and version
+	if err := util.SnapshotFileMetaEqual(meta, m); err != nil {
+		return nil, ErrDownloadFailed{name, err}
+	}
+	return b, nil
+}
+
+func (c *Client) downloadMetaFromTimestamp(name string, m data.TimestampFileMeta) ([]byte, error) {
+	b, err := c.downloadMeta(name, m.Version, m.FileMeta)
+	if err != nil {
+		return nil, err
+	}
+
+	meta, err := util.GenerateTimestampFileMeta(bytes.NewReader(b), m.HashAlgorithms()...)
+	if err != nil {
+		return nil, err
+	}
+	// 5.5.2 and 5.5.4 - Check against timestamp role's snapshot hash and version
+	if err := util.TimestampFileMetaEqual(meta, m); err != nil {
+		return nil, ErrDownloadFailed{name, err}
+	}
+	return b, nil
+}
+
+// decodeRoot decodes and verifies root metadata.
+func (c *Client) decodeRoot(b json.RawMessage) error {
+	root := &data.Root{}
+	if err := c.db.Unmarshal(b, root, "root", c.rootVer); err != nil {
+		return ErrDecodeFailed{"root.json", err}
+	}
+	c.rootVer = root.Version
+	c.consistentSnapshot = root.ConsistentSnapshot
+	return nil
+}
+
+// decodeSnapshot decodes and verifies snapshot metadata, and returns the file
+// meta of every role listed in the snapshot.
+func (c *Client) decodeSnapshot(b json.RawMessage) (data.SnapshotFiles, error) {
+	snapshot := &data.Snapshot{}
+	// 5.5.(3 and 6) - Verify it's signed correctly and it's not expired
+	if err := c.db.Unmarshal(b, snapshot, "snapshot", c.snapshotVer); err != nil {
+		return data.SnapshotFiles{}, ErrDecodeFailed{"snapshot.json", err}
+	}
+	// 5.5.5 - Check for top-level targets rollback attack
+	// Verify explicitly that current targets meta version is less than or equal to the new one
+	if snapshot.Meta["targets.json"].Version < c.targetsVer {
+		return data.SnapshotFiles{}, verify.ErrLowVersion{Actual: snapshot.Meta["targets.json"].Version, Current: c.targetsVer}
+	}
+
+	// 5.5.5 - Get the local/trusted snapshot metadata, if any, and check all target metafiles against rollback attack
+	// In case the local snapshot metadata was not verified by the keys in the latest root during getLocalMeta(),
+	// snapshot.json won't be present in c.localMeta and thus this check will not be processed.
+	if snapshotJSON, ok := c.localMeta["snapshot.json"]; ok {
+		currentSnapshot := &data.Snapshot{}
+		if err := c.db.UnmarshalTrusted(snapshotJSON, currentSnapshot, "snapshot"); err != nil {
+			return data.SnapshotFiles{}, err
+		}
+		// 5.5.5 - Check for rollback attacks in both top-level and delegated targets roles (note that the Meta object includes both)
+		for path, local := range currentSnapshot.Meta {
+			if newMeta, ok := snapshot.Meta[path]; ok {
+				// 5.5.5 - Check for rollback attack
+				if newMeta.Version < local.Version {
+					return data.SnapshotFiles{}, verify.ErrLowVersion{Actual: newMeta.Version, Current: local.Version}
+				}
+			} else {
+				// 5.5.5 - Abort the update if a target file has been removed from the new snapshot file
+				return data.SnapshotFiles{}, verify.ErrMissingTargetFile
+			}
+		}
+	}
+	// At this point we can trust the new snapshot, the top-level targets, and any delegated targets versions it refers to
+	// so we can update the client's trusted versions and proceed with persisting the new snapshot metadata
+	// c.snapshotVer was already set when we verified the timestamp metadata
+	c.targetsVer = snapshot.Meta["targets.json"].Version
+	return snapshot.Meta, nil
+}
+
+// decodeTargets decodes and verifies targets metadata, sets c.targets and
+// returns updated targets.
+func (c *Client) decodeTargets(b json.RawMessage) (data.TargetFiles, error) {
+	targets := &data.Targets{}
+	// 5.6.(3 and 5) - Verify signatures and check against freeze attack
+	if err := c.db.Unmarshal(b, targets, "targets", c.targetsVer); err != nil {
+		return nil, ErrDecodeFailed{"targets.json", err}
+	}
+	// Generate a list with the updated targets
+	updatedTargets := make(data.TargetFiles)
+	for path, meta := range targets.Targets {
+		if local, ok := c.targets[path]; ok {
+			if err := util.TargetFileMetaEqual(local, meta); err == nil {
+				continue
+			}
+		}
+		updatedTargets[path] = meta
+	}
+	// c.targetsVer was already updated when we verified the snapshot metadata
+	// FIXME(TUF-0.9) temporarily support files with leading path separators.
+	// c.targets = targets.Targets
+	c.loadTargets(targets.Targets)
+	return updatedTargets, nil
+}
+
+// decodeTimestamp decodes and verifies timestamp metadata, and returns the
+// new snapshot file meta.
+func (c *Client) decodeTimestamp(b json.RawMessage) (data.TimestampFileMeta, error) {
+	timestamp := &data.Timestamp{}
+	if err := c.db.Unmarshal(b, timestamp, "timestamp", c.timestampVer); err != nil {
+		return data.TimestampFileMeta{}, ErrDecodeFailed{"timestamp.json", err}
+	}
+	// 5.4.3.2 - Check for snapshot rollback attack
+	// Verify that the current snapshot meta version is less than or equal to the new one
+	if timestamp.Meta["snapshot.json"].Version < c.snapshotVer {
+		return data.TimestampFileMeta{}, verify.ErrLowVersion{Actual: timestamp.Meta["snapshot.json"].Version, Current: c.snapshotVer}
+	}
+	// At this point we can trust the new timestamp and the snapshot version it refers to
+	// so we can update the client's trusted versions and proceed with persisting the new timestamp
+	c.timestampVer = timestamp.Version
+	c.snapshotVer = timestamp.Meta["snapshot.json"].Version
+	return timestamp.Meta["snapshot.json"], nil
+}
+
+// hasMetaFromSnapshot checks whether local metadata has the given meta
+func (c *Client) hasMetaFromSnapshot(name string, m data.SnapshotFileMeta) bool {
+	_, ok := c.localMetaFromSnapshot(name, m)
+	return ok
+}
+
+// localMetaFromSnapshot returns the local metadata if it matches the snapshot meta
+func (c *Client) localMetaFromSnapshot(name string, m data.SnapshotFileMeta) (json.RawMessage, bool) {
+	b, ok := c.localMeta[name]
+	if !ok {
+		return nil, false
+	}
+	meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.HashAlgorithms()...)
+	if err != nil {
+		return nil, false
+	}
+	err = util.SnapshotFileMetaEqual(meta, m)
+	return b, err == nil
+}
+
+// hasTargetsMeta checks whether local metadata has the given snapshot meta
+//
+//lint:ignore U1000 unused
+func (c *Client) hasTargetsMeta(m data.SnapshotFileMeta) bool {
+	b, ok := c.localMeta["targets.json"]
+	if !ok {
+		return false
+	}
+	meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.HashAlgorithms()...)
+	if err != nil {
+		return false
+	}
+	err = util.SnapshotFileMetaEqual(meta, m)
+	return err == nil
+}
+
+// hasMetaFromTimestamp checks whether local metadata matches the given timestamp meta
+//
+//lint:ignore U1000 unused
+func (c *Client) hasMetaFromTimestamp(name string, m data.TimestampFileMeta) bool {
+	b, ok := c.localMeta[name]
+	if !ok {
+		return false
+	}
+	meta, err := util.GenerateTimestampFileMeta(bytes.NewReader(b), m.HashAlgorithms()...)
+	if err != nil {
+		return false
+	}
+	err = util.TimestampFileMetaEqual(meta, m)
+	return err == nil
+}
+
+type Destination interface {
+	io.Writer
+	Delete() error
+}
+
+// Download downloads the given target file from remote storage into dest.
+//
+// dest will be deleted and an error returned in the following situations:
+//
+//   - The target does not exist in the local targets.json
+//   - Failed to fetch the chain of delegations accessible from local snapshot.json
+//   - The target does not exist in any targets
+//   - Metadata cannot be generated for the downloaded data
+//   - Generated metadata does not match local metadata for the given file
+func (c *Client) Download(name string, dest Destination) (err error) {
+	// delete dest if there is an error
+	defer func() {
+		if err != nil {
+			dest.Delete()
+		}
+	}()
+
+	// populate c.targets from local storage if not set
+	if c.targets == nil {
+		if err := c.getLocalMeta(); err != nil {
+			return err
+		}
+	}
+
+	normalizedName := util.NormalizeTarget(name)
+	localMeta, ok := c.targets[normalizedName]
+	if !ok {
+		// search in delegations
+		localMeta, err = c.getTargetFileMeta(normalizedName)
+		if err != nil {
+			return err
+		}
+	}
+
+	// get the data from remote storage
+	r, size, err := c.downloadTarget(normalizedName, c.remote.GetTarget, localMeta.Hashes)
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	// return ErrWrongSize if the reported size is known and incorrect
+	if size >= 0 && size != localMeta.Length {
+		return ErrWrongSize{name, size, localMeta.Length}
+	}
+
+	// wrap the data in a LimitReader so we download at most localMeta.Length bytes
+	stream := io.LimitReader(r, localMeta.Length)
+
+	// read the data, simultaneously writing it to dest and generating metadata
+	actual, err := util.GenerateTargetFileMeta(io.TeeReader(stream, dest), localMeta.HashAlgorithms()...)
+	if err != nil {
+		return ErrDownloadFailed{name, err}
+	}
+
+	// check the data has the correct length and hashes
+	if err := util.TargetFileMetaEqual(actual, localMeta); err != nil {
+		if e, ok := err.(util.ErrWrongLength); ok {
+			return ErrWrongSize{name, e.Actual, e.Expected}
+		}
+		return ErrDownloadFailed{name, err}
+	}
+
+	return nil
+}
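Download writes into any Destination; a minimal file-backed implementation, assuming the caller wants the verified target on disk. fileDestination is illustrative and not part of the package, and the usage fragment assumes c from the surrounding context:

	type fileDestination struct{ *os.File }

	// Delete removes the partially written file when verification fails.
	func (d fileDestination) Delete() error {
		d.Close()
		return os.Remove(d.Name())
	}

	// usage
	f, err := os.Create("artifact.bin")
	if err != nil {
		return err
	}
	err = c.Download("path/to/artifact.bin", fileDestination{f})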
+
+func (c *Client) VerifyDigest(digest string, digestAlg string, length int64, path string) error {
+	localMeta, ok := c.targets[path]
+	if !ok {
+		return ErrUnknownTarget{Name: path, SnapshotVersion: c.snapshotVer}
+	}
+
+	actual := data.FileMeta{Length: length, Hashes: make(data.Hashes, 1)}
+	var err error
+	actual.Hashes[digestAlg], err = hex.DecodeString(digest)
+	if err != nil {
+		return err
+	}
+
+	if err := util.TargetFileMetaEqual(data.TargetFileMeta{FileMeta: actual}, localMeta); err != nil {
+		if e, ok := err.(util.ErrWrongLength); ok {
+			return ErrWrongSize{path, e.Actual, e.Expected}
+		}
+		return ErrDownloadFailed{path, err}
+	}
+
+	return nil
+}
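VerifyDigest checks an artifact obtained some other way against the trusted targets metadata. A sketch assuming blob holds the artifact bytes and using the standard crypto/sha256 and encoding/hex packages:

	sum := sha256.Sum256(blob)
	err := c.VerifyDigest(hex.EncodeToString(sum[:]), "sha256", int64(len(blob)), "path/to/artifact.bin")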
+
+// Target returns the target metadata for a specific target if it
+// exists, searching from top-level level targets then through
+// all delegations. If it does not, ErrNotFound will be returned.
+func (c *Client) Target(name string) (data.TargetFileMeta, error) {
+	target, err := c.getTargetFileMeta(util.NormalizeTarget(name))
+	if err == nil {
+		return target, nil
+	}
+
+	if _, ok := err.(ErrUnknownTarget); ok {
+		return data.TargetFileMeta{}, ErrNotFound{name}
+	}
+
+	return data.TargetFileMeta{}, err
+}
+
+// Targets returns the complete list of available top-level targets.
+func (c *Client) Targets() (data.TargetFiles, error) {
+	// populate c.targets from local storage if not set
+	if c.targets == nil {
+		if err := c.getLocalMeta(); err != nil {
+			return nil, err
+		}
+	}
+	return c.targets, nil
+}
diff --git a/vendor/github.com/DataDog/go-tuf/client/delegations.go b/vendor/github.com/DataDog/go-tuf/client/delegations.go
new file mode 100644
index 0000000000..ac3a319146
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/client/delegations.go
@@ -0,0 +1,117 @@
+package client
+
+import (
+	"github.com/DataDog/go-tuf/data"
+	"github.com/DataDog/go-tuf/pkg/targets"
+	"github.com/DataDog/go-tuf/verify"
+)
+
+// getTargetFileMeta searches for a verified TargetFileMeta matching a target
+// Requires a local snapshot to be loaded and is locked to the snapshot versions.
+// Searches through delegated targets following TUF spec 1.0.19 section 5.6.
+func (c *Client) getTargetFileMeta(target string) (data.TargetFileMeta, error) {
+	snapshot, err := c.loadLocalSnapshot()
+	if err != nil {
+		return data.TargetFileMeta{}, err
+	}
+
+	// delegationsIterator covers 5.6.7
+	// - pre-order depth-first search starting with the top targets
+	// - filter delegations with paths or path_hash_prefixes matching searched target
+	// - 5.6.7.1 cycles protection
+	// - 5.6.7.2 terminations
+	delegations, err := targets.NewDelegationsIterator(target, c.db)
+	if err != nil {
+		return data.TargetFileMeta{}, err
+	}
+
+	for i := 0; i < c.MaxDelegations; i++ {
+		d, ok := delegations.Next()
+		if !ok {
+			return data.TargetFileMeta{}, ErrUnknownTarget{target, snapshot.Version}
+		}
+
+		// covers 5.6.{1,2,3,4,5,6}
+		targets, err := c.loadDelegatedTargets(snapshot, d.Delegatee.Name, d.DB)
+		if err != nil {
+			return data.TargetFileMeta{}, err
+		}
+
+		// stop when the searched TargetFileMeta is found
+		if m, ok := targets.Targets[target]; ok {
+			return m, nil
+		}
+
+		if targets.Delegations != nil {
+			delegationsDB, err := verify.NewDBFromDelegations(targets.Delegations)
+			if err != nil {
+				return data.TargetFileMeta{}, err
+			}
+			err = delegations.Add(targets.Delegations.Roles, d.Delegatee.Name, delegationsDB)
+			if err != nil {
+				return data.TargetFileMeta{}, err
+			}
+		}
+	}
+
+	return data.TargetFileMeta{}, ErrMaxDelegations{
+		Target:          target,
+		MaxDelegations:  c.MaxDelegations,
+		SnapshotVersion: snapshot.Version,
+	}
+}
+
+func (c *Client) loadLocalSnapshot() (*data.Snapshot, error) {
+	if err := c.getLocalMeta(); err != nil {
+		return nil, err
+	}
+
+	rawS, ok := c.localMeta["snapshot.json"]
+	if !ok {
+		return nil, ErrNoLocalSnapshot
+	}
+
+	snapshot := &data.Snapshot{}
+	if err := c.db.Unmarshal(rawS, snapshot, "snapshot", c.snapshotVer); err != nil {
+		return nil, ErrDecodeFailed{"snapshot.json", err}
+	}
+	return snapshot, nil
+}
+
+// loadDelegatedTargets downloads, decodes, verifies and stores targets
+func (c *Client) loadDelegatedTargets(snapshot *data.Snapshot, role string, db *verify.DB) (*data.Targets, error) {
+	var err error
+	fileName := role + ".json"
+	fileMeta, ok := snapshot.Meta[fileName]
+	if !ok {
+		return nil, ErrRoleNotInSnapshot{role, snapshot.Version}
+	}
+
+	// 5.6.1 download target if not in the local store
+	// 5.6.2 check against snapshot hash
+	// 5.6.4 check against snapshot version
+	raw, alreadyStored := c.localMetaFromSnapshot(fileName, fileMeta)
+	if !alreadyStored {
+		raw, err = c.downloadMetaFromSnapshot(fileName, fileMeta)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	targets := &data.Targets{}
+	// 5.6.3 verify signature with parent public keys
+	// 5.6.5 verify that the targets is not expired
+	// role "targets" is a top role verified by root keys loaded in the client db
+	err = db.Unmarshal(raw, targets, role, fileMeta.Version)
+	if err != nil {
+		return nil, ErrDecodeFailed{fileName, err}
+	}
+
+	// 5.6.6 persist
+	if !alreadyStored {
+		if err := c.local.SetMeta(fileName, raw); err != nil {
+			return nil, err
+		}
+	}
+	return targets, nil
+}
diff --git a/vendor/github.com/DataDog/go-tuf/client/errors.go b/vendor/github.com/DataDog/go-tuf/client/errors.go
new file mode 100644
index 0000000000..3e7a5dcc4d
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/client/errors.go
@@ -0,0 +1,107 @@
+package client
+
+import (
+	"errors"
+	"fmt"
+)
+
+var (
+	ErrNoRootKeys       = errors.New("tuf: no root keys found in local meta store")
+	ErrInsufficientKeys = errors.New("tuf: insufficient keys to meet threshold")
+	ErrNoLocalSnapshot  = errors.New("tuf: no snapshot stored locally")
+)
+
+type ErrMissingRemoteMetadata struct {
+	Name string
+}
+
+func (e ErrMissingRemoteMetadata) Error() string {
+	return fmt.Sprintf("tuf: missing remote metadata %s", e.Name)
+}
+
+type ErrDownloadFailed struct {
+	File string
+	Err  error
+}
+
+func (e ErrDownloadFailed) Error() string {
+	return fmt.Sprintf("tuf: failed to download %s: %s", e.File, e.Err)
+}
+
+type ErrDecodeFailed struct {
+	File string
+	Err  error
+}
+
+func (e ErrDecodeFailed) Error() string {
+	return fmt.Sprintf("tuf: failed to decode %s: %s", e.File, e.Err)
+}
+
+type ErrMaxDelegations struct {
+	Target          string
+	MaxDelegations  int
+	SnapshotVersion int64
+}
+
+func (e ErrMaxDelegations) Error() string {
+	return fmt.Sprintf("tuf: max delegation of %d reached searching for %s with snapshot version %d", e.MaxDelegations, e.Target, e.SnapshotVersion)
+}
+
+type ErrNotFound struct {
+	File string
+}
+
+func (e ErrNotFound) Error() string {
+	return fmt.Sprintf("tuf: file not found: %s", e.File)
+}
+
+func IsNotFound(err error) bool {
+	_, ok := err.(ErrNotFound)
+	return ok
+}
+
+type ErrWrongSize struct {
+	File     string
+	Actual   int64
+	Expected int64
+}
+
+func (e ErrWrongSize) Error() string {
+	return fmt.Sprintf("tuf: unexpected file size: %s (expected %d bytes, got %d bytes)", e.File, e.Expected, e.Actual)
+}
+
+type ErrUnknownTarget struct {
+	Name            string
+	SnapshotVersion int64
+}
+
+func (e ErrUnknownTarget) Error() string {
+	return fmt.Sprintf("tuf: unknown target file: %s with snapshot version %d", e.Name, e.SnapshotVersion)
+}
+
+type ErrMetaTooLarge struct {
+	Name    string
+	Size    int64
+	MaxSize int64
+}
+
+func (e ErrMetaTooLarge) Error() string {
+	return fmt.Sprintf("tuf: %s size %d bytes greater than maximum %d bytes", e.Name, e.Size, e.MaxSize)
+}
+
+type ErrInvalidURL struct {
+	URL string
+}
+
+func (e ErrInvalidURL) Error() string {
+	return fmt.Sprintf("tuf: invalid repository URL %s", e.URL)
+}
+
+type ErrRoleNotInSnapshot struct {
+	Role            string
+	SnapshotVersion int64
+}
+
+func (e ErrRoleNotInSnapshot) Error() string {
+	return fmt.Sprintf("tuf: role %s not in snapshot version %d", e.Role, e.SnapshotVersion)
+}
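The IsNotFound helper pairs with Client.Target (defined earlier in this diff), which maps ErrUnknownTarget to ErrNotFound. A short usage sketch with c assumed from context:

	if _, err := c.Target("path/to/artifact.bin"); err != nil {
		if client.IsNotFound(err) {
			// the target is not listed in the top-level or delegated targets metadata
		}
		return err
	}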
diff --git a/vendor/github.com/DataDog/go-tuf/client/local_store.go b/vendor/github.com/DataDog/go-tuf/client/local_store.go
new file mode 100644
index 0000000000..bb9421f5d4
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/client/local_store.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+	"encoding/json"
+)
+
+func MemoryLocalStore() LocalStore {
+	return make(memoryLocalStore)
+}
+
+type memoryLocalStore map[string]json.RawMessage
+
+func (m memoryLocalStore) GetMeta() (map[string]json.RawMessage, error) {
+	return m, nil
+}
+
+func (m memoryLocalStore) SetMeta(name string, meta json.RawMessage) error {
+	m[name] = meta
+	return nil
+}
+
+func (m memoryLocalStore) DeleteMeta(name string) error {
+	delete(m, name)
+	return nil
+}
+
+func (m memoryLocalStore) Close() error {
+	return nil
+}
diff --git a/vendor/github.com/DataDog/go-tuf/client/remote_store.go b/vendor/github.com/DataDog/go-tuf/client/remote_store.go
new file mode 100644
index 0000000000..17a63fc593
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/client/remote_store.go
@@ -0,0 +1,109 @@
+package client
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type HTTPRemoteOptions struct {
+	MetadataPath string
+	TargetsPath  string
+	UserAgent    string
+	Retries      *HTTPRemoteRetries
+}
+
+type HTTPRemoteRetries struct {
+	Delay time.Duration
+	Total time.Duration
+}
+
+var DefaultHTTPRetries = &HTTPRemoteRetries{
+	Delay: time.Second,
+	Total: 10 * time.Second,
+}
+
+func HTTPRemoteStore(baseURL string, opts *HTTPRemoteOptions, client *http.Client) (RemoteStore, error) {
+	if !strings.HasPrefix(baseURL, "http") {
+		return nil, ErrInvalidURL{baseURL}
+	}
+	if opts == nil {
+		opts = &HTTPRemoteOptions{}
+	}
+	if opts.TargetsPath == "" {
+		opts.TargetsPath = "targets"
+	}
+	if client == nil {
+		client = http.DefaultClient
+	}
+	return &httpRemoteStore{baseURL, opts, client}, nil
+}
+
+type httpRemoteStore struct {
+	baseURL string
+	opts    *HTTPRemoteOptions
+	cli     *http.Client
+}
+
+func (h *httpRemoteStore) GetMeta(name string) (io.ReadCloser, int64, error) {
+	return h.get(path.Join(h.opts.MetadataPath, name))
+}
+
+func (h *httpRemoteStore) GetTarget(name string) (io.ReadCloser, int64, error) {
+	return h.get(path.Join(h.opts.TargetsPath, name))
+}
+
+func (h *httpRemoteStore) get(s string) (io.ReadCloser, int64, error) {
+	u := h.url(s)
+	req, err := http.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, 0, err
+	}
+	if h.opts.UserAgent != "" {
+		req.Header.Set("User-Agent", h.opts.UserAgent)
+	}
+	var res *http.Response
+	if r := h.opts.Retries; r != nil {
+		for start := time.Now(); time.Since(start) < r.Total; time.Sleep(r.Delay) {
+			res, err = h.cli.Do(req)
+			if err == nil && (res.StatusCode < 500 || res.StatusCode > 599) {
+				break
+			}
+		}
+	} else {
+		res, err = h.cli.Do(req)
+	}
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if res.StatusCode == http.StatusNotFound {
+		res.Body.Close()
+		return nil, 0, ErrNotFound{s}
+	} else if res.StatusCode != http.StatusOK {
+		res.Body.Close()
+		return nil, 0, &url.Error{
+			Op:  "GET",
+			URL: u,
+			Err: fmt.Errorf("unexpected HTTP status %d", res.StatusCode),
+		}
+	}
+
+	size, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 0)
+	if err != nil {
+		return res.Body, -1, nil
+	}
+	return res.Body, size, nil
+}
+
+func (h *httpRemoteStore) url(path string) string {
+	if !strings.HasPrefix(path, "/") {
+		path = "/" + path
+	}
+	return h.baseURL + path
+}
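A sketch of wiring retries and a custom user agent through HTTPRemoteOptions; the base URL, paths, and user agent string are placeholders, and net/http is assumed imported:

	opts := &client.HTTPRemoteOptions{
		MetadataPath: "metadata",
		TargetsPath:  "targets",
		UserAgent:    "my-updater/1.0",
		Retries:      client.DefaultHTTPRetries,
	}
	remote, err := client.HTTPRemoteStore("https://tuf.example.com/repo", opts, http.DefaultClient)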
diff --git a/vendor/github.com/DataDog/go-tuf/data/hex_bytes.go b/vendor/github.com/DataDog/go-tuf/data/hex_bytes.go
new file mode 100644
index 0000000000..ec200412ef
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/data/hex_bytes.go
@@ -0,0 +1,42 @@
+package data
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"errors"
+)
+
+type HexBytes []byte
+
+func (b *HexBytes) UnmarshalJSON(data []byte) error {
+	if len(data) < 2 || len(data)%2 != 0 || data[0] != '"' || data[len(data)-1] != '"' {
+		return errors.New("tuf: invalid JSON hex bytes")
+	}
+	res := make([]byte, hex.DecodedLen(len(data)-2))
+	_, err := hex.Decode(res, data[1:len(data)-1])
+	if err != nil {
+		return err
+	}
+	*b = res
+	return nil
+}
+
+func (b HexBytes) MarshalJSON() ([]byte, error) {
+	res := make([]byte, hex.EncodedLen(len(b))+2)
+	res[0] = '"'
+	res[len(res)-1] = '"'
+	hex.Encode(res[1:], b)
+	return res, nil
+}
+
+func (b HexBytes) String() string {
+	return hex.EncodeToString(b)
+}
+
+// 4.5. File formats: targets.json and delegated target roles:
+// ...each target path, when hashed with the SHA-256 hash function to produce
+// a 64-byte hexadecimal digest (HEX_DIGEST)...
+func PathHexDigest(s string) string {
+	b := sha256.Sum256([]byte(s))
+	return hex.EncodeToString(b[:])
+}
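HexBytes round-trips hex strings through JSON; a quick illustration using encoding/json and fmt, with the error path trimmed:

	var h data.HexBytes
	if err := json.Unmarshal([]byte(`"deadbeef"`), &h); err != nil {
		return err
	}
	fmt.Println(h.String())                  // deadbeef
	fmt.Println(data.PathHexDigest("a.txt")) // SHA-256 hex digest of the target path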
diff --git a/vendor/github.com/DataDog/go-tuf/data/types.go b/vendor/github.com/DataDog/go-tuf/data/types.go
new file mode 100644
index 0000000000..44d9bf13cb
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/data/types.go
@@ -0,0 +1,326 @@
+package data
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/secure-systems-lab/go-securesystemslib/cjson"
+)
+
+const (
+	KeyIDLength                = sha256.Size * 2
+	KeyTypeEd25519             = "ed25519"
+	KeyTypeECDSA_SHA2_P256     = "ecdsa-sha2-nistp256"
+	KeySchemeEd25519           = "ed25519"
+	KeySchemeECDSA_SHA2_P256   = "ecdsa-sha2-nistp256"
+	KeyTypeRSASSA_PSS_SHA256   = "rsa"
+	KeySchemeRSASSA_PSS_SHA256 = "rsassa-pss-sha256"
+)
+
+var (
+	HashAlgorithms           = []string{"sha256", "sha512"}
+	ErrPathsAndPathHashesSet = errors.New("tuf: failed validation of delegated target: paths and path_hash_prefixes are both set")
+)
+
+type Signed struct {
+	Signed     json.RawMessage `json:"signed"`
+	Signatures []Signature     `json:"signatures"`
+}
+
+type Signature struct {
+	KeyID     string   `json:"keyid"`
+	Signature HexBytes `json:"sig"`
+}
+
+type PublicKey struct {
+	Type       string          `json:"keytype"`
+	Scheme     string          `json:"scheme"`
+	Algorithms []string        `json:"keyid_hash_algorithms,omitempty"`
+	Value      json.RawMessage `json:"keyval"`
+
+	ids    []string
+	idOnce sync.Once
+}
+
+type PrivateKey struct {
+	Type       string          `json:"keytype"`
+	Scheme     string          `json:"scheme,omitempty"`
+	Algorithms []string        `json:"keyid_hash_algorithms,omitempty"`
+	Value      json.RawMessage `json:"keyval"`
+}
+
+func (k *PublicKey) IDs() []string {
+	k.idOnce.Do(func() {
+		data, err := cjson.EncodeCanonical(k)
+		if err != nil {
+			panic(fmt.Errorf("tuf: error creating key ID: %w", err))
+		}
+		digest := sha256.Sum256(data)
+		k.ids = []string{hex.EncodeToString(digest[:])}
+	})
+	return k.ids
+}
+
+func (k *PublicKey) ContainsID(id string) bool {
+	for _, keyid := range k.IDs() {
+		if id == keyid {
+			return true
+		}
+	}
+	return false
+}
+
+func DefaultExpires(role string) time.Time {
+	var t time.Time
+	switch role {
+	case "root":
+		t = time.Now().AddDate(1, 0, 0)
+	case "snapshot":
+		t = time.Now().AddDate(0, 0, 7)
+	case "timestamp":
+		t = time.Now().AddDate(0, 0, 1)
+	default:
+		// targets and delegated targets
+		t = time.Now().AddDate(0, 3, 0)
+	}
+	return t.UTC().Round(time.Second)
+}
+
+type Root struct {
+	Type        string                `json:"_type"`
+	SpecVersion string                `json:"spec_version"`
+	Version     int64                 `json:"version"`
+	Expires     time.Time             `json:"expires"`
+	Keys        map[string]*PublicKey `json:"keys"`
+	Roles       map[string]*Role      `json:"roles"`
+	Custom      *json.RawMessage      `json:"custom,omitempty"`
+
+	ConsistentSnapshot bool `json:"consistent_snapshot"`
+}
+
+func NewRoot() *Root {
+	return &Root{
+		Type:               "root",
+		SpecVersion:        "1.0",
+		Expires:            DefaultExpires("root"),
+		Keys:               make(map[string]*PublicKey),
+		Roles:              make(map[string]*Role),
+		ConsistentSnapshot: true,
+	}
+}
+
+func (r *Root) AddKey(key *PublicKey) bool {
+	changed := false
+	for _, id := range key.IDs() {
+		if _, ok := r.Keys[id]; !ok {
+			changed = true
+			r.Keys[id] = key
+		}
+	}
+	return changed
+}
+
+type Role struct {
+	KeyIDs    []string `json:"keyids"`
+	Threshold int      `json:"threshold"`
+}
+
+func (r *Role) AddKeyIDs(ids []string) bool {
+	roleIDs := make(map[string]struct{})
+	for _, id := range r.KeyIDs {
+		roleIDs[id] = struct{}{}
+	}
+	changed := false
+	for _, id := range ids {
+		if _, ok := roleIDs[id]; !ok {
+			changed = true
+			r.KeyIDs = append(r.KeyIDs, id)
+		}
+	}
+	return changed
+}
+
+type Files map[string]FileMeta
+
+type FileMeta struct {
+	Length int64            `json:"length,omitempty"`
+	Hashes Hashes           `json:"hashes,omitempty"`
+	Custom *json.RawMessage `json:"custom,omitempty"`
+}
+
+type Hashes map[string]HexBytes
+
+func (f FileMeta) HashAlgorithms() []string {
+	funcs := make([]string, 0, len(f.Hashes))
+	for name := range f.Hashes {
+		funcs = append(funcs, name)
+	}
+	return funcs
+}
+
+type SnapshotFileMeta struct {
+	FileMeta
+	Version int64 `json:"version"`
+}
+
+type SnapshotFiles map[string]SnapshotFileMeta
+
+type Snapshot struct {
+	Type        string           `json:"_type"`
+	SpecVersion string           `json:"spec_version"`
+	Version     int64            `json:"version"`
+	Expires     time.Time        `json:"expires"`
+	Meta        SnapshotFiles    `json:"meta"`
+	Custom      *json.RawMessage `json:"custom,omitempty"`
+}
+
+func NewSnapshot() *Snapshot {
+	return &Snapshot{
+		Type:        "snapshot",
+		SpecVersion: "1.0",
+		Expires:     DefaultExpires("snapshot"),
+		Meta:        make(SnapshotFiles),
+	}
+}
+
+type TargetFiles map[string]TargetFileMeta
+
+type TargetFileMeta struct {
+	FileMeta
+}
+
+func (f TargetFileMeta) HashAlgorithms() []string {
+	return f.FileMeta.HashAlgorithms()
+}
+
+type Targets struct {
+	Type        string           `json:"_type"`
+	SpecVersion string           `json:"spec_version"`
+	Version     int64            `json:"version"`
+	Expires     time.Time        `json:"expires"`
+	Targets     TargetFiles      `json:"targets"`
+	Delegations *Delegations     `json:"delegations,omitempty"`
+	Custom      *json.RawMessage `json:"custom,omitempty"`
+}
+
+// Delegations represents the edges from a parent Targets role to one or more
+// delegated target roles. See spec v1.0.19 section 4.5.
+type Delegations struct {
+	Keys  map[string]*PublicKey `json:"keys"`
+	Roles []DelegatedRole       `json:"roles"`
+}
+
+// DelegatedRole describes a delegated role, including what paths it is
+// responsible for. See spec v1.0.19 section 4.5.
+type DelegatedRole struct {
+	Name             string   `json:"name"`
+	KeyIDs           []string `json:"keyids"`
+	Threshold        int      `json:"threshold"`
+	Terminating      bool     `json:"terminating"`
+	PathHashPrefixes []string `json:"path_hash_prefixes,omitempty"`
+	Paths            []string `json:"paths"`
+}
+
+// MatchesPath evaluates whether the path patterns or path hash prefixes match
+// a given file. This determines whether a delegated role is responsible for
+// signing and verifying the file.
+func (d *DelegatedRole) MatchesPath(file string) (bool, error) {
+	if err := d.validatePaths(); err != nil {
+		return false, err
+	}
+
+	for _, pattern := range d.Paths {
+		if matched, _ := filepath.Match(pattern, file); matched {
+			return true, nil
+		}
+	}
+
+	pathHash := PathHexDigest(file)
+	for _, hashPrefix := range d.PathHashPrefixes {
+		if strings.HasPrefix(pathHash, hashPrefix) {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+// validatePaths enforces the spec
+// https://theupdateframework.github.io/specification/v1.0.19/index.html#file-formats-targets
+// 'role MUST specify only one of the "path_hash_prefixes" or "paths"'
+// Marshalling and unmarshalling JSON will fail and return
+// ErrPathsAndPathHashesSet if both fields are set and not empty.
+func (d *DelegatedRole) validatePaths() error {
+	if len(d.PathHashPrefixes) > 0 && len(d.Paths) > 0 {
+		return ErrPathsAndPathHashesSet
+	}
+
+	return nil
+}
+
+// MarshalJSON is called when writing the struct to JSON. We validate prior to
+// marshalling to ensure that an invalid delegated role can not be serialized
+// to JSON.
+func (d *DelegatedRole) MarshalJSON() ([]byte, error) {
+	type delegatedRoleAlias DelegatedRole
+
+	if err := d.validatePaths(); err != nil {
+		return nil, err
+	}
+
+	return json.Marshal((*delegatedRoleAlias)(d))
+}
+
+// UnmarshalJSON is called when reading the struct from JSON. We validate once
+// unmarshalled to ensure that an error is thrown if an invalid delegated role
+// is read.
+func (d *DelegatedRole) UnmarshalJSON(b []byte) error {
+	type delegatedRoleAlias DelegatedRole
+
+	if err := json.Unmarshal(b, (*delegatedRoleAlias)(d)); err != nil {
+		return err
+	}
+
+	return d.validatePaths()
+}
+
+func NewTargets() *Targets {
+	return &Targets{
+		Type:        "targets",
+		SpecVersion: "1.0",
+		Expires:     DefaultExpires("targets"),
+		Targets:     make(TargetFiles),
+	}
+}
+
+type TimestampFileMeta struct {
+	FileMeta
+	Version int64 `json:"version"`
+}
+
+type TimestampFiles map[string]TimestampFileMeta
+
+type Timestamp struct {
+	Type        string           `json:"_type"`
+	SpecVersion string           `json:"spec_version"`
+	Version     int64            `json:"version"`
+	Expires     time.Time        `json:"expires"`
+	Meta        TimestampFiles   `json:"meta"`
+	Custom      *json.RawMessage `json:"custom,omitempty"`
+}
+
+func NewTimestamp() *Timestamp {
+	return &Timestamp{
+		Type:        "timestamp",
+		SpecVersion: "1.0",
+		Expires:     DefaultExpires("timestamp"),
+		Meta:        make(TimestampFiles),
+	}
+}
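// Illustrative usage sketch, separate from the vendored file above: it shows
// how a DelegatedRole decides responsibility for a target path, and that
// setting both paths and path_hash_prefixes is rejected. The role name and
// paths are arbitrary examples.
package main

import (
	"fmt"

	"github.com/DataDog/go-tuf/data"
)

func main() {
	role := data.DelegatedRole{
		Name:      "project",
		Threshold: 1,
		Paths:     []string{"project/*.txt"},
	}

	ok, err := role.MatchesPath("project/readme.txt")
	fmt.Println(ok, err) // true <nil>

	// A role may use paths or path_hash_prefixes, but not both.
	role.PathHashPrefixes = []string{"8f"}
	_, err = role.MatchesPath("project/readme.txt")
	fmt.Println(err) // ErrPathsAndPathHashesSet
}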
diff --git a/vendor/github.com/DataDog/go-tuf/internal/roles/roles.go b/vendor/github.com/DataDog/go-tuf/internal/roles/roles.go
new file mode 100644
index 0000000000..f7841c2681
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/internal/roles/roles.go
@@ -0,0 +1,41 @@
+package roles
+
+import (
+	"strconv"
+	"strings"
+)
+
+var TopLevelRoles = map[string]struct{}{
+	"root":      {},
+	"targets":   {},
+	"snapshot":  {},
+	"timestamp": {},
+}
+
+func IsTopLevelRole(name string) bool {
+	_, ok := TopLevelRoles[name]
+	return ok
+}
+
+func IsDelegatedTargetsRole(name string) bool {
+	return !IsTopLevelRole(name)
+}
+
+func IsTopLevelManifest(name string) bool {
+	return IsTopLevelRole(strings.TrimSuffix(name, ".json"))
+}
+
+func IsDelegatedTargetsManifest(name string) bool {
+	return !IsTopLevelManifest(name)
+}
+
+func IsVersionedManifest(name string) bool {
+	parts := strings.Split(name, ".")
+	// Versioned manifests have the form "x.role.json"
+	if len(parts) < 3 {
+		return false
+	}
+
+	_, err := strconv.Atoi(parts[0])
+	return err == nil
+}
diff --git a/vendor/github.com/DataDog/go-tuf/internal/sets/strings.go b/vendor/github.com/DataDog/go-tuf/internal/sets/strings.go
new file mode 100644
index 0000000000..7eee57d094
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/internal/sets/strings.go
@@ -0,0 +1,24 @@
+package sets
+
+func StringSliceToSet(items []string) map[string]struct{} {
+	s := map[string]struct{}{}
+	for _, item := range items {
+		s[item] = struct{}{}
+	}
+	return s
+}
+
+func StringSetToSlice(items map[string]struct{}) []string {
+	ret := []string{}
+
+	for k := range items {
+		ret = append(ret, k)
+	}
+
+	return ret
+}
+
+func DeduplicateStrings(items []string) []string {
+	s := StringSliceToSet(items)
+	return StringSetToSlice(s)
+}
diff --git a/vendor/github.com/DataDog/go-tuf/pkg/keys/ecdsa.go b/vendor/github.com/DataDog/go-tuf/pkg/keys/ecdsa.go
new file mode 100644
index 0000000000..bfd9b69c2e
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/pkg/keys/ecdsa.go
@@ -0,0 +1,71 @@
+package keys
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/sha256"
+	"encoding/asn1"
+	"encoding/json"
+	"errors"
+	"math/big"
+
+	"github.com/DataDog/go-tuf/data"
+)
+
+func init() {
+	VerifierMap.Store(data.KeyTypeECDSA_SHA2_P256, NewEcdsaVerifier)
+}
+
+func NewEcdsaVerifier() Verifier {
+	return &p256Verifier{}
+}
+
+type ecdsaSignature struct {
+	R, S *big.Int
+}
+
+type p256Verifier struct {
+	PublicKey data.HexBytes `json:"public"`
+	key       *data.PublicKey
+}
+
+func (p *p256Verifier) Public() string {
+	return p.PublicKey.String()
+}
+
+func (p *p256Verifier) Verify(msg, sigBytes []byte) error {
+	x, y := elliptic.Unmarshal(elliptic.P256(), p.PublicKey)
+	k := &ecdsa.PublicKey{
+		Curve: elliptic.P256(),
+		X:     x,
+		Y:     y,
+	}
+
+	var sig ecdsaSignature
+	if _, err := asn1.Unmarshal(sigBytes, &sig); err != nil {
+		return err
+	}
+
+	hash := sha256.Sum256(msg)
+
+	if !ecdsa.Verify(k, hash[:], sig.R, sig.S) {
+		return errors.New("tuf: ecdsa signature verification failed")
+	}
+	return nil
+}
+
+func (p *p256Verifier) MarshalPublicKey() *data.PublicKey {
+	return p.key
+}
+
+func (p *p256Verifier) UnmarshalPublicKey(key *data.PublicKey) error {
+	if err := json.Unmarshal(key.Value, p); err != nil {
+		return err
+	}
+	x, _ := elliptic.Unmarshal(elliptic.P256(), p.PublicKey)
+	if x == nil {
+		return errors.New("tuf: invalid ecdsa public key point")
+	}
+	p.key = key
+	return nil
+}
diff --git a/vendor/github.com/DataDog/go-tuf/pkg/keys/ed25519.go b/vendor/github.com/DataDog/go-tuf/pkg/keys/ed25519.go
new file mode 100644
index 0000000000..130f786b74
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/pkg/keys/ed25519.go
@@ -0,0 +1,134 @@
+package keys
+
+import (
+	"crypto"
+	"crypto/ed25519"
+	"crypto/rand"
+	"encoding/json"
+	"errors"
+
+	"github.com/DataDog/go-tuf/data"
+)
+
+func init() {
+	SignerMap.Store(data.KeySchemeEd25519, NewP256Signer)
+	VerifierMap.Store(data.KeySchemeEd25519, NewP256Verifier)
+}
+
+func NewP256Signer() Signer {
+	return &ed25519Signer{}
+}
+
+func NewP256Verifier() Verifier {
+	return &ed25519Verifier{}
+}
+
+type ed25519Verifier struct {
+	PublicKey data.HexBytes `json:"public"`
+	key       *data.PublicKey
+}
+
+func (e *ed25519Verifier) Public() string {
+	return string(e.PublicKey)
+}
+
+func (e *ed25519Verifier) Verify(msg, sig []byte) error {
+	if !ed25519.Verify([]byte(e.PublicKey), msg, sig) {
+		return errors.New("tuf: ed25519 signature verification failed")
+	}
+	return nil
+}
+
+func (e *ed25519Verifier) MarshalPublicKey() *data.PublicKey {
+	return e.key
+}
+
+func (e *ed25519Verifier) UnmarshalPublicKey(key *data.PublicKey) error {
+	e.key = key
+	if err := json.Unmarshal(key.Value, e); err != nil {
+		return err
+	}
+	if len(e.PublicKey) != ed25519.PublicKeySize {
+		return errors.New("tuf: unexpected public key length for ed25519 key")
+	}
+	return nil
+}
+
+type Ed25519PrivateKeyValue struct {
+	Public  data.HexBytes `json:"public"`
+	Private data.HexBytes `json:"private"`
+}
+
+type ed25519Signer struct {
+	ed25519.PrivateKey
+
+	keyType       string
+	keyScheme     string
+	keyAlgorithms []string
+}
+
+func GenerateEd25519Key() (*ed25519Signer, error) {
+	_, private, err := ed25519.GenerateKey(rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+	return &ed25519Signer{
+		PrivateKey:    ed25519.PrivateKey(data.HexBytes(private)),
+		keyType:       data.KeyTypeEd25519,
+		keyScheme:     data.KeySchemeEd25519,
+		keyAlgorithms: data.HashAlgorithms,
+	}, nil
+}
+
+func NewEd25519Signer(keyValue Ed25519PrivateKeyValue) *ed25519Signer {
+	return &ed25519Signer{
+		PrivateKey:    ed25519.PrivateKey(data.HexBytes(keyValue.Private)),
+		keyType:       data.KeyTypeEd25519,
+		keyScheme:     data.KeySchemeEd25519,
+		keyAlgorithms: data.HashAlgorithms,
+	}
+}
+
+func (e *ed25519Signer) SignMessage(message []byte) ([]byte, error) {
+	return e.Sign(rand.Reader, message, crypto.Hash(0))
+}
+
+func (e *ed25519Signer) MarshalPrivateKey() (*data.PrivateKey, error) {
+	valueBytes, err := json.Marshal(Ed25519PrivateKeyValue{
+		Public:  data.HexBytes([]byte(e.PrivateKey.Public().(ed25519.PublicKey))),
+		Private: data.HexBytes(e.PrivateKey),
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &data.PrivateKey{
+		Type:       e.keyType,
+		Scheme:     e.keyScheme,
+		Algorithms: e.keyAlgorithms,
+		Value:      valueBytes,
+	}, nil
+}
+
+func (e *ed25519Signer) UnmarshalPrivateKey(key *data.PrivateKey) error {
+	keyValue := &Ed25519PrivateKeyValue{}
+	if err := json.Unmarshal(key.Value, keyValue); err != nil {
+		return err
+	}
+	*e = ed25519Signer{
+		PrivateKey:    ed25519.PrivateKey(data.HexBytes(keyValue.Private)),
+		keyType:       key.Type,
+		keyScheme:     key.Scheme,
+		keyAlgorithms: key.Algorithms,
+	}
+	return nil
+}
+
+func (e *ed25519Signer) PublicData() *data.PublicKey {
+	keyValBytes, _ := json.Marshal(ed25519Verifier{PublicKey: []byte(e.PrivateKey.Public().(ed25519.PublicKey))})
+	return &data.PublicKey{
+		Type:       e.keyType,
+		Scheme:     e.keyScheme,
+		Algorithms: e.keyAlgorithms,
+		Value:      keyValBytes,
+	}
+}
diff --git a/vendor/github.com/DataDog/go-tuf/pkg/keys/keys.go b/vendor/github.com/DataDog/go-tuf/pkg/keys/keys.go
new file mode 100644
index 0000000000..b8ef3f24c4
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/pkg/keys/keys.go
@@ -0,0 +1,79 @@
+package keys
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/DataDog/go-tuf/data"
+)
+
+// SignerMap stores mapping between key type strings and signer constructors.
+var SignerMap sync.Map
+
+// VerifierMap stores mapping between key type strings and verifier constructors.
+var VerifierMap sync.Map
+
+var (
+	ErrInvalid    = errors.New("tuf: signature verification failed")
+	ErrInvalidKey = errors.New("invalid key")
+)
+
+// A Verifier verifies public key signatures.
+type Verifier interface {
+	// UnmarshalPublicKey initializes a working verifier implementation for the key type from the given key data.
+	// This performs any validation over the data.PublicKey to ensure that the verifier is usable
+	// to verify signatures.
+	UnmarshalPublicKey(key *data.PublicKey) error
+
+	// MarshalPublicKey returns the data.PublicKey object associated with the verifier.
+	MarshalPublicKey() *data.PublicKey
+
+	// Public returns the public string used as a unique identifier for the verifier instance.
+	Public() string
+
+	// Verify takes a message and signature, all as byte slices,
+	// and determines whether the signature is valid for the given
+	// key and message.
+	Verify(msg, sig []byte) error
+}
+
+type Signer interface {
+	// MarshalPrivateKey returns the private key data.
+	MarshalPrivateKey() (*data.PrivateKey, error)
+
+	// UnmarshalPrivateKey initializes a working Signer implementation for the key type from the given private key data.
+	UnmarshalPrivateKey(key *data.PrivateKey) error
+
+	// PublicData returns the public data.PublicKey derived from the private key.
+	PublicData() *data.PublicKey
+
+	// SignMessage returns the signature of the message.
+	// The signer is expected to do its own hashing, so the full message will be
+	// provided as the message to Sign with a zero opts.HashFunc().
+	SignMessage(message []byte) ([]byte, error)
+}
+
+func GetVerifier(key *data.PublicKey) (Verifier, error) {
+	st, ok := VerifierMap.Load(key.Type)
+	if !ok {
+		return nil, ErrInvalidKey
+	}
+	s := st.(func() Verifier)()
+	if err := s.UnmarshalPublicKey(key); err != nil {
+		return nil, fmt.Errorf("tuf: error unmarshalling key: %w", err)
+	}
+	return s, nil
+}
+
+func GetSigner(key *data.PrivateKey) (Signer, error) {
+	st, ok := SignerMap.Load(key.Type)
+	if !ok {
+		return nil, ErrInvalidKey
+	}
+	s := st.(func() Signer)()
+	if err := s.UnmarshalPrivateKey(key); err != nil {
+		return nil, fmt.Errorf("tuf: error unmarshalling key: %w", err)
+	}
+	return s, nil
+}
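// Illustrative usage sketch, separate from the vendored files above: it shows
// the registry-driven flow of this package, i.e. generating an ed25519 signer,
// publishing its public key, and verifying a signature through GetVerifier.
package main

import (
	"fmt"

	"github.com/DataDog/go-tuf/pkg/keys"
)

func main() {
	signer, err := keys.GenerateEd25519Key()
	if err != nil {
		panic(err)
	}

	msg := []byte("hello")
	sig, _ := signer.SignMessage(msg)

	// PublicData yields the serializable public key; GetVerifier looks up the
	// verifier constructor registered for that key type.
	verifier, err := keys.GetVerifier(signer.PublicData())
	if err != nil {
		panic(err)
	}
	fmt.Println(verifier.Verify(msg, sig)) // <nil>
}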
diff --git a/vendor/github.com/DataDog/go-tuf/pkg/keys/rsa.go b/vendor/github.com/DataDog/go-tuf/pkg/keys/rsa.go
new file mode 100644
index 0000000000..3c73e6d80e
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/pkg/keys/rsa.go
@@ -0,0 +1,138 @@
+package keys
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+
+	"github.com/DataDog/go-tuf/data"
+)
+
+func init() {
+	VerifierMap.Store(data.KeyTypeRSASSA_PSS_SHA256, NewRsaVerifier)
+	SignerMap.Store(data.KeyTypeRSASSA_PSS_SHA256, NewRsaSigner)
+}
+
+func NewRsaVerifier() Verifier {
+	return &rsaVerifier{}
+}
+
+func NewRsaSigner() Signer {
+	return &rsaSigner{}
+}
+
+type rsaVerifier struct {
+	PublicKey string `json:"public"`
+	rsaKey    *rsa.PublicKey
+	key       *data.PublicKey
+}
+
+func (p *rsaVerifier) Public() string {
+	// Unique public key identifier, use a uniform encoding
+	r, err := x509.MarshalPKIXPublicKey(p.rsaKey)
+	if err != nil {
+		// This shouldn't happen with a valid rsa key, but fall back on the
+		// JSON public key string
+		return string(p.PublicKey)
+	}
+	return string(r)
+}
+
+func (p *rsaVerifier) Verify(msg, sigBytes []byte) error {
+	hash := sha256.Sum256(msg)
+
+	return rsa.VerifyPSS(p.rsaKey, crypto.SHA256, hash[:], sigBytes, &rsa.PSSOptions{})
+}
+
+func (p *rsaVerifier) MarshalPublicKey() *data.PublicKey {
+	return p.key
+}
+
+func (p *rsaVerifier) UnmarshalPublicKey(key *data.PublicKey) error {
+	if err := json.Unmarshal(key.Value, p); err != nil {
+		return err
+	}
+	var err error
+	p.rsaKey, err = parseKey(p.PublicKey)
+	if err != nil {
+		return err
+	}
+	p.key = key
+	return nil
+}
+
+// parseKey tries to parse a PEM-encoded public key by attempting PKCS1 and PKIX in order.
+func parseKey(data string) (*rsa.PublicKey, error) {
+	block, _ := pem.Decode([]byte(data))
+	if block == nil {
+		return nil, errors.New("tuf: pem decoding public key failed")
+	}
+	rsaPub, err := x509.ParsePKCS1PublicKey(block.Bytes)
+	if err == nil {
+		return rsaPub, nil
+	}
+	key, err := x509.ParsePKIXPublicKey(block.Bytes)
+	if err == nil {
+		rsaPub, ok := key.(*rsa.PublicKey)
+		if !ok {
+			return nil, errors.New("tuf: invalid rsa key")
+		}
+		return rsaPub, nil
+	}
+	return nil, errors.New("tuf: error unmarshalling rsa key")
+}
+
+type rsaSigner struct {
+	*rsa.PrivateKey
+}
+
+type rsaPublic struct {
+	// PEM encoded public key.
+	PublicKey string `json:"public"`
+}
+
+func (s *rsaSigner) PublicData() *data.PublicKey {
+	pub, _ := x509.MarshalPKIXPublicKey(s.Public().(*rsa.PublicKey))
+	pubBytes := pem.EncodeToMemory(&pem.Block{
+		Type:  "RSA PUBLIC KEY",
+		Bytes: pub,
+	})
+
+	keyValBytes, _ := json.Marshal(rsaPublic{PublicKey: string(pubBytes)})
+	return &data.PublicKey{
+		Type:       data.KeyTypeRSASSA_PSS_SHA256,
+		Scheme:     data.KeySchemeRSASSA_PSS_SHA256,
+		Algorithms: data.HashAlgorithms,
+		Value:      keyValBytes,
+	}
+}
+
+func (s *rsaSigner) SignMessage(message []byte) ([]byte, error) {
+	hash := sha256.Sum256(message)
+	return rsa.SignPSS(rand.Reader, s.PrivateKey, crypto.SHA256, hash[:], &rsa.PSSOptions{})
+}
+
+func (s *rsaSigner) ContainsID(id string) bool {
+	return s.PublicData().ContainsID(id)
+}
+
+func (s *rsaSigner) MarshalPrivateKey() (*data.PrivateKey, error) {
+	return nil, errors.New("not implemented for test")
+}
+
+func (s *rsaSigner) UnmarshalPrivateKey(key *data.PrivateKey) error {
+	return errors.New("not implemented for test")
+}
+
+func GenerateRsaKey() (*rsaSigner, error) {
+	privkey, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return nil, err
+	}
+	return &rsaSigner{privkey}, nil
+}
diff --git a/vendor/github.com/DataDog/go-tuf/pkg/targets/delegation.go b/vendor/github.com/DataDog/go-tuf/pkg/targets/delegation.go
new file mode 100644
index 0000000000..22d2ceeced
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/pkg/targets/delegation.go
@@ -0,0 +1,95 @@
+package targets
+
+import (
+	"errors"
+
+	"github.com/DataDog/go-tuf/data"
+	"github.com/DataDog/go-tuf/internal/sets"
+	"github.com/DataDog/go-tuf/verify"
+)
+
+type Delegation struct {
+	Delegator string
+	Delegatee data.DelegatedRole
+	DB        *verify.DB
+}
+
+type delegationsIterator struct {
+	stack        []Delegation
+	target       string
+	visitedRoles map[string]struct{}
+}
+
+var ErrTopLevelTargetsRoleMissing = errors.New("tuf: top level targets role missing from top level keys DB")
+
+// NewDelegationsIterator initialises an iterator with a first step
+// on top level targets.
+func NewDelegationsIterator(target string, topLevelKeysDB *verify.DB) (*delegationsIterator, error) {
+	targetsRole := topLevelKeysDB.GetRole("targets")
+	if targetsRole == nil {
+		return nil, ErrTopLevelTargetsRoleMissing
+	}
+
+	i := &delegationsIterator{
+		target: target,
+		stack: []Delegation{
+			{
+				Delegatee: data.DelegatedRole{
+					Name:      "targets",
+					KeyIDs:    sets.StringSetToSlice(targetsRole.KeyIDs),
+					Threshold: targetsRole.Threshold,
+				},
+				DB: topLevelKeysDB,
+			},
+		},
+		visitedRoles: make(map[string]struct{}),
+	}
+	return i, nil
+}
+
+func (d *delegationsIterator) Next() (value Delegation, ok bool) {
+	if len(d.stack) == 0 {
+		return Delegation{}, false
+	}
+	delegation := d.stack[len(d.stack)-1]
+	d.stack = d.stack[:len(d.stack)-1]
+
+	// 5.6.7.1: If this role has been visited before, then skip this role (so
+	// that cycles in the delegation graph are avoided).
+	roleName := delegation.Delegatee.Name
+	if _, ok := d.visitedRoles[roleName]; ok {
+		return d.Next()
+	}
+	d.visitedRoles[roleName] = struct{}{}
+
+	// 5.6.7.2 trim delegations to visit, only the current role and its delegations
+	// will be considered
+	// https://github.com/theupdateframework/specification/issues/168
+	if delegation.Delegatee.Terminating {
+		// Empty the stack.
+		d.stack = d.stack[0:0]
+	}
+	return delegation, true
+}
+
+func (d *delegationsIterator) Add(roles []data.DelegatedRole, delegator string, db *verify.DB) error {
+	for i := len(roles) - 1; i >= 0; i-- {
+		// Push the roles onto the stack in reverse so we get a preorder traversal
+		// of the delegations graph.
+		r := roles[i]
+		matchesPath, err := r.MatchesPath(d.target)
+		if err != nil {
+			return err
+		}
+		if matchesPath {
+			delegation := Delegation{
+				Delegator: delegator,
+				Delegatee: r,
+				DB:        db,
+			}
+			d.stack = append(d.stack, delegation)
+		}
+	}
+
+	return nil
+}
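// Illustrative usage sketch, separate from the vendored file above: it walks
// the delegation graph for one target, starting from the top-level targets
// role. The all-zero key ID is a placeholder of the required length; real IDs
// come from PublicKey.IDs().
package main

import (
	"fmt"
	"strings"

	"github.com/DataDog/go-tuf/data"
	"github.com/DataDog/go-tuf/pkg/targets"
	"github.com/DataDog/go-tuf/verify"
)

func main() {
	db := verify.NewDB()
	keyID := strings.Repeat("0", 64) // placeholder 64-character key ID
	if err := db.AddRole("targets", &data.Role{KeyIDs: []string{keyID}, Threshold: 1}); err != nil {
		panic(err)
	}

	it, err := targets.NewDelegationsIterator("project/readme.txt", db)
	if err != nil {
		panic(err)
	}
	// Each step yields the next responsible role; delegations discovered while
	// walking the graph are pushed back onto the stack with it.Add.
	for d, ok := it.Next(); ok; d, ok = it.Next() {
		fmt.Println(d.Delegatee.Name) // "targets"
	}
}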
diff --git a/vendor/github.com/DataDog/go-tuf/pkg/targets/hash_bins.go b/vendor/github.com/DataDog/go-tuf/pkg/targets/hash_bins.go
new file mode 100644
index 0000000000..95f4405d42
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/pkg/targets/hash_bins.go
@@ -0,0 +1,113 @@
+package targets
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const MinDelegationHashPrefixBitLen = 1
+const MaxDelegationHashPrefixBitLen = 32
+
+// hexEncode formats x as a hex string. The hex string is left padded with
+// zeros to padWidth, if necessary.
+func hexEncode(x uint64, padWidth int) string {
+	// Benchmarked to be more than 10x faster than padding with Sprintf.
+	s := strconv.FormatUint(x, 16)
+	if len(s) >= padWidth {
+		return s
+	}
+	return strings.Repeat("0", padWidth-len(s)) + s
+}
+
+const bitsPerHexDigit = 4
+
+// numHexDigits returns the number of hex digits required to encode the given
+// number of bits.
+func numHexDigits(numBits int) int {
+	// ceil(numBits / bitsPerHexDigit)
+	return ((numBits - 1) / bitsPerHexDigit) + 1
+}
+
+// HashBins represents an ordered list of hash bin target roles, which together
+// partition the space of target path hashes into equal-sized buckets based on
+// path hash prefix.
+type HashBins struct {
+	rolePrefix  string
+	bitLen      int
+	hexDigitLen int
+
+	numBins           uint64
+	numPrefixesPerBin uint64
+}
+
+// NewHashBins creates a HashBins partitioning with 2^bitLen buckets.
+func NewHashBins(rolePrefix string, bitLen int) (*HashBins, error) {
+	if bitLen < MinDelegationHashPrefixBitLen || bitLen > MaxDelegationHashPrefixBitLen {
+		return nil, fmt.Errorf("bitLen is out of bounds, should be between %v and %v inclusive", MinDelegationHashPrefixBitLen, MaxDelegationHashPrefixBitLen)
+	}
+
+	hexDigitLen := numHexDigits(bitLen)
+	numBins := uint64(1) << bitLen
+
+	numPrefixesTotal := uint64(1) << (bitsPerHexDigit * hexDigitLen)
+	numPrefixesPerBin := numPrefixesTotal / numBins
+
+	return &HashBins{
+		rolePrefix:        rolePrefix,
+		bitLen:            bitLen,
+		hexDigitLen:       hexDigitLen,
+		numBins:           numBins,
+		numPrefixesPerBin: numPrefixesPerBin,
+	}, nil
+}
+
+// NumBins returns the number of hash bin partitions.
+func (hb *HashBins) NumBins() uint64 {
+	return hb.numBins
+}
+
+// GetBin returns the HashBin at index i, or nil if i is out of bounds.
+func (hb *HashBins) GetBin(i uint64) *HashBin {
+	if i >= hb.numBins {
+		return nil
+	}
+
+	return &HashBin{
+		rolePrefix:  hb.rolePrefix,
+		hexDigitLen: hb.hexDigitLen,
+		first:       i * hb.numPrefixesPerBin,
+		last:        ((i + 1) * hb.numPrefixesPerBin) - 1,
+	}
+}
+
+// HashBin represents a hex prefix range. first should be less than or equal to last.
+type HashBin struct {
+	rolePrefix  string
+	hexDigitLen int
+	first       uint64
+	last        uint64
+}
+
+// RoleName returns the name of the role that signs for the HashBin.
+func (b *HashBin) RoleName() string {
+	if b.first == b.last {
+		return b.rolePrefix + hexEncode(b.first, b.hexDigitLen)
+	}
+
+	return b.rolePrefix + hexEncode(b.first, b.hexDigitLen) + "-" + hexEncode(b.last, b.hexDigitLen)
+}
+
+// HashPrefixes returns a slice of all hash prefixes in the bin.
+func (b *HashBin) HashPrefixes() []string {
+	n := int(b.last - b.first + 1)
+	ret := make([]string, int(n))
+
+	x := b.first
+	for i := 0; i < n; i++ {
+		ret[i] = hexEncode(x, b.hexDigitLen)
+		x++
+	}
+
+	return ret
+}
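// Illustrative usage sketch, separate from the vendored file above: with
// bitLen 4 the hash space is split into 2^4 = 16 bins, each covering a single
// leading hex digit of the target path digest. The "bins_" role prefix is an
// arbitrary example.
package main

import (
	"fmt"

	"github.com/DataDog/go-tuf/pkg/targets"
)

func main() {
	bins, err := targets.NewHashBins("bins_", 4)
	if err != nil {
		panic(err)
	}
	for i := uint64(0); i < bins.NumBins(); i++ {
		b := bins.GetBin(i)
		fmt.Println(b.RoleName(), b.HashPrefixes()) // bins_0 [0], bins_1 [1], ...
	}
}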
diff --git a/vendor/github.com/DataDog/go-tuf/util/util.go b/vendor/github.com/DataDog/go-tuf/util/util.go
new file mode 100644
index 0000000000..4c1c0dff40
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/util/util.go
@@ -0,0 +1,305 @@
+package util
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha256"
+	"crypto/sha512"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/DataDog/go-tuf/data"
+)
+
+type ErrWrongLength struct {
+	Expected int64
+	Actual   int64
+}
+
+func (e ErrWrongLength) Error() string {
+	return fmt.Sprintf("wrong length, expected %d got %d", e.Expected, e.Actual)
+}
+
+type ErrWrongVersion struct {
+	Expected int64
+	Actual   int64
+}
+
+func (e ErrWrongVersion) Error() string {
+	return fmt.Sprintf("wrong version, expected %d got %d", e.Expected, e.Actual)
+}
+
+type ErrWrongHash struct {
+	Type     string
+	Expected data.HexBytes
+	Actual   data.HexBytes
+}
+
+func (e ErrWrongHash) Error() string {
+	return fmt.Sprintf("wrong %s hash, expected %s got %s", e.Type, hex.EncodeToString(e.Expected), hex.EncodeToString(e.Actual))
+}
+
+type ErrNoCommonHash struct {
+	Expected data.Hashes
+	Actual   data.Hashes
+}
+
+func (e ErrNoCommonHash) Error() string {
+	types := func(a data.Hashes) []string {
+		t := make([]string, 0, len(a))
+		for typ := range a {
+			t = append(t, typ)
+		}
+		return t
+	}
+	return fmt.Sprintf("no common hash function, expected one of %s, got %s", types(e.Expected), types(e.Actual))
+}
+
+type ErrUnknownHashAlgorithm struct {
+	Name string
+}
+
+func (e ErrUnknownHashAlgorithm) Error() string {
+	return fmt.Sprintf("unknown hash algorithm: %s", e.Name)
+}
+
+type PassphraseFunc func(role string, confirm bool, change bool) ([]byte, error)
+
+func FileMetaEqual(actual data.FileMeta, expected data.FileMeta) error {
+	if actual.Length != expected.Length {
+		return ErrWrongLength{expected.Length, actual.Length}
+	}
+
+	if err := hashEqual(actual.Hashes, expected.Hashes); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func hashEqual(actual data.Hashes, expected data.Hashes) error {
+	hashChecked := false
+	for typ, hash := range expected {
+		if h, ok := actual[typ]; ok {
+			hashChecked = true
+			if !hmac.Equal(h, hash) {
+				return ErrWrongHash{typ, hash, h}
+			}
+		}
+	}
+	if !hashChecked {
+		return ErrNoCommonHash{expected, actual}
+	}
+	return nil
+}
+
+func versionEqual(actual int64, expected int64) error {
+	if actual != expected {
+		return ErrWrongVersion{expected, actual}
+	}
+	return nil
+}
+
+func SnapshotFileMetaEqual(actual data.SnapshotFileMeta, expected data.SnapshotFileMeta) error {
+	// TUF-1.0 no longer considers the length and hashes to be a required
+	// member of snapshots. However they are considering requiring hashes
+	// for delegated roles to avoid an attack described in Section 5.6 of
+	// the Mercury paper:
+	// https://github.com/theupdateframework/specification/pull/40
+	if expected.Length != 0 && actual.Length != expected.Length {
+		return ErrWrongLength{expected.Length, actual.Length}
+	}
+	// 5.6.2 - Check against snapshot role's targets hash
+	if len(expected.Hashes) != 0 {
+		if err := hashEqual(actual.Hashes, expected.Hashes); err != nil {
+			return err
+		}
+	}
+	// 5.6.4 - Check against snapshot role's snapshot version
+	if err := versionEqual(actual.Version, expected.Version); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func TargetFileMetaEqual(actual data.TargetFileMeta, expected data.TargetFileMeta) error {
+	return FileMetaEqual(actual.FileMeta, expected.FileMeta)
+}
+
+func TimestampFileMetaEqual(actual data.TimestampFileMeta, expected data.TimestampFileMeta) error {
+	// TUF no longer considers the length and hashes to be a required
+	// member of Timestamp.
+	if expected.Length != 0 && actual.Length != expected.Length {
+		return ErrWrongLength{expected.Length, actual.Length}
+	}
+	// 5.5.2 - Check against timestamp role's snapshot hash
+	if len(expected.Hashes) != 0 {
+		if err := hashEqual(actual.Hashes, expected.Hashes); err != nil {
+			return err
+		}
+	}
+	// 5.5.4 - Check against timestamp role's snapshot version
+	if err := versionEqual(actual.Version, expected.Version); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+const defaultHashAlgorithm = "sha512"
+
+func GenerateFileMeta(r io.Reader, hashAlgorithms ...string) (data.FileMeta, error) {
+	if len(hashAlgorithms) == 0 {
+		hashAlgorithms = []string{defaultHashAlgorithm}
+	}
+	hashes := make(map[string]hash.Hash, len(hashAlgorithms))
+	for _, hashAlgorithm := range hashAlgorithms {
+		var h hash.Hash
+		switch hashAlgorithm {
+		case "sha256":
+			h = sha256.New()
+		case "sha512":
+			h = sha512.New()
+		default:
+			return data.FileMeta{}, ErrUnknownHashAlgorithm{hashAlgorithm}
+		}
+		hashes[hashAlgorithm] = h
+		r = io.TeeReader(r, h)
+	}
+	n, err := io.Copy(ioutil.Discard, r)
+	if err != nil {
+		return data.FileMeta{}, err
+	}
+	m := data.FileMeta{Length: n, Hashes: make(data.Hashes, len(hashes))}
+	for hashAlgorithm, h := range hashes {
+		m.Hashes[hashAlgorithm] = h.Sum(nil)
+	}
+	return m, nil
+}
+
+type versionedMeta struct {
+	Version int64 `json:"version"`
+}
+
+func generateVersionedFileMeta(r io.Reader, hashAlgorithms ...string) (data.FileMeta, int64, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return data.FileMeta{}, 0, err
+	}
+
+	m, err := GenerateFileMeta(bytes.NewReader(b), hashAlgorithms...)
+	if err != nil {
+		return data.FileMeta{}, 0, err
+	}
+
+	s := data.Signed{}
+	if err := json.Unmarshal(b, &s); err != nil {
+		return data.FileMeta{}, 0, err
+	}
+
+	vm := versionedMeta{}
+	if err := json.Unmarshal(s.Signed, &vm); err != nil {
+		return data.FileMeta{}, 0, err
+	}
+
+	return m, vm.Version, nil
+}
+
+func GenerateSnapshotFileMeta(r io.Reader, hashAlgorithms ...string) (data.SnapshotFileMeta, error) {
+	m, v, err := generateVersionedFileMeta(r, hashAlgorithms...)
+	if err != nil {
+		return data.SnapshotFileMeta{}, err
+	}
+	return data.SnapshotFileMeta{
+		FileMeta: m,
+		Version:  v,
+	}, nil
+}
+
+func GenerateTargetFileMeta(r io.Reader, hashAlgorithms ...string) (data.TargetFileMeta, error) {
+	m, err := GenerateFileMeta(r, hashAlgorithms...)
+	if err != nil {
+		return data.TargetFileMeta{}, err
+	}
+	return data.TargetFileMeta{
+		FileMeta: m,
+	}, nil
+}
+
+func GenerateTimestampFileMeta(r io.Reader, hashAlgorithms ...string) (data.TimestampFileMeta, error) {
+	m, v, err := generateVersionedFileMeta(r, hashAlgorithms...)
+	if err != nil {
+		return data.TimestampFileMeta{}, err
+	}
+	return data.TimestampFileMeta{
+		FileMeta: m,
+		Version:  v,
+	}, nil
+}
+
+func NormalizeTarget(p string) string {
+	// FIXME(TUF-0.9) TUF-1.0 is considering banning paths that begin with
+	// a leading path separator, to avoid surprising behavior when joining
+	// target and delegated paths. python-tuf raises an exception if any
+	// path starts with '/', but since we need to be cross-compatible with
+	// TUF-0.9 we still need to support leading slashes. For now, we will
+	// just strip them out, but eventually we should also consider turning
+	// them into an error.
+	return strings.TrimPrefix(path.Join("/", p), "/")
+}
+
+func VersionedPath(p string, version int64) string {
+	return path.Join(path.Dir(p), strconv.FormatInt(version, 10)+"."+path.Base(p))
+}
+
+func HashedPaths(p string, hashes data.Hashes) []string {
+	paths := make([]string, 0, len(hashes))
+	for _, hash := range hashes {
+		hashedPath := path.Join(path.Dir(p), hash.String()+"."+path.Base(p))
+		paths = append(paths, hashedPath)
+	}
+	return paths
+}
+
+func AtomicallyWriteFile(filename string, data []byte, perm os.FileMode) error {
+	dir, name := filepath.Split(filename)
+	f, err := ioutil.TempFile(dir, name)
+	if err != nil {
+		return err
+	}
+
+	_, err = f.Write(data)
+	if err != nil {
+		f.Close()
+		os.Remove(f.Name())
+		return err
+	}
+
+	if err = f.Chmod(perm); err != nil {
+		f.Close()
+		os.Remove(f.Name())
+		return err
+	}
+
+	if err := f.Close(); err != nil {
+		os.Remove(f.Name())
+		return err
+	}
+
+	if err := os.Rename(f.Name(), filename); err != nil {
+		os.Remove(f.Name())
+		return err
+	}
+
+	return nil
+}
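// Illustrative usage sketch, separate from the vendored file above: it shows
// FileMeta generation and comparison; only one common hash algorithm is needed
// for a match. The content bytes and paths are arbitrary examples.
package main

import (
	"bytes"
	"fmt"

	"github.com/DataDog/go-tuf/util"
)

func main() {
	content := []byte("hello world")

	expected, err := util.GenerateTargetFileMeta(bytes.NewReader(content), "sha256", "sha512")
	if err != nil {
		panic(err)
	}

	// The downloaded copy is hashed with a subset of the algorithms; sha256 is
	// enough for hashEqual to find a common hash and compare it.
	actual, _ := util.GenerateTargetFileMeta(bytes.NewReader(content), "sha256")
	fmt.Println(util.TargetFileMetaEqual(actual, expected)) // <nil>

	// Leading slashes are stripped so target names join predictably.
	fmt.Println(util.NormalizeTarget("/a/b.txt")) // a/b.txt
}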
diff --git a/vendor/github.com/DataDog/go-tuf/verify/db.go b/vendor/github.com/DataDog/go-tuf/verify/db.go
new file mode 100644
index 0000000000..02b20063a2
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/verify/db.go
@@ -0,0 +1,97 @@
+package verify
+
+import (
+	"github.com/DataDog/go-tuf/data"
+	"github.com/DataDog/go-tuf/internal/roles"
+	"github.com/DataDog/go-tuf/pkg/keys"
+)
+
+type Role struct {
+	KeyIDs    map[string]struct{}
+	Threshold int
+}
+
+func (r *Role) ValidKey(id string) bool {
+	_, ok := r.KeyIDs[id]
+	return ok
+}
+
+type DB struct {
+	roles     map[string]*Role
+	verifiers map[string]keys.Verifier
+}
+
+func NewDB() *DB {
+	return &DB{
+		roles:     make(map[string]*Role),
+		verifiers: make(map[string]keys.Verifier),
+	}
+}
+
+// NewDBFromDelegations returns a DB that verifies delegations
+// of a given Targets.
+func NewDBFromDelegations(d *data.Delegations) (*DB, error) {
+	db := &DB{
+		roles:     make(map[string]*Role, len(d.Roles)),
+		verifiers: make(map[string]keys.Verifier, len(d.Keys)),
+	}
+	for _, r := range d.Roles {
+		if _, ok := roles.TopLevelRoles[r.Name]; ok {
+			return nil, ErrInvalidDelegatedRole
+		}
+		role := &data.Role{Threshold: r.Threshold, KeyIDs: r.KeyIDs}
+		if err := db.AddRole(r.Name, role); err != nil {
+			return nil, err
+		}
+	}
+	for id, k := range d.Keys {
+		if err := db.AddKey(id, k); err != nil {
+			return nil, err
+		}
+	}
+	return db, nil
+}
+
+func (db *DB) AddKey(id string, k *data.PublicKey) error {
+	if !k.ContainsID(id) {
+		return ErrWrongID{}
+	}
+	verifier, err := keys.GetVerifier(k)
+	if err != nil {
+		return ErrInvalidKey
+	}
+	db.verifiers[id] = verifier
+	return nil
+}
+
+func (db *DB) AddRole(name string, r *data.Role) error {
+	if r.Threshold < 1 {
+		return ErrInvalidThreshold
+	}
+
+	role := &Role{
+		KeyIDs:    make(map[string]struct{}),
+		Threshold: r.Threshold,
+	}
+	for _, id := range r.KeyIDs {
+		if len(id) != data.KeyIDLength {
+			return ErrInvalidKeyID
+		}
+		role.KeyIDs[id] = struct{}{}
+	}
+
+	db.roles[name] = role
+	return nil
+}
+
+func (db *DB) GetVerifier(id string) (keys.Verifier, error) {
+	k, ok := db.verifiers[id]
+	if !ok {
+		return nil, ErrMissingKey
+	}
+	return k, nil
+}
+
+func (db *DB) GetRole(name string) *Role {
+	return db.roles[name]
+}
diff --git a/vendor/github.com/DataDog/go-tuf/verify/errors.go b/vendor/github.com/DataDog/go-tuf/verify/errors.go
new file mode 100644
index 0000000000..f94321e29d
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/verify/errors.go
@@ -0,0 +1,71 @@
+package verify
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+var (
+	ErrMissingKey           = errors.New("tuf: missing key")
+	ErrNoSignatures         = errors.New("tuf: data has no signatures")
+	ErrInvalid              = errors.New("tuf: signature verification failed")
+	ErrWrongMethod          = errors.New("tuf: invalid signature type")
+	ErrWrongMetaType        = errors.New("tuf: meta file has wrong type")
+	ErrExists               = errors.New("tuf: key already in db")
+	ErrInvalidKey           = errors.New("tuf: invalid key")
+	ErrInvalidRole          = errors.New("tuf: invalid role")
+	ErrInvalidDelegatedRole = errors.New("tuf: invalid delegated role")
+	ErrInvalidKeyID         = errors.New("tuf: invalid key id")
+	ErrInvalidThreshold     = errors.New("tuf: invalid role threshold")
+	ErrMissingTargetFile    = errors.New("tuf: missing previously listed targets metadata file")
+)
+
+type ErrWrongID struct{}
+
+func (ErrWrongID) Error() string {
+	return "tuf: key id mismatch"
+}
+
+type ErrUnknownRole struct {
+	Role string
+}
+
+func (e ErrUnknownRole) Error() string {
+	return fmt.Sprintf("tuf: unknown role %q", e.Role)
+}
+
+type ErrExpired struct {
+	Expired time.Time
+}
+
+func (e ErrExpired) Error() string {
+	return fmt.Sprintf("expired at %s", e.Expired)
+}
+
+type ErrLowVersion struct {
+	Actual  int64
+	Current int64
+}
+
+func (e ErrLowVersion) Error() string {
+	return fmt.Sprintf("version %d is lower than current version %d", e.Actual, e.Current)
+}
+
+type ErrWrongVersion struct {
+	Given    int64
+	Expected int64
+}
+
+func (e ErrWrongVersion) Error() string {
+	return fmt.Sprintf("version %d does not match the expected version %d", e.Given, e.Expected)
+}
+
+type ErrRoleThreshold struct {
+	Expected int
+	Actual   int
+}
+
+func (e ErrRoleThreshold) Error() string {
+	return "tuf: valid signatures did not meet threshold"
+}
diff --git a/vendor/github.com/DataDog/go-tuf/verify/verify.go b/vendor/github.com/DataDog/go-tuf/verify/verify.go
new file mode 100644
index 0000000000..e892c537a8
--- /dev/null
+++ b/vendor/github.com/DataDog/go-tuf/verify/verify.go
@@ -0,0 +1,164 @@
+package verify
+
+import (
+	"encoding/json"
+	"strings"
+	"time"
+
+	"github.com/DataDog/go-tuf/data"
+	"github.com/DataDog/go-tuf/internal/roles"
+	"github.com/secure-systems-lab/go-securesystemslib/cjson"
+)
+
+type signedMeta struct {
+	Type    string    `json:"_type"`
+	Expires time.Time `json:"expires"`
+	Version int64     `json:"version"`
+}
+
+func (db *DB) VerifyIgnoreExpiredCheck(s *data.Signed, role string, minVersion int64) error {
+	if err := db.VerifySignatures(s, role); err != nil {
+		return err
+	}
+
+	sm := &signedMeta{}
+	if err := json.Unmarshal(s.Signed, sm); err != nil {
+		return err
+	}
+
+	if roles.IsTopLevelRole(role) {
+		// Top-level roles can only sign metadata of the same type (e.g. snapshot
+		// metadata must be signed by the snapshot role).
+		if !strings.EqualFold(sm.Type, role) {
+			return ErrWrongMetaType
+		}
+	} else {
+		// Delegated (non-top-level) roles may only sign targets metadata.
+		if strings.ToLower(sm.Type) != "targets" {
+			return ErrWrongMetaType
+		}
+	}
+
+	if sm.Version < minVersion {
+		return ErrLowVersion{sm.Version, minVersion}
+	}
+
+	return nil
+}
+
+func (db *DB) Verify(s *data.Signed, role string, minVersion int64) error {
+	// Verify signatures and versions
+	err := db.VerifyIgnoreExpiredCheck(s, role, minVersion)
+
+	if err != nil {
+		return err
+	}
+
+	sm := &signedMeta{}
+	if err := json.Unmarshal(s.Signed, sm); err != nil {
+		return err
+	}
+	// Verify expiration
+	if IsExpired(sm.Expires) {
+		return ErrExpired{sm.Expires}
+	}
+
+	return nil
+}
+
+var IsExpired = func(t time.Time) bool {
+	return time.Until(t) <= 0
+}
+
+func (db *DB) VerifySignatures(s *data.Signed, role string) error {
+	if len(s.Signatures) == 0 {
+		return ErrNoSignatures
+	}
+
+	roleData := db.GetRole(role)
+	if roleData == nil {
+		return ErrUnknownRole{role}
+	}
+
+	var decoded map[string]interface{}
+	if err := json.Unmarshal(s.Signed, &decoded); err != nil {
+		return err
+	}
+	msg, err := cjson.EncodeCanonical(decoded)
+	if err != nil {
+		return err
+	}
+
+	// Verify that a threshold of keys signed the data. Since keys can have
+	// multiple key ids, we need to protect against multiple attached
+	// signatures that just differ on the key id.
+	seen := make(map[string]struct{})
+	valid := 0
+	for _, sig := range s.Signatures {
+		if !roleData.ValidKey(sig.KeyID) {
+			continue
+		}
+		verifier, err := db.GetVerifier(sig.KeyID)
+		if err != nil {
+			continue
+		}
+
+		if err := verifier.Verify(msg, sig.Signature); err != nil {
+			return ErrInvalid
+		}
+
+		// Only consider this key valid if we haven't seen any of its
+		// key ids before.
+		if _, ok := seen[sig.KeyID]; !ok {
+			for _, id := range verifier.MarshalPublicKey().IDs() {
+				seen[id] = struct{}{}
+			}
+
+			valid++
+		}
+	}
+	if valid < roleData.Threshold {
+		return ErrRoleThreshold{roleData.Threshold, valid}
+	}
+
+	return nil
+}
+
+func (db *DB) Unmarshal(b []byte, v interface{}, role string, minVersion int64) error {
+	s := &data.Signed{}
+	if err := json.Unmarshal(b, s); err != nil {
+		return err
+	}
+	if err := db.Verify(s, role, minVersion); err != nil {
+		return err
+	}
+	return json.Unmarshal(s.Signed, v)
+}
+
+// UnmarshalIgnoreExpired is exactly like Unmarshal, except it ignores an expired-timestamp error.
+func (db *DB) UnmarshalIgnoreExpired(b []byte, v interface{}, role string, minVersion int64) error {
+	s := &data.Signed{}
+	if err := json.Unmarshal(b, s); err != nil {
+		return err
+	}
+	// Note: if verification fails, we won't attempt to unmarshal
+	// unless the verification error is ErrExpired.
+	verifyErr := db.Verify(s, role, minVersion)
+	if verifyErr != nil {
+		if _, ok := verifyErr.(ErrExpired); !ok {
+			return verifyErr
+		}
+	}
+	return json.Unmarshal(s.Signed, v)
+}
+
+func (db *DB) UnmarshalTrusted(b []byte, v interface{}, role string) error {
+	s := &data.Signed{}
+	if err := json.Unmarshal(b, s); err != nil {
+		return err
+	}
+	if err := db.VerifySignatures(s, role); err != nil {
+		return err
+	}
+	return json.Unmarshal(s.Signed, v)
+}
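// Illustrative usage sketch, separate from the vendored files above: it signs
// a minimal targets object over its canonical JSON form and verifies it through
// DB.Unmarshal. Error handling is elided for brevity.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/DataDog/go-tuf/data"
	"github.com/DataDog/go-tuf/pkg/keys"
	"github.com/DataDog/go-tuf/verify"
	"github.com/secure-systems-lab/go-securesystemslib/cjson"
)

func main() {
	signer, _ := keys.GenerateEd25519Key()
	pub := signer.PublicData()
	id := pub.IDs()[0]

	// Register the key and a single-key "targets" role in the key database.
	db := verify.NewDB()
	_ = db.AddKey(id, pub)
	_ = db.AddRole("targets", &data.Role{KeyIDs: []string{id}, Threshold: 1})

	// Sign the canonical JSON encoding of a fresh targets object.
	t := data.NewTargets()
	canonical, _ := cjson.EncodeCanonical(t)
	sig, _ := signer.SignMessage(canonical)
	signed := &data.Signed{Signed: canonical, Signatures: []data.Signature{{KeyID: id, Signature: sig}}}

	var out data.Targets
	raw, _ := json.Marshal(signed)
	fmt.Println(db.Unmarshal(raw, &out, "targets", 0)) // <nil>
}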
diff --git a/vendor/github.com/DataDog/sketches-go/LICENSE b/vendor/github.com/DataDog/sketches-go/LICENSE
new file mode 100644
index 0000000000..7d3693beef
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2021 DataDog, Inc. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/DataDog/sketches-go/LICENSE-3rdparty.csv b/vendor/github.com/DataDog/sketches-go/LICENSE-3rdparty.csv
new file mode 100644
index 0000000000..db2f8cca0a
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/LICENSE-3rdparty.csv
@@ -0,0 +1,3 @@
+Component,Origin,License
+import (test),github.com/google/gofuzz,Apache-2.0
+import (test),github.com/stretchr/testify,MIT
diff --git a/vendor/github.com/DataDog/sketches-go/NOTICE b/vendor/github.com/DataDog/sketches-go/NOTICE
new file mode 100644
index 0000000000..7ae253a014
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/NOTICE
@@ -0,0 +1,4 @@
+Datadog sketches-go 
+Copyright 2021 Datadog, Inc.
+
+This product includes software developed at Datadog (https://www.datadoghq.com/).
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go b/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go
new file mode 100644
index 0000000000..3f0f943c27
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go
@@ -0,0 +1,716 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package ddsketch
+
+import (
+	"errors"
+	"io"
+	"math"
+
+	enc "github.com/DataDog/sketches-go/ddsketch/encoding"
+	"github.com/DataDog/sketches-go/ddsketch/mapping"
+	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
+	"github.com/DataDog/sketches-go/ddsketch/stat"
+	"github.com/DataDog/sketches-go/ddsketch/store"
+)
+
+var (
+	errEmptySketch error = errors.New("no such element exists")
+	errUnknownFlag error = errors.New("unknown encoding flag")
+)
+
+// Unexported to prevent usage and avoid the cost of dynamic dispatch
+type quantileSketch interface {
+	RelativeAccuracy() float64
+	IsEmpty() bool
+	GetCount() float64
+	GetSum() float64
+	GetMinValue() (float64, error)
+	GetMaxValue() (float64, error)
+	GetValueAtQuantile(quantile float64) (float64, error)
+	GetValuesAtQuantiles(quantiles []float64) ([]float64, error)
+	ForEach(f func(value, count float64) (stop bool))
+	Add(value float64) error
+	AddWithCount(value, count float64) error
+	// MergeWith
+	// ChangeMapping
+	Reweight(factor float64) error
+	Clear()
+	// Copy
+	Encode(b *[]byte, omitIndexMapping bool)
+	DecodeAndMergeWith(b []byte) error
+}
+
+var _ quantileSketch = (*DDSketch)(nil)
+var _ quantileSketch = (*DDSketchWithExactSummaryStatistics)(nil)
+
+type DDSketch struct {
+	mapping.IndexMapping
+	positiveValueStore        store.Store
+	negativeValueStore        store.Store
+	zeroCount                 float64
+	minIndexableAbsoluteValue float64
+	maxIndexableValue         float64
+}
+
+func NewDDSketchFromStoreProvider(indexMapping mapping.IndexMapping, storeProvider store.Provider) *DDSketch {
+	return NewDDSketch(indexMapping, storeProvider(), storeProvider())
+}
+
+func NewDDSketch(indexMapping mapping.IndexMapping, positiveValueStore store.Store, negativeValueStore store.Store) *DDSketch {
+	return &DDSketch{
+		IndexMapping:              indexMapping,
+		positiveValueStore:        positiveValueStore,
+		negativeValueStore:        negativeValueStore,
+		minIndexableAbsoluteValue: indexMapping.MinIndexableValue(),
+		maxIndexableValue:         indexMapping.MaxIndexableValue(),
+	}
+}
+
+func NewDefaultDDSketch(relativeAccuracy float64) (*DDSketch, error) {
+	m, err := mapping.NewDefaultMapping(relativeAccuracy)
+	if err != nil {
+		return nil, err
+	}
+	return NewDDSketchFromStoreProvider(m, store.DefaultProvider), nil
+}
+
+// Constructs an instance of DDSketch that offers constant-time insertion and whose size grows indefinitely
+// to accommodate the range of input values.
+func LogUnboundedDenseDDSketch(relativeAccuracy float64) (*DDSketch, error) {
+	indexMapping, err := mapping.NewLogarithmicMapping(relativeAccuracy)
+	if err != nil {
+		return nil, err
+	}
+	return NewDDSketch(indexMapping, store.NewDenseStore(), store.NewDenseStore()), nil
+}
+
+// Constructs an instance of DDSketch that offers constant-time insertion and whose size grows until the
+// maximum number of bins is reached, at which point bins with lowest indices are collapsed, which causes the
+// relative accuracy guarantee to be lost on lowest quantiles if values are all positive, or the mid-range
+// quantiles for values closest to zero if values include negative numbers.
+func LogCollapsingLowestDenseDDSketch(relativeAccuracy float64, maxNumBins int) (*DDSketch, error) {
+	indexMapping, err := mapping.NewLogarithmicMapping(relativeAccuracy)
+	if err != nil {
+		return nil, err
+	}
+	return NewDDSketch(indexMapping, store.NewCollapsingLowestDenseStore(maxNumBins), store.NewCollapsingLowestDenseStore(maxNumBins)), nil
+}
+
+// Constructs an instance of DDSketch that offers constant-time insertion and whose size grows until the
+// maximum number of bins is reached, at which point bins with highest indices are collapsed, which causes the
+// relative accuracy guarantee to be lost on highest quantiles if values are all positive, or the lowest and
+// highest quantiles if values include negative numbers.
+func LogCollapsingHighestDenseDDSketch(relativeAccuracy float64, maxNumBins int) (*DDSketch, error) {
+	indexMapping, err := mapping.NewLogarithmicMapping(relativeAccuracy)
+	if err != nil {
+		return nil, err
+	}
+	return NewDDSketch(indexMapping, store.NewCollapsingHighestDenseStore(maxNumBins), store.NewCollapsingHighestDenseStore(maxNumBins)), nil
+}
+
+// Adds a value to the sketch.
+func (s *DDSketch) Add(value float64) error {
+	return s.AddWithCount(value, float64(1))
+}
+
+// Adds a value to the sketch with a float64 count.
+func (s *DDSketch) AddWithCount(value, count float64) error {
+	if value < -s.maxIndexableValue || value > s.maxIndexableValue {
+		return errors.New("The input value is outside the range that is tracked by the sketch.")
+	}
+	if count < 0 {
+		return errors.New("The count cannot be negative.")
+	}
+
+	if value > s.minIndexableAbsoluteValue {
+		s.positiveValueStore.AddWithCount(s.Index(value), count)
+	} else if value < -s.minIndexableAbsoluteValue {
+		s.negativeValueStore.AddWithCount(s.Index(-value), count)
+	} else {
+		s.zeroCount += count
+	}
+	return nil
+}
+
+// Return a (deep) copy of this sketch.
+func (s *DDSketch) Copy() *DDSketch {
+	return &DDSketch{
+		IndexMapping:              s.IndexMapping,
+		positiveValueStore:        s.positiveValueStore.Copy(),
+		negativeValueStore:        s.negativeValueStore.Copy(),
+		zeroCount:                 s.zeroCount,
+		minIndexableAbsoluteValue: s.minIndexableAbsoluteValue,
+		maxIndexableValue:         s.maxIndexableValue,
+	}
+}
+
+// Clear empties the sketch while allowing reusing already allocated memory.
+func (s *DDSketch) Clear() {
+	s.positiveValueStore.Clear()
+	s.negativeValueStore.Clear()
+	s.zeroCount = 0
+}
+
+// Return the value at the specified quantile. Return a non-nil error if the quantile is invalid
+// or if the sketch is empty.
+func (s *DDSketch) GetValueAtQuantile(quantile float64) (float64, error) {
+	if quantile < 0 || quantile > 1 {
+		return math.NaN(), errors.New("The quantile must be between 0 and 1.")
+	}
+
+	count := s.GetCount()
+	if count == 0 {
+		return math.NaN(), errEmptySketch
+	}
+
+	rank := quantile * (count - 1)
+	negativeValueCount := s.negativeValueStore.TotalCount()
+	if rank < negativeValueCount {
+		return -s.Value(s.negativeValueStore.KeyAtRank(negativeValueCount - 1 - rank)), nil
+	} else if rank < s.zeroCount+negativeValueCount {
+		return 0, nil
+	} else {
+		return s.Value(s.positiveValueStore.KeyAtRank(rank - s.zeroCount - negativeValueCount)), nil
+	}
+}
+
+// Return the values at the respective specified quantiles. Return a non-nil error if any of the quantiles
+// is invalid or if the sketch is empty.
+func (s *DDSketch) GetValuesAtQuantiles(quantiles []float64) ([]float64, error) {
+	values := make([]float64, len(quantiles))
+	for i, q := range quantiles {
+		val, err := s.GetValueAtQuantile(q)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = val
+	}
+	return values, nil
+}
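// Illustrative usage sketch, separate from the vendored file above: a sketch
// built with 1% relative accuracy ingests the values 1..1000 and answers
// quantile queries, each returned value being approximately within 1% relative
// error of the exact quantile.
package main

import (
	"fmt"

	"github.com/DataDog/sketches-go/ddsketch"
)

func main() {
	sketch, err := ddsketch.NewDefaultDDSketch(0.01)
	if err != nil {
		panic(err)
	}
	for i := 1; i <= 1000; i++ {
		_ = sketch.Add(float64(i))
	}

	qs, err := sketch.GetValuesAtQuantiles([]float64{0.5, 0.95, 0.99})
	if err != nil {
		panic(err)
	}
	fmt.Println(sketch.GetCount(), qs) // 1000 [~500 ~950 ~990]
}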
+
+// Return the total number of values that have been added to this sketch.
+func (s *DDSketch) GetCount() float64 {
+	return s.zeroCount + s.positiveValueStore.TotalCount() + s.negativeValueStore.TotalCount()
+}
+
+// Return true iff no value has been added to this sketch.
+func (s *DDSketch) IsEmpty() bool {
+	return s.zeroCount == 0 && s.positiveValueStore.IsEmpty() && s.negativeValueStore.IsEmpty()
+}
+
+// Return the maximum value that has been added to this sketch. Return a non-nil error if the sketch
+// is empty.
+func (s *DDSketch) GetMaxValue() (float64, error) {
+	if !s.positiveValueStore.IsEmpty() {
+		maxIndex, _ := s.positiveValueStore.MaxIndex()
+		return s.Value(maxIndex), nil
+	} else if s.zeroCount > 0 {
+		return 0, nil
+	} else {
+		minIndex, err := s.negativeValueStore.MinIndex()
+		if err != nil {
+			return math.NaN(), err
+		}
+		return -s.Value(minIndex), nil
+	}
+}
+
+// Return the minimum value that has been added to this sketch. Returns a non-nil error if the sketch
+// is empty.
+func (s *DDSketch) GetMinValue() (float64, error) {
+	if !s.negativeValueStore.IsEmpty() {
+		maxIndex, _ := s.negativeValueStore.MaxIndex()
+		return -s.Value(maxIndex), nil
+	} else if s.zeroCount > 0 {
+		return 0, nil
+	} else {
+		minIndex, err := s.positiveValueStore.MinIndex()
+		if err != nil {
+			return math.NaN(), err
+		}
+		return s.Value(minIndex), nil
+	}
+}
+
+// GetSum returns an approximation of the sum of the values that have been added to the sketch. If the
+// values that have been added to the sketch all have the same sign, the approximation error has
+// the relative accuracy guarantees of the mapping used for this sketch.
+func (s *DDSketch) GetSum() (sum float64) {
+	s.ForEach(func(value float64, count float64) (stop bool) {
+		sum += value * count
+		return false
+	})
+	return sum
+}
+
+// ForEach applies f to the bins of the sketch until f returns true.
+// There is no guarantee on the bin iteration order.
+func (s *DDSketch) ForEach(f func(value, count float64) (stop bool)) {
+	if s.zeroCount != 0 && f(0, s.zeroCount) {
+		return
+	}
+	stopped := false
+	s.positiveValueStore.ForEach(func(index int, count float64) bool {
+		stopped = f(s.IndexMapping.Value(index), count)
+		return stopped
+	})
+	if stopped {
+		return
+	}
+	s.negativeValueStore.ForEach(func(index int, count float64) bool {
+		return f(-s.IndexMapping.Value(index), count)
+	})
+}
+
+// Merges the other sketch into this one. After this operation, this sketch encodes the values that
+// were added to both this and the other sketches.
+func (s *DDSketch) MergeWith(other *DDSketch) error {
+	if !s.IndexMapping.Equals(other.IndexMapping) {
+		return errors.New("Cannot merge sketches with different index mappings.")
+	}
+	s.positiveValueStore.MergeWith(other.positiveValueStore)
+	s.negativeValueStore.MergeWith(other.negativeValueStore)
+	s.zeroCount += other.zeroCount
+	return nil
+}
+
+// Generates a protobuf representation of this DDSketch.
+func (s *DDSketch) ToProto() *sketchpb.DDSketch {
+	return &sketchpb.DDSketch{
+		Mapping:        s.IndexMapping.ToProto(),
+		PositiveValues: s.positiveValueStore.ToProto(),
+		NegativeValues: s.negativeValueStore.ToProto(),
+		ZeroCount:      s.zeroCount,
+	}
+}
+
+// FromProto builds a new instance of DDSketch based on the provided protobuf representation, using a Dense store.
+func FromProto(pb *sketchpb.DDSketch) (*DDSketch, error) {
+	return FromProtoWithStoreProvider(pb, store.DenseStoreConstructor)
+}
+
+func FromProtoWithStoreProvider(pb *sketchpb.DDSketch, storeProvider store.Provider) (*DDSketch, error) {
+	positiveValueStore := storeProvider()
+	store.MergeWithProto(positiveValueStore, pb.PositiveValues)
+	negativeValueStore := storeProvider()
+	store.MergeWithProto(negativeValueStore, pb.NegativeValues)
+	m, err := mapping.FromProto(pb.Mapping)
+	if err != nil {
+		return nil, err
+	}
+	return &DDSketch{
+		IndexMapping:              m,
+		positiveValueStore:        positiveValueStore,
+		negativeValueStore:        negativeValueStore,
+		zeroCount:                 pb.ZeroCount,
+		minIndexableAbsoluteValue: m.MinIndexableValue(),
+		maxIndexableValue:         m.MaxIndexableValue(),
+	}, nil
+}
+
+// Encode serializes the sketch and appends the serialized content to the provided []byte.
+// If the capacity of the provided []byte is large enough, Encode does not allocate memory.
+// When the index mapping is known at the time of deserialization, omitIndexMapping can be set to true to avoid encoding it and to make the serialized content smaller.
+// The encoding format is described in the encoding/flag module.
+func (s *DDSketch) Encode(b *[]byte, omitIndexMapping bool) {
+	if s.zeroCount != 0 {
+		enc.EncodeFlag(b, enc.FlagZeroCountVarFloat)
+		enc.EncodeVarfloat64(b, s.zeroCount)
+	}
+
+	if !omitIndexMapping {
+		s.IndexMapping.Encode(b)
+	}
+
+	s.positiveValueStore.Encode(b, enc.FlagTypePositiveStore)
+	s.negativeValueStore.Encode(b, enc.FlagTypeNegativeStore)
+}
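+
+// As a sketch of the resulting layout (the store blocks' internal format
+// depends on the store implementation), the encoded stream is a sequence of
+// flag-prefixed blocks:
+//
+//	[0x04][varfloat64 zeroCount]                        (only if zeroCount != 0)
+//	[mapping flag][float64 gamma][float64 indexOffset]  (only if !omitIndexMapping)
+//	[positive store block(s)]
+//	[negative store block(s)]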
+
+// DecodeDDSketch deserializes a sketch.
+// Stores are built using storeProvider. The store type need not match the
+// store that the serialized sketch initially used. However, using the same
+// store type may make decoding faster. In the absence of high performance
+// requirements, store.BufferedPaginatedStoreConstructor is a sound enough
+// choice of store provider.
+// To avoid memory allocations, it is possible to use a store provider that
+// reuses stores, by calling Clear() on previously used stores before providing
+// the store.
+// If the serialized data does not contain the index mapping, you need to
+// specify the index mapping that was used in the sketch that was encoded.
+// Otherwise, you can use nil and the index mapping will be decoded from the
+// serialized data.
+// An encoded DDSketchWithExactSummaryStatistics can also be decoded with this
+// function, but the exact summary statistics will be lost.
+func DecodeDDSketch(b []byte, storeProvider store.Provider, indexMapping mapping.IndexMapping) (*DDSketch, error) {
+	s := &DDSketch{
+		IndexMapping:       indexMapping,
+		positiveValueStore: storeProvider(),
+		negativeValueStore: storeProvider(),
+		zeroCount:          float64(0),
+	}
+	err := s.DecodeAndMergeWith(b)
+	return s, err
+}
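+
+// Minimal usage sketch, assuming `encoded` holds bytes produced by Encode with
+// the index mapping included:
+//
+//	sketch, err := DecodeDDSketch(encoded, store.BufferedPaginatedStoreConstructor, nil)
+//	if err != nil { /* handle decoding error */ }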
+
+// DecodeAndMergeWith deserializes a sketch and merges its content in the
+// receiver sketch.
+// If the serialized content contains an index mapping that differs from the one
+// of the receiver, DecodeAndMergeWith returns an error.
+func (s *DDSketch) DecodeAndMergeWith(bb []byte) error {
+	return s.decodeAndMergeWith(bb, func(b *[]byte, flag enc.Flag) error {
+		switch flag {
+		case enc.FlagCount, enc.FlagSum, enc.FlagMin, enc.FlagMax:
+			// Exact summary stats are ignored.
+			if len(*b) < 8 {
+				return io.EOF
+			}
+			*b = (*b)[8:]
+			return nil
+		default:
+			return errUnknownFlag
+		}
+	})
+}
+
+func (s *DDSketch) decodeAndMergeWith(bb []byte, fallbackDecode func(b *[]byte, flag enc.Flag) error) error {
+	b := &bb
+	for len(*b) > 0 {
+		flag, err := enc.DecodeFlag(b)
+		if err != nil {
+			return err
+		}
+		switch flag.Type() {
+		case enc.FlagTypePositiveStore:
+			s.positiveValueStore.DecodeAndMergeWith(b, flag.SubFlag())
+		case enc.FlagTypeNegativeStore:
+			s.negativeValueStore.DecodeAndMergeWith(b, flag.SubFlag())
+		case enc.FlagTypeIndexMapping:
+			decodedIndexMapping, err := mapping.Decode(b, flag)
+			if err != nil {
+				return err
+			}
+			if s.IndexMapping != nil && !s.IndexMapping.Equals(decodedIndexMapping) {
+				return errors.New("index mapping mismatch")
+			}
+			s.IndexMapping = decodedIndexMapping
+		default:
+			switch flag {
+
+			case enc.FlagZeroCountVarFloat:
+				decodedZeroCount, err := enc.DecodeVarfloat64(b)
+				if err != nil {
+					return err
+				}
+				s.zeroCount += decodedZeroCount
+
+			default:
+				err := fallbackDecode(b, flag)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	if s.IndexMapping == nil {
+		return errors.New("missing index mapping")
+	}
+	s.minIndexableAbsoluteValue = s.IndexMapping.MinIndexableValue()
+	s.maxIndexableValue = s.IndexMapping.MaxIndexableValue()
+	return nil
+}
+
+// ChangeMapping converts the sketch to a new index mapping.
+// It does not modify s; it returns a newly created sketch.
+// positiveStore and negativeStore must be distinct stores, and they must be empty when the function is called.
+// The conversion is not the one that minimizes the loss in relative accuracy,
+// but it avoids artifacts like empty bins that make the histograms look bad.
+// scaleFactor allows scaling all values in or out (e.g. to change units).
+func (s *DDSketch) ChangeMapping(newMapping mapping.IndexMapping, positiveStore store.Store, negativeStore store.Store, scaleFactor float64) *DDSketch {
+	if scaleFactor == 1 && s.IndexMapping.Equals(newMapping) {
+		return s.Copy()
+	}
+	changeStoreMapping(s.IndexMapping, newMapping, s.positiveValueStore, positiveStore, scaleFactor)
+	changeStoreMapping(s.IndexMapping, newMapping, s.negativeValueStore, negativeStore, scaleFactor)
+	newSketch := NewDDSketch(newMapping, positiveStore, negativeStore)
+	newSketch.zeroCount = s.zeroCount
+	return newSketch
+}
+
+func changeStoreMapping(oldMapping, newMapping mapping.IndexMapping, oldStore, newStore store.Store, scaleFactor float64) {
+	oldStore.ForEach(func(index int, count float64) (stop bool) {
+		inLowerBound := oldMapping.LowerBound(index) * scaleFactor
+		inHigherBound := oldMapping.LowerBound(index+1) * scaleFactor
+		inSize := inHigherBound - inLowerBound
+		for outIndex := newMapping.Index(inLowerBound); newMapping.LowerBound(outIndex) < inHigherBound; outIndex++ {
+			outLowerBound := newMapping.LowerBound(outIndex)
+			outHigherBound := newMapping.LowerBound(outIndex + 1)
+			lowerIntersectionBound := math.Max(outLowerBound, inLowerBound)
+			higherIntersectionBound := math.Min(outHigherBound, inHigherBound)
+			intersectionSize := higherIntersectionBound - lowerIntersectionBound
+			proportion := intersectionSize / inSize
+			newStore.AddWithCount(outIndex, proportion*count)
+		}
+		return false
+	})
+}
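+
+// Illustrative example with hypothetical bin bounds: if an old bin covering
+// [10, 12) holds a count of 4 and overlaps two new bins covering [9.5, 11) and
+// [11, 13), the overlap with each new bin is half of the old bin, so each new
+// bin receives a count of 2.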
+
+// Reweight multiplies the weight (count) of every value in the sketch by w,
+// which keeps the same global distribution. w has to be strictly greater than 0.
+func (s *DDSketch) Reweight(w float64) error {
+	if w <= 0 {
+		return errors.New("can't reweight by a negative factor")
+	}
+	if w == 1 {
+		return nil
+	}
+	s.zeroCount *= w
+	if err := s.positiveValueStore.Reweight(w); err != nil {
+		return err
+	}
+	if err := s.negativeValueStore.Reweight(w); err != nil {
+		return err
+	}
+	return nil
+}
+
+// DDSketchWithExactSummaryStatistics returns exact count, sum, min and max, as
+// opposed to DDSketch, which may return approximate values for those
+// statistics. Because of the need to track them exactly, adding and merging
+// operations are slightly more expensive than those of DDSketch.
+type DDSketchWithExactSummaryStatistics struct {
+	sketch            *DDSketch
+	summaryStatistics *stat.SummaryStatistics
+}
+
+func NewDefaultDDSketchWithExactSummaryStatistics(relativeAccuracy float64) (*DDSketchWithExactSummaryStatistics, error) {
+	sketch, err := NewDefaultDDSketch(relativeAccuracy)
+	if err != nil {
+		return nil, err
+	}
+	return &DDSketchWithExactSummaryStatistics{
+		sketch:            sketch,
+		summaryStatistics: stat.NewSummaryStatistics(),
+	}, nil
+}
+
+func NewDDSketchWithExactSummaryStatistics(mapping mapping.IndexMapping, storeProvider store.Provider) *DDSketchWithExactSummaryStatistics {
+	return &DDSketchWithExactSummaryStatistics{
+		sketch:            NewDDSketchFromStoreProvider(mapping, storeProvider),
+		summaryStatistics: stat.NewSummaryStatistics(),
+	}
+}
+
+func (s *DDSketchWithExactSummaryStatistics) RelativeAccuracy() float64 {
+	return s.sketch.RelativeAccuracy()
+}
+
+func (s *DDSketchWithExactSummaryStatistics) IsEmpty() bool {
+	return s.summaryStatistics.Count() == 0
+}
+
+func (s *DDSketchWithExactSummaryStatistics) GetCount() float64 {
+	return s.summaryStatistics.Count()
+}
+
+func (s *DDSketchWithExactSummaryStatistics) GetSum() float64 {
+	return s.summaryStatistics.Sum()
+}
+
+func (s *DDSketchWithExactSummaryStatistics) GetMinValue() (float64, error) {
+	if s.sketch.IsEmpty() {
+		return math.NaN(), errEmptySketch
+	}
+	return s.summaryStatistics.Min(), nil
+}
+
+func (s *DDSketchWithExactSummaryStatistics) GetMaxValue() (float64, error) {
+	if s.sketch.IsEmpty() {
+		return math.NaN(), errEmptySketch
+	}
+	return s.summaryStatistics.Max(), nil
+}
+
+func (s *DDSketchWithExactSummaryStatistics) GetValueAtQuantile(quantile float64) (float64, error) {
+	value, err := s.sketch.GetValueAtQuantile(quantile)
+	min := s.summaryStatistics.Min()
+	if value < min {
+		return min, err
+	}
+	max := s.summaryStatistics.Max()
+	if value > max {
+		return max, err
+	}
+	return value, err
+}
+
+func (s *DDSketchWithExactSummaryStatistics) GetValuesAtQuantiles(quantiles []float64) ([]float64, error) {
+	values, err := s.sketch.GetValuesAtQuantiles(quantiles)
+	min := s.summaryStatistics.Min()
+	max := s.summaryStatistics.Max()
+	for i := range values {
+		if values[i] < min {
+			values[i] = min
+		} else if values[i] > max {
+			values[i] = max
+		}
+	}
+	return values, err
+}
+
+func (s *DDSketchWithExactSummaryStatistics) ForEach(f func(value, count float64) (stop bool)) {
+	s.sketch.ForEach(f)
+}
+
+func (s *DDSketchWithExactSummaryStatistics) Clear() {
+	s.sketch.Clear()
+	s.summaryStatistics.Clear()
+}
+
+func (s *DDSketchWithExactSummaryStatistics) Add(value float64) error {
+	err := s.sketch.Add(value)
+	if err != nil {
+		return err
+	}
+	s.summaryStatistics.Add(value, 1)
+	return nil
+}
+
+func (s *DDSketchWithExactSummaryStatistics) AddWithCount(value, count float64) error {
+	if count == 0 {
+		return nil
+	}
+	err := s.sketch.AddWithCount(value, count)
+	if err != nil {
+		return err
+	}
+	s.summaryStatistics.Add(value, count)
+	return nil
+}
+
+func (s *DDSketchWithExactSummaryStatistics) MergeWith(o *DDSketchWithExactSummaryStatistics) error {
+	err := s.sketch.MergeWith(o.sketch)
+	if err != nil {
+		return err
+	}
+	s.summaryStatistics.MergeWith(o.summaryStatistics)
+	return nil
+}
+
+func (s *DDSketchWithExactSummaryStatistics) Copy() *DDSketchWithExactSummaryStatistics {
+	return &DDSketchWithExactSummaryStatistics{
+		sketch:            s.sketch.Copy(),
+		summaryStatistics: s.summaryStatistics.Copy(),
+	}
+}
+
+func (s *DDSketchWithExactSummaryStatistics) Reweight(factor float64) error {
+	err := s.sketch.Reweight(factor)
+	if err != nil {
+		return err
+	}
+	s.summaryStatistics.Reweight(factor)
+	return nil
+}
+
+func (s *DDSketchWithExactSummaryStatistics) ChangeMapping(newMapping mapping.IndexMapping, storeProvider store.Provider, scaleFactor float64) *DDSketchWithExactSummaryStatistics {
+	summaryStatisticsCopy := s.summaryStatistics.Copy()
+	summaryStatisticsCopy.Rescale(scaleFactor)
+	return &DDSketchWithExactSummaryStatistics{
+		sketch:            s.sketch.ChangeMapping(newMapping, storeProvider(), storeProvider(), scaleFactor),
+		summaryStatistics: summaryStatisticsCopy,
+	}
+}
+
+func (s *DDSketchWithExactSummaryStatistics) Encode(b *[]byte, omitIndexMapping bool) {
+	if s.summaryStatistics.Count() != 0 {
+		enc.EncodeFlag(b, enc.FlagCount)
+		enc.EncodeVarfloat64(b, s.summaryStatistics.Count())
+	}
+	if s.summaryStatistics.Sum() != 0 {
+		enc.EncodeFlag(b, enc.FlagSum)
+		enc.EncodeFloat64LE(b, s.summaryStatistics.Sum())
+	}
+	if s.summaryStatistics.Min() != math.Inf(1) {
+		enc.EncodeFlag(b, enc.FlagMin)
+		enc.EncodeFloat64LE(b, s.summaryStatistics.Min())
+	}
+	if s.summaryStatistics.Max() != math.Inf(-1) {
+		enc.EncodeFlag(b, enc.FlagMax)
+		enc.EncodeFloat64LE(b, s.summaryStatistics.Max())
+	}
+	s.sketch.Encode(b, omitIndexMapping)
+}
+
+// DecodeDDSketchWithExactSummaryStatistics deserializes a sketch.
+// Stores are built using storeProvider. The store type need not match the
+// store that the serialized sketch initially used. However, using the same
+// store type may make decoding faster. In the absence of high performance
+// requirements, store.DefaultProvider is a sound enough choice of store
+// provider.
+// To avoid memory allocations, it is possible to use a store provider that
+// reuses stores, by calling Clear() on previously used stores before providing
+// the store.
+// If the serialized data does not contain the index mapping, you need to
+// specify the index mapping that was used in the sketch that was encoded.
+// Otherwise, you can use nil and the index mapping will be decoded from the
+// serialized data.
+// An encoded DDSketch cannot be decoded with this function (unless it is
+// empty), because it does not track exact summary statistics.
+func DecodeDDSketchWithExactSummaryStatistics(b []byte, storeProvider store.Provider, indexMapping mapping.IndexMapping) (*DDSketchWithExactSummaryStatistics, error) {
+	s := &DDSketchWithExactSummaryStatistics{
+		sketch: &DDSketch{
+			IndexMapping:       indexMapping,
+			positiveValueStore: storeProvider(),
+			negativeValueStore: storeProvider(),
+			zeroCount:          float64(0),
+		},
+		summaryStatistics: stat.NewSummaryStatistics(),
+	}
+	err := s.DecodeAndMergeWith(b)
+	return s, err
+}
+
+func (s *DDSketchWithExactSummaryStatistics) DecodeAndMergeWith(bb []byte) error {
+	err := s.sketch.decodeAndMergeWith(bb, func(b *[]byte, flag enc.Flag) error {
+		switch flag {
+		case enc.FlagCount:
+			count, err := enc.DecodeVarfloat64(b)
+			if err != nil {
+				return err
+			}
+			s.summaryStatistics.AddToCount(count)
+			return nil
+		case enc.FlagSum:
+			sum, err := enc.DecodeFloat64LE(b)
+			if err != nil {
+				return err
+			}
+			s.summaryStatistics.AddToSum(sum)
+			return nil
+		case enc.FlagMin, enc.FlagMax:
+			stat, err := enc.DecodeFloat64LE(b)
+			if err != nil {
+				return err
+			}
+			s.summaryStatistics.Add(stat, 0)
+			return nil
+		default:
+			return errUnknownFlag
+		}
+	})
+	if err != nil {
+		return err
+	}
+	// It is assumed that if the count is encoded, other exact summary
+	// statistics are encoded as well, which is the case if Encode is used.
+	if s.summaryStatistics.Count() == 0 && !s.sketch.IsEmpty() {
+		return errors.New("missing exact summary statistics")
+	}
+	return nil
+}
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/encoding/encoding.go b/vendor/github.com/DataDog/sketches-go/ddsketch/encoding/encoding.go
new file mode 100644
index 0000000000..c50dc1adb9
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/encoding/encoding.go
@@ -0,0 +1,208 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package encoding
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+	"math"
+	"math/bits"
+)
+
+// Encoding functions append bytes to the provided *[]byte, which avoids
+// allocations if the slice initially has a large enough capacity.
+// Decoding functions also take *[]byte as input, and when they do not return an
+// error, advance the slice so that it starts at the immediate byte after the
+// decoded part (or so that it is empty if there is no such byte).
+
+const (
+	MaxVarLen64      = 9
+	varfloat64Rotate = 6
+)
+
+var uvarint64Sizes = initUvarint64Sizes()
+var varfloat64Sizes = initVarfloat64Sizes()
+
+// EncodeUvarint64 serializes 64-bit unsigned integers 7 bits at a time,
+// starting with the least significant bits. The most significant bit in each
+// output byte is the continuation bit and indicates whether there are
+// additional non-zero bits encoded in following bytes. There are at most 9
+// output bytes and the last one does not have a continuation bit, allowing for
+// it to encode 8 bits (8*7+8 = 64).
+func EncodeUvarint64(b *[]byte, v uint64) {
+	for i := 0; i < MaxVarLen64-1; i++ {
+		if v < 0x80 {
+			break
+		}
+		*b = append(*b, byte(v)|byte(0x80))
+		v >>= 7
+	}
+	*b = append(*b, byte(v))
+}
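+
+// For example, 300 (0b1_0010_1100) is appended as the two bytes 0xAC 0x02: the
+// low 7 bits with the continuation bit set, then the remaining bits (0b10)
+// without it. Values below 0x80 take a single byte, and any 64-bit value takes
+// at most MaxVarLen64 (9) bytes.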
+
+// DecodeUvarint64 deserializes 64-bit unsigned integers that have been encoded
+// using EncodeUvarint64.
+func DecodeUvarint64(b *[]byte) (uint64, error) {
+	x := uint64(0)
+	s := uint(0)
+	for i := 0; ; i++ {
+		if len(*b) <= i {
+			return 0, io.EOF
+		}
+		n := (*b)[i]
+		if n < 0x80 || i == MaxVarLen64-1 {
+			*b = (*b)[i+1:]
+			return x | uint64(n)<<s, nil
+		}
+		x |= uint64(n&0x7F) << s
+		s += 7
+	}
+}
+
+// Uvarint64Size returns the number of bytes that EncodeUvarint64 encodes a
+// 64-bit unsigned integer into.
+func Uvarint64Size(v uint64) int {
+	return uvarint64Sizes[bits.LeadingZeros64(v)]
+}
+
+func initUvarint64Sizes() [65]int {
+	var sizes [65]int
+	b := []byte{}
+	for i := 0; i <= 64; i++ {
+		b = b[:0]
+		EncodeUvarint64(&b, ^uint64(0)>>i)
+		sizes[i] = len(b)
+	}
+	return sizes
+}
+
+// EncodeVarint64 serializes 64-bit signed integers using zig-zag encoding,
+// which turns integers of small magnitude, whether positive or negative, into
+// unsigned integers with many leading zeros, allowing space-efficient varuint
+// encoding of those values.
+func EncodeVarint64(b *[]byte, v int64) {
+	EncodeUvarint64(b, uint64(v>>(64-1)^(v<<1)))
+}
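+
+// Zig-zag examples: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4. Signed integers
+// of small magnitude therefore become small unsigned integers, which
+// EncodeUvarint64 stores in few bytes.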
+
+// DecodeVarint64 deserializes 64-bit signed integers that have been encoded
+// using EncodeVarint64.
+func DecodeVarint64(b *[]byte) (int64, error) {
+	v, err := DecodeUvarint64(b)
+	return int64((v >> 1) ^ -(v & 1)), err
+}
+
+// Varint64Size returns the number of bytes that EncodeVarint64 encodes a 64-bit
+// signed integer into.
+func Varint64Size(v int64) int {
+	return Uvarint64Size(uint64(v>>(64-1) ^ (v << 1)))
+}
+
+var errVarint32Overflow = errors.New("varint overflows a 32-bit integer")
+
+// DecodeVarint32 deserializes 32-bit signed integers that have been encoded
+// using EncodeVarint64.
+func DecodeVarint32(b *[]byte) (int32, error) {
+	v, err := DecodeVarint64(b)
+	if err != nil {
+		return 0, err
+	}
+	if v > math.MaxInt32 || v < math.MinInt32 {
+		return 0, errVarint32Overflow
+	}
+	return int32(v), nil
+}
+
+// EncodeFloat64LE serializes 64-bit floating-point values, starting with the
+// least significant bytes.
+func EncodeFloat64LE(b *[]byte, v float64) {
+	*b = append(*b, make([]byte, 8)...)
+	binary.LittleEndian.PutUint64((*b)[len(*b)-8:], math.Float64bits(v))
+}
+
+// DecodeFloat64LE deserializes 64-bit floating-point values that have been
+// encoded with EncodeFloat64LE.
+func DecodeFloat64LE(b *[]byte) (float64, error) {
+	if len(*b) < 8 {
+		return 0, io.EOF
+	}
+	v := math.Float64frombits(binary.LittleEndian.Uint64(*b))
+	*b = (*b)[8:]
+	return v, nil
+}
+
+// EncodeVarfloat64 serializes 64-bit floating-point values using a method that
+// is similar to the varuint encoding and that is space-efficient for
+// non-negative integer values. The output takes at most 9 bytes.
+// Input values are first shifted as floating-point values (+1), then transmuted
+// to integer values, then shifted again as integer values (-Float64bits(1)).
+// That is in order to minimize the number of non-zero bits when dealing with
+// non-negative integer values.
+// After that transformation, any input integer value no greater than 2^53 (the
+// largest integer value that can be encoded exactly as a 64-bit floating-point
+// value) will have at least 6 leading zero bits. By rotating bits to the left,
+// those bits end up at the right of the binary representation.
+// The resulting bits are then encoded similarly to the varuint method, but
+// starting with the most significant bits.
+func EncodeVarfloat64(b *[]byte, v float64) {
+	x := bits.RotateLeft64(math.Float64bits(v+1)-math.Float64bits(1), varfloat64Rotate)
+	for i := 0; i < MaxVarLen64-1; i++ {
+		n := byte(x >> (8*8 - 7))
+		x <<= 7
+		if x == 0 {
+			*b = append(*b, n)
+			return
+		}
+		*b = append(*b, n|byte(0x80))
+	}
+	n := byte(x >> (8 * 7))
+	*b = append(*b, n)
+}
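+
+// For example, 0 is encoded as the single byte 0x00, and 1 as the single byte
+// 0x02: Float64bits(2) - Float64bits(1) equals 1<<52, which the rotation turns
+// into 1<<58, whose 7 most significant bits are 0b0000010.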
+
+// DecodeVarfloat64 deserializes 64-bit floating-point values that have been
+// encoded with EncodeVarfloat64.
+func DecodeVarfloat64(b *[]byte) (float64, error) {
+	x := uint64(0)
+	i := int(0)
+	s := uint(8*8 - 7)
+	for {
+		if len(*b) <= i {
+			return 0, io.EOF
+		}
+		n := (*b)[i]
+		if i == MaxVarLen64-1 {
+			x |= uint64(n)
+			break
+		}
+		if n < 0x80 {
+			x |= uint64(n) << s
+			break
+		}
+		x |= uint64(n&0x7F) << s
+		i++
+		s -= 7
+	}
+	*b = (*b)[i+1:]
+	return math.Float64frombits(bits.RotateLeft64(x, -varfloat64Rotate)+math.Float64bits(1)) - 1, nil
+}
+
+// Varfloat64Size returns the number of bytes that EncodeVarfloat64 encodes a
+// 64-bit floating-point value into.
+func Varfloat64Size(v float64) int {
+	x := bits.RotateLeft64(math.Float64bits(v+1)-math.Float64bits(1), varfloat64Rotate)
+	return varfloat64Sizes[bits.TrailingZeros64(x)]
+}
+
+func initVarfloat64Sizes() [65]int {
+	var sizes [65]int
+	b := []byte{}
+	for i := 0; i <= 64; i++ {
+		b = b[:0]
+		EncodeVarfloat64(&b, math.Float64frombits(bits.RotateLeft64(^uint64(0)<<i, -varfloat64Rotate)+math.Float64bits(1))-1)
+		sizes[i] = len(b)
+	}
+	return sizes
+}
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/encoding/flag.go b/vendor/github.com/DataDog/sketches-go/ddsketch/encoding/flag.go
new file mode 100644
index 0000000000..0f4f50fdce
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/encoding/flag.go
@@ -0,0 +1,160 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package encoding
+
+import (
+	"io"
+)
+
+// An encoded DDSketch comprises multiple contiguous blocks (sequences of
+// bytes). Each block is prefixed with a flag that indicates what the block
+// contains and how the data is encoded in the block.
+//
+// A flag is a single byte, which itself contains two parts:
+// - the flag type (the 2 least significant bits),
+// - the subflag (the 6 most significant bits).
+//
+// There are four flag types, for:
+// - sketch features,
+// - index mapping,
+// - positive value store,
+// - negative value store.
+//
+// The meaning of the subflag depends on the flag type:
+// - for the sketch feature flag type, it indicates what feature is encoded,
+// - for the index mapping flag type, it indicates what mapping is encoded and
+// how,
+// - for the store flag types, it indicates how bins are encoded.
+
+const (
+	numBitsForType byte = 2
+	flagTypeMask   byte = (1 << numBitsForType) - 1
+	subFlagMask    byte = ^flagTypeMask
+)
+
+type Flag struct{ byte }
+type FlagType struct{ byte } // mask: 0b00000011
+type SubFlag struct{ byte }  // mask: 0b11111100
+
+var (
+	// FLAG TYPES
+
+	flagTypeSketchFeatures = FlagType{0b00}
+	FlagTypeIndexMapping   = FlagType{0b10}
+	FlagTypePositiveStore  = FlagType{0b01}
+	FlagTypeNegativeStore  = FlagType{0b11}
+
+	// SKETCH FEATURES
+
+	// Encodes the count of the zero bin.
+	// Encoding format:
+	// - [byte] flag
+	// - [varfloat64] count of the zero bin
+	FlagZeroCountVarFloat = NewFlag(flagTypeSketchFeatures, newSubFlag(1))
+
+	// Encodes the total count.
+	// Encoding format:
+	// - [byte] flag
+	// - [varfloat64] total count
+	FlagCount = NewFlag(flagTypeSketchFeatures, newSubFlag(0x28))
+
+	// Encodes the summary statistics.
+	// Encoding format:
+	// - [byte] flag
+	// - [float64LE] summary stat
+	FlagSum = NewFlag(flagTypeSketchFeatures, newSubFlag(0x21))
+	FlagMin = NewFlag(flagTypeSketchFeatures, newSubFlag(0x22))
+	FlagMax = NewFlag(flagTypeSketchFeatures, newSubFlag(0x23))
+
+	// INDEX MAPPING
+
+	// Encodes log-like index mappings, specifying the base (gamma) and the index offset.
+	// The subflag specifies the interpolation method.
+	// Encoding format:
+	// - [byte] flag
+	// - [float64LE] gamma
+	// - [float64LE] index offset
+	FlagIndexMappingBaseLogarithmic = NewFlag(FlagTypeIndexMapping, newSubFlag(0))
+	FlagIndexMappingBaseLinear      = NewFlag(FlagTypeIndexMapping, newSubFlag(1))
+	FlagIndexMappingBaseQuadratic   = NewFlag(FlagTypeIndexMapping, newSubFlag(2))
+	FlagIndexMappingBaseCubic       = NewFlag(FlagTypeIndexMapping, newSubFlag(3))
+	FlagIndexMappingBaseQuartic     = NewFlag(FlagTypeIndexMapping, newSubFlag(4))
+
+	// BINS
+
+	// Encodes N bins, each one with its index and its count.
+	// Indexes are delta-encoded.
+	// Encoding format:
+	// - [byte] flag
+	// - [uvarint64] number of bins N
+	// - [varint64] index of first bin
+	// - [varfloat64] count of first bin
+	// - [varint64] difference between the index of the second bin and the index
+	// of the first bin
+	// - [varfloat64] count of second bin
+	// - ...
+	// - [varint64] difference between the index of the N-th bin and the index
+	// of the (N-1)-th bin
+	// - [varfloat64] count of N-th bin
+	BinEncodingIndexDeltasAndCounts = newSubFlag(1)
+
+	// Encodes N bins whose counts are each equal to 1.
+	// Indexes are delta-encoded.
+	// Encoding format:
+	// - [byte] flag
+	// - [uvarint64] number of bins N
+	// - [varint64] index of first bin
+	// - [varint64] difference between the index of the second bin and the index
+	// of the first bin
+	// - ...
+	// - [varint64] difference between the index of the N-th bin and the index
+	// of the (N-1)-th bin
+	BinEncodingIndexDeltas = newSubFlag(2)
+
+	// Encodes N contiguous bins, specifying the count of each one.
+	// Encoding format:
+	// - [byte] flag
+	// - [uvarint64] number of bins N
+	// - [varint64] index of first bin
+	// - [varint64] difference between two successive indexes
+	// - [varfloat64] count of first bin
+	// - [varfloat64] count of second bin
+	// - ...
+	// - [varfloat64] count of N-th bin
+	BinEncodingContiguousCounts = newSubFlag(3)
+)
+
+func NewFlag(t FlagType, s SubFlag) Flag {
+	return Flag{t.byte | s.byte}
+}
+
+func (f Flag) Type() FlagType {
+	return FlagType{f.byte & flagTypeMask}
+}
+
+func (f Flag) SubFlag() SubFlag {
+	return SubFlag{f.byte & subFlagMask}
+}
+
+func newSubFlag(b byte) SubFlag {
+	return SubFlag{b << numBitsForType}
+}
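+
+// For example, FlagCount combines the sketch-features flag type (0b00) with
+// subflag 0x28: NewFlag yields the byte 0x28<<2 | 0b00 = 0xA0. Type() then
+// recovers the flag type by masking with flagTypeMask, and SubFlag() recovers
+// the shifted subflag by masking with subFlagMask.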
+
+// EncodeFlag encodes a flag and appends its content to the provided []byte.
+func EncodeFlag(b *[]byte, f Flag) {
+	*b = append(*b, f.byte)
+}
+
+// DecodeFlag decodes a flag and updates the provided []byte so that it starts
+// immediately after the encoded flag.
+func DecodeFlag(b *[]byte) (Flag, error) {
+	if len(*b) == 0 {
+		return Flag{}, io.EOF
+	}
+	flag := Flag{(*b)[0]}
+	*b = (*b)[1:]
+	return flag, nil
+}
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/bit_operation_helper.go b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/bit_operation_helper.go
new file mode 100644
index 0000000000..756ef85c0a
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/bit_operation_helper.go
@@ -0,0 +1,35 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package mapping
+
+import (
+	"math"
+)
+
+const (
+	exponentBias     = 1023
+	exponentMask     = uint64(0x7FF0000000000000)
+	exponentShift    = 52
+	significandMask  = uint64(0x000fffffffffffff)
+	significandWidth = 53
+	oneMask          = uint64(0x3ff0000000000000)
+)
+
+func getExponent(float64Bits uint64) float64 {
+	return float64(int((float64Bits&exponentMask)>>exponentShift) - exponentBias)
+}
+
+func getSignificandPlusOne(float64Bits uint64) float64 {
+	return math.Float64frombits((float64Bits & significandMask) | oneMask)
+}
+
+// exponent should be >= -1022 and <= 1023
+// significandPlusOne should be >= 1 and < 2
+func buildFloat64(exponent int, significandPlusOne float64) float64 {
+	return math.Float64frombits(
+		(uint64((exponent+exponentBias)<<exponentShift) & exponentMask) | (math.Float64bits(significandPlusOne) & significandMask),
+	)
+}
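+
+// For example, 6.0 is 1.5 * 2^2: getExponent(math.Float64bits(6)) returns 2,
+// getSignificandPlusOne(math.Float64bits(6)) returns 1.5, and
+// buildFloat64(2, 1.5) reconstructs 6.0.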
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/cubically_interpolated_mapping.go b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/cubically_interpolated_mapping.go
new file mode 100644
index 0000000000..dbae9bb16b
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/cubically_interpolated_mapping.go
@@ -0,0 +1,144 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package mapping
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math"
+
+	enc "github.com/DataDog/sketches-go/ddsketch/encoding"
+	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
+)
+
+const (
+	A = 6.0 / 35.0
+	B = -3.0 / 5.0
+	C = 10.0 / 7.0
+)
+
+// A fast IndexMapping that approximates the memory-optimal LogarithmicMapping by extracting the floor value
+// of the logarithm to the base 2 from the binary representations of floating-point values and cubically
+// interpolating the logarithm in-between.
+// More detailed documentation of this method can be found in the sketches-java repository:
+// https://github.com/DataDog/sketches-java/
+type CubicallyInterpolatedMapping struct {
+	relativeAccuracy      float64
+	multiplier            float64
+	normalizedIndexOffset float64
+}
+
+func NewCubicallyInterpolatedMapping(relativeAccuracy float64) (*CubicallyInterpolatedMapping, error) {
+	if relativeAccuracy <= 0 || relativeAccuracy >= 1 {
+		return nil, errors.New("The relative accuracy must be between 0 and 1.")
+	}
+	return &CubicallyInterpolatedMapping{
+		relativeAccuracy: relativeAccuracy,
+		multiplier:       7.0 / (10 * math.Log1p(2*relativeAccuracy/(1-relativeAccuracy))),
+	}, nil
+}
+
+func NewCubicallyInterpolatedMappingWithGamma(gamma, indexOffset float64) (*CubicallyInterpolatedMapping, error) {
+	if gamma <= 1 {
+		return nil, errors.New("Gamma must be greater than 1.")
+	}
+	m := CubicallyInterpolatedMapping{
+		relativeAccuracy: 1 - 2/(1+math.Exp(7.0/10*math.Log2(gamma))),
+		multiplier:       1 / math.Log2(gamma),
+	}
+	m.normalizedIndexOffset = indexOffset - m.approximateLog(1)*m.multiplier
+	return &m, nil
+}
+
+func (m *CubicallyInterpolatedMapping) Equals(other IndexMapping) bool {
+	o, ok := other.(*CubicallyInterpolatedMapping)
+	if !ok {
+		return false
+	}
+	tol := 1e-12
+	return (withinTolerance(m.multiplier, o.multiplier, tol) && withinTolerance(m.normalizedIndexOffset, o.normalizedIndexOffset, tol))
+}
+
+func (m *CubicallyInterpolatedMapping) Index(value float64) int {
+	index := m.approximateLog(value)*m.multiplier + m.normalizedIndexOffset
+	if index >= 0 {
+		return int(index)
+	} else {
+		return int(index) - 1
+	}
+}
+
+func (m *CubicallyInterpolatedMapping) Value(index int) float64 {
+	return m.LowerBound(index) * (1 + m.relativeAccuracy)
+}
+
+func (m *CubicallyInterpolatedMapping) LowerBound(index int) float64 {
+	return m.approximateInverseLog((float64(index) - m.normalizedIndexOffset) / m.multiplier)
+}
+
+// Return an approximation of Math.Log(x) / Math.Log(2).
+func (m *CubicallyInterpolatedMapping) approximateLog(x float64) float64 {
+	bits := math.Float64bits(x)
+	e := getExponent(bits)
+	s := getSignificandPlusOne(bits) - 1
+	return ((A*s+B)*s+C)*s + e
+}
+
+// The exact inverse of approximateLog.
+func (m *CubicallyInterpolatedMapping) approximateInverseLog(x float64) float64 {
+	exponent := math.Floor(x)
+	// Derived from Cardano's formula
+	d0 := B*B - 3*A*C
+	d1 := 2*B*B*B - 9*A*B*C - 27*A*A*(x-exponent)
+	p := math.Cbrt((d1 - math.Sqrt(d1*d1-4*d0*d0*d0)) / 2)
+	significandPlusOne := -(B+p+d0/p)/(3*A) + 1
+	return buildFloat64(int(exponent), significandPlusOne)
+}
+
+func (m *CubicallyInterpolatedMapping) MinIndexableValue() float64 {
+	return math.Max(
+		math.Exp2((math.MinInt32-m.normalizedIndexOffset)/m.multiplier-m.approximateLog(1)+1), // so that index >= MinInt32
+		minNormalFloat64*(1+m.relativeAccuracy)/(1-m.relativeAccuracy),
+	)
+}
+
+func (m *CubicallyInterpolatedMapping) MaxIndexableValue() float64 {
+	return math.Min(
+		math.Exp2((math.MaxInt32-m.normalizedIndexOffset)/m.multiplier-m.approximateLog(float64(1))-1), // so that index <= MaxInt32
+		math.Exp(expOverflow)/(1+m.relativeAccuracy),                                                   // so that math.Exp does not overflow
+	)
+}
+
+func (m *CubicallyInterpolatedMapping) RelativeAccuracy() float64 {
+	return m.relativeAccuracy
+}
+
+func (m *CubicallyInterpolatedMapping) gamma() float64 {
+	return math.Exp2(1 / m.multiplier)
+}
+
+func (m *CubicallyInterpolatedMapping) ToProto() *sketchpb.IndexMapping {
+	return &sketchpb.IndexMapping{
+		Gamma:         m.gamma(),
+		IndexOffset:   m.normalizedIndexOffset + m.approximateLog(1)*m.multiplier,
+		Interpolation: sketchpb.IndexMapping_CUBIC,
+	}
+}
+
+func (m *CubicallyInterpolatedMapping) Encode(b *[]byte) {
+	enc.EncodeFlag(b, enc.FlagIndexMappingBaseCubic)
+	enc.EncodeFloat64LE(b, m.gamma())
+	enc.EncodeFloat64LE(b, m.normalizedIndexOffset)
+}
+
+func (m *CubicallyInterpolatedMapping) string() string {
+	var buffer bytes.Buffer
+	buffer.WriteString(fmt.Sprintf("relativeAccuracy: %v, multiplier: %v, normalizedIndexOffset: %v\n", m.relativeAccuracy, m.multiplier, m.normalizedIndexOffset))
+	return buffer.String()
+}
+
+var _ IndexMapping = (*CubicallyInterpolatedMapping)(nil)
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/index_mapping.go b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/index_mapping.go
new file mode 100644
index 0000000000..d8cea77797
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/index_mapping.go
@@ -0,0 +1,90 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package mapping
+
+import (
+	"errors"
+	"fmt"
+
+	enc "github.com/DataDog/sketches-go/ddsketch/encoding"
+	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
+)
+
+const (
+	expOverflow      = 7.094361393031e+02      // The value at which math.Exp overflows
+	minNormalFloat64 = 2.2250738585072014e-308 //2^(-1022)
+)
+
+type IndexMapping interface {
+	Equals(other IndexMapping) bool
+	Index(value float64) int
+	Value(index int) float64
+	LowerBound(index int) float64
+	RelativeAccuracy() float64
+	MinIndexableValue() float64
+	MaxIndexableValue() float64
+	ToProto() *sketchpb.IndexMapping
+	// Encode encodes a mapping and appends its content to the provided []byte.
+	Encode(b *[]byte)
+}
+
+func NewDefaultMapping(relativeAccuracy float64) (IndexMapping, error) {
+	return NewLogarithmicMapping(relativeAccuracy)
+}
+
+// FromProto returns an IndexMapping built from its protobuf representation.
+func FromProto(m *sketchpb.IndexMapping) (IndexMapping, error) {
+	switch m.Interpolation {
+	case sketchpb.IndexMapping_NONE:
+		return NewLogarithmicMappingWithGamma(m.Gamma, m.IndexOffset)
+	case sketchpb.IndexMapping_LINEAR:
+		return NewLinearlyInterpolatedMappingWithGamma(m.Gamma, m.IndexOffset)
+	case sketchpb.IndexMapping_CUBIC:
+		return NewCubicallyInterpolatedMappingWithGamma(m.Gamma, m.IndexOffset)
+	default:
+		return nil, fmt.Errorf("interpolation not supported: %d", m.Interpolation)
+	}
+}
+
+// Decode decodes a mapping and updates the provided []byte so that it starts
+// immediately after the encoded mapping.
+func Decode(b *[]byte, flag enc.Flag) (IndexMapping, error) {
+	switch flag {
+
+	case enc.FlagIndexMappingBaseLogarithmic:
+		gamma, indexOffset, err := decodeLogLikeIndexMapping(b)
+		if err != nil {
+			return nil, err
+		}
+		return NewLogarithmicMappingWithGamma(gamma, indexOffset)
+
+	case enc.FlagIndexMappingBaseLinear:
+		gamma, indexOffset, err := decodeLogLikeIndexMapping(b)
+		if err != nil {
+			return nil, err
+		}
+		return NewLinearlyInterpolatedMappingWithGamma(gamma, indexOffset)
+
+	case enc.FlagIndexMappingBaseCubic:
+		gamma, indexOffset, err := decodeLogLikeIndexMapping(b)
+		if err != nil {
+			return nil, err
+		}
+		return NewCubicallyInterpolatedMappingWithGamma(gamma, indexOffset)
+
+	default:
+		return nil, errors.New("unknown mapping")
+	}
+}
+
+func decodeLogLikeIndexMapping(b *[]byte) (gamma, indexOffset float64, err error) {
+	gamma, err = enc.DecodeFloat64LE(b)
+	if err != nil {
+		return
+	}
+	indexOffset, err = enc.DecodeFloat64LE(b)
+	return
+}
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/linearly_interpolated_mapping.go b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/linearly_interpolated_mapping.go
new file mode 100644
index 0000000000..d17bb46612
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/linearly_interpolated_mapping.go
@@ -0,0 +1,139 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package mapping
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math"
+
+	enc "github.com/DataDog/sketches-go/ddsketch/encoding"
+	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
+)
+
+// A fast IndexMapping that approximates the memory-optimal LogarithmicMapping by extracting the floor value
+// of the logarithm to the base 2 from the binary representations of floating-point values and linearly
+// interpolating the logarithm in-between.
+type LinearlyInterpolatedMapping struct {
+	relativeAccuracy      float64
+	multiplier            float64
+	normalizedIndexOffset float64
+}
+
+func NewLinearlyInterpolatedMapping(relativeAccuracy float64) (*LinearlyInterpolatedMapping, error) {
+	if relativeAccuracy <= 0 || relativeAccuracy >= 1 {
+		return nil, errors.New("The relative accuracy must be between 0 and 1.")
+	}
+	return &LinearlyInterpolatedMapping{
+		relativeAccuracy: relativeAccuracy,
+		multiplier:       1.0 / math.Log1p(2*relativeAccuracy/(1-relativeAccuracy)),
+	}, nil
+}
+
+func NewLinearlyInterpolatedMappingWithGamma(gamma, indexOffset float64) (*LinearlyInterpolatedMapping, error) {
+	if gamma <= 1 {
+		return nil, errors.New("Gamma must be greater than 1.")
+	}
+	m := LinearlyInterpolatedMapping{
+		relativeAccuracy: 1 - 2/(1+math.Exp(math.Log2(gamma))),
+		multiplier:       1 / math.Log2(gamma),
+	}
+	m.normalizedIndexOffset = indexOffset - m.approximateLog(1)*m.multiplier
+	return &m, nil
+}
+
+func (m *LinearlyInterpolatedMapping) Equals(other IndexMapping) bool {
+	o, ok := other.(*LinearlyInterpolatedMapping)
+	if !ok {
+		return false
+	}
+	tol := 1e-12
+	return (withinTolerance(m.multiplier, o.multiplier, tol) && withinTolerance(m.normalizedIndexOffset, o.normalizedIndexOffset, tol))
+}
+
+func (m *LinearlyInterpolatedMapping) Index(value float64) int {
+	index := m.approximateLog(value)*m.multiplier + m.normalizedIndexOffset
+	if index >= 0 {
+		return int(index)
+	} else {
+		return int(index) - 1
+	}
+}
+
+func (m *LinearlyInterpolatedMapping) Value(index int) float64 {
+	return m.LowerBound(index) * (1 + m.relativeAccuracy)
+}
+
+func (m *LinearlyInterpolatedMapping) LowerBound(index int) float64 {
+	return m.approximateInverseLog((float64(index) - m.normalizedIndexOffset) / m.multiplier)
+}
+
+// Return an approximation of 1 + Math.Log(x)/Math.Log(2) (so that approximateLog(1) equals 1).
+func (m *LinearlyInterpolatedMapping) approximateLog(x float64) float64 {
+	bits := math.Float64bits(x)
+	return getExponent(bits) + getSignificandPlusOne(bits)
+}
+
+// The exact inverse of approximateLog.
+func (m *LinearlyInterpolatedMapping) approximateInverseLog(x float64) float64 {
+	exponent := math.Floor(x - 1)
+	significandPlusOne := x - exponent
+	return buildFloat64(int(exponent), significandPlusOne)
+}
+
+func (m *LinearlyInterpolatedMapping) MinIndexableValue() float64 {
+	return math.Max(
+		math.Exp2((math.MinInt32-m.normalizedIndexOffset)/m.multiplier-m.approximateLog(1)+1), // so that index >= MinInt32
+		minNormalFloat64*(1+m.relativeAccuracy)/(1-m.relativeAccuracy),
+	)
+}
+
+func (m *LinearlyInterpolatedMapping) MaxIndexableValue() float64 {
+	return math.Min(
+		math.Exp2((math.MaxInt32-m.normalizedIndexOffset)/m.multiplier-m.approximateLog(float64(1))-1), // so that index <= MaxInt32
+		math.Exp(expOverflow)/(1+m.relativeAccuracy),                                                   // so that math.Exp does not overflow
+	)
+}
+
+func (m *LinearlyInterpolatedMapping) RelativeAccuracy() float64 {
+	return m.relativeAccuracy
+}
+
+func (m *LinearlyInterpolatedMapping) gamma() float64 {
+	return math.Exp2(1 / m.multiplier)
+}
+
+// Generates a protobuf representation of this LinearlyInterpolatedMapping.
+func (m *LinearlyInterpolatedMapping) ToProto() *sketchpb.IndexMapping {
+	return &sketchpb.IndexMapping{
+		Gamma:         m.gamma(),
+		IndexOffset:   m.normalizedIndexOffset + m.approximateLog(1)*m.multiplier,
+		Interpolation: sketchpb.IndexMapping_LINEAR,
+	}
+}
+
+func (m *LinearlyInterpolatedMapping) Encode(b *[]byte) {
+	enc.EncodeFlag(b, enc.FlagIndexMappingBaseLinear)
+	enc.EncodeFloat64LE(b, m.gamma())
+	enc.EncodeFloat64LE(b, m.normalizedIndexOffset+m.approximateLog(1)*m.multiplier)
+}
+
+func (m *LinearlyInterpolatedMapping) string() string {
+	var buffer bytes.Buffer
+	buffer.WriteString(fmt.Sprintf("relativeAccuracy: %v, multiplier: %v, normalizedIndexOffset: %v\n", m.relativeAccuracy, m.multiplier, m.normalizedIndexOffset))
+	return buffer.String()
+}
+
+func withinTolerance(x, y, tolerance float64) bool {
+	if x == 0 || y == 0 {
+		return math.Abs(x) <= tolerance && math.Abs(y) <= tolerance
+	} else {
+		return math.Abs(x-y) <= tolerance*math.Max(math.Abs(x), math.Abs(y))
+	}
+}
+
+var _ IndexMapping = (*LinearlyInterpolatedMapping)(nil)
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/logarithmic_mapping.go b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/logarithmic_mapping.go
new file mode 100644
index 0000000000..f7c33e4350
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/logarithmic_mapping.go
@@ -0,0 +1,119 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package mapping
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math"
+
+	enc "github.com/DataDog/sketches-go/ddsketch/encoding"
+	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
+)
+
+// An IndexMapping that is memory-optimal, that is to say that given a targeted relative accuracy, it
+// requires the least number of indices to cover a given range of values. This is done by logarithmically
+// mapping floating-point values to integers.
+type LogarithmicMapping struct {
+	relativeAccuracy      float64
+	multiplier            float64
+	normalizedIndexOffset float64
+}
+
+func NewLogarithmicMapping(relativeAccuracy float64) (*LogarithmicMapping, error) {
+	if relativeAccuracy <= 0 || relativeAccuracy >= 1 {
+		return nil, errors.New("The relative accuracy must be between 0 and 1.")
+	}
+	m := &LogarithmicMapping{
+		relativeAccuracy: relativeAccuracy,
+		multiplier:       1 / math.Log1p(2*relativeAccuracy/(1-relativeAccuracy)),
+	}
+	return m, nil
+}
+
+func NewLogarithmicMappingWithGamma(gamma, indexOffset float64) (*LogarithmicMapping, error) {
+	if gamma <= 1 {
+		return nil, errors.New("Gamma must be greater than 1.")
+	}
+	m := &LogarithmicMapping{
+		relativeAccuracy:      1 - 2/(1+gamma),
+		multiplier:            1 / math.Log(gamma),
+		normalizedIndexOffset: indexOffset,
+	}
+	return m, nil
+}
+
+func (m *LogarithmicMapping) Equals(other IndexMapping) bool {
+	o, ok := other.(*LogarithmicMapping)
+	if !ok {
+		return false
+	}
+	tol := 1e-12
+	return (withinTolerance(m.multiplier, o.multiplier, tol) && withinTolerance(m.normalizedIndexOffset, o.normalizedIndexOffset, tol))
+}
+
+func (m *LogarithmicMapping) Index(value float64) int {
+	index := math.Log(value)*m.multiplier + m.normalizedIndexOffset
+	if index >= 0 {
+		return int(index)
+	} else {
+		return int(index) - 1 // faster than Math.Floor
+	}
+}
+
+func (m *LogarithmicMapping) Value(index int) float64 {
+	return m.LowerBound(index) * (1 + m.relativeAccuracy)
+}
+
+func (m *LogarithmicMapping) LowerBound(index int) float64 {
+	return math.Exp((float64(index) - m.normalizedIndexOffset) / m.multiplier)
+}
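+
+// Rough example: with a relative accuracy of 0.01, gamma is about 1.0202 and
+// the multiplier about 50, so Index(100) is roughly floor(ln(100)*50) = 230,
+// and Value(230) returns a value within 1% of every value that maps to bin 230.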
+
+func (m *LogarithmicMapping) MinIndexableValue() float64 {
+	return math.Max(
+		math.Exp((math.MinInt32-m.normalizedIndexOffset)/m.multiplier+1), // so that index >= MinInt32
+		minNormalFloat64*(1+m.relativeAccuracy)/(1-m.relativeAccuracy),
+	)
+}
+
+func (m *LogarithmicMapping) MaxIndexableValue() float64 {
+	return math.Min(
+		math.Exp((math.MaxInt32-m.normalizedIndexOffset)/m.multiplier-1), // so that index <= MaxInt32
+		math.Exp(expOverflow)/(1+m.relativeAccuracy),                     // so that math.Exp does not overflow
+	)
+}
+
+func (m *LogarithmicMapping) RelativeAccuracy() float64 {
+	return m.relativeAccuracy
+}
+
+func (m *LogarithmicMapping) gamma() float64 {
+	return (1 + m.relativeAccuracy) / (1 - m.relativeAccuracy)
+}
+
+// Generates a protobuf representation of this LogarithmicMapping.
+func (m *LogarithmicMapping) ToProto() *sketchpb.IndexMapping {
+	return &sketchpb.IndexMapping{
+		Gamma:         m.gamma(),
+		IndexOffset:   m.normalizedIndexOffset,
+		Interpolation: sketchpb.IndexMapping_NONE,
+	}
+}
+
+func (m *LogarithmicMapping) Encode(b *[]byte) {
+	enc.EncodeFlag(b, enc.FlagIndexMappingBaseLogarithmic)
+	enc.EncodeFloat64LE(b, m.gamma())
+	enc.EncodeFloat64LE(b, m.normalizedIndexOffset)
+}
+
+func (m *LogarithmicMapping) string() string {
+	var buffer bytes.Buffer
+	buffer.WriteString(fmt.Sprintf("relativeAccuracy: %v, multiplier: %v, normalizedIndexOffset: %v\n", m.relativeAccuracy, m.multiplier, m.normalizedIndexOffset))
+	return buffer.String()
+}
+
+var _ IndexMapping = (*LogarithmicMapping)(nil)
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.pb.go b/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.pb.go
new file mode 100644
index 0000000000..424dedfed3
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.pb.go
@@ -0,0 +1,448 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.25.0-devel
+// 	protoc        v3.14.0
+// source: ddsketch.proto
+
+package sketchpb
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type IndexMapping_Interpolation int32
+
+const (
+	IndexMapping_NONE      IndexMapping_Interpolation = 0
+	IndexMapping_LINEAR    IndexMapping_Interpolation = 1
+	IndexMapping_QUADRATIC IndexMapping_Interpolation = 2
+	IndexMapping_CUBIC     IndexMapping_Interpolation = 3
+)
+
+// Enum value maps for IndexMapping_Interpolation.
+var (
+	IndexMapping_Interpolation_name = map[int32]string{
+		0: "NONE",
+		1: "LINEAR",
+		2: "QUADRATIC",
+		3: "CUBIC",
+	}
+	IndexMapping_Interpolation_value = map[string]int32{
+		"NONE":      0,
+		"LINEAR":    1,
+		"QUADRATIC": 2,
+		"CUBIC":     3,
+	}
+)
+
+func (x IndexMapping_Interpolation) Enum() *IndexMapping_Interpolation {
+	p := new(IndexMapping_Interpolation)
+	*p = x
+	return p
+}
+
+func (x IndexMapping_Interpolation) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (IndexMapping_Interpolation) Descriptor() protoreflect.EnumDescriptor {
+	return file_ddsketch_proto_enumTypes[0].Descriptor()
+}
+
+func (IndexMapping_Interpolation) Type() protoreflect.EnumType {
+	return &file_ddsketch_proto_enumTypes[0]
+}
+
+func (x IndexMapping_Interpolation) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use IndexMapping_Interpolation.Descriptor instead.
+func (IndexMapping_Interpolation) EnumDescriptor() ([]byte, []int) {
+	return file_ddsketch_proto_rawDescGZIP(), []int{1, 0}
+}
+
+// A DDSketch is essentially a histogram that partitions the range of positive values into an infinite number of
+// indexed bins whose size grows exponentially. It keeps track of the number of values (or possibly floating-point
+// weights) added to each bin. Negative values are partitioned like positive values, symmetrically to zero.
+// The value zero as well as its close neighborhood that would be mapped to extreme bin indexes is mapped to a specific
+// counter.
+type DDSketch struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The mapping between positive values and the bin indexes they belong to.
+	Mapping *IndexMapping `protobuf:"bytes,1,opt,name=mapping,proto3" json:"mapping,omitempty"`
+	// The store for keeping track of positive values.
+	PositiveValues *Store `protobuf:"bytes,2,opt,name=positiveValues,proto3" json:"positiveValues,omitempty"`
+	// The store for keeping track of negative values. A negative value v is mapped using its positive opposite -v.
+	NegativeValues *Store `protobuf:"bytes,3,opt,name=negativeValues,proto3" json:"negativeValues,omitempty"`
+	// The count for the value zero and its close neighborhood (whose width depends on the mapping).
+	ZeroCount float64 `protobuf:"fixed64,4,opt,name=zeroCount,proto3" json:"zeroCount,omitempty"`
+}
+
+func (x *DDSketch) Reset() {
+	*x = DDSketch{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_ddsketch_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *DDSketch) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DDSketch) ProtoMessage() {}
+
+func (x *DDSketch) ProtoReflect() protoreflect.Message {
+	mi := &file_ddsketch_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DDSketch.ProtoReflect.Descriptor instead.
+func (*DDSketch) Descriptor() ([]byte, []int) {
+	return file_ddsketch_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *DDSketch) GetMapping() *IndexMapping {
+	if x != nil {
+		return x.Mapping
+	}
+	return nil
+}
+
+func (x *DDSketch) GetPositiveValues() *Store {
+	if x != nil {
+		return x.PositiveValues
+	}
+	return nil
+}
+
+func (x *DDSketch) GetNegativeValues() *Store {
+	if x != nil {
+		return x.NegativeValues
+	}
+	return nil
+}
+
+func (x *DDSketch) GetZeroCount() float64 {
+	if x != nil {
+		return x.ZeroCount
+	}
+	return 0
+}
+
+// How to map positive values to the bins they belong to.
+type IndexMapping struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The gamma parameter of the mapping, such that the bin index that a value v belongs to is roughly equal to
+	// log(v)/log(gamma).
+	Gamma float64 `protobuf:"fixed64,1,opt,name=gamma,proto3" json:"gamma,omitempty"`
+	// An offset that can be used to shift all bin indexes.
+	IndexOffset float64 `protobuf:"fixed64,2,opt,name=indexOffset,proto3" json:"indexOffset,omitempty"`
+	// To speed up the computation of the index a value belongs to, the computation of the log may be approximated using
+	// the fact that the log to the base 2 of powers of 2 can be computed at a low cost from the binary representation of
+	// the input value. Other values can be approximated by interpolating between successive powers of 2 (linearly,
+	// quadratically or cubically).
+	// NONE means that the log is to be computed exactly (no interpolation).
+	Interpolation IndexMapping_Interpolation `protobuf:"varint,3,opt,name=interpolation,proto3,enum=IndexMapping_Interpolation" json:"interpolation,omitempty"`
+}
+
+func (x *IndexMapping) Reset() {
+	*x = IndexMapping{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_ddsketch_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *IndexMapping) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IndexMapping) ProtoMessage() {}
+
+func (x *IndexMapping) ProtoReflect() protoreflect.Message {
+	mi := &file_ddsketch_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use IndexMapping.ProtoReflect.Descriptor instead.
+func (*IndexMapping) Descriptor() ([]byte, []int) {
+	return file_ddsketch_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *IndexMapping) GetGamma() float64 {
+	if x != nil {
+		return x.Gamma
+	}
+	return 0
+}
+
+func (x *IndexMapping) GetIndexOffset() float64 {
+	if x != nil {
+		return x.IndexOffset
+	}
+	return 0
+}
+
+func (x *IndexMapping) GetInterpolation() IndexMapping_Interpolation {
+	if x != nil {
+		return x.Interpolation
+	}
+	return IndexMapping_NONE
+}
+
+// A Store maps bin indexes to their respective counts.
+// Counts can be encoded sparsely using binCounts, but also in a contiguous way using contiguousBinCounts and
+// contiguousBinIndexOffset. Given that non-empty bins are in practice usually contiguous or close to one another, the
+// latter contiguous encoding method is usually more efficient than the sparse one.
+// Both encoding methods can be used together. If a bin appears in both the sparse and the contiguous encodings, its
+// count value is the sum of the counts from both encodings.
+type Store struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The bin counts, encoded sparsely.
+	BinCounts map[int32]float64 `protobuf:"bytes,1,rep,name=binCounts,proto3" json:"binCounts,omitempty" protobuf_key:"zigzag32,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"`
+	// The bin counts, encoded contiguously. The values of contiguousBinCounts are the counts for the bins of indexes
+	// o, o+1, o+2, etc., where o is contiguousBinIndexOffset.
+	ContiguousBinCounts      []float64 `protobuf:"fixed64,2,rep,packed,name=contiguousBinCounts,proto3" json:"contiguousBinCounts,omitempty"`
+	ContiguousBinIndexOffset int32     `protobuf:"zigzag32,3,opt,name=contiguousBinIndexOffset,proto3" json:"contiguousBinIndexOffset,omitempty"`
+}
+
+func (x *Store) Reset() {
+	*x = Store{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_ddsketch_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Store) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Store) ProtoMessage() {}
+
+func (x *Store) ProtoReflect() protoreflect.Message {
+	mi := &file_ddsketch_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Store.ProtoReflect.Descriptor instead.
+func (*Store) Descriptor() ([]byte, []int) {
+	return file_ddsketch_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Store) GetBinCounts() map[int32]float64 {
+	if x != nil {
+		return x.BinCounts
+	}
+	return nil
+}
+
+func (x *Store) GetContiguousBinCounts() []float64 {
+	if x != nil {
+		return x.ContiguousBinCounts
+	}
+	return nil
+}
+
+func (x *Store) GetContiguousBinIndexOffset() int32 {
+	if x != nil {
+		return x.ContiguousBinIndexOffset
+	}
+	return 0
+}
+
+var File_ddsketch_proto protoreflect.FileDescriptor
+
+var file_ddsketch_proto_rawDesc = []byte{
+	0x0a, 0x0e, 0x64, 0x64, 0x73, 0x6b, 0x65, 0x74, 0x63, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x22, 0xb1, 0x01, 0x0a, 0x08, 0x44, 0x44, 0x53, 0x6b, 0x65, 0x74, 0x63, 0x68, 0x12, 0x27, 0x0a,
+	0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d,
+	0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6d,
+	0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x2e, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69,
+	0x76, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x06,
+	0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+	0x76, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x06,
+	0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f,
+	0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f, 0x43,
+	0x6f, 0x75, 0x6e, 0x74, 0x22, 0xca, 0x01, 0x0a, 0x0c, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4d, 0x61,
+	0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x67, 0x61, 0x6d, 0x6d, 0x61, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x67, 0x61, 0x6d, 0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x69,
+	0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01,
+	0x52, 0x0b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x41, 0x0a,
+	0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
+	0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4d, 0x61, 0x70, 0x70,
+	0x69, 0x6e, 0x67, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x52, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x22, 0x3f, 0x0a, 0x0d, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4c,
+	0x49, 0x4e, 0x45, 0x41, 0x52, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x51, 0x55, 0x41, 0x44, 0x52,
+	0x41, 0x54, 0x49, 0x43, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x55, 0x42, 0x49, 0x43, 0x10,
+	0x03, 0x22, 0xec, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x33, 0x0a, 0x09, 0x62,
+	0x69, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15,
+	0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73,
+	0x12, 0x34, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x67, 0x75, 0x6f, 0x75, 0x73, 0x42, 0x69,
+	0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x01, 0x42, 0x02, 0x10,
+	0x01, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x67, 0x75, 0x6f, 0x75, 0x73, 0x42, 0x69, 0x6e,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x18, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x67,
+	0x75, 0x6f, 0x75, 0x73, 0x42, 0x69, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x66, 0x73,
+	0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x18, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x67,
+	0x75, 0x6f, 0x75, 0x73, 0x42, 0x69, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x66, 0x73,
+	0x65, 0x74, 0x1a, 0x3c, 0x0a, 0x0e, 0x42, 0x69, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45,
+	0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x11, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+	0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44,
+	0x61, 0x74, 0x61, 0x44, 0x6f, 0x67, 0x2f, 0x73, 0x6b, 0x65, 0x74, 0x63, 0x68, 0x65, 0x73, 0x2d,
+	0x67, 0x6f, 0x2f, 0x64, 0x64, 0x73, 0x6b, 0x65, 0x74, 0x63, 0x68, 0x2f, 0x70, 0x62, 0x2f, 0x73,
+	0x6b, 0x65, 0x74, 0x63, 0x68, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_ddsketch_proto_rawDescOnce sync.Once
+	file_ddsketch_proto_rawDescData = file_ddsketch_proto_rawDesc
+)
+
+func file_ddsketch_proto_rawDescGZIP() []byte {
+	file_ddsketch_proto_rawDescOnce.Do(func() {
+		file_ddsketch_proto_rawDescData = protoimpl.X.CompressGZIP(file_ddsketch_proto_rawDescData)
+	})
+	return file_ddsketch_proto_rawDescData
+}
+
+var file_ddsketch_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_ddsketch_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_ddsketch_proto_goTypes = []interface{}{
+	(IndexMapping_Interpolation)(0), // 0: IndexMapping.Interpolation
+	(*DDSketch)(nil),                // 1: DDSketch
+	(*IndexMapping)(nil),            // 2: IndexMapping
+	(*Store)(nil),                   // 3: Store
+	nil,                             // 4: Store.BinCountsEntry
+}
+var file_ddsketch_proto_depIdxs = []int32{
+	2, // 0: DDSketch.mapping:type_name -> IndexMapping
+	3, // 1: DDSketch.positiveValues:type_name -> Store
+	3, // 2: DDSketch.negativeValues:type_name -> Store
+	0, // 3: IndexMapping.interpolation:type_name -> IndexMapping.Interpolation
+	4, // 4: Store.binCounts:type_name -> Store.BinCountsEntry
+	5, // [5:5] is the sub-list for method output_type
+	5, // [5:5] is the sub-list for method input_type
+	5, // [5:5] is the sub-list for extension type_name
+	5, // [5:5] is the sub-list for extension extendee
+	0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_ddsketch_proto_init() }
+func file_ddsketch_proto_init() {
+	if File_ddsketch_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_ddsketch_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*DDSketch); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_ddsketch_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*IndexMapping); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_ddsketch_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Store); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_ddsketch_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   4,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_ddsketch_proto_goTypes,
+		DependencyIndexes: file_ddsketch_proto_depIdxs,
+		EnumInfos:         file_ddsketch_proto_enumTypes,
+		MessageInfos:      file_ddsketch_proto_msgTypes,
+	}.Build()
+	File_ddsketch_proto = out.File
+	file_ddsketch_proto_rawDesc = nil
+	file_ddsketch_proto_goTypes = nil
+	file_ddsketch_proto_depIdxs = nil
+}
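
For reference, a minimal sketch of how the sketchpb.Store message added above can be populated with both encodings (illustrative only, not part of the vendored change; the field names come from the generated code above, and the total is worked out by hand):

package main

import (
	"fmt"

	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
)

func main() {
	// Bin 3 appears in both encodings; its effective count is 2 + 1.5 = 3.5.
	s := &sketchpb.Store{
		BinCounts:                map[int32]float64{3: 2},
		ContiguousBinCounts:      []float64{1, 1.5, 0.5}, // counts for bins 2, 3, 4
		ContiguousBinIndexOffset: 2,
	}
	total := 0.0
	for _, c := range s.GetBinCounts() {
		total += c
	}
	for _, c := range s.GetContiguousBinCounts() {
		total += c
	}
	fmt.Println("total count:", total) // total count: 5
}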
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/stat/summary.go b/vendor/github.com/DataDog/sketches-go/ddsketch/stat/summary.go
new file mode 100644
index 0000000000..0381a055b5
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/stat/summary.go
@@ -0,0 +1,147 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package stat
+
+import "math"
+
+// SummaryStatistics keeps track of the count, the sum, the min and the max of
+// recorded values. We use a compensated sum to avoid accumulating rounding
+// errors (see https://en.wikipedia.org/wiki/Kahan_summation_algorithm).
+type SummaryStatistics struct {
+	count           float64
+	sum             float64
+	sumCompensation float64
+	simpleSum       float64
+	min             float64
+	max             float64
+}
+
+func NewSummaryStatistics() *SummaryStatistics {
+	return &SummaryStatistics{
+		count:           0,
+		sum:             0,
+		sumCompensation: 0,
+		simpleSum:       0,
+		min:             math.Inf(1),
+		max:             math.Inf(-1),
+	}
+}
+
+func (s *SummaryStatistics) Count() float64 {
+	return s.count
+}
+
+func (s *SummaryStatistics) Sum() float64 {
+	// Adding both terms as the final sum gives better error bounds.
+	tmp := s.sum + s.sumCompensation
+	if math.IsNaN(tmp) && math.IsInf(s.simpleSum, 0) {
+		// If the compensated sum is spuriously NaN from accumulating one or more same-signed infinite
+		// values, return the correctly-signed infinity stored in simpleSum.
+		return s.simpleSum
+	} else {
+		return tmp
+	}
+}
+
+func (s *SummaryStatistics) Min() float64 {
+	return s.min
+}
+
+func (s *SummaryStatistics) Max() float64 {
+	return s.max
+}
+
+func (s *SummaryStatistics) Add(value, count float64) {
+	s.AddToCount(count)
+	s.AddToSum(value * count)
+	if value < s.min {
+		s.min = value
+	}
+	if value > s.max {
+		s.max = value
+	}
+}
+
+func (s *SummaryStatistics) AddToCount(addend float64) {
+	s.count += addend
+}
+
+func (s *SummaryStatistics) AddToSum(addend float64) {
+	s.sumWithCompensation(addend)
+	s.simpleSum += addend
+}
+
+func (s *SummaryStatistics) MergeWith(o *SummaryStatistics) {
+	s.count += o.count
+	s.sumWithCompensation(o.sum)
+	s.sumWithCompensation(o.sumCompensation)
+	s.simpleSum += o.simpleSum
+	if o.min < s.min {
+		s.min = o.min
+	}
+	if o.max > s.max {
+		s.max = o.max
+	}
+}
+
+func (s *SummaryStatistics) sumWithCompensation(value float64) {
+	tmp := value - s.sumCompensation
+	velvel := s.sum + tmp // little wolf of rounding error
+	s.sumCompensation = velvel - s.sum - tmp
+	s.sum = velvel
+}
+
+// Reweight adjusts the statistics so that they are equal to what they would
+// have been if AddWithCount had been called with counts multiplied by factor.
+func (s *SummaryStatistics) Reweight(factor float64) {
+	s.count *= factor
+	s.sum *= factor
+	s.sumCompensation *= factor
+	s.simpleSum *= factor
+	if factor == 0 {
+		s.min = math.Inf(1)
+		s.max = math.Inf(-1)
+	}
+}
+
+// Rescale adjusts the statistics so that they are equal to what they would have
+// been if AddWithCount had been called with values multiplied by factor.
+func (s *SummaryStatistics) Rescale(factor float64) {
+	s.sum *= factor
+	s.sumCompensation *= factor
+	s.simpleSum *= factor
+	if factor > 0 {
+		s.min *= factor
+		s.max *= factor
+	} else if factor < 0 {
+		tmp := s.max * factor
+		s.max = s.min * factor
+		s.min = tmp
+	} else if s.count != 0 {
+		s.min = 0
+		s.max = 0
+	}
+}
+
+func (s *SummaryStatistics) Clear() {
+	s.count = 0
+	s.sum = 0
+	s.sumCompensation = 0
+	s.simpleSum = 0
+	s.min = math.Inf(1)
+	s.max = math.Inf(-1)
+}
+
+func (s *SummaryStatistics) Copy() *SummaryStatistics {
+	return &SummaryStatistics{
+		count:           s.count,
+		sum:             s.sum,
+		sumCompensation: s.sumCompensation,
+		simpleSum:       s.simpleSum,
+		min:             s.min,
+		max:             s.max,
+	}
+}
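
A minimal usage sketch of the SummaryStatistics API introduced above (illustrative only, not part of the vendored code; the import path matches the vendor path of this file, and the expected output is worked out by hand):

package main

import (
	"fmt"

	"github.com/DataDog/sketches-go/ddsketch/stat"
)

func main() {
	s := stat.NewSummaryStatistics()
	s.Add(2.5, 3) // value 2.5 recorded 3 times
	s.Add(-1, 1)
	fmt.Println(s.Count(), s.Sum(), s.Min(), s.Max()) // 4 6.5 -1 2.5
}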
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/bin.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/bin.go
new file mode 100644
index 0000000000..19843ba9e1
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/bin.go
@@ -0,0 +1,28 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package store
+
+import "errors"
+
+type Bin struct {
+	index int
+	count float64
+}
+
+func NewBin(index int, count float64) (*Bin, error) {
+	if count < 0 {
+		return nil, errors.New("The count cannot be negative")
+	}
+	return &Bin{index: index, count: count}, nil
+}
+
+func (b Bin) Index() int {
+	return b.index
+}
+
+func (b Bin) Count() float64 {
+	return b.count
+}
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/buffered_paginated.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/buffered_paginated.go
new file mode 100644
index 0000000000..f08bf555e5
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/buffered_paginated.go
@@ -0,0 +1,660 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package store
+
+import (
+	"errors"
+	"sort"
+
+	enc "github.com/DataDog/sketches-go/ddsketch/encoding"
+	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
+)
+
+const (
+	ptrSize         = 32 << (^uintptr(0) >> 63)
+	intSize         = 32 << (^uint(0) >> 63)
+	float64size     = 64
+	bufferEntrySize = intSize
+	countSize       = float64size
+
+	defaultPageLenLog2 = 5 // pageLen = 32
+)
+
+// BufferedPaginatedStore allocates storage for counts in aligned fixed-size
+// pages, themselves stored in a dynamically-sized slice. A page encodes the
+// counts for a contiguous range of indexes, and two pages that are contiguous
+// in the slice encode ranges that are contiguous. In addition, input indexes
+// that are added to the store with a count equal to 1 can be stored in a
+// buffer.
+// The store favors using the buffer and only creates pages when the memory size
+// of the page is no greater than the memory space that is needed to keep in the
+// buffer the indexes that could otherwise be encoded in that page. That means
+// that some indexes may stay indefinitely in the buffer if, to be removed from
+// the buffer, they would create a page that is almost empty. The process that
+// transfers indexes from the buffer to pages is called compaction.
+// This store never collapses or merges bins, therefore, it does not introduce
+// any error in itself. In particular, MinIndex(), MaxIndex(), Bins() and
+// KeyAtRank() return exact results.
+// There is no upper bound on the memory size that this store needs to encode
+// input indexes, and some input data distributions may make it reach large
+// sizes. However, thanks to the buffer and the fact that only required pages
+// are allocated, it can be much more space efficient than alternative stores,
+// especially dense stores, in various situations, including when only a few
+// indexes are added (with their counts equal to 1), when the input data has a
+// few outliers or when the input data distribution is multimodal.
+type BufferedPaginatedStore struct {
+	buffer                     []int // FIXME: in practice, int32 (even int16, depending on the accuracy parameter) is enough
+	bufferCompactionTriggerLen int   // compaction happens only after this buffer length is reached
+
+	pages        [][]float64 // len == cap, the slice is always used to its maximum capacity
+	minPageIndex int         // minPageIndex == maxInt iff pages are unused (they may still be allocated)
+	pageLenLog2  int
+	pageLenMask  int
+}
+
+func NewBufferedPaginatedStore() *BufferedPaginatedStore {
+	initialBufferCapacity := 4
+	pageLenLog2 := defaultPageLenLog2
+	pageLen := 1 << pageLenLog2
+
+	return &BufferedPaginatedStore{
+		buffer:                     make([]int, 0, initialBufferCapacity),
+		bufferCompactionTriggerLen: 2 * pageLen,
+		pages:                      nil,
+		minPageIndex:               maxInt,
+		pageLenLog2:                pageLenLog2,
+		pageLenMask:                pageLen - 1,
+	}
+}
+
+// pageIndex returns the page number the given index falls on.
+func (s *BufferedPaginatedStore) pageIndex(index int) int {
+	return index >> s.pageLenLog2
+}
+
+// lineIndex returns the line number within a page that the given index falls on.
+func (s *BufferedPaginatedStore) lineIndex(index int) int {
+	return index & s.pageLenMask
+}
+
+// index returns the store-level index for a given page number and a line within that page.
+func (s *BufferedPaginatedStore) index(pageIndex, lineIndex int) int {
+	return pageIndex<<s.pageLenLog2 + lineIndex
+}
+
+// page returns the page for the provided pageIndex, or nil. If the page does
+// not exist, it is created if and only if ensureExists is true.
+func (s *BufferedPaginatedStore) page(pageIndex int, ensureExists bool) []float64 {
+	pageLen := 1 << s.pageLenLog2
+
+	if pageIndex >= s.minPageIndex && pageIndex < s.minPageIndex+len(s.pages) {
+		// No need to extend s.pages.
+		page := &s.pages[pageIndex-s.minPageIndex]
+		if ensureExists && len(*page) == 0 {
+			*page = append(*page, make([]float64, pageLen)...)
+		}
+		return *page
+	}
+
+	if !ensureExists {
+		return nil
+	}
+
+	if pageIndex < s.minPageIndex {
+		if s.minPageIndex == maxInt {
+			if len(s.pages) == 0 {
+				s.pages = append(s.pages, make([][]float64, s.newPagesLen(1))...)
+			}
+			s.minPageIndex = pageIndex - len(s.pages)/2
+		} else {
+			// Extends s.pages left.
+			newLen := s.newPagesLen(s.minPageIndex - pageIndex + 1 + len(s.pages))
+			addedLen := newLen - len(s.pages)
+			s.pages = append(s.pages, make([][]float64, addedLen)...)
+			copy(s.pages[addedLen:], s.pages)
+			for i := 0; i < addedLen; i++ {
+				s.pages[i] = nil
+			}
+			s.minPageIndex -= addedLen
+		}
+	} else {
+		// Extends s.pages right.
+		s.pages = append(s.pages, make([][]float64, s.newPagesLen(pageIndex-s.minPageIndex+1)-len(s.pages))...)
+	}
+
+	page := &s.pages[pageIndex-s.minPageIndex]
+	if len(*page) == 0 {
+		*page = append(*page, make([]float64, pageLen)...)
+	}
+	return *page
+}
+
+func (s *BufferedPaginatedStore) newPagesLen(required int) int {
+	// Grow in size by multiples of 64 bytes
+	pageGrowthIncrement := 64 * 8 / ptrSize
+	return (required + pageGrowthIncrement - 1) & -pageGrowthIncrement
+}
+
+// compact transfers indexes from the buffer to the pages. It only creates new
+// pages if they can encode enough buffered indexes so that it frees more space
+// in the buffer than the new page takes.
+func (s *BufferedPaginatedStore) compact() {
+	pageLen := 1 << s.pageLenLog2
+
+	s.sortBuffer()
+
+	for bufferPos := 0; bufferPos < len(s.buffer); {
+		bufferPageStart := bufferPos
+		pageIndex := s.pageIndex(s.buffer[bufferPageStart])
+		bufferPos++
+		for bufferPos < len(s.buffer) && s.pageIndex(s.buffer[bufferPos]) == pageIndex {
+			bufferPos++
+		}
+		bufferPageEnd := bufferPos
+
+		// We avoid creating a new page if it would take more memory space than
+		// what we would free in the buffer. Note that even when the page itself
+		// takes less memory space than the buffered indexes that can be encoded
+		// in the page, because we may have to extend s.pages, the store may end
+		// up larger. However, for the sake of simplicity, we ignore the length
+		// of s.pages.
+		ensureExists := (bufferPageEnd-bufferPageStart)*bufferEntrySize >= pageLen*float64size
+		newPage := s.page(pageIndex, ensureExists)
+		if len(newPage) > 0 {
+			for _, index := range s.buffer[bufferPageStart:bufferPageEnd] {
+				newPage[s.lineIndex(index)]++
+			}
+			copy(s.buffer[bufferPageStart:], s.buffer[bufferPageEnd:])
+			s.buffer = s.buffer[:len(s.buffer)+bufferPageStart-bufferPageEnd]
+			bufferPos = bufferPageStart
+		}
+	}
+
+	s.bufferCompactionTriggerLen = len(s.buffer) + pageLen
+}
+
+func (s *BufferedPaginatedStore) sortBuffer() {
+	sort.Slice(s.buffer, func(i, j int) bool { return s.buffer[i] < s.buffer[j] })
+}
+
+func (s *BufferedPaginatedStore) Add(index int) {
+	pageIndex := s.pageIndex(index)
+	if pageIndex >= s.minPageIndex && pageIndex < s.minPageIndex+len(s.pages) {
+		page := s.pages[pageIndex-s.minPageIndex]
+		if len(page) > 0 {
+			page[s.lineIndex(index)]++
+			return
+		}
+	}
+
+	// The page does not exist, use the buffer.
+	if len(s.buffer) == cap(s.buffer) && len(s.buffer) >= s.bufferCompactionTriggerLen {
+		s.compact()
+	}
+
+	s.buffer = append(s.buffer, index)
+}
+
+func (s *BufferedPaginatedStore) AddBin(bin Bin) {
+	s.AddWithCount(bin.Index(), bin.Count())
+}
+
+func (s *BufferedPaginatedStore) AddWithCount(index int, count float64) {
+	if count == 0 {
+		return
+	} else if count == 1 {
+		s.Add(index)
+	} else {
+		s.page(s.pageIndex(index), true)[s.lineIndex(index)] += count
+	}
+}
+
+func (s *BufferedPaginatedStore) IsEmpty() bool {
+	if len(s.buffer) > 0 {
+		return false
+	}
+	for _, page := range s.pages {
+		for _, count := range page {
+			if count > 0 {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func (s *BufferedPaginatedStore) TotalCount() float64 {
+	totalCount := float64(len(s.buffer))
+	for _, page := range s.pages {
+		for _, count := range page {
+			totalCount += count
+		}
+	}
+	return totalCount
+}
+
+func (s *BufferedPaginatedStore) MinIndex() (int, error) {
+	isEmpty := true
+
+	// Iterate over the buffer.
+	var minIndex int
+	for _, index := range s.buffer {
+		if isEmpty || index < minIndex {
+			isEmpty = false
+			minIndex = index
+		}
+	}
+
+	// Iterate over the pages.
+	for pageIndex := s.minPageIndex; pageIndex < s.minPageIndex+len(s.pages) && (isEmpty || pageIndex <= s.pageIndex(minIndex)); pageIndex++ {
+		page := s.pages[pageIndex-s.minPageIndex]
+		if len(page) == 0 {
+			continue
+		}
+
+		var lineIndexRangeEnd int
+		if !isEmpty && pageIndex == s.pageIndex(minIndex) {
+			lineIndexRangeEnd = s.lineIndex(minIndex)
+		} else {
+			lineIndexRangeEnd = 1 << s.pageLenLog2
+		}
+
+		for lineIndex := 0; lineIndex < lineIndexRangeEnd; lineIndex++ {
+			if page[lineIndex] > 0 {
+				return s.index(pageIndex, lineIndex), nil
+			}
+		}
+	}
+
+	if isEmpty {
+		return 0, errUndefinedMinIndex
+	} else {
+		return minIndex, nil
+	}
+}
+
+func (s *BufferedPaginatedStore) MaxIndex() (int, error) {
+	isEmpty := true
+
+	// Iterate over the buffer.
+	var maxIndex int
+	for _, index := range s.buffer {
+		if isEmpty || index > maxIndex {
+			isEmpty = false
+			maxIndex = index
+		}
+	}
+
+	// Iterate over the pages.
+	for pageIndex := s.minPageIndex + len(s.pages) - 1; pageIndex >= s.minPageIndex && (isEmpty || pageIndex >= s.pageIndex(maxIndex)); pageIndex-- {
+		page := s.pages[pageIndex-s.minPageIndex]
+		if len(page) == 0 {
+			continue
+		}
+
+		var lineIndexRangeStart int
+		if !isEmpty && pageIndex == s.pageIndex(maxIndex) {
+			lineIndexRangeStart = s.lineIndex(maxIndex)
+		} else {
+			lineIndexRangeStart = 0
+		}
+
+		for lineIndex := len(page) - 1; lineIndex >= lineIndexRangeStart; lineIndex-- {
+			if page[lineIndex] > 0 {
+				return s.index(pageIndex, lineIndex), nil
+			}
+		}
+	}
+
+	if isEmpty {
+		return 0, errUndefinedMaxIndex
+	} else {
+		return maxIndex, nil
+	}
+}
+
+func (s *BufferedPaginatedStore) KeyAtRank(rank float64) int {
+	if rank < 0 {
+		rank = 0
+	}
+	key, err := s.minIndexWithCumulCount(func(cumulCount float64) bool {
+		return cumulCount > rank
+	})
+
+	if err != nil {
+		maxIndex, err := s.MaxIndex()
+		if err == nil {
+			return maxIndex
+		} else {
+			// FIXME: make Store's KeyAtRank consistent with MinIndex and MaxIndex
+			return 0
+		}
+	}
+	return key
+}
+
+// minIndexWithCumulCount returns the minimum index whose cumulative count (that
+// is, the sum of the counts associated with the indexes less than or equal to
+// the index) satisfies the predicate.
+func (s *BufferedPaginatedStore) minIndexWithCumulCount(predicate func(float64) bool) (int, error) {
+	s.sortBuffer()
+	cumulCount := float64(0)
+
+	// Iterate over the pages and the buffer simultaneously.
+	bufferPos := 0
+	for pageOffset, page := range s.pages {
+		for lineIndex, count := range page {
+			index := s.index(s.minPageIndex+pageOffset, lineIndex)
+
+			// Iterate over the buffer until index is reached.
+			for ; bufferPos < len(s.buffer) && s.buffer[bufferPos] < index; bufferPos++ {
+				cumulCount++
+				if predicate(cumulCount) {
+					return s.buffer[bufferPos], nil
+				}
+			}
+			cumulCount += count
+			if predicate(cumulCount) {
+				return index, nil
+			}
+		}
+	}
+
+	// Iterate over the rest of the buffer
+	for ; bufferPos < len(s.buffer); bufferPos++ {
+		cumulCount++
+		if predicate(cumulCount) {
+			return s.buffer[bufferPos], nil
+		}
+	}
+
+	return 0, errors.New("the predicate on the cumulative count is never verified")
+}
+
+func (s *BufferedPaginatedStore) MergeWith(other Store) {
+	o, ok := other.(*BufferedPaginatedStore)
+	if ok && len(o.pages) == 0 {
+		// Optimized merging if the other store only has buffered data.
+		oBufferOffset := 0
+		for {
+			bufferCapOverhead := max(cap(s.buffer), s.bufferCompactionTriggerLen) - len(s.buffer)
+			if bufferCapOverhead >= len(o.buffer)-oBufferOffset {
+				s.buffer = append(s.buffer, o.buffer[oBufferOffset:]...)
+				return
+			}
+			s.buffer = append(s.buffer, o.buffer[oBufferOffset:oBufferOffset+bufferCapOverhead]...)
+			oBufferOffset += bufferCapOverhead
+			s.compact()
+		}
+	}
+
+	// Fallback merging.
+	for bin := range other.Bins() {
+		s.AddBin(bin)
+	}
+}
+
+func (s *BufferedPaginatedStore) MergeWithProto(pb *sketchpb.Store) {
+	for index, count := range pb.BinCounts {
+		s.AddWithCount(int(index), count)
+	}
+	for indexOffset, count := range pb.ContiguousBinCounts {
+		s.AddWithCount(int(pb.ContiguousBinIndexOffset)+indexOffset, count)
+	}
+}
+
+func (s *BufferedPaginatedStore) Bins() <-chan Bin {
+	s.sortBuffer()
+	ch := make(chan Bin)
+	go func() {
+		defer close(ch)
+		bufferPos := 0
+
+		// Iterate over the pages and the buffer simultaneously.
+		for pageOffset, page := range s.pages {
+			for lineIndex, count := range page {
+				if count == 0 {
+					continue
+				}
+
+				index := s.index(s.minPageIndex+pageOffset, lineIndex)
+
+				// Iterate over the buffer until index is reached.
+				var indexBufferStartPos int
+				for {
+					indexBufferStartPos = bufferPos
+					if indexBufferStartPos >= len(s.buffer) || s.buffer[indexBufferStartPos] > index {
+						break
+					}
+					bufferPos++
+					for bufferPos < len(s.buffer) && s.buffer[bufferPos] == s.buffer[indexBufferStartPos] {
+						bufferPos++
+					}
+					if s.buffer[indexBufferStartPos] == index {
+						break
+					}
+					ch <- Bin{index: s.buffer[indexBufferStartPos], count: float64(bufferPos - indexBufferStartPos)}
+				}
+				ch <- Bin{index: index, count: count + float64(bufferPos-indexBufferStartPos)}
+			}
+		}
+
+		// Iterate over the rest of the buffer.
+		for bufferPos < len(s.buffer) {
+			indexBufferStartPos := bufferPos
+			bufferPos++
+			for bufferPos < len(s.buffer) && s.buffer[bufferPos] == s.buffer[indexBufferStartPos] {
+				bufferPos++
+			}
+			bin := Bin{index: s.buffer[indexBufferStartPos], count: float64(bufferPos - indexBufferStartPos)}
+			ch <- bin
+		}
+	}()
+	return ch
+}
+
+func (s *BufferedPaginatedStore) ForEach(f func(index int, count float64) (stop bool)) {
+	s.sortBuffer()
+	bufferPos := 0
+
+	// Iterate over the pages and the buffer simultaneously.
+	for pageOffset, page := range s.pages {
+		for lineIndex, count := range page {
+			if count == 0 {
+				continue
+			}
+
+			index := s.index(s.minPageIndex+pageOffset, lineIndex)
+
+			// Iterate over the buffer until index is reached.
+			var indexBufferStartPos int
+			for {
+				indexBufferStartPos = bufferPos
+				if indexBufferStartPos >= len(s.buffer) || s.buffer[indexBufferStartPos] > index {
+					break
+				}
+				bufferPos++
+				for bufferPos < len(s.buffer) && s.buffer[bufferPos] == s.buffer[indexBufferStartPos] {
+					bufferPos++
+				}
+				if s.buffer[indexBufferStartPos] == index {
+					break
+				}
+				if f(s.buffer[indexBufferStartPos], float64(bufferPos-indexBufferStartPos)) {
+					return
+				}
+			}
+			if f(index, count+float64(bufferPos-indexBufferStartPos)) {
+				return
+			}
+		}
+	}
+
+	// Iterate over the rest of the buffer.
+	for bufferPos < len(s.buffer) {
+		indexBufferStartPos := bufferPos
+		bufferPos++
+		for bufferPos < len(s.buffer) && s.buffer[bufferPos] == s.buffer[indexBufferStartPos] {
+			bufferPos++
+		}
+		if f(s.buffer[indexBufferStartPos], float64(bufferPos-indexBufferStartPos)) {
+			return
+		}
+	}
+}
+
+func (s *BufferedPaginatedStore) Copy() Store {
+	bufferCopy := make([]int, len(s.buffer))
+	copy(bufferCopy, s.buffer)
+	pagesCopy := make([][]float64, len(s.pages))
+	for i, page := range s.pages {
+		if len(page) > 0 {
+			pageCopy := make([]float64, len(page))
+			copy(pageCopy, page)
+			pagesCopy[i] = pageCopy
+		}
+	}
+	return &BufferedPaginatedStore{
+		buffer:                     bufferCopy,
+		bufferCompactionTriggerLen: s.bufferCompactionTriggerLen,
+		pages:                      pagesCopy,
+		minPageIndex:               s.minPageIndex,
+		pageLenLog2:                s.pageLenLog2,
+		pageLenMask:                s.pageLenMask,
+	}
+}
+
+func (s *BufferedPaginatedStore) Clear() {
+	s.buffer = s.buffer[:0]
+	for i := range s.pages {
+		s.pages[i] = s.pages[i][:0]
+	}
+	s.minPageIndex = maxInt
+}
+
+func (s *BufferedPaginatedStore) ToProto() *sketchpb.Store {
+	if s.IsEmpty() {
+		return &sketchpb.Store{}
+	}
+	// FIXME: add heuristic to use contiguousBinCounts when cheaper.
+	binCounts := make(map[int32]float64)
+	for bin := range s.Bins() {
+		binCounts[int32(bin.index)] = bin.count
+	}
+	return &sketchpb.Store{
+		BinCounts: binCounts,
+	}
+}
+
+func (s *BufferedPaginatedStore) Reweight(w float64) error {
+	if w <= 0 {
+		return errors.New("can't reweight by a negative factor")
+	}
+	if w == 1 {
+		return nil
+	}
+	buffer := s.buffer
+	s.buffer = s.buffer[:0]
+	for _, p := range s.pages {
+		for i := range p {
+			p[i] *= w
+		}
+	}
+	for _, index := range buffer {
+		s.AddWithCount(index, w)
+	}
+	return nil
+}
+
+func (s *BufferedPaginatedStore) Encode(b *[]byte, t enc.FlagType) {
+	if len(s.buffer) > 0 {
+		enc.EncodeFlag(b, enc.NewFlag(t, enc.BinEncodingIndexDeltas))
+		enc.EncodeUvarint64(b, uint64(len(s.buffer)))
+		previousIndex := 0
+		for _, index := range s.buffer {
+			enc.EncodeVarint64(b, int64(index-previousIndex))
+			previousIndex = index
+		}
+	}
+
+	for pageOffset, page := range s.pages {
+		if len(page) > 0 {
+			enc.EncodeFlag(b, enc.NewFlag(t, enc.BinEncodingContiguousCounts))
+			enc.EncodeUvarint64(b, uint64(len(page)))
+			enc.EncodeVarint64(b, int64(s.index(s.minPageIndex+pageOffset, 0)))
+			enc.EncodeVarint64(b, 1)
+			for _, count := range page {
+				enc.EncodeVarfloat64(b, count)
+			}
+		}
+	}
+}
+
+func (s *BufferedPaginatedStore) DecodeAndMergeWith(b *[]byte, encodingMode enc.SubFlag) error {
+	switch encodingMode {
+
+	case enc.BinEncodingIndexDeltas:
+		numBins, err := enc.DecodeUvarint64(b)
+		if err != nil {
+			return err
+		}
+		remaining := int(numBins)
+		index := int64(0)
+		// Process indexes in batches to avoid checking after each insertion
+		// whether compaction should happen.
+		for {
+			batchSize := min(remaining, max(cap(s.buffer), s.bufferCompactionTriggerLen)-len(s.buffer))
+			for i := 0; i < batchSize; i++ {
+				indexDelta, err := enc.DecodeVarint64(b)
+				if err != nil {
+					return err
+				}
+				index += indexDelta
+				s.buffer = append(s.buffer, int(index))
+			}
+			remaining -= batchSize
+			if remaining == 0 {
+				return nil
+			}
+			s.compact()
+		}
+
+	case enc.BinEncodingContiguousCounts:
+		numBins, err := enc.DecodeUvarint64(b)
+		if err != nil {
+			return err
+		}
+		indexOffset, err := enc.DecodeVarint64(b)
+		if err != nil {
+			return err
+		}
+		indexDelta, err := enc.DecodeVarint64(b)
+		if err != nil {
+			return err
+		}
+		pageLen := 1 << s.pageLenLog2
+		for i := uint64(0); i < numBins; {
+			page := s.page(s.pageIndex(int(indexOffset)), true)
+			lineIndex := s.lineIndex(int(indexOffset))
+			for lineIndex >= 0 && lineIndex < pageLen && i < numBins {
+				count, err := enc.DecodeVarfloat64(b)
+				if err != nil {
+					return err
+				}
+				page[lineIndex] += count
+				lineIndex += int(indexDelta)
+				indexOffset += indexDelta
+				i++
+			}
+		}
+		return nil
+
+	default:
+		return DecodeAndMergeWith(s, b, encodingMode)
+	}
+}
+
+var _ Store = (*BufferedPaginatedStore)(nil)
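
A small usage sketch of BufferedPaginatedStore, the new default store provider (illustrative only, not part of the vendored change; the import path matches the vendor path of this file, and the expected values are worked out from the code above):

package main

import (
	"fmt"

	"github.com/DataDog/sketches-go/ddsketch/store"
)

func main() {
	s := store.NewBufferedPaginatedStore()
	for i := 0; i < 100; i++ {
		s.Add(i) // unit counts go through the buffer until compaction
	}
	s.AddWithCount(1000, 5) // non-unit counts are written to a page directly
	minIdx, _ := s.MinIndex()
	maxIdx, _ := s.MaxIndex()
	fmt.Println(s.TotalCount(), minIdx, maxIdx, s.KeyAtRank(0)) // 105 0 1000 0
}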
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/collapsing_highest_dense_store.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/collapsing_highest_dense_store.go
new file mode 100644
index 0000000000..4cc78324af
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/collapsing_highest_dense_store.go
@@ -0,0 +1,187 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package store
+
+import (
+	"math"
+
+	enc "github.com/DataDog/sketches-go/ddsketch/encoding"
+)
+
+type CollapsingHighestDenseStore struct {
+	DenseStore
+	maxNumBins  int
+	isCollapsed bool
+}
+
+func NewCollapsingHighestDenseStore(maxNumBins int) *CollapsingHighestDenseStore {
+	return &CollapsingHighestDenseStore{
+		DenseStore:  DenseStore{minIndex: math.MaxInt32, maxIndex: math.MinInt32},
+		maxNumBins:  maxNumBins,
+		isCollapsed: false,
+	}
+}
+
+func (s *CollapsingHighestDenseStore) Add(index int) {
+	s.AddWithCount(index, float64(1))
+}
+
+func (s *CollapsingHighestDenseStore) AddBin(bin Bin) {
+	index := bin.Index()
+	count := bin.Count()
+	if count == 0 {
+		return
+	}
+	s.AddWithCount(index, count)
+}
+
+func (s *CollapsingHighestDenseStore) AddWithCount(index int, count float64) {
+	if count == 0 {
+		return
+	}
+	arrayIndex := s.normalize(index)
+	s.bins[arrayIndex] += count
+	s.count += count
+}
+
+// Normalize the store, if necessary, so that the counter of the specified index can be updated.
+func (s *CollapsingHighestDenseStore) normalize(index int) int {
+	if index > s.maxIndex {
+		if s.isCollapsed {
+			return len(s.bins) - 1
+		} else {
+			s.extendRange(index, index)
+			if s.isCollapsed {
+				return len(s.bins) - 1
+			}
+		}
+	} else if index < s.minIndex {
+		s.extendRange(index, index)
+	}
+	return index - s.offset
+}
+
+func (s *CollapsingHighestDenseStore) getNewLength(newMinIndex, newMaxIndex int) int {
+	return min(s.DenseStore.getNewLength(newMinIndex, newMaxIndex), s.maxNumBins)
+}
+
+func (s *CollapsingHighestDenseStore) extendRange(newMinIndex, newMaxIndex int) {
+	newMinIndex = min(newMinIndex, s.minIndex)
+	newMaxIndex = max(newMaxIndex, s.maxIndex)
+	if s.IsEmpty() {
+		initialLength := s.getNewLength(newMinIndex, newMaxIndex)
+		s.bins = append(s.bins, make([]float64, initialLength)...)
+		s.offset = newMinIndex
+		s.minIndex = newMinIndex
+		s.maxIndex = newMaxIndex
+		s.adjust(newMinIndex, newMaxIndex)
+	} else if newMinIndex >= s.offset && newMaxIndex < s.offset+len(s.bins) {
+		s.minIndex = newMinIndex
+		s.maxIndex = newMaxIndex
+	} else {
+		// To avoid shifting too often when nearing the capacity of the array,
+		// we may grow it before we actually reach the capacity.
+		newLength := s.getNewLength(newMinIndex, newMaxIndex)
+		if newLength > len(s.bins) {
+			s.bins = append(s.bins, make([]float64, newLength-len(s.bins))...)
+		}
+		s.adjust(newMinIndex, newMaxIndex)
+	}
+}
+
+// Adjust bins, offset, minIndex and maxIndex, without resizing the bins slice in order to make it fit the
+// specified range.
+func (s *CollapsingHighestDenseStore) adjust(newMinIndex, newMaxIndex int) {
+	if newMaxIndex-newMinIndex+1 > len(s.bins) {
+		// The range of indices is too wide, buckets of highest indices need to be collapsed.
+		newMaxIndex = newMinIndex + len(s.bins) - 1
+		if newMaxIndex <= s.minIndex {
+			// There will be only one non-empty bucket.
+			s.bins = make([]float64, len(s.bins))
+			s.offset = newMinIndex
+			s.maxIndex = newMaxIndex
+			s.bins[len(s.bins)-1] = s.count
+		} else {
+			shift := s.offset - newMinIndex
+			if shift > 0 {
+				// Collapse the buckets.
+				n := float64(0)
+				for i := newMaxIndex + 1; i <= s.maxIndex; i++ {
+					n += s.bins[i-s.offset]
+				}
+				s.resetBins(newMaxIndex+1, s.maxIndex)
+				s.bins[newMaxIndex-s.offset] += n
+				s.maxIndex = newMaxIndex
+				// Shift the buckets to make room for newMinIndex.
+				s.shiftCounts(shift)
+			} else {
+				// Shift the buckets to make room for newMaxIndex.
+				s.shiftCounts(shift)
+				s.maxIndex = newMaxIndex
+			}
+		}
+		s.minIndex = newMinIndex
+		s.isCollapsed = true
+	} else {
+		s.centerCounts(newMinIndex, newMaxIndex)
+	}
+}
+
+func (s *CollapsingHighestDenseStore) MergeWith(other Store) {
+	if other.IsEmpty() {
+		return
+	}
+	o, ok := other.(*CollapsingHighestDenseStore)
+	if !ok {
+		for bin := range other.Bins() {
+			s.AddBin(bin)
+		}
+		return
+	}
+	if o.minIndex < s.minIndex || o.maxIndex > s.maxIndex {
+		s.extendRange(o.minIndex, o.maxIndex)
+	}
+	idx := o.maxIndex
+	for ; idx > s.maxIndex && idx >= o.minIndex; idx-- {
+		s.bins[len(s.bins)-1] += o.bins[idx-o.offset]
+	}
+	for ; idx > o.minIndex; idx-- {
+		s.bins[idx-s.offset] += o.bins[idx-o.offset]
+	}
+	// This is a separate test so that the comparison in the previous loop is strict (>) and handles
+	// o.minIndex = Integer.MIN_VALUE.
+	if idx == o.minIndex {
+		s.bins[idx-s.offset] += o.bins[idx-o.offset]
+	}
+	s.count += o.count
+}
+
+func (s *CollapsingHighestDenseStore) Copy() Store {
+	bins := make([]float64, len(s.bins))
+	copy(bins, s.bins)
+	return &CollapsingHighestDenseStore{
+		DenseStore: DenseStore{
+			bins:     bins,
+			count:    s.count,
+			offset:   s.offset,
+			minIndex: s.minIndex,
+			maxIndex: s.maxIndex,
+		},
+		maxNumBins:  s.maxNumBins,
+		isCollapsed: s.isCollapsed,
+	}
+}
+
+func (s *CollapsingHighestDenseStore) Clear() {
+	s.DenseStore.Clear()
+	s.isCollapsed = false
+}
+
+func (s *CollapsingHighestDenseStore) DecodeAndMergeWith(r *[]byte, encodingMode enc.SubFlag) error {
+	return DecodeAndMergeWith(s, r, encodingMode)
+}
+
+var _ Store = (*CollapsingHighestDenseStore)(nil)
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/collapsing_lowest_dense_store.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/collapsing_lowest_dense_store.go
new file mode 100644
index 0000000000..7549083b64
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/collapsing_lowest_dense_store.go
@@ -0,0 +1,206 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package store
+
+import (
+	"math"
+
+	enc "github.com/DataDog/sketches-go/ddsketch/encoding"
+)
+
+// CollapsingLowestDenseStore is a dynamically growing contiguous (non-sparse) store.
+// The lower bins get combined so that the total number of bins does not exceed maxNumBins.
+type CollapsingLowestDenseStore struct {
+	DenseStore
+	maxNumBins  int
+	isCollapsed bool
+}
+
+func NewCollapsingLowestDenseStore(maxNumBins int) *CollapsingLowestDenseStore {
+	// Bins are not allocated until values are added.
+	// When the first value is added, a small number of bins are allocated. The number of bins will
+	// grow as needed up to maxNumBins.
+	return &CollapsingLowestDenseStore{
+		DenseStore:  DenseStore{minIndex: math.MaxInt32, maxIndex: math.MinInt32},
+		maxNumBins:  maxNumBins,
+		isCollapsed: false,
+	}
+}
+
+func (s *CollapsingLowestDenseStore) Add(index int) {
+	s.AddWithCount(index, float64(1))
+}
+
+func (s *CollapsingLowestDenseStore) AddBin(bin Bin) {
+	index := bin.Index()
+	count := bin.Count()
+	if count == 0 {
+		return
+	}
+	s.AddWithCount(index, count)
+}
+
+func (s *CollapsingLowestDenseStore) AddWithCount(index int, count float64) {
+	if count == 0 {
+		return
+	}
+	arrayIndex := s.normalize(index)
+	s.bins[arrayIndex] += count
+	s.count += count
+}
+
+// Normalize the store, if necessary, so that the counter of the specified index can be updated.
+func (s *CollapsingLowestDenseStore) normalize(index int) int {
+	if index < s.minIndex {
+		if s.isCollapsed {
+			return 0
+		} else {
+			s.extendRange(index, index)
+			if s.isCollapsed {
+				return 0
+			}
+		}
+	} else if index > s.maxIndex {
+		s.extendRange(index, index)
+	}
+	return index - s.offset
+}
+
+func (s *CollapsingLowestDenseStore) getNewLength(newMinIndex, newMaxIndex int) int {
+	return min(s.DenseStore.getNewLength(newMinIndex, newMaxIndex), s.maxNumBins)
+}
+
+func (s *CollapsingLowestDenseStore) extendRange(newMinIndex, newMaxIndex int) {
+	newMinIndex = min(newMinIndex, s.minIndex)
+	newMaxIndex = max(newMaxIndex, s.maxIndex)
+	if s.IsEmpty() {
+		initialLength := s.getNewLength(newMinIndex, newMaxIndex)
+		s.bins = append(s.bins, make([]float64, initialLength)...)
+		s.offset = newMinIndex
+		s.minIndex = newMinIndex
+		s.maxIndex = newMaxIndex
+		s.adjust(newMinIndex, newMaxIndex)
+	} else if newMinIndex >= s.offset && newMaxIndex < s.offset+len(s.bins) {
+		s.minIndex = newMinIndex
+		s.maxIndex = newMaxIndex
+	} else {
+		// To avoid shifting too often when nearing the capacity of the array,
+		// we may grow it before we actually reach the capacity.
+		newLength := s.getNewLength(newMinIndex, newMaxIndex)
+		if newLength > len(s.bins) {
+			s.bins = append(s.bins, make([]float64, newLength-len(s.bins))...)
+		}
+		s.adjust(newMinIndex, newMaxIndex)
+	}
+}
+
+// Adjust bins, offset, minIndex and maxIndex, without resizing the bins slice in order to make it fit the
+// specified range.
+func (s *CollapsingLowestDenseStore) adjust(newMinIndex, newMaxIndex int) {
+	if newMaxIndex-newMinIndex+1 > len(s.bins) {
+		// The range of indices is too wide, buckets of lowest indices need to be collapsed.
+		newMinIndex = newMaxIndex - len(s.bins) + 1
+		if newMinIndex >= s.maxIndex {
+			// There will be only one non-empty bucket.
+			s.bins = make([]float64, len(s.bins))
+			s.offset = newMinIndex
+			s.minIndex = newMinIndex
+			s.bins[0] = s.count
+		} else {
+			shift := s.offset - newMinIndex
+			if shift < 0 {
+				// Collapse the buckets.
+				n := float64(0)
+				for i := s.minIndex; i < newMinIndex; i++ {
+					n += s.bins[i-s.offset]
+				}
+				s.resetBins(s.minIndex, newMinIndex-1)
+				s.bins[newMinIndex-s.offset] += n
+				s.minIndex = newMinIndex
+				// Shift the buckets to make room for newMaxIndex.
+				s.shiftCounts(shift)
+			} else {
+				// Shift the buckets to make room for newMinIndex.
+				s.shiftCounts(shift)
+				s.minIndex = newMinIndex
+			}
+		}
+		s.maxIndex = newMaxIndex
+		s.isCollapsed = true
+	} else {
+		s.centerCounts(newMinIndex, newMaxIndex)
+	}
+}
+
+func (s *CollapsingLowestDenseStore) MergeWith(other Store) {
+	if other.IsEmpty() {
+		return
+	}
+	o, ok := other.(*CollapsingLowestDenseStore)
+	if !ok {
+		for bin := range other.Bins() {
+			s.AddBin(bin)
+		}
+		return
+	}
+	if o.minIndex < s.minIndex || o.maxIndex > s.maxIndex {
+		s.extendRange(o.minIndex, o.maxIndex)
+	}
+	idx := o.minIndex
+	for ; idx < s.minIndex && idx <= o.maxIndex; idx++ {
+		s.bins[0] += o.bins[idx-o.offset]
+	}
+	for ; idx < o.maxIndex; idx++ {
+		s.bins[idx-s.offset] += o.bins[idx-o.offset]
+	}
+	// This is a separate test so that the comparison in the previous loop is strict (<) and handles
+	// store.maxIndex = Integer.MAX_VALUE.
+	if idx == o.maxIndex {
+		s.bins[idx-s.offset] += o.bins[idx-o.offset]
+	}
+	s.count += o.count
+}
+
+func (s *CollapsingLowestDenseStore) Copy() Store {
+	bins := make([]float64, len(s.bins))
+	copy(bins, s.bins)
+	return &CollapsingLowestDenseStore{
+		DenseStore: DenseStore{
+			bins:     bins,
+			count:    s.count,
+			offset:   s.offset,
+			minIndex: s.minIndex,
+			maxIndex: s.maxIndex,
+		},
+		maxNumBins:  s.maxNumBins,
+		isCollapsed: s.isCollapsed,
+	}
+}
+
+func (s *CollapsingLowestDenseStore) Clear() {
+	s.DenseStore.Clear()
+	s.isCollapsed = false
+}
+
+func (s *CollapsingLowestDenseStore) DecodeAndMergeWith(r *[]byte, encodingMode enc.SubFlag) error {
+	return DecodeAndMergeWith(s, r, encodingMode)
+}
+
+var _ Store = (*CollapsingLowestDenseStore)(nil)
+
+func max(x, y int) int {
+	if x > y {
+		return x
+	}
+	return y
+}
+
+func min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}
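
A small sketch of the collapsing behaviour described above (illustrative only, not part of the vendored change; the collapse point in the comment is derived by hand from maxNumBins=8 and 100 unit additions):

package main

import (
	"fmt"

	"github.com/DataDog/sketches-go/ddsketch/store"
)

func main() {
	s := store.NewCollapsingLowestDenseStore(8) // keep at most 8 bins
	for i := 0; i < 100; i++ {
		s.Add(i)
	}
	minIdx, _ := s.MinIndex()
	maxIdx, _ := s.MaxIndex()
	// Counts for indexes below 92 have been folded into bin 92; the highest bins stay exact.
	fmt.Println(minIdx, maxIdx, s.TotalCount()) // 92 99 100
}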
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/dense_store.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/dense_store.go
new file mode 100644
index 0000000000..5d77e487bc
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/dense_store.go
@@ -0,0 +1,329 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package store
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math"
+
+	enc "github.com/DataDog/sketches-go/ddsketch/encoding"
+	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
+)
+
+const (
+	arrayLengthOverhead        = 64
+	arrayLengthGrowthIncrement = 0.1
+
+	// Grow the bins with an extra growthBuffer bins to prevent growing too often
+	growthBuffer = 128
+)
+
+// DenseStore is a dynamically growing contiguous (non-sparse) store. The number of bins is
+// bounded only by the size of the slice that can be allocated.
+type DenseStore struct {
+	bins     []float64
+	count    float64
+	offset   int
+	minIndex int
+	maxIndex int
+}
+
+func NewDenseStore() *DenseStore {
+	return &DenseStore{minIndex: math.MaxInt32, maxIndex: math.MinInt32}
+}
+
+func (s *DenseStore) Add(index int) {
+	s.AddWithCount(index, float64(1))
+}
+
+func (s *DenseStore) AddBin(bin Bin) {
+	if bin.count == 0 {
+		return
+	}
+	s.AddWithCount(bin.index, bin.count)
+}
+
+func (s *DenseStore) AddWithCount(index int, count float64) {
+	if count == 0 {
+		return
+	}
+	arrayIndex := s.normalize(index)
+	s.bins[arrayIndex] += count
+	s.count += count
+}
+
+// Normalize the store, if necessary, so that the counter of the specified index can be updated.
+func (s *DenseStore) normalize(index int) int {
+	if index < s.minIndex || index > s.maxIndex {
+		s.extendRange(index, index)
+	}
+	return index - s.offset
+}
+
+func (s *DenseStore) getNewLength(newMinIndex, newMaxIndex int) int {
+	desiredLength := newMaxIndex - newMinIndex + 1
+	return int((float64(desiredLength+arrayLengthOverhead-1)/arrayLengthGrowthIncrement + 1) * arrayLengthGrowthIncrement)
+}
+
+func (s *DenseStore) extendRange(newMinIndex, newMaxIndex int) {
+
+	newMinIndex = min(newMinIndex, s.minIndex)
+	newMaxIndex = max(newMaxIndex, s.maxIndex)
+
+	if s.IsEmpty() {
+		initialLength := s.getNewLength(newMinIndex, newMaxIndex)
+		s.bins = append(s.bins, make([]float64, initialLength)...)
+		s.offset = newMinIndex
+		s.minIndex = newMinIndex
+		s.maxIndex = newMaxIndex
+		s.adjust(newMinIndex, newMaxIndex)
+	} else if newMinIndex >= s.offset && newMaxIndex < s.offset+len(s.bins) {
+		s.minIndex = newMinIndex
+		s.maxIndex = newMaxIndex
+	} else {
+		// To avoid shifting too often when nearing the capacity of the array,
+		// we may grow it before we actually reach the capacity.
+		newLength := s.getNewLength(newMinIndex, newMaxIndex)
+		if newLength > len(s.bins) {
+			s.bins = append(s.bins, make([]float64, newLength-len(s.bins))...)
+		}
+		s.adjust(newMinIndex, newMaxIndex)
+	}
+}
+
+// Adjust bins, offset, minIndex and maxIndex, without resizing the bins slice in order to make it fit the
+// specified range.
+func (s *DenseStore) adjust(newMinIndex, newMaxIndex int) {
+	s.centerCounts(newMinIndex, newMaxIndex)
+}
+
+func (s *DenseStore) centerCounts(newMinIndex, newMaxIndex int) {
+	midIndex := newMinIndex + (newMaxIndex-newMinIndex+1)/2
+	s.shiftCounts(s.offset + len(s.bins)/2 - midIndex)
+	s.minIndex = newMinIndex
+	s.maxIndex = newMaxIndex
+}
+
+func (s *DenseStore) shiftCounts(shift int) {
+	minArrIndex := s.minIndex - s.offset
+	maxArrIndex := s.maxIndex - s.offset
+	copy(s.bins[minArrIndex+shift:], s.bins[minArrIndex:maxArrIndex+1])
+	if shift > 0 {
+		s.resetBins(s.minIndex, s.minIndex+shift-1)
+	} else {
+		s.resetBins(s.maxIndex+shift+1, s.maxIndex)
+	}
+	s.offset -= shift
+}
+
+func (s *DenseStore) resetBins(fromIndex, toIndex int) {
+	for i := fromIndex - s.offset; i <= toIndex-s.offset; i++ {
+		s.bins[i] = 0
+	}
+}
+
+func (s *DenseStore) IsEmpty() bool {
+	return s.count == 0
+}
+
+func (s *DenseStore) TotalCount() float64 {
+	return s.count
+}
+
+func (s *DenseStore) MinIndex() (int, error) {
+	if s.IsEmpty() {
+		return 0, errUndefinedMinIndex
+	}
+	return s.minIndex, nil
+}
+
+func (s *DenseStore) MaxIndex() (int, error) {
+	if s.IsEmpty() {
+		return 0, errUndefinedMaxIndex
+	}
+	return s.maxIndex, nil
+}
+
+// Return the key for the value at rank
+func (s *DenseStore) KeyAtRank(rank float64) int {
+	if rank < 0 {
+		rank = 0
+	}
+	var n float64
+	for i, b := range s.bins {
+		n += b
+		if n > rank {
+			return i + s.offset
+		}
+	}
+	return s.maxIndex
+}
+
+func (s *DenseStore) MergeWith(other Store) {
+	if other.IsEmpty() {
+		return
+	}
+	o, ok := other.(*DenseStore)
+	if !ok {
+		for bin := range other.Bins() {
+			s.AddBin(bin)
+		}
+		return
+	}
+	if o.minIndex < s.minIndex || o.maxIndex > s.maxIndex {
+		s.extendRange(o.minIndex, o.maxIndex)
+	}
+	for idx := o.minIndex; idx <= o.maxIndex; idx++ {
+		s.bins[idx-s.offset] += o.bins[idx-o.offset]
+	}
+	s.count += o.count
+}
+
+func (s *DenseStore) Bins() <-chan Bin {
+	ch := make(chan Bin)
+	go func() {
+		defer close(ch)
+		for idx := s.minIndex; idx <= s.maxIndex; idx++ {
+			if s.bins[idx-s.offset] > 0 {
+				ch <- Bin{index: idx, count: s.bins[idx-s.offset]}
+			}
+		}
+	}()
+	return ch
+}
+
+func (s *DenseStore) ForEach(f func(index int, count float64) (stop bool)) {
+	for idx := s.minIndex; idx <= s.maxIndex; idx++ {
+		if s.bins[idx-s.offset] > 0 {
+			if f(idx, s.bins[idx-s.offset]) {
+				return
+			}
+		}
+	}
+}
+
+func (s *DenseStore) Copy() Store {
+	bins := make([]float64, len(s.bins))
+	copy(bins, s.bins)
+	return &DenseStore{
+		bins:     bins,
+		count:    s.count,
+		offset:   s.offset,
+		minIndex: s.minIndex,
+		maxIndex: s.maxIndex,
+	}
+}
+
+func (s *DenseStore) Clear() {
+	s.bins = s.bins[:0]
+	s.count = 0
+	s.minIndex = math.MaxInt32
+	s.maxIndex = math.MinInt32
+}
+
+func (s *DenseStore) string() string {
+	var buffer bytes.Buffer
+	buffer.WriteString("{")
+	for i := 0; i < len(s.bins); i++ {
+		index := i + s.offset
+		buffer.WriteString(fmt.Sprintf("%d: %f, ", index, s.bins[i]))
+	}
+	buffer.WriteString(fmt.Sprintf("count: %v, offset: %d, minIndex: %d, maxIndex: %d}", s.count, s.offset, s.minIndex, s.maxIndex))
+	return buffer.String()
+}
+
+func (s *DenseStore) ToProto() *sketchpb.Store {
+	if s.IsEmpty() {
+		return &sketchpb.Store{ContiguousBinCounts: nil}
+	}
+	bins := make([]float64, s.maxIndex-s.minIndex+1)
+	copy(bins, s.bins[s.minIndex-s.offset:s.maxIndex-s.offset+1])
+	return &sketchpb.Store{
+		ContiguousBinCounts:      bins,
+		ContiguousBinIndexOffset: int32(s.minIndex),
+	}
+}
+
+func (s *DenseStore) Reweight(w float64) error {
+	if w <= 0 {
+		return errors.New("can't reweight by a negative factor")
+	}
+	if w == 1 {
+		return nil
+	}
+	s.count *= w
+	for idx := s.minIndex; idx <= s.maxIndex; idx++ {
+		s.bins[idx-s.offset] *= w
+	}
+	return nil
+}
+
+func (s *DenseStore) Encode(b *[]byte, t enc.FlagType) {
+	if s.IsEmpty() {
+		return
+	}
+
+	denseEncodingSize := 0
+	numBins := uint64(s.maxIndex-s.minIndex) + 1
+	denseEncodingSize += enc.Uvarint64Size(numBins)
+	denseEncodingSize += enc.Varint64Size(int64(s.minIndex))
+	denseEncodingSize += enc.Varint64Size(1)
+
+	sparseEncodingSize := 0
+	numNonEmptyBins := uint64(0)
+
+	previousIndex := s.minIndex
+	for index := s.minIndex; index <= s.maxIndex; index++ {
+		count := s.bins[index-s.offset]
+		countVarFloat64Size := enc.Varfloat64Size(count)
+		denseEncodingSize += countVarFloat64Size
+		if count != 0 {
+			numNonEmptyBins++
+			sparseEncodingSize += enc.Varint64Size(int64(index - previousIndex))
+			sparseEncodingSize += countVarFloat64Size
+			previousIndex = index
+		}
+	}
+	sparseEncodingSize += enc.Uvarint64Size(numNonEmptyBins)
+
+	if denseEncodingSize <= sparseEncodingSize {
+		s.encodeDensely(b, t, numBins)
+	} else {
+		s.encodeSparsely(b, t, numNonEmptyBins)
+	}
+}
+
+func (s *DenseStore) encodeDensely(b *[]byte, t enc.FlagType, numBins uint64) {
+	enc.EncodeFlag(b, enc.NewFlag(t, enc.BinEncodingContiguousCounts))
+	enc.EncodeUvarint64(b, numBins)
+	enc.EncodeVarint64(b, int64(s.minIndex))
+	enc.EncodeVarint64(b, 1)
+	for index := s.minIndex; index <= s.maxIndex; index++ {
+		enc.EncodeVarfloat64(b, s.bins[index-s.offset])
+	}
+}
+
+func (s *DenseStore) encodeSparsely(b *[]byte, t enc.FlagType, numNonEmptyBins uint64) {
+	enc.EncodeFlag(b, enc.NewFlag(t, enc.BinEncodingIndexDeltasAndCounts))
+	enc.EncodeUvarint64(b, numNonEmptyBins)
+	previousIndex := 0
+	for index := s.minIndex; index <= s.maxIndex; index++ {
+		count := s.bins[index-s.offset]
+		if count != 0 {
+			enc.EncodeVarint64(b, int64(index-previousIndex))
+			enc.EncodeVarfloat64(b, count)
+			previousIndex = index
+		}
+	}
+}
+
+func (s *DenseStore) DecodeAndMergeWith(b *[]byte, encodingMode enc.SubFlag) error {
+	return DecodeAndMergeWith(s, b, encodingMode)
+}
+
+var _ Store = (*DenseStore)(nil)
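
A short sketch of DenseStore and its contiguous protobuf serialization (illustrative only, not part of the vendored change; the import path matches the vendor path of this file, and the expected output is worked out from ToProto above):

package main

import (
	"fmt"

	"github.com/DataDog/sketches-go/ddsketch/store"
)

func main() {
	s := store.NewDenseStore()
	s.AddWithCount(10, 2)
	s.Add(12)
	pb := s.ToProto()
	// The dense store serializes contiguously: counts for bins 10, 11 and 12.
	fmt.Println(pb.ContiguousBinIndexOffset, pb.ContiguousBinCounts) // 10 [2 0 1]
}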
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/sparse.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/sparse.go
new file mode 100644
index 0000000000..310e4fc256
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/sparse.go
@@ -0,0 +1,183 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package store
+
+import (
+	"errors"
+	"sort"
+
+	enc "github.com/DataDog/sketches-go/ddsketch/encoding"
+	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
+)
+
+type SparseStore struct {
+	counts map[int]float64
+}
+
+func NewSparseStore() *SparseStore {
+	return &SparseStore{counts: make(map[int]float64)}
+}
+
+func (s *SparseStore) Add(index int) {
+	s.counts[index]++
+}
+
+func (s *SparseStore) AddBin(bin Bin) {
+	s.AddWithCount(bin.index, bin.count)
+}
+
+func (s *SparseStore) AddWithCount(index int, count float64) {
+	if count == 0 {
+		return
+	}
+	s.counts[index] += count
+}
+
+func (s *SparseStore) Bins() <-chan Bin {
+	orderedBins := s.orderedBins()
+	ch := make(chan Bin)
+	go func() {
+		defer close(ch)
+		for _, bin := range orderedBins {
+			ch <- bin
+		}
+	}()
+	return ch
+}
+
+func (s *SparseStore) orderedBins() []Bin {
+	bins := make([]Bin, 0, len(s.counts))
+	for index, count := range s.counts {
+		bins = append(bins, Bin{index: index, count: count})
+	}
+	sort.Slice(bins, func(i, j int) bool { return bins[i].index < bins[j].index })
+	return bins
+}
+
+func (s *SparseStore) ForEach(f func(index int, count float64) (stop bool)) {
+	for index, count := range s.counts {
+		if f(index, count) {
+			return
+		}
+	}
+}
+
+func (s *SparseStore) Copy() Store {
+	countsCopy := make(map[int]float64)
+	for index, count := range s.counts {
+		countsCopy[index] = count
+	}
+	return &SparseStore{counts: countsCopy}
+}
+
+func (s *SparseStore) Clear() {
+	for index := range s.counts {
+		delete(s.counts, index)
+	}
+}
+
+func (s *SparseStore) IsEmpty() bool {
+	return len(s.counts) == 0
+}
+
+func (s *SparseStore) MaxIndex() (int, error) {
+	if s.IsEmpty() {
+		return 0, errUndefinedMaxIndex
+	}
+	maxIndex := minInt
+	for index := range s.counts {
+		if index > maxIndex {
+			maxIndex = index
+		}
+	}
+	return maxIndex, nil
+}
+
+func (s *SparseStore) MinIndex() (int, error) {
+	if s.IsEmpty() {
+		return 0, errUndefinedMinIndex
+	}
+	minIndex := maxInt
+	for index := range s.counts {
+		if index < minIndex {
+			minIndex = index
+		}
+	}
+	return minIndex, nil
+}
+
+func (s *SparseStore) TotalCount() float64 {
+	totalCount := float64(0)
+	for _, count := range s.counts {
+		totalCount += count
+	}
+	return totalCount
+}
+
+func (s *SparseStore) KeyAtRank(rank float64) int {
+	orderedBins := s.orderedBins()
+	cumulCount := float64(0)
+	for _, bin := range orderedBins {
+		cumulCount += bin.count
+		if cumulCount > rank {
+			return bin.index
+		}
+	}
+	maxIndex, err := s.MaxIndex()
+	if err == nil {
+		return maxIndex
+	} else {
+		// FIXME: make Store's KeyAtRank consistent with MinIndex and MaxIndex
+		return 0
+	}
+}
+
+func (s *SparseStore) MergeWith(store Store) {
+	for bin := range store.Bins() {
+		s.AddBin(bin)
+	}
+}
+
+func (s *SparseStore) ToProto() *sketchpb.Store {
+	binCounts := make(map[int32]float64)
+	for index, count := range s.counts {
+		binCounts[int32(index)] = count
+	}
+	return &sketchpb.Store{BinCounts: binCounts}
+}
+
+func (s *SparseStore) Reweight(w float64) error {
+	if w <= 0 {
+		return errors.New("can't reweight by a negative factor")
+	}
+	if w == 1 {
+		return nil
+	}
+	for index := range s.counts {
+		s.counts[index] *= w
+	}
+	return nil
+}
+
+func (s *SparseStore) Encode(b *[]byte, t enc.FlagType) {
+	if s.IsEmpty() {
+		return
+	}
+	enc.EncodeFlag(b, enc.NewFlag(t, enc.BinEncodingIndexDeltasAndCounts))
+	enc.EncodeUvarint64(b, uint64(len(s.counts)))
+	previousIndex := 0
+	for index, count := range s.counts {
+		enc.EncodeVarint64(b, int64(index-previousIndex))
+		enc.EncodeVarfloat64(b, count)
+		previousIndex = index
+	}
+}
+
+func (s *SparseStore) DecodeAndMergeWith(b *[]byte, encodingMode enc.SubFlag) error {
+	return DecodeAndMergeWith(s, b, encodingMode)
+}
+
+var _ Store = (*SparseStore)(nil)
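
A short sketch of SparseStore, which keeps only non-empty bins in a map (illustrative only, not part of the vendored change; the expected output is worked out from the code above):

package main

import (
	"fmt"

	"github.com/DataDog/sketches-go/ddsketch/store"
)

func main() {
	s := store.NewSparseStore()
	s.AddWithCount(-5, 2)
	s.AddWithCount(1000, 1)
	// Only non-empty bins are kept, no matter how far apart their indexes are.
	fmt.Println(s.TotalCount(), s.KeyAtRank(2)) // 3 1000
}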
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/store.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/store.go
new file mode 100644
index 0000000000..64a5e3d508
--- /dev/null
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/store.go
@@ -0,0 +1,153 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+package store
+
+import (
+	"errors"
+
+	enc "github.com/DataDog/sketches-go/ddsketch/encoding"
+	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
+)
+
+type Provider func() Store
+
+var (
+	DefaultProvider                   = Provider(BufferedPaginatedStoreConstructor)
+	DenseStoreConstructor             = Provider(func() Store { return NewDenseStore() })
+	BufferedPaginatedStoreConstructor = Provider(func() Store { return NewBufferedPaginatedStore() })
+	SparseStoreConstructor            = Provider(func() Store { return NewSparseStore() })
+)
+
+const (
+	maxInt = int(^uint(0) >> 1)
+	minInt = ^maxInt
+)
+
+var (
+	errUndefinedMinIndex = errors.New("MinIndex of empty store is undefined")
+	errUndefinedMaxIndex = errors.New("MaxIndex of empty store is undefined")
+)
+
+type Store interface {
+	Add(index int)
+	AddBin(bin Bin)
+	AddWithCount(index int, count float64)
+	// Bins returns a channel that emits the bins that are encoded in the store.
+	// Note that this leaks a channel and a goroutine if it is not iterated to completion.
+	Bins() <-chan Bin
+	// ForEach applies f to each element of the store, stopping early if f
+	// returns true.
+	ForEach(f func(index int, count float64) (stop bool))
+	Copy() Store
+	// Clear empties the store while allowing reusing already allocated memory.
+	// In some situations, it may be advantageous to clear and reuse a store
+	// rather than instantiating a new one. However, repeatedly reusing the same
+	// store on varying input data distributions may ultimately make the store
+	// overly large and waste memory.
+	Clear()
+	IsEmpty() bool
+	MaxIndex() (int, error)
+	MinIndex() (int, error)
+	TotalCount() float64
+	KeyAtRank(rank float64) int
+	MergeWith(store Store)
+	ToProto() *sketchpb.Store
+	// Reweight multiplies all values from the store by w, but keeps the same global distribution.
+	Reweight(w float64) error
+	// Encode encodes the bins of the store and appends its content to the
+	// provided []byte.
+	// The provided FlagType indicates whether the store encodes positive or
+	// negative values.
+	Encode(b *[]byte, t enc.FlagType)
+	// DecodeAndMergeWith decodes bins that have been encoded in the format of
+	// the provided binEncodingMode and merges them within the receiver store.
+	// It updates the provided []byte so that it starts immediately after the
+	// encoded bins.
+	DecodeAndMergeWith(b *[]byte, binEncodingMode enc.SubFlag) error
+}
+
+// FromProto returns an instance of DenseStore that contains the data in the provided protobuf representation.
+func FromProto(pb *sketchpb.Store) *DenseStore {
+	store := NewDenseStore()
+	MergeWithProto(store, pb)
+	return store
+}
+
+// MergeWithProto merges the distribution in a protobuf Store to an existing store.
+// - if called with an empty store, this simply populates the store with the distribution in the protobuf Store.
+// - if called with a non-empty store, this has the same outcome as deserializing the protobuf Store, then merging.
+func MergeWithProto(store Store, pb *sketchpb.Store) {
+	for idx, count := range pb.BinCounts {
+		store.AddWithCount(int(idx), count)
+	}
+	for idx, count := range pb.ContiguousBinCounts {
+		store.AddWithCount(idx+int(pb.ContiguousBinIndexOffset), count)
+	}
+}
+
+func DecodeAndMergeWith(s Store, b *[]byte, binEncodingMode enc.SubFlag) error {
+	switch binEncodingMode {
+
+	case enc.BinEncodingIndexDeltasAndCounts:
+		numBins, err := enc.DecodeUvarint64(b)
+		if err != nil {
+			return err
+		}
+		index := int64(0)
+		for i := uint64(0); i < numBins; i++ {
+			indexDelta, err := enc.DecodeVarint64(b)
+			if err != nil {
+				return err
+			}
+			count, err := enc.DecodeVarfloat64(b)
+			if err != nil {
+				return err
+			}
+			index += indexDelta
+			s.AddWithCount(int(index), count)
+		}
+
+	case enc.BinEncodingIndexDeltas:
+		numBins, err := enc.DecodeUvarint64(b)
+		if err != nil {
+			return err
+		}
+		index := int64(0)
+		for i := uint64(0); i < numBins; i++ {
+			indexDelta, err := enc.DecodeVarint64(b)
+			if err != nil {
+				return err
+			}
+			index += indexDelta
+			s.Add(int(index))
+		}
+
+	case enc.BinEncodingContiguousCounts:
+		numBins, err := enc.DecodeUvarint64(b)
+		if err != nil {
+			return err
+		}
+		index, err := enc.DecodeVarint64(b)
+		if err != nil {
+			return err
+		}
+		indexDelta, err := enc.DecodeVarint64(b)
+		if err != nil {
+			return err
+		}
+		for i := uint64(0); i < numBins; i++ {
+			count, err := enc.DecodeVarfloat64(b)
+			if err != nil {
+				return err
+			}
+			s.AddWithCount(int(index), count)
+			index += indexDelta
+		}
+
+	default:
+		return errors.New("unknown bin encoding")
+	}
+	return nil
+}
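As a quick illustration of the FromProto/MergeWithProto helpers above (a sketch, not part of the vendored change): a protobuf Store may carry sparse bin counts, a contiguous run of counts, or both, and both are folded into the target store.

package main

import (
	"fmt"

	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
	"github.com/DataDog/sketches-go/ddsketch/store"
)

func main() {
	pb := &sketchpb.Store{
		BinCounts:                map[int32]float64{10: 1, 12: 3},
		ContiguousBinCounts:      []float64{2, 2}, // bins 20 and 21
		ContiguousBinIndexOffset: 20,
	}
	s := store.FromProto(pb)    // a DenseStore holding bins 10, 12, 20 and 21
	fmt.Println(s.TotalCount()) // 8
}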
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
index 0d6c5f3f96..d7af9141e3 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
@@ -10,8 +10,9 @@ import (
 	"bufio"
 	"bytes"
 	"encoding/base64"
-	"github.com/ProtonMail/go-crypto/openpgp/errors"
 	"io"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
 )
 
 // A Block represents an OpenPGP armored structure.
@@ -208,12 +209,16 @@ TryNextBlock:
 			break
 		}
 
-		i := bytes.Index(line, []byte(": "))
+		i := bytes.Index(line, []byte(":"))
 		if i == -1 {
 			goto TryNextBlock
 		}
 		lastKey = string(line[:i])
-		p.Header[lastKey] = string(line[i+2:])
+		var value string
+		if len(line) > i+2 {
+			value = string(line[i+2:])
+		}
+		p.Header[lastKey] = value
 	}
 
 	p.lReader.in = r
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
index 7283ca91cc..2d7b0cf373 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
@@ -504,7 +504,7 @@ EachPacket:
 			// Else, ignoring the signature as it does not follow anything
 			// we would know to attach it to.
 		case *packet.PrivateKey:
-			if pkt.IsSubkey == false {
+			if !pkt.IsSubkey {
 				packets.Unread(p)
 				break EachPacket
 			}
@@ -513,7 +513,7 @@ EachPacket:
 				return nil, err
 			}
 		case *packet.PublicKey:
-			if pkt.IsSubkey == false {
+			if !pkt.IsSubkey {
 				packets.Unread(p)
 				break EachPacket
 			}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
index ec903ee95a..3402b8c140 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
@@ -415,6 +415,10 @@ func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) {
 		return
 	}
 
+	if len(pk.p.Bytes()) == 0 {
+		return errors.StructuralError("empty EdDSA public key")
+	}
+
 	pub := eddsa.NewPublicKey(c)
 
 	switch flag := pk.p.Bytes()[0]; flag {
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
index 6c58c86fa8..80d0bb98e0 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
@@ -904,7 +904,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
 	if sig.IssuerKeyId != nil && sig.Version == 4 {
 		keyId := make([]byte, 8)
 		binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
-		subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId})
+		subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
 	}
 	if sig.IssuerFingerprint != nil {
 		contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...)
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
index a8abf2ff7c..bac2b132ea 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
@@ -7,13 +7,11 @@ package packet
 import (
 	"bytes"
 	"crypto/cipher"
-	"crypto/sha256"
 	"io"
 	"strconv"
 
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
 	"github.com/ProtonMail/go-crypto/openpgp/s2k"
-	"golang.org/x/crypto/hkdf"
 )
 
 // This is the largest session key that we'll support. Since at most 256-bit cipher
@@ -45,13 +43,6 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
 		return errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
 	}
 
-	if ske.Version == 5 {
-		// Scalar octet count
-		if _, err := readFull(r, buf[:]); err != nil {
-			return err
-		}
-	}
-
 	// Cipher function
 	if _, err := readFull(r, buf[:]); err != nil {
 		return err
@@ -67,11 +58,6 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
 			return errors.StructuralError("cannot read AEAD octet from packet")
 		}
 		ske.Mode = AEADMode(buf[0])
-
-		// Scalar octet count
-		if _, err := readFull(r, buf[:]); err != nil {
-			return err
-		}
 	}
 
 	var err error
@@ -220,7 +206,7 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass
 	case 5:
 		ivLen := config.AEAD().Mode().IvLength()
 		tagLen := config.AEAD().Mode().TagLength()
-		packetLength = 5 + len(s2kBytes) + ivLen + keySize + tagLen
+		packetLength = 3 + len(s2kBytes) + ivLen + keySize + tagLen
 	}
 	err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength)
 	if err != nil {
@@ -230,20 +216,12 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass
 	// Symmetric Key Encrypted Version
 	buf := []byte{byte(version)}
 
-	if version == 5 {
-		// Scalar octet count
-		buf = append(buf, byte(3+len(s2kBytes)+config.AEAD().Mode().IvLength()))
-	}
-
 	// Cipher function
 	buf = append(buf, byte(cipherFunc))
 
 	if version == 5 {
 		// AEAD mode
 		buf = append(buf, byte(config.AEAD().Mode()))
-
-		// Scalar octet count
-		buf = append(buf, byte(len(s2kBytes)))
 	}
 	_, err = w.Write(buf)
 	if err != nil {
@@ -293,11 +271,6 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass
 }
 
 func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte) (aead cipher.AEAD) {
-	hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData)
-
-	encryptionKey := make([]byte, c.KeySize())
-	_, _ = readFull(hkdfReader, encryptionKey)
-
-	blockCipher := c.new(encryptionKey)
+	blockCipher := c.new(inputKey)
 	return mode.new(blockCipher)
 }
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
index 864d8ca6ba..7fdd13a3dd 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
@@ -381,7 +381,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En
 		}
 
 		sig := to[i].PrimaryIdentity().SelfSignature
-		if sig.SEIPDv2 == false {
+		if !sig.SEIPDv2 {
 			aeadSupported = false
 		}
 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 6027df1e18..0edfffb0fb 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -28659,6 +28659,25 @@ var awsPartition = partition{
 				},
 			},
 		},
+		"tnb": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
 		"transcribe": service{
 			Defaults: endpointDefaults{
 				defaultKey{}: endpoint{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 0e5f95c1c1..dcdba9d8f7 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.317"
+const SDKVersion = "1.44.318"
diff --git a/vendor/github.com/buildkite/agent/v3/api/chunks.go b/vendor/github.com/buildkite/agent/v3/api/chunks.go
index 71a1a1b331..cc5fa97059 100644
--- a/vendor/github.com/buildkite/agent/v3/api/chunks.go
+++ b/vendor/github.com/buildkite/agent/v3/api/chunks.go
@@ -10,9 +10,9 @@ import (
 // Chunk represents a Buildkite Agent API Chunk
 type Chunk struct {
 	Data     []byte
-	Sequence int
-	Offset   int
-	Size     int
+	Sequence uint64
+	Offset   uint64
+	Size     uint64
 }
 
 // Uploads the chunk to the Buildkite Agent API. This request sends the
diff --git a/vendor/github.com/buildkite/agent/v3/api/jobs.go b/vendor/github.com/buildkite/agent/v3/api/jobs.go
index 4dee5af4c3..58e0dbc885 100644
--- a/vendor/github.com/buildkite/agent/v3/api/jobs.go
+++ b/vendor/github.com/buildkite/agent/v3/api/jobs.go
@@ -3,23 +3,51 @@ package api
 import (
 	"context"
 	"fmt"
+	"strings"
+
+	"github.com/buildkite/agent/v3/internal/pipeline"
 )
 
 // Job represents a Buildkite Agent API Job
 type Job struct {
-	ID                 string            `json:"id,omitempty"`
-	Endpoint           string            `json:"endpoint"`
-	State              string            `json:"state,omitempty"`
-	Env                map[string]string `json:"env,omitempty"`
-	ChunksMaxSizeBytes int               `json:"chunks_max_size_bytes,omitempty"`
-	Token              string            `json:"token,omitempty"`
-	ExitStatus         string            `json:"exit_status,omitempty"`
-	Signal             string            `json:"signal,omitempty"`
-	SignalReason       string            `json:"signal_reason,omitempty"`
-	StartedAt          string            `json:"started_at,omitempty"`
-	FinishedAt         string            `json:"finished_at,omitempty"`
-	RunnableAt         string            `json:"runnable_at,omitempty"`
-	ChunksFailedCount  int               `json:"chunks_failed_count,omitempty"`
+	ID                 string               `json:"id,omitempty"`
+	Endpoint           string               `json:"endpoint"`
+	State              string               `json:"state,omitempty"`
+	Env                map[string]string    `json:"env,omitempty"`
+	Step               pipeline.CommandStep `json:"step,omitempty"`
+	ChunksMaxSizeBytes uint64               `json:"chunks_max_size_bytes,omitempty"`
+	LogMaxSizeBytes    uint64               `json:"log_max_size_bytes,omitempty"`
+	Token              string               `json:"token,omitempty"`
+	ExitStatus         string               `json:"exit_status,omitempty"`
+	Signal             string               `json:"signal,omitempty"`
+	SignalReason       string               `json:"signal_reason,omitempty"`
+	StartedAt          string               `json:"started_at,omitempty"`
+	FinishedAt         string               `json:"finished_at,omitempty"`
+	RunnableAt         string               `json:"runnable_at,omitempty"`
+	ChunksFailedCount  int                  `json:"chunks_failed_count,omitempty"`
+}
+
+func (j *Job) ValuesForFields(fields []string) (map[string]string, error) {
+	o := make(map[string]string, len(fields))
+	for _, f := range fields {
+		switch f {
+		case "command":
+			o[f] = j.Env["BUILDKITE_COMMAND"]
+
+		case "plugins":
+			o[f] = j.Env["BUILDKITE_PLUGINS"]
+
+		default:
+			if e, has := strings.CutPrefix(f, pipeline.EnvNamespacePrefix); has {
+				o[f] = j.Env[e]
+				break
+			}
+
+			return nil, fmt.Errorf("unknown or unsupported field on Job struct for signing/verification: %q", f)
+		}
+	}
+
+	return o, nil
 }
 
 type JobState struct {
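A hedged sketch of how the new ValuesForFields helper is meant to be used (illustrative only; "command" and "plugins" are read from the job env, and any other field name must carry the pipeline env-namespace prefix or the call fails):

package main

import (
	"fmt"

	"github.com/buildkite/agent/v3/api"
)

func main() {
	j := &api.Job{
		Env: map[string]string{"BUILDKITE_COMMAND": "make test"},
	}
	vals, err := j.ValuesForFields([]string{"command"})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(vals["command"]) // make test
}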
diff --git a/vendor/github.com/buildkite/agent/v3/env/environment.go b/vendor/github.com/buildkite/agent/v3/env/environment.go
new file mode 100644
index 0000000000..a62c5dc7ea
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/env/environment.go
@@ -0,0 +1,298 @@
+// Package env provides utilities for dealing with environment variables.
+//
+// It is intended for internal use by buildkite-agent only.
+package env
+
+import (
+	"encoding/json"
+	"runtime"
+	"sort"
+	"strings"
+
+	"github.com/puzpuzpuz/xsync/v2"
+)
+
+// Environment is a map of environment variables, with the keys normalized
+// for case-insensitive operating systems
+type Environment struct {
+	underlying *xsync.MapOf[string, string]
+}
+
+func New() *Environment {
+	return &Environment{underlying: xsync.NewMapOf[string]()}
+}
+
+func NewWithLength(length int) *Environment {
+	return &Environment{underlying: xsync.NewMapOfPresized[string](length)}
+}
+
+func FromMap(m map[string]string) *Environment {
+	env := &Environment{underlying: xsync.NewMapOfPresized[string](len(m))}
+
+	for k, v := range m {
+		env.Set(k, v)
+	}
+
+	return env
+}
+
+// Split splits an environment variable (in the form "name=value") into the name
+// and value substrings. If there is no '=', or the first '=' is at the start,
+// it returns `"", "", false`.
+func Split(l string) (name, value string, ok bool) {
+	// Variable names should not contain '=' on any platform...and yet Windows
+	// creates environment variables beginning with '=' in some circumstances.
+	// See https://github.com/golang/go/issues/49886.
+	// Dropping them matches the previous behaviour on Windows, which used SET
+	// to obtain the state of environment variables.
+	i := strings.IndexRune(l, '=')
+	// Either there is no '=', or it is at the start of the string.
+	// Both are disallowed.
+	if i <= 0 {
+		return "", "", false
+	}
+	return l[:i], l[i+1:], true
+}
+
+// FromSlice creates a new environment from a string slice of KEY=VALUE
+func FromSlice(s []string) *Environment {
+	env := NewWithLength(len(s))
+
+	for _, l := range s {
+		if k, v, ok := Split(l); ok {
+			env.Set(k, v)
+		}
+	}
+
+	return env
+}
+
+// Dump returns a copy of the environment with all keys normalized
+func (e *Environment) Dump() map[string]string {
+	d := make(map[string]string, e.underlying.Size())
+	e.underlying.Range(func(k, v string) bool {
+		d[normalizeKeyName(k)] = v
+		return true
+	})
+
+	return d
+}
+
+// Get returns a key from the environment
+func (e *Environment) Get(key string) (string, bool) {
+	v, ok := e.underlying.Load(normalizeKeyName(key))
+	return v, ok
+}
+
+// GetBool gets a boolean value from the environment, with a default for empty
+// or unrecognised values. It supports true|false, on|off, enabled|disabled and 1|0.
+func (e *Environment) GetBool(key string, defaultValue bool) bool {
+	v, _ := e.Get(key)
+
+	switch strings.ToLower(v) {
+	case "on", "1", "enabled", "true":
+		return true
+	case "off", "0", "disabled", "false":
+		return false
+	default:
+		return defaultValue
+	}
+}
+
+// Exists returns true/false depending on whether or not the key exists in the env
+func (e *Environment) Exists(key string) bool {
+	_, ok := e.underlying.Load(normalizeKeyName(key))
+	return ok
+}
+
+// Set sets a key in the environment
+func (e *Environment) Set(key string, value string) string {
+	e.underlying.Store(normalizeKeyName(key), value)
+	return value
+}
+
+// Remove a key from the Environment and return its value
+func (e *Environment) Remove(key string) string {
+	value, ok := e.Get(key)
+	if ok {
+		e.underlying.Delete(normalizeKeyName(key))
+	}
+	return value
+}
+
+// Length returns the length of the environment
+func (e *Environment) Length() int {
+	return e.underlying.Size()
+}
+
+// Diff returns the differences between this environment and another one: the
+// keys and values that this environment adds or changes relative to the other,
+// and the keys that only appear in the other one.
+func (e *Environment) Diff(other *Environment) Diff {
+	diff := Diff{
+		Added:   make(map[string]string),
+		Changed: make(map[string]DiffPair),
+		Removed: make(map[string]struct{}, 0),
+	}
+
+	if other == nil {
+		e.underlying.Range(func(k, _ string) bool {
+			diff.Removed[k] = struct{}{}
+			return true
+		})
+
+		return diff
+	}
+
+	e.underlying.Range(func(k, v string) bool {
+		other, ok := other.Get(k)
+		if !ok {
+			// This environment has added this key to other
+			diff.Added[k] = v
+			return true
+		}
+
+		if other != v {
+			diff.Changed[k] = DiffPair{
+				Old: other,
+				New: v,
+			}
+		}
+
+		return true
+	})
+
+	other.underlying.Range(func(k, _ string) bool {
+		if _, ok := e.Get(k); !ok {
+			diff.Removed[k] = struct{}{}
+		}
+
+		return true
+	})
+
+	return diff
+}
+
+// Merge merges another env into this one and returns the result
+func (e *Environment) Merge(other *Environment) {
+	if other == nil {
+		return
+	}
+
+	other.underlying.Range(func(k, v string) bool {
+		e.Set(k, v)
+		return true
+	})
+}
+
+func (e *Environment) Apply(diff Diff) {
+	for k, v := range diff.Added {
+		e.Set(k, v)
+	}
+	for k, v := range diff.Changed {
+		e.Set(k, v.New)
+	}
+	for k := range diff.Removed {
+		e.Remove(k)
+	}
+}
+
+// Copy returns a copy of the env
+func (e *Environment) Copy() *Environment {
+	if e == nil {
+		return New()
+	}
+
+	c := New()
+
+	e.underlying.Range(func(k, v string) bool {
+		c.Set(k, v)
+		return true
+	})
+
+	return c
+}
+
+// ToSlice returns a sorted slice representation of the environment
+func (e *Environment) ToSlice() []string {
+	s := []string{}
+	e.underlying.Range(func(k, v string) bool {
+		s = append(s, k+"="+v)
+		return true
+	})
+
+	// Ensure they are in a consistent order (helpful for tests)
+	sort.Strings(s)
+
+	return s
+}
+
+func (e *Environment) MarshalJSON() ([]byte, error) {
+	return json.Marshal(e.Dump())
+}
+
+func (e *Environment) UnmarshalJSON(data []byte) error {
+	var raw map[string]string
+	if err := json.Unmarshal(data, &raw); err != nil {
+		return err
+	}
+
+	e.underlying = xsync.NewMapOfPresized[string](len(raw))
+	for k, v := range raw {
+		e.Set(k, v)
+	}
+
+	return nil
+}
+
+// Environment variables on Windows are case-insensitive. When you run `SET`
+// within a Windows command prompt, you'll see variables like this:
+//
+//	...
+//	Path=C:\Program Files (x86)\Parallels\Parallels Tools\Applications;...
+//	PROCESSOR_IDENTIFIER=Intel64 Family 6 Model 94 Stepping 3, GenuineIntel
+//	SystemDrive=C:
+//	SystemRoot=C:\Windows
+//	...
+//
+// There's a mix of both CamelCase and UPPERCASE, but they can all be accessed
+// regardless of the case you use. So PATH is the same as Path, PAth, pATH,
+// and so on.
+//
+// os.Environ() in Golang returns key/values in the original casing, so it
+// returns a slice like this:
+//
+//	{ "Path=...", "PROCESSOR_IDENTIFIER=...", "SystemRoot=..." }
+//
+// Users of env.Environment shouldn't need to care about this.
+// env.Get("PATH") should "just work" on Windows. This means on Windows
+// machines, we'll normalise all the keys that go in/out of this API.
+//
+// Unix systems _are_ case sensitive when it comes to ENV, so we'll just leave
+// that alone.
+func normalizeKeyName(key string) string {
+	if runtime.GOOS == "windows" {
+		return strings.ToUpper(key)
+	} else {
+		return key
+	}
+}
+
+type Diff struct {
+	Added   map[string]string
+	Changed map[string]DiffPair
+	Removed map[string]struct{}
+}
+
+type DiffPair struct {
+	Old string
+	New string
+}
+
+func (diff *Diff) Remove(key string) {
+	delete(diff.Added, key)
+	delete(diff.Changed, key)
+	delete(diff.Removed, key)
+}
+
+func (diff *Diff) Empty() bool {
+	return len(diff.Added) == 0 && len(diff.Changed) == 0 && len(diff.Removed) == 0
+}
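To make the Diff/Apply semantics above concrete, a small sketch (illustrative only): Added holds keys this environment has that the other lacks, Changed holds keys present in both with different values, and Removed holds keys only the other environment has.

package main

import (
	"fmt"

	"github.com/buildkite/agent/v3/env"
)

func main() {
	before := env.FromSlice([]string{"PATH=/usr/bin", "HOME=/root"})
	after := env.FromMap(map[string]string{
		"PATH":  "/usr/local/bin:/usr/bin",
		"SHELL": "/bin/bash",
	})

	diff := after.Diff(before)
	fmt.Println(diff.Added)   // map[SHELL:/bin/bash]
	fmt.Println(diff.Changed) // map[PATH:{/usr/bin /usr/local/bin:/usr/bin}]
	fmt.Println(diff.Removed) // map[HOME:{}]

	// Applying the diff to a copy of the original reproduces the new state.
	merged := before.Copy()
	merged.Apply(diff)
	fmt.Println(merged.ToSlice()) // [PATH=/usr/local/bin:/usr/bin SHELL=/bin/bash]
}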
diff --git a/vendor/github.com/buildkite/agent/v3/internal/ordered/map.go b/vendor/github.com/buildkite/agent/v3/internal/ordered/map.go
new file mode 100644
index 0000000000..a436796872
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/ordered/map.go
@@ -0,0 +1,391 @@
+// Package ordered implements an ordered map type.
+package ordered
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"github.com/google/go-cmp/cmp"
+	"gopkg.in/yaml.v3"
+)
+
+var _ interface {
+	json.Marshaler
+	json.Unmarshaler
+	yaml.IsZeroer
+	yaml.Marshaler
+	yaml.Unmarshaler
+} = (*Map[string, any])(nil)
+
+// Map is an order-preserving map with comparable keys. It is intended for working
+// with YAML in an order-preserving way (off-spec, strictly speaking) and JSON
+// (more of the same).
+type Map[K comparable, V any] struct {
+	items []Tuple[K, V]
+	index map[K]int
+}
+
+// MapSS is a convenience alias to reduce keyboard wear.
+type MapSS = Map[string, string]
+
+// MapSA is a convenience alias to reduce keyboard wear.
+type MapSA = Map[string, any]
+
+// NewMap returns a new empty map with a given initial capacity.
+func NewMap[K comparable, V any](cap int) *Map[K, V] {
+	return &Map[K, V]{
+		items: make([]Tuple[K, V], 0, cap),
+		index: make(map[K]int, cap),
+	}
+}
+
+// MapFromItems creates an Map with some items.
+func MapFromItems[K comparable, V any](ps ...Tuple[K, V]) *Map[K, V] {
+	m := NewMap[K, V](len(ps))
+	for _, p := range ps {
+		m.Set(p.Key, p.Value)
+	}
+	return m
+}
+
+// Len returns the number of items in the map.
+func (m *Map[K, V]) Len() int {
+	if m == nil {
+		return 0
+	}
+	return len(m.index)
+}
+
+// IsZero reports if m is nil or empty. It is used by yaml.v3 to check
+// emptiness.
+func (m *Map[K, V]) IsZero() bool {
+	return m == nil || len(m.index) == 0
+}
+
+// Get retrieves the value associated with a key, and reports if it was found.
+func (m *Map[K, V]) Get(k K) (V, bool) {
+	var zv V
+	if m == nil {
+		return zv, false
+	}
+	idx, ok := m.index[k]
+	if !ok {
+		return zv, false
+	}
+	return m.items[idx].Value, true
+}
+
+// Contains reports if the map contains the key.
+func (m *Map[K, V]) Contains(k K) bool {
+	if m == nil {
+		return false
+	}
+	_, has := m.index[k]
+	return has
+}
+
+// Set sets the value for the given key. If the key exists, it remains in its
+// existing spot, otherwise it is added to the end of the map.
+func (m *Map[K, V]) Set(k K, v V) {
+	// Suppose someone makes Map with new(Map). The one thing we need to not be
+	// nil will be nil.
+	if m.index == nil {
+		m.index = make(map[K]int, 1)
+	}
+
+	// Replace existing value?
+	if idx, exists := m.index[k]; exists {
+		m.items[idx].Value = v
+		return
+	}
+
+	// Append new item.
+	m.index[k] = len(m.items)
+	m.items = append(m.items, Tuple[K, V]{
+		Key:   k,
+		Value: v,
+	})
+}
+
+// Replace replaces an old key in the same spot with a new key and value.
+// If the old key doesn't exist in the map, the item is inserted at the end.
+// If the new key already exists in the map (and isn't equal to the old key),
+// then it is deleted.
+// This provides a way to change a single key in-place (easier than deleting the
+// old key and all later keys, adding the new key, then restoring the rest).
+func (m *Map[K, V]) Replace(old, new K, v V) {
+	// Suppose someone makes Map with new(Map). The one thing we need to not be
+	// nil will be nil.
+	if m.index == nil {
+		m.index = make(map[K]int, 1)
+	}
+
+	// idx is where the item will go
+	idx, exists := m.index[old]
+	if !exists {
+		// Point idx at the end of m.items and ensure there is an item there.
+		idx = len(m.items)
+		m.items = append(m.items, Tuple[K, V]{})
+	}
+
+	// If the key changed, there's some tidyup...
+	if old != new {
+		// If "new" already exists in the map, then delete it first. The intent
+		// of Replace is to put the item where "old" is but under "new", so if
+		// "new" already exists somewhere else, adding it where "old" is would
+		// be getting out of hand (now there are two of them).
+		if newidx, exists := m.index[new]; exists {
+			m.items[newidx].deleted = true
+		}
+
+		// Delete "old" from the index and update "new" to point to idx
+		delete(m.index, old)
+		m.index[new] = idx
+	}
+
+	// Put the item into m.items at idx.
+	m.items[idx] = Tuple[K, V]{
+		Key:   new,
+		Value: v,
+	}
+}
+
+// Delete deletes a key from the map. It does nothing if the key is not in the
+// map.
+func (m *Map[K, V]) Delete(k K) {
+	if m == nil {
+		return
+	}
+	idx, ok := m.index[k]
+	if !ok {
+		return
+	}
+	m.items[idx].deleted = true
+	delete(m.index, k)
+
+	// If half the pairs have been deleted, perform a compaction.
+	if len(m.items) >= 2*len(m.index) {
+		m.compact()
+	}
+}
+
+// ToMap creates a regular (un-ordered) map containing the same data. If m is
+// nil, ToMap returns nil.
+func (m *Map[K, V]) ToMap() map[K]V {
+	if m == nil {
+		return nil
+	}
+	um := make(map[K]V, len(m.index))
+	m.Range(func(k K, v V) error {
+		um[k] = v
+		return nil
+	})
+	return um
+}
+
+// Equal reports if the two maps are equal (they contain the same items in the
+// same order). Keys are compared directly; values are compared using go-cmp
+// (provided with Equal[string, any] and Equal[string, string] as comparers).
+func Equal[K comparable, V any](a, b *Map[K, V]) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+	if a.Len() != b.Len() {
+		return false
+	}
+	i, j := 0, 0
+	for i < len(a.items) && j < len(b.items) {
+		for a.items[i].deleted {
+			i++
+		}
+		for b.items[j].deleted {
+			j++
+		}
+		if a.items[i].Key != b.items[j].Key {
+			return false
+		}
+		if !cmp.Equal(a.items[i].Value, b.items[j].Value, cmp.Comparer(Equal[string, string]), cmp.Comparer(Equal[string, any])) {
+			return false
+		}
+		i++
+		j++
+	}
+	return true
+}
+
+// EqualSS is a convenience alias to reduce keyboard wear.
+var EqualSS = Equal[string, string]
+
+// EqualSA is a convenience alias to reduce keyboard wear.
+var EqualSA = Equal[string, any]
+
+// compact re-organises the internal storage of the Map.
+func (m *Map[K, V]) compact() {
+	pairs := make([]Tuple[K, V], 0, len(m.index))
+	for _, p := range m.items {
+		if p.deleted {
+			continue
+		}
+		m.index[p.Key] = len(pairs)
+		pairs = append(pairs, Tuple[K, V]{
+			Key:   p.Key,
+			Value: p.Value,
+		})
+	}
+	m.items = pairs
+}
+
+// Range ranges over the map (in order). If f returns an error, it stops ranging
+// and returns that error.
+func (m *Map[K, V]) Range(f func(k K, v V) error) error {
+	if m.IsZero() {
+		return nil
+	}
+	for _, p := range m.items {
+		if p.deleted {
+			continue
+		}
+		if err := f(p.Key, p.Value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// MarshalJSON marshals the ordered map to JSON. It preserves the map order in
+// the output.
+func (m *Map[K, V]) MarshalJSON() ([]byte, error) {
+	// NB: writes to b don't error, but JSON encoding could error.
+	var b bytes.Buffer
+	enc := json.NewEncoder(&b)
+	b.WriteRune('{')
+	first := true
+	err := m.Range(func(k K, v V) error {
+		if !first {
+			// Separating comma.
+			b.WriteRune(',')
+		}
+		first = false
+		if err := enc.Encode(k); err != nil {
+			return err
+		}
+		b.WriteRune(':')
+		return enc.Encode(v)
+	})
+	if err != nil {
+		return nil, err
+	}
+	b.WriteRune('}')
+	return b.Bytes(), nil
+}
+
+// MarshalYAML returns a *yaml.Node encoding this map (in order), or an error
+// if any of the items could not be encoded into a *yaml.Node.
+func (m *Map[K, V]) MarshalYAML() (any, error) {
+	n := &yaml.Node{
+		Kind: yaml.MappingNode,
+		Tag:  "!!map",
+	}
+	err := m.Range(func(k K, v V) error {
+		nk, nv := new(yaml.Node), new(yaml.Node)
+		if err := nk.Encode(k); err != nil {
+			return err
+		}
+		if err := nv.Encode(v); err != nil {
+			return err
+		}
+		n.Content = append(n.Content, nk, nv)
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	return n, nil
+}
+
+// UnmarshalJSON unmarshals from JSON. It only supports K = string.
+// This is yaml.Unmarshal in a trenchcoat (YAML is a superset of JSON).
+func (m *Map[K, V]) UnmarshalJSON(b []byte) error {
+	return yaml.Unmarshal(b, m)
+}
+
+// UnmarshalYAML unmarshals a YAML mapping node into this map. It only supports
+// K = string. Where yaml.v3 typically infers map[string]any for unmarshaling
+// mappings into any, this method chooses *Map[string, any] instead.
+// If V = *yaml.Node, then the value nodes are not decoded. This is useful for
+// a shallow unmarshaling step.
+func (m *Map[K, V]) UnmarshalYAML(n *yaml.Node) error {
+	om, ok := any(m).(*Map[string, V])
+	if !ok {
+		var zk K
+		return fmt.Errorf("cannot unmarshal into ordered.Map with key type %T (want string)", zk)
+	}
+
+	if n.Kind != yaml.MappingNode {
+		return fmt.Errorf("line %d, col %d: wrong kind (got %x, want %x)", n.Line, n.Column, n.Kind, yaml.MappingNode)
+	}
+
+	switch tm := any(m).(type) {
+	case *Map[string, any]:
+		// Use DecodeYAML, then steal the contents.
+		sm, err := DecodeYAML(n)
+		if err != nil {
+			return err
+		}
+		*tm = *sm.(*Map[string, any])
+		return nil
+
+	case *Map[string, *yaml.Node]:
+		// Load into the map without any value decoding.
+		return rangeYAMLMap(n, func(key string, val *yaml.Node) error {
+			tm.Set(key, val)
+			return nil
+		})
+
+	default:
+		return rangeYAMLMap(n, func(key string, val *yaml.Node) error {
+			// Try DecodeYAML? (maybe V is a type like []any).
+			nv, err := DecodeYAML(val)
+			if err != nil {
+				return err
+			}
+			v, ok := nv.(V)
+			if !ok {
+				// Let yaml.v3 choose what to do with the specific type.
+				if err := val.Decode(&v); err != nil {
+					return err
+				}
+			}
+			om.Set(key, v)
+			return nil
+		})
+	}
+}
+
+// AssertValues converts a map with "any" values into a map with V values by
+// type-asserting each value. It returns an error if any value is not
+// assertable to V.
+func AssertValues[V any](m *MapSA) (*Map[string, V], error) {
+	msv := NewMap[string, V](m.Len())
+	return msv, m.Range(func(k string, v any) error {
+		t, ok := v.(V)
+		if !ok {
+			return fmt.Errorf("value for key %q (type %T) is not assertable to %T", k, v, t)
+		}
+		msv.Set(k, t)
+		return nil
+	})
+}
+
+// TransformValues converts a map with V1 values into a map with V2 values by
+// running each value through a function.
+func TransformValues[K comparable, V1, V2 any](m *Map[K, V1], f func(V1) V2) *Map[K, V2] {
+	m2 := NewMap[K, V2](m.Len())
+	m.Range(func(k K, v V1) error {
+		m2.Set(k, f(v))
+		return nil
+	})
+	return m2
+}
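A small sketch of the ordered map in use (illustrative only; since the package lives under internal/, it only compiles from within the buildkite-agent module itself):

package main

import (
	"fmt"

	"github.com/buildkite/agent/v3/internal/ordered"
)

func main() {
	m := ordered.NewMap[string, any](2)
	m.Set("steps", []any{"wait"})
	m.Set("env", map[string]string{"FOO": "bar"})
	m.Set("steps", []any{"block"}) // replaces the value but keeps the original position

	// Range visits entries in insertion order: "steps" first, then "env".
	m.Range(func(k string, v any) error {
		fmt.Println(k, v)
		return nil
	})

	if v, ok := m.Get("env"); ok {
		fmt.Println(v) // map[FOO:bar]
	}
}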
diff --git a/vendor/github.com/buildkite/agent/v3/internal/ordered/slice.go b/vendor/github.com/buildkite/agent/v3/internal/ordered/slice.go
new file mode 100644
index 0000000000..b60ad39c37
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/ordered/slice.go
@@ -0,0 +1,30 @@
+package ordered
+
+import (
+	"fmt"
+
+	"gopkg.in/yaml.v3"
+)
+
+var _ yaml.Unmarshaler = (*Slice)(nil)
+
+// Slice is []any, but unmarshaling into it prefers *Map[string,any] over
+// map[string]any.
+type Slice []any
+
+// UnmarshalYAML unmarshals sequence nodes. Any mapping nodes are unmarshaled
+// as *Map[string,any].
+func (s *Slice) UnmarshalYAML(n *yaml.Node) error {
+	if n.Kind != yaml.SequenceNode {
+		return fmt.Errorf("line %d, col %d: unsupported kind %x for unmarshaling Slice (want %x)", n.Line, n.Column, n.Kind, yaml.SequenceNode)
+	}
+	seen := make(map[*yaml.Node]bool)
+	for _, c := range n.Content {
+		cv, err := decodeYAML(seen, c)
+		if err != nil {
+			return err
+		}
+		*s = append(*s, cv)
+	}
+	return nil
+}
diff --git a/vendor/github.com/buildkite/agent/v3/internal/ordered/strings.go b/vendor/github.com/buildkite/agent/v3/internal/ordered/strings.go
new file mode 100644
index 0000000000..198af399d5
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/ordered/strings.go
@@ -0,0 +1,39 @@
+package ordered
+
+import (
+	"fmt"
+
+	"gopkg.in/yaml.v3"
+)
+
+// Strings is []string, but unmarshaling handles both sequences and single
+// scalars.
+type Strings []string
+
+// UnmarshalYAML unmarshals n depending on its Kind as either
+// - a sequence of strings (into a slice), or
+// - a single string (into a one-element slice).
+// For example, unmarshaling either `["foo"]` or `"foo"` should result in a
+// one-element slice (`Strings{"foo"}`).
+func (s *Strings) UnmarshalYAML(n *yaml.Node) error {
+	switch n.Kind {
+	case yaml.ScalarNode:
+		var x string
+		if err := n.Decode(&x); err != nil {
+			return err
+		}
+		*s = append(*s, x)
+
+	case yaml.SequenceNode:
+		var xs []string
+		if err := n.Decode(&xs); err != nil {
+			return err
+		}
+		*s = append(*s, xs...)
+
+	default:
+		return fmt.Errorf("line %d, col %d: unsupported kind %x for unmarshaling Strings (want %x or %x)", n.Line, n.Column, n.Kind, yaml.ScalarNode, yaml.SequenceNode)
+	}
+
+	return nil
+}
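For clarity, a sketch of the "scalar or sequence" behaviour described above (illustrative only; as an internal package this only compiles inside the agent module):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"

	"github.com/buildkite/agent/v3/internal/ordered"
)

func main() {
	var single, many ordered.Strings

	// A bare scalar and a sequence both unmarshal into a string slice.
	if err := yaml.Unmarshal([]byte(`"make test"`), &single); err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal([]byte(`[lint, test]`), &many); err != nil {
		panic(err)
	}

	fmt.Println(single) // [make test]
	fmt.Println(many)   // [lint test]
}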
diff --git a/vendor/github.com/buildkite/agent/v3/internal/ordered/tuple.go b/vendor/github.com/buildkite/agent/v3/internal/ordered/tuple.go
new file mode 100644
index 0000000000..4c9006f8e7
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/ordered/tuple.go
@@ -0,0 +1,15 @@
+package ordered
+
+// Tuple is used for storing values in Map.
+type Tuple[K comparable, V any] struct {
+	Key   K
+	Value V
+
+	deleted bool
+}
+
+// TupleSS is a convenience alias to reduce keyboard wear.
+type TupleSS = Tuple[string, string]
+
+// TupleSA is a convenience alias to reduce keyboard wear.
+type TupleSA = Tuple[string, any]
diff --git a/vendor/github.com/buildkite/agent/v3/internal/ordered/unmarshal.go b/vendor/github.com/buildkite/agent/v3/internal/ordered/unmarshal.go
new file mode 100644
index 0000000000..f316eb60ff
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/ordered/unmarshal.go
@@ -0,0 +1,375 @@
+package ordered
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"gopkg.in/yaml.v3"
+)
+
+// Errors that can be returned by Unmarshal
+// (typically wrapped - use errors.Is).
+var (
+	ErrIntoNonPointer       = errors.New("cannot unmarshal into non-pointer")
+	ErrIntoNil              = errors.New("cannot unmarshal into nil")
+	ErrIncompatibleTypes    = errors.New("incompatible types")
+	ErrUnsupportedSrc       = errors.New("cannot unmarshal from src")
+	ErrMultipleInlineFields = errors.New(`multiple fields tagged with yaml:",inline"`)
+)
+
+// Unmarshaler is an interface that types can use to override the default
+// unmarshaling behaviour.
+type Unmarshaler interface {
+	// UnmarshalOrdered should unmarshal src into the implementing value. src
+	// will generally be one of *Map[string, any], []any, or a "scalar" built-in
+	// type.
+	UnmarshalOrdered(src any) error
+}
+
+// Unmarshal recursively unmarshals src into dst. src and dst can be a variety
+// of types under the hood, but some combinations don't work. Good luck!
+//
+//   - If dst is nil, then src must be nil.
+//   - If src is *yaml.Node, then DecodeYAML is called to translate the node
+//     into another type.
+//   - If dst is a pointer and src is nil, then the value dst points to is set
+//     to zero.
+//   - If dst is a pointer to a pointer, Unmarshal recursively calls Unmarshal
+//     on the inner pointer, creating a new value of the type being pointed to
+//     as needed.
+//   - If dst implements Unmarshaler, Unmarshal returns
+//     dst.UnmarshalOrdered(src).
+//   - If dst is *any, Unmarshal copies src directly into *dst.
+//
+// Otherwise, it acts a lot like yaml.Unmarshal, except that the type S of src
+// and type D of dst can be one of the following:
+//
+//   - S = *Map[string, any] (recursively containing values with types from this
+//     list); D must be one of: a pointer to a struct with yaml tags,
+//     or a map or a pointer to a map (either *Map or map) with string keys.
+//     yaml tags includes ",inline". Inline fields must themselves be a type
+//     that Unmarshal can unmarshal *Map[string, any] into - another struct or
+//     Map or map with string keys.
+//   - S = []any (also recursively containing values with types from this list),
+//     which is recursively unmarshaled elementwise; D is *[]any or
+//     *[]somethingElse.
+//   - S ∊ {string, float64, int, bool}; D must be *S (value copied directly),
+//     *[]S or *[]any (value appended), *string (value formatted through
+//     fmt.Sprint) or *[]string (formatted value appended).
+func Unmarshal(src, dst any) error {
+	if dst == nil {
+		// This is interface nil (not typed nil, which has to be tested after
+		// figuring out the types).
+		if src == nil {
+			// Unmarshal nil into nil? Seems legit
+			return nil
+		}
+		return ErrIntoNil
+	}
+
+	if n, ok := src.(*yaml.Node); ok {
+		o, err := DecodeYAML(n)
+		if err != nil {
+			return err
+		}
+		src = o
+	}
+
+	if um, ok := dst.(Unmarshaler); ok {
+		return um.UnmarshalOrdered(src)
+	}
+
+	// Handle typed nil pointers, pointers to nil, and pointers to pointers.
+	// Note that vdst could still be a map.
+	vdst := reflect.ValueOf(dst)
+
+	// First, handle src == nil. dst must be a pointer to something or nil.
+	if src == nil {
+		if vdst.Kind() != reflect.Pointer {
+			return fmt.Errorf("%w (%T)", ErrIntoNonPointer, dst)
+		}
+		if vdst.IsNil() {
+			// Unmarshaling nil into nil... seems legit.
+			return nil
+		}
+		// Zero out the value pointed to by dst.
+		vdst.Elem().SetZero()
+		return nil
+	}
+
+	// src is not nil. dst is usually a pointer - is it nil? pointer to pointer?
+	if vdst.Kind() == reflect.Pointer {
+		// Unmarshaling into typed nil value?
+		if vdst.IsNil() {
+			return ErrIntoNil
+		}
+
+		// Non-nil pointer to something. Another pointer?
+		if edst := vdst.Elem(); edst.Kind() == reflect.Pointer {
+			// The type of the value being double-pointed to.
+			innerType := edst.Type().Elem()
+			if edst.IsNil() {
+				// Create a new value of the inner type.
+				edst.Set(reflect.New(innerType))
+			}
+
+			// Handle double pointers by recursing on the inner layer.
+			return Unmarshal(src, edst.Interface())
+		}
+	}
+
+	if tdst, ok := dst.(*any); ok {
+		*tdst = src
+		return nil
+	}
+
+	switch tsrc := src.(type) {
+	case *Map[string, any]:
+		return tsrc.decodeInto(dst)
+
+	case []any:
+		switch tdst := dst.(type) {
+		case *[]any:
+			*tdst = append(*tdst, tsrc...)
+
+		default:
+			if vdst.Kind() != reflect.Pointer {
+				return fmt.Errorf("%w (%T)", ErrIntoNonPointer, dst)
+			}
+			sdst := vdst.Elem() // The slice we append to, reflectively
+			if sdst.Kind() != reflect.Slice {
+				return fmt.Errorf("%w: cannot unmarshal []any into %T", ErrIncompatibleTypes, dst)
+			}
+			etype := sdst.Type().Elem() // E = Type of the slice's elements
+			for _, a := range tsrc {
+				x := reflect.New(etype) // *E
+				if err := Unmarshal(a, x.Interface()); err != nil {
+					return err
+				}
+				sdst = reflect.Append(sdst, x.Elem())
+			}
+			vdst.Elem().Set(sdst)
+		}
+
+	case string:
+		return unmarshalScalar(tsrc, dst)
+
+	case float64:
+		return unmarshalScalar(tsrc, dst)
+
+	case int:
+		return unmarshalScalar(tsrc, dst)
+
+	case bool:
+		return unmarshalScalar(tsrc, dst)
+
+	default:
+		return fmt.Errorf("%w %T", ErrUnsupportedSrc, src)
+	}
+
+	return nil
+}
+
+func unmarshalScalar[S any](src S, dst any) error {
+	switch tdst := dst.(type) {
+	case *S:
+		*tdst = src
+
+	case *[]S:
+		*tdst = append(*tdst, src)
+
+	case *[]any:
+		*tdst = append(*tdst, src)
+
+	case *string:
+		*tdst = fmt.Sprint(src)
+
+	case *[]string:
+		*tdst = append(*tdst, fmt.Sprint(src))
+
+	default:
+		return fmt.Errorf("%w: cannot unmarshal %T into %T", ErrIncompatibleTypes, src, dst)
+	}
+	return nil
+}
+
+// decodeInto loads the contents of the map into the target (pointer to struct).
+// It behaves sort of like `yaml.Node.Decode`:
+//
+//   - If target is a map type with string keys, it unmarshals its contents
+//     elementwise, with values passed through Unmarshal.
+//   - If target is *struct{...}, it matches keys to exported fields either
+//     by looking at `yaml` tags, or using lowercased field names.
+//   - If a field has a yaml:",inline" tag, it copies any leftover values into
+//     that field, which must have type map[string]any or any. (Structs are not
+//     supported for inline.)
+func (m *Map[K, V]) decodeInto(target any) error {
+	tm, ok := any(m).(*Map[string, any])
+	if !ok {
+		return fmt.Errorf("%w: cannot unmarshal from %T, want K=string, V=any", ErrIncompatibleTypes, m)
+	}
+
+	// Work out the kind of target being used.
+	// Dereference the target to find the inner value, if needed.
+	targetValue := reflect.ValueOf(target)
+	var innerValue reflect.Value
+	switch targetValue.Kind() {
+	case reflect.Pointer:
+		// Passed a pointer to something.
+		if targetValue.IsNil() {
+			return ErrIntoNil
+		}
+		innerValue = targetValue.Elem()
+
+	case reflect.Map:
+		// Passed a map directly.
+		innerValue = targetValue
+		if innerValue.IsNil() {
+			return ErrIntoNil
+		}
+
+	default:
+		return fmt.Errorf("%w: cannot unmarshal %T into %T, want map or *struct{...}", ErrIncompatibleTypes, m, target)
+	}
+
+	switch innerValue.Kind() {
+	case reflect.Map:
+		// Process the map directly.
+		mapType := innerValue.Type()
+		// For simplicity, require the key type to be string.
+		if keyType := mapType.Key(); keyType.Kind() != reflect.String {
+			return fmt.Errorf("%w for map key: cannot unmarshal %T into %T", ErrIncompatibleTypes, m, target)
+		}
+
+		// If target is a pointer to a nil map (with type), create a new map.
+		if innerValue.IsNil() {
+			innerValue.Set(reflect.MakeMapWithSize(mapType, tm.Len()))
+		}
+
+		valueType := mapType.Elem()
+		return tm.Range(func(k string, v any) error {
+			nv := reflect.New(valueType)
+			if err := Unmarshal(v, nv.Interface()); err != nil {
+				return err
+			}
+			innerValue.SetMapIndex(reflect.ValueOf(k), nv.Elem())
+			return nil
+		})
+
+	case reflect.Struct:
+		// The rest of the method is concerned with this.
+	default:
+		return fmt.Errorf("%w: cannot unmarshal %T into %T", ErrIncompatibleTypes, m, target)
+	}
+
+	// These are the (accessible by reflection) fields it has.
+	// This includes non-exported fields.
+	fields := reflect.VisibleFields(innerValue.Type())
+
+	var inlineField reflect.StructField
+	outlineKeys := make(map[string]struct{})
+
+	for _, field := range fields {
+		// Skip non-exported fields. This is conventional *and* correct.
+		if !field.IsExported() {
+			continue
+		}
+
+		// No worries if the tag is not there - apply defaults.
+		tag, _ := field.Tag.Lookup("yaml")
+
+		switch tag {
+		case "-":
+			// Note: if a field is skipped with "-", yaml.v3 still puts it into
+			// inline.
+			continue
+
+		case ",inline":
+			if inlineField.Index != nil {
+				return fmt.Errorf("%w %T", ErrMultipleInlineFields, target)
+			}
+			inlineField = field
+			continue
+		}
+
+		// default:
+		key, _, _ := strings.Cut(tag, ",")
+		if key == "" {
+			// yaml.v3 convention:
+			// "Struct fields ... are unmarshalled using the field name
+			// lowercased as the default key."
+			key = strings.ToLower(field.Name)
+		}
+
+		// Is there a value for this key?
+		v, has := tm.Get(key)
+		if !has {
+			continue
+		}
+
+		// Now load v into this field.
+		outlineKeys[key] = struct{}{}
+
+		// Get a pointer to the field. This works because target is a pointer.
+		ptrToField := innerValue.FieldByIndex(field.Index).Addr()
+		if err := Unmarshal(v, ptrToField.Interface()); err != nil {
+			return err
+		}
+	}
+
+	if inlineField.Index == nil {
+		return nil
+	}
+	// The rest is handling the ",inline" field.
+	// We support any field that Unmarshal can unmarshal tm into.
+
+	inlinePtr := innerValue.FieldByIndex(inlineField.Index).Addr()
+
+	// Copy all values not consumed by regular (non-inline) fields into a temporary map.
+	// This is just to avoid mutating tm.
+	temp := NewMap[string, any](tm.Len())
+	tm.Range(func(k string, v any) error {
+		if _, outline := outlineKeys[k]; outline {
+			return nil
+		}
+		temp.Set(k, v)
+		return nil
+	})
+
+	// If the inline map contains nothing, then don't bother setting it.
+	if temp.Len() == 0 {
+		return nil
+	}
+
+	return Unmarshal(temp, inlinePtr.Interface())
+}
+
+// UnmarshalOrdered unmarshals a value into this map.
+// K must be string, src must be *Map[string, any], and each value in src must
+// be unmarshallable into *V.
+func (m *Map[K, V]) UnmarshalOrdered(src any) error {
+	if m == nil {
+		return ErrIntoNil
+	}
+
+	tm, ok := any(m).(*Map[string, V])
+	if !ok {
+		return fmt.Errorf("%w: receiver type %T, want K = string", ErrIncompatibleTypes, m)
+	}
+
+	tsrc, ok := src.(*Map[string, any])
+	if !ok {
+		return fmt.Errorf("%w: src type %T, want *Map[string, any]", ErrIncompatibleTypes, src)
+	}
+
+	return tsrc.Range(func(k string, v any) error {
+		var dv V
+		if err := Unmarshal(v, &dv); err != nil {
+			return err
+		}
+		tm.Set(k, dv)
+		return nil
+	})
+}
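To ground the Unmarshal rules above, a sketch of decoding a *Map[string, any] into a tagged struct with an inline catch-all field (illustrative only; the struct and keys are made up, and as an internal package this only compiles inside the agent module):

package main

import (
	"fmt"

	"github.com/buildkite/agent/v3/internal/ordered"
)

type step struct {
	Label   string         `yaml:"label"`
	Command string         `yaml:"command"`
	Rest    map[string]any `yaml:",inline"`
}

func main() {
	src := ordered.MapFromItems(
		ordered.TupleSA{Key: "label", Value: "tests"},
		ordered.TupleSA{Key: "command", Value: "make test"},
		ordered.TupleSA{Key: "timeout_in_minutes", Value: 10},
	)

	var s step
	if err := ordered.Unmarshal(src, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Label, s.Command) // tests make test
	fmt.Println(s.Rest)             // map[timeout_in_minutes:10]
}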
diff --git a/vendor/github.com/buildkite/agent/v3/internal/ordered/yaml.go b/vendor/github.com/buildkite/agent/v3/internal/ordered/yaml.go
new file mode 100644
index 0000000000..fd5b4b1563
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/ordered/yaml.go
@@ -0,0 +1,260 @@
+package ordered
+
+import (
+	"fmt"
+
+	"gopkg.in/yaml.v3"
+)
+
+// DecodeYAML recursively unmarshals n into a generic type (any, []any, or
+// *Map[string, any]) depending on the kind of n. Where yaml.v3 typically infers
+// map[string]any for unmarshaling mappings into any, DecodeYAML chooses
+// *Map[string, any] instead.
+func DecodeYAML(n *yaml.Node) (any, error) {
+	return decodeYAML(make(map[*yaml.Node]bool), n)
+}
+
+// decodeYAML recursively unmarshals n into a generic type (any, []any, or
+// *Map[string, any]) depending on the kind of n.
+func decodeYAML(seen map[*yaml.Node]bool, n *yaml.Node) (any, error) {
+	// nil decodes to nil.
+	if n == nil {
+		return nil, nil
+	}
+
+	// If n has been seen already while processing the parents of n, it's an
+	// infinite recursion.
+	// Simple example:
+	// ---
+	// a: &a  // seen is empty on encoding a
+	//   b: *a   // seen contains a while encoding b
+	if seen[n] {
+		return nil, fmt.Errorf("line %d, col %d: infinite recursion", n.Line, n.Column)
+	}
+	seen[n] = true
+
+	// n needs to be "un-seen" when this layer of recursion is done:
+	defer delete(seen, n)
+	// Why? seen is a map, which is used by reference, so it will be shared
+	// between calls to decode, which is recursive. And unlike a merge, the
+	// same alias can be validly used for different subtrees:
+	// ---
+	// a: &a
+	//   b: c
+	// d:
+	//   da: *a
+	//   db: *a
+	// ...
+	// (d contains two copies of a).
+	// So *a needs to be "unseen" between encoding "da" and "db".
+
+	switch n.Kind {
+	case yaml.ScalarNode:
+		// If we need to parse more kinds of scalar, e.g. !!bool NO, or base-60
+		// integers, this is where we would swap out n.Decode.
+		var v any
+		if err := n.Decode(&v); err != nil {
+			return nil, err
+		}
+		return v, nil
+
+	case yaml.SequenceNode:
+		v := make([]any, 0, len(n.Content))
+		for _, c := range n.Content {
+			cv, err := decodeYAML(seen, c)
+			if err != nil {
+				return nil, err
+			}
+			v = append(v, cv)
+		}
+		return v, nil
+
+	case yaml.MappingNode:
+		m := NewMap[string, any](len(n.Content) / 2)
+		// Why not call m.UnmarshalYAML(n) ?
+		// Because we can't pass `seen` through that.
+		err := rangeYAMLMap(n, func(key string, val *yaml.Node) error {
+			v, err := decodeYAML(seen, val)
+			if err != nil {
+				return err
+			}
+			m.Set(key, v)
+			return nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		return m, nil
+
+	case yaml.AliasNode:
+		// This is one of the two ways this can blow up recursively.
+		// The other (map merges) is handled by rangeMap.
+		return decodeYAML(seen, n.Alias)
+
+	case yaml.DocumentNode:
+		switch len(n.Content) {
+		case 0:
+			return nil, nil
+		case 1:
+			return decodeYAML(seen, n.Content[0])
+		default:
+			return nil, fmt.Errorf("line %d, col %d: document contains more than 1 content item (%d)", n.Line, n.Column, len(n.Content))
+		}
+
+	default:
+		return nil, fmt.Errorf("line %d, col %d: unsupported kind %x", n.Line, n.Column, n.Kind)
+	}
+}
+
+// rangeYAMLMap calls f with each key/value pair in a mapping node.
+// It only supports scalar keys, and converts them to canonical string values.
+// Non-scalar and non-stringable keys result in an error.
+// Because mapping nodes can contain merges from other mapping nodes,
+// potentially via sequence nodes and aliases, this function also accepts
+// sequences and aliases (that must themselves recursively only contain
+// mappings, sequences, and aliases...).
+func rangeYAMLMap(n *yaml.Node, f func(key string, val *yaml.Node) error) error {
+	return rangeYAMLMapImpl(make(map[*yaml.Node]bool), n, f)
+}
+
+// rangeYAMLMapImpl implements rangeYAMLMap. It tracks mapping nodes already
+// merged, to prevent infinite merge loops and avoid unnecessarily merging the
+// same mapping repeatedly.
+func rangeYAMLMapImpl(merged map[*yaml.Node]bool, n *yaml.Node, f func(key string, val *yaml.Node) error) error {
+	// Go-like semantics: no entries in "nil".
+	if n == nil {
+		return nil
+	}
+
+	// If this node has already been merged into the top-level map being ranged,
+	// we don't need to merge it again.
+	if merged[n] {
+		return nil
+	}
+	merged[n] = true
+
+	switch n.Kind {
+	case yaml.MappingNode:
+		// gopkg.in/yaml.v3 parses mapping node contents as a flat list:
+		// key, value, key, value...
+		if len(n.Content)%2 != 0 {
+			return fmt.Errorf("line %d, col %d: mapping node has odd content length %d", n.Line, n.Column, len(n.Content))
+		}
+
+		// Keys at an outer level take precedence over keys being merged:
+		// "its key/value pairs is inserted into the current mapping, unless the
+		// key already exists in it." https://yaml.org/type/merge.html
+		// But we care about key ordering!
+		// This necessitates two passes:
+		// 1. Obtain the keys in this map
+		// 2. Range over the map again, recursing into merges.
+		// While merging, ignore keys in the outer level.
+		// Merges may produce new keys to ignore in subsequent merges:
+		// "Keys in mapping nodes earlier in the sequence override keys
+		// specified in later mapping nodes."
+
+		// 1. A pass to get the keys at this level.
+		keys := make(map[string]bool)
+		for i := 0; i < len(n.Content); i += 2 {
+			k := n.Content[i]
+
+			// Ignore merges in this pass.
+			if k.Tag == "!!merge" {
+				continue
+			}
+
+			// Canonicalise the key into a string and store it.
+			ck, err := canonicalMapKey(k)
+			if err != nil {
+				return err
+			}
+			keys[ck] = true
+		}
+
+		// Ignore existing keys when merging. Record new keys to ignore in
+		// subsequent merges.
+		skipKeys := func(k string, v *yaml.Node) error {
+			if keys[k] {
+				return nil
+			}
+			keys[k] = true
+			return f(k, v)
+		}
+
+		// 2. Range over each pair, recursing into merges.
+		for i := 0; i < len(n.Content); i += 2 {
+			k, v := n.Content[i], n.Content[i+1]
+
+			// Is this pair a merge? (`<<: *foo`)
+			if k.Tag == "!!merge" {
+				// Recursively range over the contents of the value, which
+				// could be an alias to a mapping node, or a sequence of aliases
+				// to mapping nodes, which could themselves contain merges...
+				if err := rangeYAMLMapImpl(merged, v, skipKeys); err != nil {
+					return err
+				}
+				continue
+			}
+
+			// Canonicalise the key into a string (again).
+			ck, err := canonicalMapKey(k)
+			if err != nil {
+				return err
+			}
+
+			// Yield the canonical key and the value.
+			if err := f(ck, v); err != nil {
+				return err
+			}
+		}
+
+	case yaml.SequenceNode:
+		// Range over each element e in the sequence.
+		for _, e := range n.Content {
+			if err := rangeYAMLMapImpl(merged, e, f); err != nil {
+				return err
+			}
+		}
+
+	case yaml.AliasNode:
+		// Follow the alias and range over that.
+		if err := rangeYAMLMapImpl(merged, n.Alias, f); err != nil {
+			return err
+		}
+
+	default:
+		// TODO: Use %v once yaml.Kind has a String method
+		return fmt.Errorf("line %d, col %d: cannot range over node kind %x", n.Line, n.Column, n.Kind)
+	}
+	return nil
+}
+
+// canonicalMapKey converts a scalar value into a string suitable for use as
+// a map key. YAML expects different representations of the same value, e.g.
+// 0xb and 11, to be equivalent, and therefore a duplicate key. JSON requires
+// all keys to be strings.
+func canonicalMapKey(n *yaml.Node) (string, error) {
+	var x any
+	if err := n.Decode(&x); err != nil {
+		return "", err
+	}
+	if x == nil || n.Tag == "!!null" {
+		// Nulls are not valid JSON keys.
+		return "", fmt.Errorf("line %d, col %d: null not supported as a map key", n.Line, n.Column)
+	}
+	switch n.Tag {
+	case "!!bool":
+		// Canonicalise to true or false.
+		return fmt.Sprintf("%t", x), nil
+	case "!!int":
+		// Canonicalise to decimal.
+		return fmt.Sprintf("%d", x), nil
+	case "!!float":
+		// Canonicalise to scientific notation.
+		// Don't handle Inf or NaN specially, as they will be quoted.
+		return fmt.Sprintf("%e", x), nil
+	default:
+		// Assume the value is already a suitable key.
+		return n.Value, nil
+	}
+}
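+
+// canonicalKeyExample is an illustrative sketch added for this document; it is
+// not part of the upstream vendored file. The hand-built nodes are assumptions
+// for the example only. It shows two YAML spellings of the same integer
+// (0xb and 11) canonicalising to the same map key.
+func canonicalKeyExample() (bool, error) {
+	hex := &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!int", Value: "0xb"}
+	dec := &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!int", Value: "11"}
+	a, err := canonicalMapKey(hex)
+	if err != nil {
+		return false, err
+	}
+	b, err := canonicalMapKey(dec)
+	if err != nil {
+		return false, err
+	}
+	// Both keys are "11", so in a mapping they would collide as duplicates.
+	return a == b, nil
+}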
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/doc.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/doc.go
new file mode 100644
index 0000000000..28f9335943
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/doc.go
@@ -0,0 +1,13 @@
+// Package pipeline implements the pieces necessary for the agent to work with
+// pipelines (typically in YAML or JSON form).
+//
+// The pipeline object model (Pipeline, Steps, Plugin, etc) have these caveats:
+//   - It is incomplete: there may be fields accepted by the API that are not
+//     listed. Do not treat Pipeline, CommandStep, etc, as comprehensive
+//     reference guides for how to write a pipeline.
+//   - It normalises: unmarshaling accepts a variety of step forms, but
+//     marshaling back out produces more normalised output. An unmarshal/marshal
+//     round-trip may produce different output.
+//   - It is non-canonical: using the object model does not guarantee that a
+//     pipeline will be accepted by the pipeline upload API.
+package pipeline
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/interpolate.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/interpolate.go
new file mode 100644
index 0000000000..3ef40a2e58
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/interpolate.go
@@ -0,0 +1,131 @@
+package pipeline
+
+import (
+	"github.com/buildkite/agent/v3/internal/ordered"
+	"github.com/buildkite/interpolate"
+)
+
+// This file contains helpers for recursively interpolating all the strings in
+// pipeline objects.
+
+// selfInterpolater describes types that can interpolate themselves in-place.
+// They can call interpolate.Interpolate on strings, or
+// interpolate{Slice,Map,OrderedMap,Any} on their other contents, to do this.
+type selfInterpolater interface {
+	interpolate(interpolate.Env) error
+}
+
+// interpolateAny interpolates (almost) anything in-place. When passed a string,
+// it returns a new string. Anything it doesn't know how to interpolate is
+// returned unaltered.
+func interpolateAny[T any](env interpolate.Env, o T) (T, error) {
+	// The box-typeswitch-unbox dance is required because the Go compiler
+	// has no type switch for type parameters.
+	var err error
+	a := any(o)
+
+	switch t := a.(type) {
+	case selfInterpolater:
+		err = t.interpolate(env)
+
+	case *string:
+		if t == nil {
+			return o, nil
+		}
+		*t, err = interpolate.Interpolate(env, *t)
+		a = t
+
+	case string:
+		a, err = interpolate.Interpolate(env, t)
+
+	case []any:
+		err = interpolateSlice(env, t)
+
+	case []string:
+		err = interpolateSlice(env, t)
+
+	case ordered.Slice:
+		err = interpolateSlice(env, t)
+
+	case map[string]any:
+		err = interpolateMap(env, t)
+
+	case map[string]string:
+		err = interpolateMap(env, t)
+
+	case *ordered.Map[string, any]:
+		err = interpolateOrderedMap(env, t)
+
+	case *ordered.Map[string, string]:
+		err = interpolateOrderedMap(env, t)
+
+	default:
+		return o, nil
+	}
+
+	// This happens if T is an interface type and o was interface-nil to begin
+	// with. (You can't type assert interface-nil.)
+	if a == nil {
+		var zt T
+		return zt, err
+	}
+	return a.(T), err
+}
+
+// interpolateSlice applies interpolateAny over any type of slice. Values in the
+// slice are updated in-place.
+func interpolateSlice[E any, S ~[]E](env interpolate.Env, s S) error {
+	for i, e := range s {
+		// It could be a string, so replace the old value with the new.
+		inte, err := interpolateAny(env, e)
+		if err != nil {
+			return err
+		}
+		s[i] = inte
+	}
+	return nil
+}
+
+// interpolateMap applies interpolateAny over any type of map. The map is
+// altered in-place.
+func interpolateMap[K comparable, V any, M ~map[K]V](env interpolate.Env, m M) error {
+	for k, v := range m {
+		// We interpolate both keys and values.
+		intk, err := interpolateAny(env, k)
+		if err != nil {
+			return err
+		}
+
+		// V could be string, so be sure to replace the old value with the new.
+		intv, err := interpolateAny(env, v)
+		if err != nil {
+			return err
+		}
+
+		// If the key changed due to interpolation, delete the old key.
+		if k != intk {
+			delete(m, k)
+		}
+		m[intk] = intv
+	}
+	return nil
+}
+
+// interpolateOrderedMap applies interpolateAny over any type of ordered.Map.
+// The map is altered in-place.
+func interpolateOrderedMap[K comparable, V any](env interpolate.Env, m *ordered.Map[K, V]) error {
+	return m.Range(func(k K, v V) error {
+		// We interpolate both keys and values.
+		intk, err := interpolateAny(env, k)
+		if err != nil {
+			return err
+		}
+		intv, err := interpolateAny(env, v)
+		if err != nil {
+			return err
+		}
+
+		m.Replace(k, intk, intv)
+		return nil
+	})
+}
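+
+// interpolateMapExample is an illustrative sketch added for this document; it
+// is not part of the upstream vendored file. The environment and map contents
+// are assumptions for the example only. It shows interpolateMap rewriting both
+// keys and values in place.
+func interpolateMapExample() (map[string]string, error) {
+	env := interpolate.NewSliceEnv([]string{"NAME=blue"})
+	m := map[string]string{"${NAME}_TEAM": "deploy-${NAME}"}
+	if err := interpolateMap(env, m); err != nil {
+		return nil, err
+	}
+	// m is now {"blue_TEAM": "deploy-blue"}; the old "${NAME}_TEAM" key was
+	// deleted because interpolation changed it.
+	return m, nil
+}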
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/json.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/json.go
new file mode 100644
index 0000000000..a28a02e3ea
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/json.go
@@ -0,0 +1,98 @@
+package pipeline
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/oleiade/reflections"
+)
+
+// inlineFriendlyMarshalJSON marshals the given object to JSON, but with special handling given to fields tagged with ",inline".
+// This is needed because yaml.v3 has "inline" but encoding/json has no concept of it.
+func inlineFriendlyMarshalJSON(q any) ([]byte, error) {
+	fieldNames, err := reflections.Fields(q)
+	if err != nil {
+		return nil, fmt.Errorf("could not get fields of %T: %w", q, err)
+	}
+
+	var inlineFields map[string]any // no need to pre-allocate, we directly set it if we find inline fields
+	outlineFields := make(map[string]any, len(fieldNames))
+
+	for _, fieldName := range fieldNames {
+		tag, err := reflections.GetFieldTag(q, fieldName, "yaml")
+		if err != nil {
+			return nil, fmt.Errorf("could not get yaml tag of %T.%s: %w", q, fieldName, err)
+		}
+
+		switch tag {
+		case "-":
+			continue
+
+		case ",inline":
+			inlineFieldsValue, err := reflections.GetField(q, fieldName)
+			if err != nil {
+				return nil, fmt.Errorf("could not get inline fields value of %T.%s: %w", q, fieldName, err)
+			}
+
+			if inf, ok := inlineFieldsValue.(map[string]any); ok {
+				inlineFields = inf
+			} else {
+				return nil, fmt.Errorf("inline fields value of %T.%s must be a map[string]any, was %T instead", q, fieldName, inlineFieldsValue)
+			}
+
+		default:
+			fieldValue, err := reflections.GetField(q, fieldName)
+			if err != nil {
+				return nil, fmt.Errorf("could not get value of %T.%s: %w", q, fieldName, err)
+			}
+
+			tags := strings.Split(tag, ",")
+			keyName := tags[0] // e.g. "foo,omitempty" -> "foo"
+			if len(tags) > 1 && tags[1] == "omitempty" && isEmptyValue(fieldValue) {
+				continue
+			}
+
+			outlineFields[keyName] = fieldValue
+		}
+	}
+
+	allFields := make(map[string]any, len(outlineFields)+len(inlineFields))
+
+	for k, v := range inlineFields {
+		allFields[k] = v
+	}
+
+	// "outline" (non-inline) fields should take precedence over inline fields
+	for k, v := range outlineFields {
+		allFields[k] = v
+	}
+
+	return json.Marshal(allFields)
+}
+
+// stolen from encoding/json
+func isEmptyValue(q any) bool {
+	if q == nil { // not stolen from encoding/json, but oddly missing from it?
+		return true
+	}
+
+	v := reflect.ValueOf(q)
+
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Pointer:
+		return v.IsNil()
+	}
+	return false
+}
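+
+// inlineFriendlyMarshalJSONExample is an illustrative sketch added for this
+// document; it is not part of the upstream vendored file. The step value is an
+// assumption for the example only. It shows the ",inline" map being hoisted to
+// the top level while empty ",omitempty" fields are dropped.
+func inlineFriendlyMarshalJSONExample() ([]byte, error) {
+	step := &CommandStep{
+		Command:         "echo hello",
+		RemainingFields: map[string]any{"label": ":wave:"},
+	}
+	// Expected output: {"command":"echo hello","label":":wave:"}
+	return inlineFriendlyMarshalJSON(step)
+}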
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/parser.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/parser.go
new file mode 100644
index 0000000000..b8b3334e68
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/parser.go
@@ -0,0 +1,32 @@
+package pipeline
+
+import (
+	"errors"
+	"io"
+	"strings"
+
+	"github.com/buildkite/agent/v3/internal/ordered"
+	"gopkg.in/yaml.v3"
+)
+
+// Parse parses a pipeline. It does not apply interpolation.
+func Parse(src io.Reader) (*Pipeline, error) {
+	// First get yaml.v3 to give us a raw document (*yaml.Node).
+	n := new(yaml.Node)
+	if err := yaml.NewDecoder(src).Decode(n); err != nil {
+		return nil, formatYAMLError(err)
+	}
+
+	// Instead of unmarshalling into structs, which is easy-ish to use but
+	// doesn't work with some non YAML 1.2 features (merges), decode the
+	// *yaml.Node into *ordered.Map, []any, or any (recursively).
+	// This resolves aliases and merges and gives a more convenient form to work
+	// with when handling different structural representations of the same
+	// configuration. Then decode _that_ into a pipeline.
+	p := new(Pipeline)
+	return p, ordered.Unmarshal(n, p)
+}
+
+func formatYAMLError(err error) error {
+	return errors.New(strings.TrimPrefix(err.Error(), "yaml: "))
+}
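+
+// parseExample is an illustrative sketch added for this document; it is not
+// part of the upstream vendored file. The YAML literal is an assumption for
+// the example only. It shows how a caller might parse a minimal pipeline.
+func parseExample() (*Pipeline, error) {
+	src := strings.NewReader(`
+env:
+  NAME: world
+steps:
+  - command: echo hello
+`)
+	// The result has one *CommandStep in p.Steps and NAME in p.Env.
+	return Parse(src)
+}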
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/pipeline.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/pipeline.go
new file mode 100644
index 0000000000..69de4345a9
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/pipeline.go
@@ -0,0 +1,129 @@
+package pipeline
+
+import (
+	"fmt"
+
+	"github.com/buildkite/agent/v3/env"
+	"github.com/buildkite/agent/v3/internal/ordered"
+	"github.com/buildkite/agent/v3/tracetools"
+	"github.com/buildkite/interpolate"
+	"github.com/lestrrat-go/jwx/v2/jwk"
+)
+
+// Pipeline models a pipeline.
+//
+// Standard caveats apply - see the package comment.
+type Pipeline struct {
+	Steps Steps          `yaml:"steps"`
+	Env   *ordered.MapSS `yaml:"env,omitempty"`
+
+	// RemainingFields stores any other top-level mapping items so they at least
+	// survive an unmarshal-marshal round-trip.
+	RemainingFields map[string]any `yaml:",inline"`
+}
+
+// MarshalJSON marshals a pipeline to JSON. Special handling is needed because
+// yaml.v3 has "inline" but encoding/json has no concept of it.
+func (p *Pipeline) MarshalJSON() ([]byte, error) {
+	return inlineFriendlyMarshalJSON(p)
+}
+
+// UnmarshalOrdered unmarshals the pipeline from either []any (a legacy
+// sequence of steps) or *ordered.MapSA (a modern pipeline configuration).
+func (p *Pipeline) UnmarshalOrdered(o any) error {
+	switch o := o.(type) {
+	case *ordered.MapSA:
+		// A pipeline can be a mapping.
+		// Wrap in a secret type to avoid infinite recursion between this method
+		// and ordered.Unmarshal.
+		type wrappedPipeline Pipeline
+		if err := ordered.Unmarshal(o, (*wrappedPipeline)(p)); err != nil {
+			return fmt.Errorf("unmarshaling Pipeline: %w", err)
+		}
+
+	case []any:
+		// A pipeline can be a sequence of steps.
+		if err := ordered.Unmarshal(o, &p.Steps); err != nil {
+			return fmt.Errorf("unmarshaling steps: %w", err)
+		}
+
+	default:
+		return fmt.Errorf("unmarshaling Pipeline: unsupported type %T, want either *ordered.Map[string, any] or []any", o)
+	}
+
+	// Ensure Steps is never nil. Server side expects a sequence.
+	if p.Steps == nil {
+		p.Steps = Steps{}
+	}
+	return nil
+}
+
+// Interpolate interpolates variables defined in both envMap and p.Env into the
+// pipeline.
+// More specifically, it does these things:
+//   - Copy tracing context keys from envMap into pipeline.Env.
+//   - Interpolate pipeline.Env and copy the results into envMap to apply later.
+//   - Interpolate any string value in the rest of the pipeline.
+func (p *Pipeline) Interpolate(envMap *env.Environment) error {
+	if envMap == nil {
+		envMap = env.New()
+	}
+
+	// Propagate distributed tracing context to the new pipelines if available
+	if tracing, has := envMap.Get(tracetools.EnvVarTraceContextKey); has {
+		if p.Env == nil {
+			p.Env = ordered.NewMap[string, string](1)
+		}
+		p.Env.Set(tracetools.EnvVarTraceContextKey, tracing)
+	}
+
+	// Preprocess any env that are defined in the top level block and place them
+	// into env for later interpolation into the rest of the pipeline.
+	if err := p.interpolateEnvBlock(envMap); err != nil {
+		return err
+	}
+
+	// Recursively go through the rest of the pipeline and perform environment
+	// variable interpolation on strings. Interpolation is performed in-place.
+	if err := interpolateSlice(envMap, p.Steps); err != nil {
+		return err
+	}
+	return interpolateMap(envMap, p.RemainingFields)
+}
+
+// interpolateEnvBlock runs interpolate.Interpolate on each pair in p.Env,
+// interpolating with the variables defined in envMap, and then adding the
+// results back into both p.Env and envMap. Each environment variable can
+// be interpolated into later environment variables, making the input ordering
+// of p.Env potentially important.
+func (p *Pipeline) interpolateEnvBlock(envMap *env.Environment) error {
+	return p.Env.Range(func(k, v string) error {
+		// We interpolate both keys and values.
+		intk, err := interpolate.Interpolate(envMap, k)
+		if err != nil {
+			return err
+		}
+
+		// v is always a string in this case.
+		intv, err := interpolate.Interpolate(envMap, v)
+		if err != nil {
+			return err
+		}
+
+		p.Env.Replace(k, intk, intv)
+
+		// Bonus part for the env block!
+		// Add the results back into envMap.
+		envMap.Set(intk, intv)
+		return nil
+	})
+}
+
+// Sign signs each signable part of the pipeline. Currently this is limited to
+// command steps (including command steps within group steps), including all
+// plugin configurations and the pipeline "env". Parts of the pipeline are
+// mutated directly, so an error part-way through may leave some steps
+// un-signed.
+func (p *Pipeline) Sign(key jwk.Key) error {
+	return p.Steps.sign(p.Env.ToMap(), key)
+}
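+
+// interpolatePipelineExample is an illustrative sketch added for this
+// document; it is not part of the upstream vendored file. The variable name
+// and value are assumptions for the example only. It shows the usual
+// Interpolate call after parsing.
+func interpolatePipelineExample(p *Pipeline) error {
+	envMap := env.New()
+	envMap.Set("NAME", "world")
+	// Replaces ${NAME} (and other supported expansions) throughout p.Env,
+	// p.Steps and any remaining top-level fields, in place.
+	return p.Interpolate(envMap)
+}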
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/plugin.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/plugin.go
new file mode 100644
index 0000000000..3c0ec4e47a
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/plugin.go
@@ -0,0 +1,58 @@
+package pipeline
+
+import (
+	"encoding/json"
+
+	"github.com/buildkite/interpolate"
+	"gopkg.in/yaml.v3"
+)
+
+var (
+	_ interface {
+		json.Marshaler
+		yaml.Marshaler
+		selfInterpolater
+	} = (*Plugin)(nil)
+)
+
+// Plugin models plugin configuration.
+//
+// Standard caveats apply - see the package comment.
+type Plugin struct {
+	Name   string
+	Config any
+}
+
+// MarshalJSON returns the plugin in "one-key object" form, or "single string"
+// form (no config, only plugin name).
+func (p *Plugin) MarshalJSON() ([]byte, error) {
+	// NB: MarshalYAML (as seen below) never returns an error.
+	o, _ := p.MarshalYAML()
+	return json.Marshal(o)
+}
+
+// MarshalYAML returns the plugin in either "one-item map" form, or "scalar"
+// form (no config, only plugin name).
+func (p *Plugin) MarshalYAML() (any, error) {
+	if p.Config == nil {
+		return p.Name, nil
+	}
+
+	return map[string]any{
+		p.Name: p.Config,
+	}, nil
+}
+
+func (p *Plugin) interpolate(env interpolate.Env) error {
+	name, err := interpolate.Interpolate(env, p.Name)
+	if err != nil {
+		return err
+	}
+	cfg, err := interpolateAny(env, p.Config)
+	if err != nil {
+		return err
+	}
+	p.Name = name
+	p.Config = cfg
+	return nil
+}
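+
+// pluginMarshalExample is an illustrative sketch added for this document; it
+// is not part of the upstream vendored file. The plugin name and config are
+// assumptions for the example only. It shows the two marshaled forms described
+// above.
+func pluginMarshalExample() {
+	bare := &Plugin{Name: "docker#v5.0.0"}
+	b1, _ := bare.MarshalJSON() // "docker#v5.0.0" (single string form)
+
+	full := &Plugin{Name: "docker#v5.0.0", Config: map[string]any{"image": "alpine"}}
+	b2, _ := full.MarshalJSON() // {"docker#v5.0.0":{"image":"alpine"}} (one-key object form)
+
+	_, _ = b1, b2
+}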
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/plugins.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/plugins.go
new file mode 100644
index 0000000000..7a9111072a
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/plugins.go
@@ -0,0 +1,80 @@
+package pipeline
+
+import (
+	"fmt"
+
+	"github.com/buildkite/agent/v3/internal/ordered"
+)
+
+// Plugins is a sequence of plugins. It is useful for unmarshaling.
+type Plugins []*Plugin
+
+// UnmarshalOrdered unmarshals Plugins from either
+//   - []any - originally a sequence of "one-item mappings" (normal form), or
+//   - *ordered.MapSA - a mapping (where order is important...non-normal form).
+//
+// "plugins" is supposed to be a sequence of one-item maps, since order matters.
+// But some people (even us) write plugins into one big mapping and rely on
+// order preservation.
+func (p *Plugins) UnmarshalOrdered(o any) error {
+	// Whether processing one big map, or a sequence of small maps, the central
+	// part remains the same.
+	// Parse each "key: value" as "name: config", then append in order.
+	unmarshalMap := func(m *ordered.MapSA) error {
+		return m.Range(func(k string, v any) error {
+			*p = append(*p, &Plugin{
+				Name:   k,
+				Config: v,
+			})
+			return nil
+		})
+	}
+
+	switch o := o.(type) {
+	case []any:
+		for _, c := range o {
+			switch ct := c.(type) {
+			case *ordered.MapSA:
+				// Typical case:
+				//
+				// plugins:
+				//   - plugin#1.0.0:
+				//       config: config, etc
+				if err := unmarshalMap(ct); err != nil {
+					return err
+				}
+
+			case string:
+				// Less typical, but supported:
+				//
+				// plugins:
+				//   - plugin#1.0.0
+				// (no config, only plugin)
+				*p = append(*p, &Plugin{
+					Name:   ct,
+					Config: nil,
+				})
+
+			default:
+				return fmt.Errorf("unmarshaling plugins: plugin type %T, want *ordered.Map[string, any] or string", c)
+			}
+		}
+
+	case *ordered.MapSA:
+		// Legacy form:
+		//
+		// plugins:
+		//   plugin#1.0.0:
+		//     config: config, etc
+		//   otherplugin#2.0.0:
+		//     etc
+		if err := unmarshalMap(o); err != nil {
+			return err
+		}
+
+	default:
+		return fmt.Errorf("unmarshaling plugins: got %T, want []any or *ordered.Map[string, any]", o)
+
+	}
+	return nil
+}
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/sign.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/sign.go
new file mode 100644
index 0000000000..0a64e651af
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/sign.go
@@ -0,0 +1,207 @@
+package pipeline
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+
+	"github.com/lestrrat-go/jwx/v2/jwk"
+	"github.com/lestrrat-go/jwx/v2/jws"
+)
+
+// EnvNamespacePrefix is the string that prefixes all fields in the "env"
+// namespace. This is used to separate signed data that came from the
+// environment from data that came from an object.
+const EnvNamespacePrefix = "env::"
+
+// Signature models a signature (on a step, etc).
+type Signature struct {
+	Algorithm    string   `json:"algorithm" yaml:"algorithm"`
+	SignedFields []string `json:"signed_fields" yaml:"signed_fields"`
+	Value        string   `json:"value" yaml:"value"`
+}
+
+// Sign computes a new signature for an environment (env) combined with an
+// object containing values (sf) using a given key.
+func Sign(env map[string]string, sf SignedFielder, key jwk.Key) (*Signature, error) {
+	values, err := sf.SignedFields()
+	if err != nil {
+		return nil, err
+	}
+	if len(values) == 0 {
+		return nil, errors.New("no fields to sign")
+	}
+
+	// Namespace the env values.
+	env = prefixKeys(env, EnvNamespacePrefix)
+
+	// NB: env overrides values from sf. This may seem backwards but it is
+	// our documented behaviour:
+	// https://buildkite.com/docs/pipelines/environment-variables#defining-your-own
+	// This override is handled by mapUnion.
+
+	// Ensure this part writes the same data to the payload as in Verify...
+	payload := &bytes.Buffer{}
+	writeLengthPrefixed(payload, key.Algorithm().String())
+	fields := writeFields(payload, mapUnion(values, env))
+	// ...end
+
+	sig, err := jws.Sign(nil,
+		jws.WithKey(key.Algorithm(), key),
+		jws.WithDetachedPayload(payload.Bytes()),
+		jws.WithCompact(),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Signature{
+		Algorithm:    key.Algorithm().String(),
+		SignedFields: fields,
+		Value:        string(sig),
+	}, nil
+}
+
+// Verify verifies an existing signature against environment (env) combined with
+// an object containing values (sf) using keys from a keySet.
+func (s *Signature) Verify(env map[string]string, sf SignedFielder, keySet jwk.Set) error {
+	if len(s.SignedFields) == 0 {
+		return errors.New("signature covers no fields")
+	}
+
+	// Ask the object for all fields, including env:: (which may be overridden
+	// by the pipeline env).
+	values, err := sf.ValuesForFields(s.SignedFields)
+	if err != nil {
+		return fmt.Errorf("obtaining values for fields: %w", err)
+	}
+
+	// env:: fields that were signed are all required in the env map.
+	// We can't verify other env vars though - they can vary for lots of reasons
+	// (e.g. Buildkite-provided vars added by the backend.)
+	// This is still strong enough for a user to enforce any particular env var
+	// exists and has a particular value - make it a part of the pipeline or
+	// step env.
+	envVars := filterPrefix(s.SignedFields, EnvNamespacePrefix)
+	env, err = requireKeys(prefixKeys(env, EnvNamespacePrefix), envVars)
+	if err != nil {
+		return fmt.Errorf("obtaining values for env vars: %w", err)
+	}
+
+	// Ensure this part writes the same data to the payload as in Sign...
+	payload := &bytes.Buffer{}
+	writeLengthPrefixed(payload, s.Algorithm)
+	writeFields(payload, mapUnion(values, env))
+	// ...end
+
+	_, err = jws.Verify([]byte(s.Value),
+		jws.WithKeySet(keySet),
+		jws.WithDetachedPayload(payload.Bytes()),
+	)
+	return err
+}
+
+// SignedFielder describes types that can be signed and have signatures
+// verified.
+// Converting non-string fields into strings (in a stable, canonical way) is an
+// exercise left to the implementer.
+type SignedFielder interface {
+	// SignedFields returns the default set of fields to sign, and their values.
+	// This is called by Sign.
+	SignedFields() (map[string]string, error)
+
+	// ValuesForFields looks up each field and produces a map of values. This is
+	// called by Verify. The set of fields might differ from the default, e.g.
+	// when verifying older signatures computed with fewer fields or deprecated
+	// field names. SignedFielder implementations should reject requests for
+	// values if "mandatory" fields are missing (e.g. signing a command step
+	// should always sign the command).
+	ValuesForFields([]string) (map[string]string, error)
+}
+
+// writeFields writes the values (length-prefixed) into h. It also returns the
+// sorted field names it got from values (so that Sign doesn't end up extracting
+// them twice). It assumes writes to h never error (this is true of
+// bytes.Buffer, strings.Builder, and most hash.Hash implementations).
+// Passing a nil or empty map results in length 0 and no items being written.
+func writeFields(h io.Writer, values map[string]string) []string {
+	// Extract the field names and sort them.
+	fields := make([]string, 0, len(values))
+	for f := range values {
+		fields = append(fields, f)
+	}
+	sort.Strings(fields)
+
+	// If we blast strings at Write, then you could get the same hash for
+	// different fields that happen to have the same data when concatenated.
+	// So write length-prefixed fields, and length-prefix the whole map.
+	binary.Write(h, binary.LittleEndian, uint32(len(fields)))
+	for _, f := range fields {
+		writeLengthPrefixed(h, f)
+		writeLengthPrefixed(h, values[f])
+	}
+
+	return fields
+}
+
+// writeLengthPrefixed writes a length-prefixed string to h. It assumes writes
+// to h never error (this is true of bytes.Buffer, strings.Builder, and most
+// hash.Hash implementations).
+func writeLengthPrefixed(h io.Writer, s string) {
+	binary.Write(h, binary.LittleEndian, uint32(len(s)))
+	h.Write([]byte(s))
+}
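+
+// payloadShapeExample is an illustrative sketch added for this document; it is
+// not part of the upstream vendored file. The field names are assumptions for
+// the example only. It shows why length-prefixing matters: the two maps below
+// concatenate to the same bytes, but produce different payloads.
+func payloadShapeExample() bool {
+	p1 := &bytes.Buffer{}
+	writeFields(p1, map[string]string{"ab": "c"})
+
+	p2 := &bytes.Buffer{}
+	writeFields(p2, map[string]string{"a": "bc"})
+
+	return bytes.Equal(p1.Bytes(), p2.Bytes()) // false
+}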
+
+// prefixKeys returns a copy of a map with each key prefixed with a prefix.
+func prefixKeys[V any, M ~map[string]V](in M, prefix string) M {
+	out := make(M, len(in))
+	for k, v := range in {
+		out[prefix+k] = v
+	}
+	return out
+}
+
+// filterPrefix returns values from the slice having the prefix.
+func filterPrefix(in []string, prefix string) []string {
+	out := make([]string, 0, len(in))
+	for _, s := range in {
+		if strings.HasPrefix(s, prefix) {
+			out = append(out, s)
+		}
+	}
+	return out
+}
+
+// requireKeys returns a copy of a map containing only keys from a []string.
+// An error is returned if any keys are missing from the map.
+func requireKeys[K comparable, V any, M ~map[K]V](in M, keys []K) (M, error) {
+	out := make(M, len(keys))
+	for _, k := range keys {
+		v, ok := in[k]
+		if !ok {
+			return nil, fmt.Errorf("missing key %v", k)
+		}
+		out[k] = v
+	}
+	return out, nil
+}
+
+// mapUnion returns a new map with all elements from the given maps.
+// In case of key collisions, the last-most map containing the key "wins".
+func mapUnion[K comparable, V any, M ~map[K]V](ms ...M) M {
+	s := 0
+	for _, m := range ms {
+		s += len(m)
+	}
+	u := make(M, s)
+	for _, m := range ms {
+		for k, v := range m {
+			u[k] = v
+		}
+	}
+	return u
+}
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/step.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step.go
new file mode 100644
index 0000000000..0c1e44d73e
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step.go
@@ -0,0 +1,13 @@
+package pipeline
+
+// Step models a step in the pipeline. It will be a pointer to one of:
+// - CommandStep
+// - WaitStep
+// - InputStep
+// - TriggerStep
+// - GroupStep
+type Step interface {
+	stepTag() // allow only the step types below
+
+	selfInterpolater
+}
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_command.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_command.go
new file mode 100644
index 0000000000..83d2dcc938
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_command.go
@@ -0,0 +1,170 @@
+package pipeline
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/buildkite/agent/v3/internal/ordered"
+	"github.com/buildkite/interpolate"
+)
+
+var _ SignedFielder = (*CommandStep)(nil)
+
+// CommandStep models a command step.
+//
+// Standard caveats apply - see the package comment.
+type CommandStep struct {
+	Command   string            `yaml:"command"`
+	Plugins   Plugins           `yaml:"plugins,omitempty"`
+	Env       map[string]string `yaml:"env,omitempty"`
+	Signature *Signature        `yaml:"signature,omitempty"`
+	Matrix    any               `yaml:"matrix,omitempty"`
+
+	// RemainingFields stores any other top-level mapping items so they at least
+	// survive an unmarshal-marshal round-trip.
+	RemainingFields map[string]any `yaml:",inline"`
+}
+
+// MarshalJSON marshals the step to JSON. Special handling is needed because
+// yaml.v3 has "inline" but encoding/json has no concept of it.
+func (c *CommandStep) MarshalJSON() ([]byte, error) {
+	return inlineFriendlyMarshalJSON(c)
+}
+
+// UnmarshalOrdered unmarshals a command step from an ordered map.
+func (c *CommandStep) UnmarshalOrdered(src any) error {
+	type wrappedCommand CommandStep
+	// Unmarshal into this secret type, then process special fields specially.
+	fullCommand := new(struct {
+		Command  []string `yaml:"command"`
+		Commands []string `yaml:"commands"`
+
+		// Use inline trickery to capture the rest of the struct.
+		Rem *wrappedCommand `yaml:",inline"`
+	})
+	fullCommand.Rem = (*wrappedCommand)(c)
+	if err := ordered.Unmarshal(src, fullCommand); err != nil {
+		return fmt.Errorf("unmarshalling CommandStep: %w", err)
+	}
+
+	// Normalise cmds into one single command string.
+	// This makes signing easier later on - it's easier to hash one
+	// string consistently than it is to pick apart multiple strings
+	// in a consistent way in order to hash all of them
+	// consistently.
+	cmds := append(fullCommand.Command, fullCommand.Commands...)
+	c.Command = strings.Join(cmds, "\n")
+	return nil
+}
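+
+// commandsExample is an illustrative sketch added for this document; it is not
+// part of the upstream vendored file. The step contents are assumptions for
+// the example only, and it assumes ordered.Unmarshal decodes the sequence into
+// the []string field the same way the YAML path does. It shows "commands"
+// being joined into a single Command string.
+func commandsExample() (*CommandStep, error) {
+	src := ordered.NewMap[string, any](1)
+	src.Set("commands", []any{"make", "make test"})
+
+	c := new(CommandStep)
+	if err := c.UnmarshalOrdered(src); err != nil {
+		return nil, err
+	}
+	// c.Command is now "make\nmake test".
+	return c, nil
+}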
+
+// SignedFields returns the default fields for signing.
+func (c *CommandStep) SignedFields() (map[string]string, error) {
+	plugins := ""
+	if len(c.Plugins) > 0 {
+		// TODO: Reconsider using JSON here - is it stable enough?
+		pj, err := json.Marshal(c.Plugins)
+		if err != nil {
+			return nil, err
+		}
+		plugins = string(pj)
+	}
+	out := map[string]string{
+		"command": c.Command,
+		"plugins": plugins,
+	}
+	// Steps can have their own env. These can be overridden by the pipeline!
+	for e, v := range c.Env {
+		out[EnvNamespacePrefix+e] = v
+	}
+	return out, nil
+}
+
+// ValuesForFields returns the contents of fields to sign.
+func (c *CommandStep) ValuesForFields(fields []string) (map[string]string, error) {
+	// Make a set of required fields. As fields is processed, mark them off by
+	// deleting them.
+	required := map[string]struct{}{
+		"command": {},
+		"plugins": {},
+	}
+	// Env vars that the step has, but the pipeline doesn't have, are required.
+	// But we don't know what the pipeline has without passing it in, so treat
+	// all step env vars as required.
+	for e := range c.Env {
+		required[EnvNamespacePrefix+e] = struct{}{}
+	}
+
+	out := make(map[string]string, len(fields))
+	for _, f := range fields {
+		delete(required, f)
+
+		switch f {
+		case "command":
+			out["command"] = c.Command
+
+		case "plugins":
+			if len(c.Plugins) == 0 {
+				out["plugins"] = ""
+				break
+			}
+			// TODO: Reconsider using JSON here - is it stable enough?
+			val, err := json.Marshal(c.Plugins)
+			if err != nil {
+				return nil, err
+			}
+			out["plugins"] = string(val)
+
+		default:
+			if e, has := strings.CutPrefix(f, EnvNamespacePrefix); has {
+				// Env vars requested in `fields`, but are not in this step, are
+				// skipped.
+				if v, ok := c.Env[e]; ok {
+					out[f] = v
+				}
+				break
+			}
+
+			return nil, fmt.Errorf("unknown or unsupported field for signing %q", f)
+		}
+	}
+
+	if len(required) > 0 {
+		missing := make([]string, 0, len(required))
+		for k := range required {
+			missing = append(missing, k)
+		}
+		return nil, fmt.Errorf("one or more required fields are not present: %v", missing)
+	}
+	return out, nil
+}
+
+func (c *CommandStep) interpolate(env interpolate.Env) error {
+	cmd, err := interpolate.Interpolate(env, c.Command)
+	if err != nil {
+		return err
+	}
+
+	if err := interpolateSlice(env, c.Plugins); err != nil {
+		return err
+	}
+
+	if err := interpolateMap(env, c.Env); err != nil {
+		return err
+	}
+
+	// NB: Do not interpolate Signature.
+
+	if c.Matrix, err = interpolateAny(env, c.Matrix); err != nil {
+		return err
+	}
+
+	if err := interpolateMap(env, c.RemainingFields); err != nil {
+		return err
+	}
+
+	c.Command = cmd
+	return nil
+}
+
+func (CommandStep) stepTag() {}
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_group.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_group.go
new file mode 100644
index 0000000000..91b7690dd3
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_group.go
@@ -0,0 +1,58 @@
+package pipeline
+
+import (
+	"fmt"
+
+	"github.com/buildkite/agent/v3/internal/ordered"
+	"github.com/buildkite/interpolate"
+)
+
+// GroupStep models a group step.
+//
+// Standard caveats apply - see the package comment.
+type GroupStep struct {
+	// Group is typically a key with no value. Since it must always exist in
+	// a group step, here it is.
+	Group *string `yaml:"group"`
+
+	Steps Steps `yaml:"steps"`
+
+	// RemainingFields stores any other top-level mapping items so they at least
+	// survive an unmarshal-marshal round-trip.
+	RemainingFields map[string]any `yaml:",inline"`
+}
+
+// UnmarshalOrdered unmarshals a group step from an ordered map.
+func (g *GroupStep) UnmarshalOrdered(src any) error {
+	type wrappedGroup GroupStep
+	if err := ordered.Unmarshal(src, (*wrappedGroup)(g)); err != nil {
+		return fmt.Errorf("unmarshalling GroupStep: %w", err)
+	}
+
+	// Ensure Steps is never nil. Server side expects a sequence.
+	if g.Steps == nil {
+		g.Steps = Steps{}
+	}
+	return nil
+}
+
+func (g *GroupStep) interpolate(env interpolate.Env) error {
+	grp, err := interpolateAny(env, g.Group)
+	if err != nil {
+		return err
+	}
+	g.Group = grp
+
+	if err := g.Steps.interpolate(env); err != nil {
+		return err
+	}
+	return interpolateMap(env, g.RemainingFields)
+}
+
+func (GroupStep) stepTag() {}
+
+// MarshalJSON marshals the step to JSON. Special handling is needed because
+// yaml.v3 has "inline" but encoding/json has no concept of it.
+func (g *GroupStep) MarshalJSON() ([]byte, error) {
+	return inlineFriendlyMarshalJSON(g)
+}
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_input.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_input.go
new file mode 100644
index 0000000000..ac2b24d1f9
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_input.go
@@ -0,0 +1,36 @@
+package pipeline
+
+import (
+	"encoding/json"
+	"errors"
+
+	"github.com/buildkite/interpolate"
+)
+
+// See the comment in step_scalar.go.
+
+// InputStep models a block or input step.
+//
+// Standard caveats apply - see the package comment.
+type InputStep struct {
+	Scalar   string         `yaml:"-"`
+	Contents map[string]any `yaml:",inline"`
+}
+
+func (s *InputStep) MarshalJSON() ([]byte, error) {
+	if s.Scalar != "" {
+		return json.Marshal(s.Scalar)
+	}
+
+	if len(s.Contents) == 0 {
+		return []byte{}, errors.New("empty input step")
+	}
+
+	return json.Marshal(s.Contents)
+}
+
+func (s InputStep) interpolate(env interpolate.Env) error {
+	return interpolateMap(env, s.Contents)
+}
+
+func (*InputStep) stepTag() {}
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_scalar.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_scalar.go
new file mode 100644
index 0000000000..721ac0254f
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_scalar.go
@@ -0,0 +1,41 @@
+package pipeline
+
+import "fmt"
+
+// In the Buildkite pipeline YAML, some step types (broadly: wait steps, input steps and block steps) can be represented
+// either by a scalar string (i.e. "wait") or by a mapping with keys and values and such.
+//
+// This behaviour is difficult to model cleanly in Go, which leads to the somewhat odd structure of the structs defined
+// in this file - each type (WaitStep, InputStep) has a Scalar field which is set to the scalar value if, during
+// pipeline parsing, the step was represented as a scalar, and is left empty if the step was represented as a mapping.
+// In essence, if one of the fields of these structs is filled, the other should be empty.
+//
+// On the unmarshalling side, the differing forms are handled by the steps parser - see the unmarshalStep() function in
+// ./steps.go - it infers the underlying type of the thing it's unmarshalling, and if it's a string, calls NewScalarStep()
+// (below) to create the appropriate struct. If it's a mapping, it creates the appropriate struct directly.
+//
+// On the marshalling side, the MarshalJSON() function on each struct handles the different cases. In general, if the
+// Scalar field is set, it marshals that; otherwise it marshals the other fields.
+//
+// You may have noticed that three step types can be represented as a scalar, but only two structs are defined here.
+// This is because the third type, block steps, are represented in exactly the same way as input steps, so they can
+// share the same struct. This is liable to change in the future, as conceptually they're different types, and it
+// makes sense to have them as different types in Go as well.
+//
+// Also! The implementations for WaitStep and InputStep are **almost**, but not quite, identical. This is due to the
+// behaviour of marshalling an empty struct into JSON. For WaitStep, it makes sense that the empty &WaitStep{} struct
+// marshals to "wait", but with InputStep, there's no way to tell whether it should be marshalled to "input" or
+// "block", which have very different behaviour on the backend.
+
+var validStepScalars = []string{"wait", "waiter", "block", "input", "manual"}
+
+func NewScalarStep(s string) (Step, error) {
+	switch s {
+	case "wait", "waiter":
+		return &WaitStep{Scalar: s}, nil
+	case "block", "input", "manual":
+		return &InputStep{Scalar: s}, nil
+	default:
+		return nil, fmt.Errorf("unmarshaling step: unsupported scalar step type %q, want one of %v", s, validStepScalars)
+	}
+}
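+
+// scalarStepExample is an illustrative sketch added for this document; it is
+// not part of the upstream vendored file. It shows the mapping from scalar
+// strings to step types.
+func scalarStepExample() {
+	w, _ := NewScalarStep("wait")    // returns &WaitStep{Scalar: "wait"}
+	b, _ := NewScalarStep("block")   // returns &InputStep{Scalar: "block"}
+	_, err := NewScalarStep("pause") // returns an "unsupported scalar step type" error
+	_, _, _ = w, b, err
+}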
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_trigger.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_trigger.go
new file mode 100644
index 0000000000..02349afe49
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_trigger.go
@@ -0,0 +1,25 @@
+package pipeline
+
+import (
+	"encoding/json"
+
+	"github.com/buildkite/interpolate"
+)
+
+// TriggerStep models a trigger step.
+//
+// Standard caveats apply - see the package comment.
+type TriggerStep struct {
+	Contents map[string]any `yaml:",inline"`
+}
+
+// MarshalJSON marshals the contents of the step.
+func (t TriggerStep) MarshalJSON() ([]byte, error) {
+	return json.Marshal(t.Contents)
+}
+
+func (s TriggerStep) interpolate(env interpolate.Env) error {
+	return interpolateMap(env, s.Contents)
+}
+
+func (TriggerStep) stepTag() {}
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_unknown.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_unknown.go
new file mode 100644
index 0000000000..6421fc746a
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_unknown.go
@@ -0,0 +1,42 @@
+package pipeline
+
+import (
+	"encoding/json"
+
+	"github.com/buildkite/interpolate"
+)
+
+// UnknownStep models any step we don't know how to represent in this version.
+// When future step types are added, they should be parsed with more specific
+// types. UnknownStep is present to allow older parsers to preserve newer
+// pipelines.
+type UnknownStep struct {
+	Contents any
+}
+
+// MarshalJSON marshals the contents of the step.
+func (u UnknownStep) MarshalJSON() ([]byte, error) {
+	return json.Marshal(u.Contents)
+}
+
+// MarshalYAML returns the contents of the step.
+func (u UnknownStep) MarshalYAML() (any, error) {
+	return u.Contents, nil
+}
+
+// UnmarshalOrdered unmarshals an unknown step.
+func (u *UnknownStep) UnmarshalOrdered(src any) error {
+	u.Contents = src
+	return nil
+}
+
+func (u *UnknownStep) interpolate(env interpolate.Env) error {
+	c, err := interpolateAny(env, u.Contents)
+	if err != nil {
+		return err
+	}
+	u.Contents = c
+	return nil
+}
+
+func (UnknownStep) stepTag() {}
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_wait.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_wait.go
new file mode 100644
index 0000000000..99a510dbab
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/step_wait.go
@@ -0,0 +1,37 @@
+package pipeline
+
+import (
+	"encoding/json"
+
+	"github.com/buildkite/interpolate"
+)
+
+// See the comment in step_scalar.go.
+
+// WaitStep models a wait step.
+//
+// Standard caveats apply - see the package comment.
+type WaitStep struct {
+	Scalar   string         `yaml:"-"`
+	Contents map[string]any `yaml:",inline"`
+}
+
+// MarshalJSON marshals a wait step as its scalar value if one is set, as
+// "wait" if the step is otherwise empty, or as its remaining (inline) fields.
+func (s *WaitStep) MarshalJSON() ([]byte, error) {
+	if s.Scalar != "" {
+		return json.Marshal(s.Scalar)
+	}
+
+	if len(s.Contents) == 0 {
+		return json.Marshal("wait")
+	}
+
+	return json.Marshal(s.Contents)
+}
+
+func (s *WaitStep) interpolate(env interpolate.Env) error {
+	return interpolateMap(env, s.Contents)
+}
+
+func (*WaitStep) stepTag() {}
diff --git a/vendor/github.com/buildkite/agent/v3/internal/pipeline/steps.go b/vendor/github.com/buildkite/agent/v3/internal/pipeline/steps.go
new file mode 100644
index 0000000000..74d732c42c
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/internal/pipeline/steps.go
@@ -0,0 +1,171 @@
+package pipeline
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/buildkite/agent/v3/internal/ordered"
+	"github.com/buildkite/interpolate"
+	"github.com/lestrrat-go/jwx/v2/jwk"
+)
+
+var errSigningRefusedUnknownStepType = errors.New("refusing to sign pipeline containing a step of unknown type, because the pipeline could be incorrectly parsed - please contact support")
+
+// Steps contains multiple steps. It is useful for unmarshaling step sequences,
+// since it has custom logic for determining the correct step type.
+type Steps []Step
+
+// UnmarshalOrdered unmarshals a slice ([]any) into a slice of steps.
+func (s *Steps) UnmarshalOrdered(o any) error {
+	if o == nil {
+		if *s == nil {
+			// `steps: null` is normalised to an empty slice.
+			*s = Steps{}
+		}
+		return nil
+	}
+	sl, ok := o.([]any)
+	if !ok {
+		return fmt.Errorf("unmarshaling steps: got %T, want a slice ([]any)", o)
+	}
+	// Preallocate slice if not already allocated
+	if *s == nil {
+		*s = make(Steps, 0, len(sl))
+	}
+	for _, st := range sl {
+		step, err := unmarshalStep(st)
+		if err != nil {
+			return err
+		}
+		*s = append(*s, step)
+	}
+	return nil
+}
+
+func (s Steps) interpolate(env interpolate.Env) error {
+	return interpolateSlice(env, s)
+}
+
+// unmarshalStep unmarshals into the right kind of Step.
+func unmarshalStep(o any) (Step, error) {
+	switch o := o.(type) {
+	case string:
+		step, err := NewScalarStep(o)
+		if err != nil {
+			return &UnknownStep{Contents: o}, nil
+		}
+		return step, nil
+
+	case *ordered.MapSA:
+		return stepFromMap(o)
+
+	default:
+		return nil, fmt.Errorf("unmarshaling step: unsupported type %T", o)
+	}
+}
+
+func stepFromMap(o *ordered.MapSA) (Step, error) {
+	sType, hasType := o.Get("type")
+
+	var step Step
+	var err error
+	if hasType {
+		sTypeStr, ok := sType.(string)
+		if !ok {
+			return nil, fmt.Errorf("unmarshaling step: step's `type` key was %T (value %v), want string", sType, sType)
+		}
+
+		step, err = stepByType(sTypeStr)
+		if err != nil {
+			return nil, fmt.Errorf("unmarshaling step: %w", err)
+		}
+	} else {
+		step, err = stepByKeyInference(o)
+		if err != nil {
+			return nil, fmt.Errorf("unmarshaling step: %w", err)
+		}
+	}
+
+	// Decode the step (into the right step type).
+	if err := ordered.Unmarshal(o, step); err != nil {
+		// Hmm, maybe we picked the wrong kind of step?
+		return &UnknownStep{Contents: o}, nil
+	}
+	return step, nil
+}
+
+func stepByType(sType string) (Step, error) {
+	switch sType {
+	case "command", "script":
+		return new(CommandStep), nil
+	case "wait", "waiter":
+		return &WaitStep{Contents: map[string]any{}}, nil
+	case "block", "input", "manual":
+		return &InputStep{Contents: map[string]any{}}, nil
+	case "trigger":
+		return new(TriggerStep), nil
+	case "group": // as far as I know this doesn't happen, but it's here for completeness
+		return new(GroupStep), nil
+	default:
+		return nil, fmt.Errorf("unknown step type %q", sType)
+	}
+}
+
+func stepByKeyInference(o *ordered.MapSA) (Step, error) {
+	switch {
+	case o.Contains("command") || o.Contains("commands") || o.Contains("plugins"):
+		// NB: Some "command" steps are commandless containers that exist
+		// just to run plugins!
+		return new(CommandStep), nil
+
+	case o.Contains("wait") || o.Contains("waiter"):
+		return new(WaitStep), nil
+
+	case o.Contains("block") || o.Contains("input") || o.Contains("manual"):
+		return new(InputStep), nil
+
+	case o.Contains("trigger"):
+		return new(TriggerStep), nil
+
+	case o.Contains("group"):
+		return new(GroupStep), nil
+
+	default:
+		return new(UnknownStep), nil
+	}
+}
+
+// sign adds signatures to each command step (and recursively to any command
+// steps that are within group steps). The steps are mutated directly, so an
+// error part-way through may leave some steps un-signed.
+func (s Steps) sign(env map[string]string, key jwk.Key) error {
+	for _, step := range s {
+		switch step := step.(type) {
+		case *CommandStep:
+			if step.Matrix != nil {
+				// Don't sign matrix steps... yet
+				continue
+			}
+
+			sig, err := Sign(env, step, key)
+			if err != nil {
+				return fmt.Errorf("signing step with command %q: %w", step.Command, err)
+			}
+			step.Signature = sig
+
+		case *GroupStep:
+			if err := step.Steps.sign(env, key); err != nil {
+				return fmt.Errorf("signing group step: %w", err)
+			}
+
+		case *UnknownStep:
+			// Presence of an unknown step means we're missing some semantic
+			// information about the pipeline. We could be not signing something
+			// that needs signing. Rather than deferring the problem (so that
+			// signature verification fails when an agent runs jobs) we return
+			// an error now.
+			return errSigningRefusedUnknownStepType
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/buildkite/agent/v3/logger/buffer.go b/vendor/github.com/buildkite/agent/v3/logger/buffer.go
index 9aa3b4f8fc..84480b7517 100644
--- a/vendor/github.com/buildkite/agent/v3/logger/buffer.go
+++ b/vendor/github.com/buildkite/agent/v3/logger/buffer.go
@@ -2,11 +2,13 @@ package logger
 
 import (
 	"fmt"
+	"sync"
 )
 
 // Buffer is a Logger implementation intended for testing;
 // messages are stored internally.
 type Buffer struct {
+	mu       sync.Mutex
 	Messages []string
 }
 
@@ -20,21 +22,33 @@ func NewBuffer() *Buffer {
 }
 
 func (b *Buffer) Debug(format string, v ...any) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
 	b.Messages = append(b.Messages, "[debug] "+fmt.Sprintf(format, v...))
 }
 func (b *Buffer) Error(format string, v ...any) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
 	b.Messages = append(b.Messages, "[error] "+fmt.Sprintf(format, v...))
 }
 func (b *Buffer) Fatal(format string, v ...any) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
 	b.Messages = append(b.Messages, "[fatal] "+fmt.Sprintf(format, v...))
 }
 func (b *Buffer) Notice(format string, v ...any) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
 	b.Messages = append(b.Messages, "[notice] "+fmt.Sprintf(format, v...))
 }
 func (b *Buffer) Warn(format string, v ...any) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
 	b.Messages = append(b.Messages, "[warn] "+fmt.Sprintf(format, v...))
 }
 func (b *Buffer) Info(format string, v ...any) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
 	b.Messages = append(b.Messages, "[info] "+fmt.Sprintf(format, v...))
 }
 func (b *Buffer) WithFields(fields ...Field) Logger {
diff --git a/vendor/github.com/buildkite/agent/v3/logger/log.go b/vendor/github.com/buildkite/agent/v3/logger/log.go
index 0147ad232d..f291bb26ea 100644
--- a/vendor/github.com/buildkite/agent/v3/logger/log.go
+++ b/vendor/github.com/buildkite/agent/v3/logger/log.go
@@ -14,6 +14,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/buildkite/agent/v3/version"
 	"golang.org/x/crypto/ssh/terminal"
 )
 
@@ -80,7 +81,11 @@ func (l *ConsoleLogger) SetLevel(level Level) {
 
 func (l *ConsoleLogger) Debug(format string, v ...any) {
 	if l.level == DEBUG {
-		l.printer.Print(DEBUG, fmt.Sprintf(format, v...), l.fields)
+		debugFields := make(Fields, len(l.fields), len(l.fields)+2)
+		copy(debugFields, l.fields)
+		debugFields.Add(StringField("agent_version", version.Version()))
+		debugFields.Add(StringField("agent_build", version.BuildVersion()))
+		l.printer.Print(DEBUG, fmt.Sprintf(format, v...), debugFields)
 	}
 }
 
diff --git a/vendor/github.com/buildkite/agent/v3/tracetools/doc.go b/vendor/github.com/buildkite/agent/v3/tracetools/doc.go
new file mode 100644
index 0000000000..6900422e3f
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/tracetools/doc.go
@@ -0,0 +1,5 @@
+// Package tracetools provides an abstraction across tracing systems
+// (OpenTelemetry, DataDog).
+//
+// It is intended for internal use by buildkite-agent only.
+package tracetools
diff --git a/vendor/github.com/buildkite/agent/v3/tracetools/propagate.go b/vendor/github.com/buildkite/agent/v3/tracetools/propagate.go
new file mode 100644
index 0000000000..1f8bc27d97
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/tracetools/propagate.go
@@ -0,0 +1,55 @@
+package tracetools
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/gob"
+
+	"github.com/opentracing/opentracing-go"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+// EnvVarTraceContextKey is the env var key that will be used to store/retrieve the
+// encoded trace context information into env var maps.
+const EnvVarTraceContextKey = "BUILDKITE_TRACE_CONTEXT"
+
+// EncodeTraceContext will serialize and encode tracing data into a string and place
+// it into the given env vars map.
+func EncodeTraceContext(span opentracing.Span, env map[string]string) error {
+	textmap := tracer.TextMapCarrier{}
+	if err := span.Tracer().Inject(span.Context(), opentracing.TextMap, &textmap); err != nil {
+		return err
+	}
+
+	buf := bytes.NewBuffer([]byte{})
+	enc := gob.NewEncoder(buf)
+	if err := enc.Encode(textmap); err != nil {
+		return err
+	}
+
+	env[EnvVarTraceContextKey] = base64.URLEncoding.EncodeToString(buf.Bytes())
+	return nil
+}
+
+// DecodeTraceContext will decode, deserialize, and extract the tracing data from the
+// given env var map.
+func DecodeTraceContext(env map[string]string) (opentracing.SpanContext, error) {
+	s, has := env[EnvVarTraceContextKey]
+	if !has {
+		return nil, opentracing.ErrSpanContextNotFound
+	}
+
+	contextBytes, err := base64.URLEncoding.DecodeString(s)
+	if err != nil {
+		return nil, err
+	}
+
+	buf := bytes.NewBuffer(contextBytes)
+	dec := gob.NewDecoder(buf)
+	textmap := opentracing.TextMapCarrier{}
+	if err := dec.Decode(&textmap); err != nil {
+		return nil, err
+	}
+
+	return opentracing.GlobalTracer().Extract(opentracing.TextMap, textmap)
+}
diff --git a/vendor/github.com/buildkite/agent/v3/tracetools/span.go b/vendor/github.com/buildkite/agent/v3/tracetools/span.go
new file mode 100644
index 0000000000..f966a55950
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/tracetools/span.go
@@ -0,0 +1,129 @@
+package tracetools
+
+import (
+	"context"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+	ddext "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+)
+
+const (
+	BackendDatadog       = "datadog"
+	BackendOpenTelemetry = "opentelemetry"
+	BackendNone          = ""
+)
+
+var ValidTracingBackends = map[string]struct{}{
+	BackendDatadog:       {},
+	BackendOpenTelemetry: {},
+	BackendNone:          {},
+}
+
+// StartSpanFromContext will start a span appropriate to the given tracing backend from the given context with the given
+// operation name. It will also do some common/repeated setup on the span to keep code a little more DRY.
+// If an unknown tracing backend is specified, it will return a span that no-ops on every operation.
+func StartSpanFromContext(ctx context.Context, operation string, tracingBackend string) (Span, context.Context) {
+	switch tracingBackend {
+	case BackendDatadog:
+		span, ctx := opentracing.StartSpanFromContext(ctx, operation)
+		span.SetTag(ddext.AnalyticsEvent, true) // Make the span available for analytics in Datadog
+		return NewOpenTracingSpan(span), ctx
+
+	case BackendOpenTelemetry:
+		ctx, span := otel.Tracer("buildkite-agent").Start(ctx, operation)
+		span.SetAttributes(attribute.String("analytics.event", "true"))
+		return &OpenTelemetrySpan{Span: span}, ctx
+
+	case BackendNone:
+		fallthrough
+
+	default:
+		return &NoopSpan{}, ctx
+	}
+}
+
+type Span interface {
+	AddAttributes(map[string]string)
+	FinishWithError(error)
+	RecordError(error)
+}
+
+type OpenTracingSpan struct {
+	Span opentracing.Span
+}
+
+func NewOpenTracingSpan(base opentracing.Span) *OpenTracingSpan {
+	return &OpenTracingSpan{Span: base}
+}
+
+// AddAttributes adds the given map of attributes to the span as OpenTracing tags
+func (s *OpenTracingSpan) AddAttributes(attributes map[string]string) {
+	for k, v := range attributes {
+		s.Span.SetTag(k, v)
+	}
+}
+
+// FinishWithError adds error information to the OpenTracingSpan if error isn't nil, and records the span as having finished
+func (s *OpenTracingSpan) FinishWithError(err error) {
+	s.RecordError(err)
+	s.Span.Finish()
+}
+
+// RecordError records an error on the given span
+func (s *OpenTracingSpan) RecordError(err error) {
+	if err == nil {
+		return
+	}
+
+	ext.LogError(s.Span, err)
+}
+
+type OpenTelemetrySpan struct {
+	Span trace.Span
+}
+
+func NewOpenTelemetrySpan(base trace.Span) *OpenTelemetrySpan {
+	return &OpenTelemetrySpan{Span: base}
+}
+
+// AddAttributes adds the given attributes to the OpenTelemetry span. Only string attributes are accepted.
+func (s *OpenTelemetrySpan) AddAttributes(attributes map[string]string) {
+	for k, v := range attributes {
+		s.Span.SetAttributes(attribute.String(k, v))
+	}
+}
+
+// FinishWithError adds error information to the OpenTelemetry span if error isn't nil, and records the span as having finished
+func (s *OpenTelemetrySpan) FinishWithError(err error) {
+	s.RecordError(err)
+	s.Span.End()
+}
+
+// RecordError records an error on the given OpenTelemetry span. No-op when error is nil
+func (s *OpenTelemetrySpan) RecordError(err error) {
+	if err == nil {
+		return
+	}
+
+	s.Span.RecordError(err)
+	s.Span.SetStatus(codes.Error, "failed")
+}
+
+// NoopSpan is an implementation of the Span interface that does nothing for every method implemented
+// The intended use case is for instances where the user doesn't have tracing enabled - using NoopSpan, we can still act
+// as though tracing is enabled, but every time we do something tracing related, nothing happens.
+type NoopSpan struct{}
+
+// AddAttributes is a noop
+func (s *NoopSpan) AddAttributes(attributes map[string]string) {}
+
+// FinishWithError is a noop
+func (s *NoopSpan) FinishWithError(err error) {}
+
+// RecordError is a noop
+func (s *NoopSpan) RecordError(err error) {}
diff --git a/vendor/github.com/buildkite/agent/v3/version/VERSION b/vendor/github.com/buildkite/agent/v3/version/VERSION
new file mode 100644
index 0000000000..426d048447
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/version/VERSION
@@ -0,0 +1 @@
+3.52.1
diff --git a/vendor/github.com/buildkite/agent/v3/version/version.go b/vendor/github.com/buildkite/agent/v3/version/version.go
new file mode 100644
index 0000000000..7496fc60f8
--- /dev/null
+++ b/vendor/github.com/buildkite/agent/v3/version/version.go
@@ -0,0 +1,35 @@
+// Package version provides the agent version strings.
+package version
+
+import (
+	_ "embed"
+	"runtime"
+	"strings"
+)
+
+// You can override buildVersion at compile time by using:
+//
+//  go run -ldflags "-X github.com/buildkite/agent/v3/version.buildVersion=abc" . --version
+//
+// On CI, the binaries are always built with the buildVersion variable set.
+//
+// Pre-release builds' versions must be in the format `x.y-beta`, `x.y-beta.z` or `x.y-beta.z.a`
+
+//go:embed VERSION
+var baseVersion string
+var buildVersion string
+
+func Version() string {
+	return strings.TrimSpace(baseVersion)
+}
+
+func BuildVersion() string {
+	if buildVersion == "" {
+		return "x"
+	}
+	return buildVersion
+}
+
+func UserAgent() string {
+	return "buildkite-agent/" + Version() + "." + BuildVersion() + " (" + runtime.GOOS + "; " + runtime.GOARCH + ")"
+}
diff --git a/vendor/github.com/buildkite/interpolate/LICENSE.txt b/vendor/github.com/buildkite/interpolate/LICENSE.txt
new file mode 100644
index 0000000000..951b6ced43
--- /dev/null
+++ b/vendor/github.com/buildkite/interpolate/LICENSE.txt
@@ -0,0 +1,24 @@
+# Buildkite Licence
+
+Copyright (c) 2014-2017 Buildkite Pty Ltd
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/buildkite/interpolate/README.md b/vendor/github.com/buildkite/interpolate/README.md
new file mode 100644
index 0000000000..ec2790efd4
--- /dev/null
+++ b/vendor/github.com/buildkite/interpolate/README.md
@@ -0,0 +1,61 @@
+Interpolate
+===========
+
+[![GoDoc](https://godoc.org/github.com/buildkite/interpolate?status.svg)](https://godoc.org/github.com/buildkite/interpolate)
+
+A golang library for parameter expansion (like `${BLAH}` or `$BLAH`) in strings from environment variables. An implementation of [POSIX Parameter Expansion](http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_06_02), plus some other basic operations that you'd expect in a shell scripting environment [like bash](https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html).
+
+## Installation
+
+```
+go get -u github.com/buildkite/interpolate
+```
+
+## Usage
+
+```go
+package main
+
+import (
+  "github.com/buildkite/interpolate"
+  "fmt"
+)
+
+func main() {
+	env := interpolate.NewSliceEnv([]string{
+		"HELLO_WORLD=🦀",
+	})
+
+	output, _ := interpolate.Interpolate(env, "Buildkite... ${HELLO_WORLD} ${ANOTHER_VAR:-🏖}")
+	fmt.Println(output)
+}
+
+// Output: Buildkite... 🦀 🏖
+
+```
+
+## Supported Expansions
+
+<dl>
+  <dt><code>${parameter}</code> or <code>$parameter</code></dt>
+  <dd><strong>Use value.</strong> If parameter is set, then it shall be substituted; otherwise it will be blank</dd>
+
+  <dt><code>${parameter:-<em>[word]</em>}</code></dt>
+  <dd><strong>Use default values.</strong> If parameter is unset or null, the expansion of word (or an empty string if word is omitted) shall be substituted; otherwise, the value of parameter shall be substituted.</dd>
+
+  <dt><code>${parameter-<em>[word]</em>}</code></dt>
+  <dd><strong>Use default values when not set.</strong> If parameter is unset, the expansion of word (or an empty string if word is omitted) shall be substituted; otherwise, the value of parameter shall be substituted.</dd>
+
+  <dt><code>${parameter:<em>[offset]</em>}</code></dt>
+  <dd><strong>Use the substring of parameter after offset.</strong> A negative offset must be separated from the colon with a space, and will select from the end of the string. If the value is out of bounds, an empty string will be substituted.</dd>
+
+  <dt><code>${parameter:<em>[offset]</em>:<em>[length]</em>}</code></dt>
+  <dd><strong>Use the substring of parameter after offset of given length.</strong> A negative offset must be separated from the colon with a space, and will select from the end of the string. If the offset is out of bounds, an empty string will be substituted. If the length is greater than the length of the remaining string, the remainder of the string from the offset will be substituted.</dd>
+
+  <dt><code>${parameter:?<em>[word]</em>}</code></dt>
+  <dd><strong>Indicate error if null or unset.</strong> If parameter is unset or null, the expansion of word (or a message indicating it is unset if word is omitted) shall be returned as an error.</dd>
+</dl>
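+
+For example, a short sketch (using only the `NewSliceEnv` and `Interpolate` functions shown in Usage above; the variable names are purely illustrative) exercising the default-value, substring and required expansions:
+
+```go
+package main
+
+import (
+  "fmt"
+
+  "github.com/buildkite/interpolate"
+)
+
+func main() {
+	env := interpolate.NewSliceEnv([]string{"DEPLOY_TARGET=production"})
+
+	// Default values: TIMEOUT is unset, so the fallback is used.
+	timeout, _ := interpolate.Interpolate(env, "${TIMEOUT:-30}")
+
+	// Substrings: the first four characters of DEPLOY_TARGET.
+	short, _ := interpolate.Interpolate(env, "${DEPLOY_TARGET:0:4}")
+
+	// Required values: an error is returned when API_KEY is unset.
+	_, err := interpolate.Interpolate(env, "${API_KEY?is required}")
+
+	fmt.Println(timeout, short, err)
+}
+
+// Output: 30 prod $API_KEY: is required
+```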
+
+## License
+
+Licensed under the MIT license, in `LICENSE.txt`.
diff --git a/vendor/github.com/buildkite/interpolate/env.go b/vendor/github.com/buildkite/interpolate/env.go
new file mode 100644
index 0000000000..8966a264fe
--- /dev/null
+++ b/vendor/github.com/buildkite/interpolate/env.go
@@ -0,0 +1,49 @@
+package interpolate
+
+import (
+	"runtime"
+	"strings"
+)
+
+// Env is a read-only source of environment variables, keyed by name.
+type Env interface {
+	Get(key string) (string, bool)
+}
+
+// NewSliceEnv creates an Env from a slice of environment variables in KEY=value form
+func NewSliceEnv(env []string) Env {
+	envMap := mapEnv{}
+	for _, l := range env {
+		parts := strings.SplitN(l, "=", 2)
+		if len(parts) == 2 {
+			envMap[normalizeKeyName(parts[0])] = parts[1]
+		}
+	}
+	return envMap
+}
+
+// NewMapEnv creates an Env from a map of environment variables
+func NewMapEnv(env map[string]string) Env {
+	envMap := mapEnv{}
+	for k, v := range env {
+		envMap[normalizeKeyName(k)] = v
+	}
+	return envMap
+}
+
+type mapEnv map[string]string
+
+func (m mapEnv) Get(key string) (string, bool) {
+	if m == nil {
+		return "", false
+	}
+	val, ok := m[normalizeKeyName(key)]
+	return val, ok
+}
+
+// Windows isn't case sensitive for env
+func normalizeKeyName(key string) string {
+	if runtime.GOOS == "windows" {
+		return strings.ToUpper(key)
+	}
+	return key
+}
diff --git a/vendor/github.com/buildkite/interpolate/interpolate.go b/vendor/github.com/buildkite/interpolate/interpolate.go
new file mode 100644
index 0000000000..90d5ee3cc5
--- /dev/null
+++ b/vendor/github.com/buildkite/interpolate/interpolate.go
@@ -0,0 +1,212 @@
+package interpolate
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Interpolate takes a set of environment variables and interpolates them into the provided string using shell script expansions
+func Interpolate(env Env, str string) (string, error) {
+	if env == nil {
+		env = NewSliceEnv(nil)
+	}
+	expr, err := NewParser(str).Parse()
+	if err != nil {
+		return "", err
+	}
+	return expr.Expand(env)
+}
+
+// Identifiers parses the identifiers from any expansions in the provided string
+func Identifiers(str string) ([]string, error) {
+	expr, err := NewParser(str).Parse()
+	if err != nil {
+		return nil, err
+	}
+	return expr.Identifiers(), nil
+}
+
+// Expansion is something that takes in an Env and returns a string or an error
+type Expansion interface {
+	Expand(env Env) (string, error)
+	Identifiers() []string
+}
+
+// VariableExpansion represents either $VAR or ${VAR}, our simplest expansion
+type VariableExpansion struct {
+	Identifier string
+}
+
+func (e VariableExpansion) Identifiers() []string {
+	return []string{e.Identifier}
+}
+
+func (e VariableExpansion) Expand(env Env) (string, error) {
+	val, _ := env.Get(e.Identifier)
+	return val, nil
+}
+
+// EmptyValueExpansion returns either the value of an env, or a default value if it's unset or null
+type EmptyValueExpansion struct {
+	Identifier string
+	Content    Expression
+}
+
+func (e EmptyValueExpansion) Identifiers() []string {
+	return append([]string{e.Identifier}, e.Content.Identifiers()...)
+}
+
+func (e EmptyValueExpansion) Expand(env Env) (string, error) {
+	val, _ := env.Get(e.Identifier)
+	if val == "" {
+		return e.Content.Expand(env)
+	}
+	return val, nil
+}
+
+// UnsetValueExpansion returns either the value of an env, or a default value if it's unset
+type UnsetValueExpansion struct {
+	Identifier string
+	Content    Expression
+}
+
+func (e UnsetValueExpansion) Identifiers() []string {
+	return []string{e.Identifier}
+}
+
+func (e UnsetValueExpansion) Expand(env Env) (string, error) {
+	val, ok := env.Get(e.Identifier)
+	if !ok {
+		return e.Content.Expand(env)
+	}
+	return val, nil
+}
+
+// SubstringExpansion returns a substring (or slice) of the env
+type SubstringExpansion struct {
+	Identifier string
+	Offset     int
+	Length     int
+	HasLength  bool
+}
+
+func (e SubstringExpansion) Identifiers() []string {
+	return []string{e.Identifier}
+}
+
+func (e SubstringExpansion) Expand(env Env) (string, error) {
+	val, _ := env.Get(e.Identifier)
+
+	from := e.Offset
+
+	// Negative offsets = from end
+	if from < 0 {
+		from += len(val)
+	}
+
+	// Still negative = too far from end? Truncate to start.
+	if from < 0 {
+		from = 0
+	}
+
+	// Beyond end? Truncate to end.
+	if from > len(val) {
+		from = len(val)
+	}
+
+	if !e.HasLength {
+		return val[from:], nil
+	}
+
+	to := e.Length
+
+	if to >= 0 {
+		// Positive length = from offset
+		to += from
+	} else {
+		// Negative length = from end
+		to += len(val)
+
+		// Too far? Truncate to offset.
+		if to < from {
+			to = from
+		}
+	}
+
+	// Beyond end? Truncate to end.
+	if to > len(val) {
+		to = len(val)
+	}
+
+	return val[from:to], nil
+}
+
+// RequiredExpansion returns an env value, or an error if it is unset
+type RequiredExpansion struct {
+	Identifier string
+	Message    Expression
+}
+
+func (e RequiredExpansion) Identifiers() []string {
+	return []string{e.Identifier}
+}
+
+func (e RequiredExpansion) Expand(env Env) (string, error) {
+	val, ok := env.Get(e.Identifier)
+	if !ok {
+		msg, err := e.Message.Expand(env)
+		if err != nil {
+			return "", err
+		}
+		if msg == "" {
+			msg = "not set"
+		}
+		return "", fmt.Errorf("$%s: %s", e.Identifier, msg)
+	}
+	return val, nil
+}
+
+// Expression is a collection of either Text or Expansions
+type Expression []ExpressionItem
+
+func (e Expression) Identifiers() []string {
+	identifiers := []string{}
+	for _, item := range e {
+		if item.Expansion != nil {
+			identifiers = append(identifiers, item.Expansion.Identifiers()...)
+		}
+	}
+	return identifiers
+}
+
+func (e Expression) Expand(env Env) (string, error) {
+	buf := &bytes.Buffer{}
+
+	for _, item := range e {
+		if item.Expansion != nil {
+			result, err := item.Expansion.Expand(env)
+			if err != nil {
+				return "", err
+			}
+			_, _ = buf.WriteString(result)
+		} else {
+			_, _ = buf.WriteString(item.Text)
+		}
+	}
+
+	return buf.String(), nil
+}
+
+// ExpressionItem models either an Expansion or Text. Either/Or, never both.
+type ExpressionItem struct {
+	Text string
+	// -- or --
+	Expansion Expansion
+}
+
+func (i ExpressionItem) String() string {
+	if i.Expansion != nil {
+		return fmt.Sprintf("%#v", i.Expansion)
+	}
+	return fmt.Sprintf("%q", i.Text)
+}
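+
+// exampleDefaults is an illustrative sketch (not part of the upstream API): it
+// contrasts the ":-" (unset or null) and "-" (unset only) default-value
+// expansions documented above. The variable names are hypothetical.
+func exampleDefaults() {
+	env := NewSliceEnv([]string{"NULL="}) // NULL is set, but empty
+
+	a, _ := Interpolate(env, "${NULL:-fallback}") // "fallback" - empty counts as null
+	b, _ := Interpolate(env, "${NULL-fallback}")  // ""         - set, so no fallback is used
+	c, _ := Interpolate(env, "${UNSET-fallback}") // "fallback" - not set at all
+
+	fmt.Println(a, b, c)
+}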
diff --git a/vendor/github.com/buildkite/interpolate/parser.go b/vendor/github.com/buildkite/interpolate/parser.go
new file mode 100644
index 0000000000..fa7e84340a
--- /dev/null
+++ b/vendor/github.com/buildkite/interpolate/parser.go
@@ -0,0 +1,287 @@
+package interpolate
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// This is a recursive descent parser for our grammar. Because it can contain nested expressions like
+// ${LLAMAS:-${ROCK:-true}} we can't use regular expressions. The simplest possible alternative is
+// a recursive parser like this. It parses a chunk and then calls a function to parse that further
+// and so on and so forth. It results in a tree of objects that represent the things we've parsed (an AST).
+// This means that the logic for how expansions work lives in those objects, and the logic for how we go
+// from plain text to parsed objects lives here.
+//
+// To keep things simple, we do our "lexing" or "scanning" just as a few functions at the end of the file
+// rather than as a dedicated lexer that emits tokens. This matches the simplicity of the format we are parsing
+// relatively well.
+//
+// Below is an EBNF grammar for the language. The parser was built by turning this grammar into functions and
+// structs of the same names, reading the string rune by rune (peekRune and nextRune). An illustrative sketch at
+// the end of this file shows the expression tree produced for a small input.
+
+/*
+EscapedBackslash = "\\"
+EscapedDollar    = ( "\$" | "$$")
+Identifier       = letter { letters | digit | "_" }
+Expansion        = "$" ( Identifier | Brace )
+Brace            = "{" Identifier [ Operation ] "}"
+Text             = { EscapedBackslash | EscapedDollar | all characters except "$" }
+Expression       = { Text | Expansion }
+EmptyValue       = ":-" { Expression }
+UnsetValue       = "-" { Expression }
+Substring        = ":" number [ ":" number ]
+Required         = "?" { Expression }
+Operation        = EmptyValue | UnsetValue | Substring | Required
+*/
+
+const (
+	eof = -1
+)
+
+// Parser takes a string and parses out a tree of structs that represent text and Expansions
+type Parser struct {
+	input string // the string we are scanning
+	pos   int    // the current position
+}
+
+// NewParser returns a new instance of a Parser
+func NewParser(str string) *Parser {
+	return &Parser{
+		input: str,
+		pos:   0,
+	}
+}
+
+// Parse expansions out of the internal text and return them as a tree of Expressions
+func (p *Parser) Parse() (Expression, error) {
+	return p.parseExpression()
+}
+
+func (p *Parser) parseExpression(stop ...rune) (Expression, error) {
+	var expr Expression
+	var stopStr = string(stop)
+
+	for {
+		c := p.peekRune()
+		if c == eof || strings.ContainsRune(stopStr, c) {
+			break
+		}
+
+		// check for our escaped characters first, as we assume nothing subsequently is escaped
+		if strings.HasPrefix(p.input[p.pos:], `\\`) {
+			p.pos += 2
+			expr = append(expr, ExpressionItem{Text: `\\`})
+			continue
+		} else if strings.HasPrefix(p.input[p.pos:], `\$`) || strings.HasPrefix(p.input[p.pos:], `$$`) {
+			p.pos += 2
+			expr = append(expr, ExpressionItem{Text: `$`})
+			continue
+		}
+
+		// Ignore bash shell expansions
+		if strings.HasPrefix(p.input[p.pos:], `$(`) {
+			p.pos += 2
+			expr = append(expr, ExpressionItem{Text: `$(`})
+			continue
+		}
+
+		// If we run into a dollar sign and it's not the last char, it's an expansion
+		if c == '$' && p.pos < (len(p.input)-1) {
+			expansion, err := p.parseExpansion()
+			if err != nil {
+				return nil, err
+			}
+			expr = append(expr, ExpressionItem{Expansion: expansion})
+			continue
+		}
+
+		// Consume one character so a stray \ or $ that wasn't handled above can't make us loop forever
+		c = p.nextRune()
+
+		// Scan as much as we can into text
+		text := p.scanUntil(func(r rune) bool {
+			return (r == '$' || r == '\\' || strings.ContainsRune(stopStr, r))
+		})
+
+		expr = append(expr, ExpressionItem{Text: string(c) + text})
+	}
+
+	return expr, nil
+}
+
+func (p *Parser) parseExpansion() (Expansion, error) {
+	if c := p.nextRune(); c != '$' {
+		return nil, fmt.Errorf("Expected expansion to start with $, got %c", c)
+	}
+
+	// if we have an open brace, this is a brace expansion
+	if c := p.peekRune(); c == '{' {
+		return p.parseBraceExpansion()
+	}
+
+	identifier, err := p.scanIdentifier()
+	if err != nil {
+		return nil, err
+	}
+
+	return VariableExpansion{Identifier: identifier}, nil
+}
+
+func (p *Parser) parseBraceExpansion() (Expansion, error) {
+	if c := p.nextRune(); c != '{' {
+		return nil, fmt.Errorf("Expected brace expansion to start with {, got %c", c)
+	}
+
+	identifier, err := p.scanIdentifier()
+	if err != nil {
+		return nil, err
+	}
+
+	if c := p.peekRune(); c == '}' {
+		_ = p.nextRune()
+		return VariableExpansion{Identifier: identifier}, nil
+	}
+
+	var operator string
+	var exp Expansion
+
+	// Parse an operator, some trickery is needed to handle : vs :-
+	if op1 := p.nextRune(); op1 == ':' {
+		if op2 := p.peekRune(); op2 == '-' {
+			_ = p.nextRune()
+			operator = ":-"
+		} else {
+			operator = ":"
+		}
+	} else if op1 == '?' || op1 == '-' {
+		operator = string(op1)
+	} else {
+		return nil, fmt.Errorf("Expected an operator, got %c", op1)
+	}
+
+	switch operator {
+	case `:-`:
+		exp, err = p.parseEmptyValueExpansion(identifier)
+		if err != nil {
+			return nil, err
+		}
+	case `-`:
+		exp, err = p.parseUnsetValueExpansion(identifier)
+		if err != nil {
+			return nil, err
+		}
+	case `:`:
+		exp, err = p.parseSubstringExpansion(identifier)
+		if err != nil {
+			return nil, err
+		}
+	case `?`:
+		exp, err = p.parseRequiredExpansion(identifier)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if c := p.nextRune(); c != '}' {
+		return nil, fmt.Errorf("Expected brace expansion to end with }, got %c", c)
+	}
+
+	return exp, nil
+}
+
+func (p *Parser) parseEmptyValueExpansion(identifier string) (Expansion, error) {
+	// parse an expression (text and expansions) up until the end of the brace
+	expr, err := p.parseExpression('}')
+	if err != nil {
+		return nil, err
+	}
+
+	return EmptyValueExpansion{Identifier: identifier, Content: expr}, nil
+}
+
+func (p *Parser) parseUnsetValueExpansion(identifier string) (Expansion, error) {
+	expr, err := p.parseExpression('}')
+	if err != nil {
+		return nil, err
+	}
+
+	return UnsetValueExpansion{Identifier: identifier, Content: expr}, nil
+}
+
+func (p *Parser) parseSubstringExpansion(identifier string) (Expansion, error) {
+	offset := p.scanUntil(func(r rune) bool {
+		return r == ':' || r == '}'
+	})
+
+	offsetInt, err := strconv.Atoi(strings.TrimSpace(offset))
+	if err != nil {
+		return nil, fmt.Errorf("Unable to parse offset: %v", err)
+	}
+
+	if c := p.peekRune(); c == '}' {
+		return SubstringExpansion{Identifier: identifier, Offset: offsetInt}, nil
+	}
+
+	_ = p.nextRune()
+	length := p.scanUntil(func(r rune) bool {
+		return r == '}'
+	})
+
+	lengthInt, err := strconv.Atoi(strings.TrimSpace(length))
+	if err != nil {
+		return nil, fmt.Errorf("Unable to parse length: %v", err)
+	}
+
+	return SubstringExpansion{Identifier: identifier, Offset: offsetInt, Length: lengthInt, HasLength: true}, nil
+}
+
+func (p *Parser) parseRequiredExpansion(identifier string) (Expansion, error) {
+	expr, err := p.parseExpression('}')
+	if err != nil {
+		return nil, err
+	}
+
+	return RequiredExpansion{Identifier: identifier, Message: expr}, nil
+}
+
+func (p *Parser) scanUntil(f func(rune) bool) string {
+	start := p.pos
+	for int(p.pos) < len(p.input) {
+		c, size := utf8.DecodeRuneInString(p.input[p.pos:])
+		if c == utf8.RuneError || f(c) {
+			break
+		}
+		p.pos += size
+	}
+	return p.input[start:p.pos]
+}
+
+func (p *Parser) scanIdentifier() (string, error) {
+	if c := p.peekRune(); !unicode.IsLetter(c) {
+		return "", fmt.Errorf("Expected identifier to start with a letter, got %c", c)
+	}
+	var notIdentifierChar = func(r rune) bool {
+		return (!unicode.IsLetter(r) && !unicode.IsNumber(r) && r != '_')
+	}
+	return p.scanUntil(notIdentifierChar), nil
+}
+
+func (p *Parser) nextRune() rune {
+	if int(p.pos) >= len(p.input) {
+		return eof
+	}
+	c, size := utf8.DecodeRuneInString(p.input[p.pos:])
+	p.pos += size
+	return c
+}
+
+func (p *Parser) peekRune() rune {
+	if int(p.pos) >= len(p.input) {
+		return eof
+	}
+	c, _ := utf8.DecodeRuneInString(p.input[p.pos:])
+	return c
+}
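+
+// exampleParseTree is an illustrative sketch (not part of the upstream API): it
+// parses a string containing plain text plus a nested default-value expansion,
+// per the grammar documented at the top of this file, and prints each resulting
+// ExpressionItem.
+func exampleParseTree() {
+	expr, err := NewParser("deploy ${ENV:-${FALLBACK:-dev}}").Parse()
+	if err != nil {
+		panic(err)
+	}
+	for _, item := range expr {
+		fmt.Println(item.String())
+	}
+}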
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE
new file mode 100644
index 0000000000..d2d1dd933e
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE
@@ -0,0 +1,17 @@
+ISC License
+
+Copyright (c) 2013-2017 The btcsuite developers
+Copyright (c) 2015-2020 The Decred developers
+Copyright (c) 2017 The Lightning Network Developers
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/README.md b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/README.md
new file mode 100644
index 0000000000..b84bcdb77d
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/README.md
@@ -0,0 +1,72 @@
+secp256k1
+=========
+
+[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
+[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
+[![Doc](https://img.shields.io/badge/doc-reference-blue.svg)](https://pkg.go.dev/github.com/decred/dcrd/dcrec/secp256k1/v4)
+
+Package secp256k1 implements optimized secp256k1 elliptic curve operations.
+
+This package provides an optimized pure Go implementation of elliptic curve
+cryptography operations over the secp256k1 curve as well as data structures and
+functions for working with public and private secp256k1 keys.  See
+https://www.secg.org/sec2-v2.pdf for details on the standard.
+
+In addition, sub packages are provided to produce, verify, parse, and serialize
+ECDSA signatures and EC-Schnorr-DCRv0 (a custom Schnorr-based signature scheme
+specific to Decred) signatures.  See the README.md files in the relevant sub
+packages for more details about those aspects.
+
+An overview of the features provided by this package is as follows:
+
+- Private key generation, serialization, and parsing
+- Public key generation, serialization and parsing per ANSI X9.62-1998
+  - Parses uncompressed, compressed, and hybrid public keys
+  - Serializes uncompressed and compressed public keys
+- Specialized types for performing optimized and constant time field operations
+  - `FieldVal` type for working modulo the secp256k1 field prime
+  - `ModNScalar` type for working modulo the secp256k1 group order
+- Elliptic curve operations in Jacobian projective coordinates
+  - Point addition
+  - Point doubling
+  - Scalar multiplication with an arbitrary point
+  - Scalar multiplication with the base point (group generator)
+- Point decompression from a given x coordinate
+- Nonce generation via RFC6979 with support for extra data and version
+  information that can be used to prevent nonce reuse between signing algorithms
+
+It also provides an implementation of the Go standard library `crypto/elliptic`
+`Curve` interface via the `S256` function so that it may be used with other
+packages in the standard library such as `crypto/tls`, `crypto/x509`, and
+`crypto/ecdsa`.  However, in the case of ECDSA, it is highly recommended to use
+the `ecdsa` sub package of this package instead since it is optimized
+specifically for secp256k1 and is significantly faster as a result.
+
+Although this package was primarily written for dcrd, it has intentionally been
+designed so it can be used as a standalone package for any projects needing to
+use optimized secp256k1 elliptic curve cryptography.
+
+Finally, a comprehensive suite of tests is provided to ensure a high level of
+quality.
+
+## secp256k1 use in Decred
+
+At the time of this writing, the primary public key cryptography in widespread
+use on the Decred network used to secure coins is based on elliptic curves
+defined by the secp256k1 domain parameters.
+
+## Installation and Updating
+
+This package is part of the `github.com/decred/dcrd/dcrec/secp256k1/v4` module.
+Use the standard go tooling for working with modules to incorporate it.
+
+## Examples
+
+* [Encryption](https://pkg.go.dev/github.com/decred/dcrd/dcrec/secp256k1/v4#example-package-EncryptDecryptMessage)
+  Demonstrates encrypting and decrypting a message using a shared key derived
+  through ECDHE.
+
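+As a rough sketch of the basic key handling described above (key generation plus
+compressed public key serialization and parsing; error handling is kept minimal
+and the snippet is illustrative rather than canonical):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/decred/dcrd/dcrec/secp256k1/v4"
+)
+
+func main() {
+	// Generate a fresh secp256k1 private key.
+	priv, err := secp256k1.GeneratePrivateKey()
+	if err != nil {
+		panic(err)
+	}
+
+	// Serialize the public key in 33-byte compressed form and parse it back.
+	compressed := priv.PubKey().SerializeCompressed()
+	pub, err := secp256k1.ParsePubKey(compressed)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("compressed public key: %x (round-trips: %v)\n",
+		compressed, pub.IsEqual(priv.PubKey()))
+}
+```
+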
+## License
+
+Package secp256k1 is licensed under the [copyfree](http://copyfree.org) ISC
+License.
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/compressedbytepoints.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/compressedbytepoints.go
new file mode 100644
index 0000000000..bb0b41fda1
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/compressedbytepoints.go
@@ -0,0 +1,18 @@
+// Copyright (c) 2015 The btcsuite developers
+// Copyright (c) 2015-2022 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package secp256k1
+
+// Auto-generated file (see genprecomps.go)
+// DO NOT EDIT
+
+var compressedBytePoints = "eJyk2wNCIAgAAMBs27Zt27a52bZt27Zt27Ztu+4RN/8YgP8pmqLC650yqn+RN9o5WamIzkLa9Agq2bEoRaoJ/oRJplKgCMS4iEExiT3u5gnfu//K8yz4XhSnQzTXYdtp6i45sAprIIyKo/AhtXelFMQPtnAcGf58V/JbVveLAN8Vcf0Q+bnIk0kWveCiUp9zy1Ko9e0uaYkPT0UfsCTgaZhyQTuNksSeqX9vTrQ5GH/G8fWiUIiM8Zvl31woXxiquMqOQMJCoQiZ/iXuDOqY7tTyHlv85rmSanWdJIe0JD1VkaEwqclvlvbtOy07cCk4KT/jXWxBk6kb7TMLkeWtCZli02fkxlspmDtHfMonSi0ScKI+ypNPlZVHgO1W4GkR4AkSbYSxX5oGlIctKHhCjRY9eR0GsIluyh4QFg9t4FU0TdkfLf4BSXR4MMmKV/aDCqtzxLvXaFGs0GVb7zLqgb0kn7nDXfGhpaDHAVIodPh3ummk9NZItmfr8juoNyszjGfELYp+OX5UK0lt7hcNU4NjciFV5Wd7rF4mtLxEEuUAmyUdQwDHGfahHiT9dk4+r7K6EPYYBbBaAQeWYegqF0z5jh/FwuFiOMV3oh7nteiCQCzGOJqamLDUBuox6qN4ZOLY4x+vIdtftTZM3+hI/xBsP5Zv0Yo1QQ8jcIP70VItBoA5MhNnb/AxAJzAPDvNTmrNgIqr66MA4MupfWmXLUa0gZu6/vA5lc2V6cKJC0xIayi9bT8kBWbiIVj/9oh5qCV/g53tVrxQ6XfhqsPWYFps2OYNc4vTaFEdPWT7BVQ8mWoNfwWqBHRV5KIyHgt1Mavq2Z7ogUtQRuRyipzfdg8hKVRf1r94lYx6hjntvVTgLiRvOtETLWBq/baEcWugzzIzxpLreLZWuSpPIj+yd7xLqrhNUu6uWQIESsLW8mIPQPS9tpxRI70BAQEYTY+gGvz8N5xgw3s1lCLjdaQA5ncTo18lEO1pjX0HkiKXSwBpVohq/1PSTUKwMXmdemLJmH0VfWx+xu+rIySuZYabbIJZRmoP7hx4qrjNR7ZLln4Fa96EAuW9C2RGaEjJXQ9XFGPJDi3O6PpdBHFKaPg0HXSqNaSf5HVB/npt+uEgjJWHh/bMHlB+Cq37JsrxklflGImfRY0WYdtecfotfzSaGGJXYGPdY6+Uk5OmyIgHXhgmAvG8kLXWetUZMxo5SuRCpdfdoNQ68uHArCZBkTJ2PTWfBs9MFazROg8x1CQdlvnwm3GSYWGuoJ6WsgBgGHZAxDltrlZDiYEpBMu2yV+sJ/M4cX1Vid9DJbwSWuMXdX22YqljiMvUf8gp5OUDO8mz8uD4izxRrDBa0k3sEsOdiPjVk/tGcJ9d47fkgYdy3jBfKSNrrG1Bvdn37xeqdSxFcEYdoggg3n8iy8Wo4FsHQJwB3QnexMLFrXO/4o5JCTJYUZFGBlT9yeU5rCdnGvJZeE68PVJVTlVxezDz3hnIMuy+EiH0QoasAI2GGlHvlsfDLcad8O7RfbwYd6nlG5phUmqOOQ6xgKFG309xCsbKvDumzmZ8WpVWGCvxYKExEGktwqkKfedH9PbOqcLSpzIDmoCD+/IQv2ZSKB3WzF+bZzAoxImoNSJPDDeCNkpeTbu58L3n8UQ6S3SHPhRoT9QneplWmOAhQoLTGNmzIqMZUflkPDNZJ1PUr7qnLP3OiPWWKfVm9HIXv7X2zdP0oFP5yC5cGCKG83zVSeBqd14przJKjmTOJTEFIJcZZt6LLnSPjxZkSMge3GLpx7KkCao7yMOxdxm7gKn4l5w4vrz1D/MuTgXvedy45y4n3YzmeUNNV12X5hUuCnC/RbBIfxXrE1JJffx1VpHc2Z88roux91yJK3zm9FHPcxWFL2eElsDqvYgoYlrrxIDCF8w0L9xVCJ/K1wBYV6z0Il1liZFnrh7oC7QnCoi/AZVC5wzuseezcZmsO1PXijTQ0Bz2N8iU9xr6H+hAMrmj5/kfZNGZwfRbQEYgFxnYiZ/RcnKxv6L/Ug2cGHP9l0pkpT1bcb6HfTZLVoy+UXd2qckfNk5rcKMh1Rl49vNssVRf7Hd8AfivO3ESlJUSz4MFkdDmiOzmO3GCXh864/drgFmZq6f1XiZsaz4r6NbjOTOQdQJf21TOmp1+bfXxGWCL5M810GZBhc6YLVGxcWFHwEhf/d8HuDXUa+i2WUcclqdokltn6q1nu2eLoI3aiyuYvyr08xBNo8h6o3/yX9fAyxP9Mmwwudd0GpruWyd+6Th6G7vUmuZ4LHgO1XTEVeMhK5PHoDx4J0JDDEVGv2kJdA/3hHRZXcjF/n6qveOFmgDS/K9c7qgLFfacIAS+VHxEk+Xm3UKLvhMgQXSU+GVKjei1uFONV4Ye67v33x7hbEEbT78HMWliBfVkEqbEOLiGV/81jLALoxKPMEAZaKMp2vBrwAHmcYqvhnZC1UE6Cr/JZW6k+69Bgxp+sGI3BfNvp5h/8spmJqg4myTAcqzYaW8Ami61S7cR5I5iErsXlk1oVtuqnyJi7rpaa89xob7p9udfTmT/ikUfL49GLZpxgYnjt7W/so79IqGHzZIGIIARcU7ktlYCMyhTUsBzf3Bt1fI3TPdf9q89J0+v8+lSdcIjcojNA6OKoSVmZl1IDJQ1FFUSB5nuRRnBQad6DQS/gHAZxefrebtPJA3qGzu4EsFsPJXaPKGq5gQaz1ERF1dpdRQmIeMWxLcyKaPK31zIUjilx0vGc0CupUPlgfzlAnni2l49rX6nxM8NF71OfhDKFLBIggWSfbBKORmOU1ZvFZIkfWIpDwOKy78HNLKSp/FNA7paMy9pcCxu0OvI9kcWld/GnM57qRnliEKgPUeQ9/Prs6koi6i8QAOov/xlN1Me38NfUgZIe5LJqw2p4hvWm0D7YuqCoZdYCnRVvwVDT/xcUc++4D7NeXYhKC1Ffqm8Jy7NWQE3v6koJY2q+T3yGH7+2c4V5HLboKAmLx7LSvsCRDjEhoUARsfKpiOSAxmJAf2Evbi5Z/+Ep1UFmFxGIrCPagCv/jUfTrQ+tB3CCp7L8Av2vR9x+42mKoeCso2te6hJ8+VZ3cq9beFbpyZUb/fOOwBjlS3NpWlKwU97IdO8Ey8Cr3w0TE9YOTY8Mu3LpY7j7PPBQtP/lrd9WaPQ7dJuSMp+mS/k8i4fIuuDBwMIzVouGhrn9t+nvTuXacu4D60+FYx+4+E27N0Q+QQY3rrMISc3dovqzKig1eMkIDQ8d5/Fwo7WflX2uHsYn16sKtRiQyMIagTwBBeTPEsm5U2YYkfBomnnO2pW3R9IdO2QghSgaQX50RveltU68Mck0D08wkYGWLA9TrrPfi7gzxJAyiup8DZsUrLsipVuEL47gpqdNtsaG7GV3ShLcCPfEKQEKzGRVs98mluYbdgEUagANpVz9XOstElchJ43RRkpMsmXYeoSzZatjSTDwGm6vHVgHrUrCk6dzTrrH2QZehW2xpjcc+JA4qp5zvuGSYgvC32BceJXaH
vQMhwHs/5DXsxeioT3l4awYPOiJOVDihOyPZYb2iAWOUREfP3Seb2+3qyytoILxhIPf+0OYjWCTg8rb3mV3iAVpk5+PpwxDdwXI5GJ4Rz5BbmrY1ze7wcbSJvhEzIWONaUROb4ZEvIHHbMVBqyw0ITDtsv1S6dzgmZxZgRcrWSks18w49DFkEqrysIGd5HJcM5jpNWJvXax5ki9ggd6DDzzLWI6zBqgFht/NoOMiNfj8rkz61v0a8ynunoGYT2YB75l8dz80Vz+NvTEpN38PUdnv8dw12YmVQDvlXDXrrc7kdH5tigy4yLb/X8OIGY98t4PchBPxvtl/lZ7tCzOqU1fIFs5XqG0Xd5PpslyRkcpY0e81mcmxLPPE1dXm/j3t5BQpLKlKku137Y/Ln8HmU36BlrBS0Pr3Q36f5B+S8A48pP/Tb7BNB51Y8UI7Ub1vs6CueReJtn/crGW78BnfUiaJClNdGhaHdyveuLOsMjuUxQs8AUNi/miOuH1LE4s+v0UG8A4sXVdldX+FRkRgmcg5vk6xKFlGvIC1YIybgNrUgwUdE2P8iesBha81++nZ7KnL3l8E0ngcicmo3KpRbm+Wutc1c0NQhEnnjo0uiZ4xuVEFomiMtwkdD7ZlpBf/c5Awbf7wwUb4CxF+ha/zpWs/kLZIPRTaA2PHSQlGH+jSjRovkiI/Tn9Ba2mPNsW8v94k7CcHcrg6FtxVHjhd7Y6zPjOvYqeSstSHVAB9b7mLV4Q+TU1HM/g4eCcgHxQ+XIjelvMEVb4Bgp6VnQojLgPDC90zpna3WQMjvfaSoARO3vM1QioFr04Z+lg/UUre7tL3t6QYh3hPDuVC0p+y8DsZVAXs40fPjGFEUl2S4UuO5Q7+opHhQ1whzh6HySiEdJDcc/TMuk1oOVBLhEvgpzCJIngjYZ0Q8R/a5nBTNZdiEJwBlx7zjyI/BK36wsYgUW/NxePqrXlxQzmeY+o9uBGDJ4Au+WQePJ4SI/N43qkCHvnbHcCx0VTpsF/KP+ShrvwUvPlYho36mo/LA7HoywPU5iePgWgXCmA7QS9B+D+CwarAEkUtsJveNSkl5/aBqTQ7kWqA7ZP4DSR5WRPSsBSVrIH2EK1pPpQsgBCXs3o9KJROeFtFDeEEL/Yigqd5VvaQhL407jWgpFgNFlmaKcq2PTXWlpgyUwmPyUNmVHmza8saL+x7X4wxx5nBsM2UTqfafXxuR87Jv8+i1zpuNu4juOCM+QdGW9M7K7WnjUhtxq8I4p4xWzjT+/TGdcDfLZk4I45tl78NBJtR/H8mKcCaZ18eEcXf+J/nYUup7oMZu5+ed8KR48m7omvLKoDLOdVTkM+/6U5FcHDPGddnvoo6Fx9WCGkGYUPVE0y+qbFHOhIGEKpXSaJE/I2qLSYdKfIeJl0RzyqiGSVAiPE+3NWaBAGP39rkU6qmT0JixxcnRlbDrJx9ilvQNtiEZosu+M1K/8BXdvn9h3mIkrkPpKCEAPokV6J3ow1edX/3jttNgVdrLAdg9G7XkWZ1DrHb5smS2vgC5Tl4ez6YPKwROTT2y81Fo8ckE9gzWDkdLGFGM7ZTgvavoDCouXzQpgwx1wGEhYYegGbe89daiHrMIdyM1/BTu8xewC6uvpxt0NlJtgSwhZTh4/DPJKB5CD5wGrRFU9QdZpvRrh2zKhTrICkjaO60UKkjjw5S74vEMr9kYOcO9Xsugd8NXr3lToFFQPUda6LnnApS5MmCRth3sg+1/r96dEKGj6rAA+nDoQzJkZ4lnG7GWoZHyfdA+gV2Up7lpx74NOvChoWRCFwa1Ti4FfQfOCZUeiJwrdSekkNUgzkDcGXNciiAGpJaihg5PMtWVc+JL44g6fcH3wn/0hjiK0+XgkS4YXGusXaWowk1S5Mb88s1ZVfIHrVGrhKjIwXqrbjGlRsofTTW3L/1YMAfduSgZsTfE7x1pqDKJS5/QAL6/R8wkDP6znlJTOjJ3Q7mKRJSb7NxO5lqAum565vFUAEjG/F6ag+JXcliFEAtjB50aaVJq9Xdmr/41U1W3ohT37SFS+tO2QhxwCxYKLfZRZ5DTejhYjGjv9nJ+MPVs+reOkdiDN9B/umM3Dra5BYIR3e3Ma+sh7s7A3Vx8bChA637iqIcQHBRB/KC2jhIOJ6TloMJKwq7kb0E9XF9mJKmCg3hmo5d4RJzHqbNcjzYx7Yk5pW9u2bnVVTfNizvlXChMV4Jf5oYhhhryI6ltlUEoy0dcnVrxqGCr3oxeTTUf5UBKirSe1/gYybkHmnwPlavGS7xGvudqLruLswtEhy+WLGktsyMnJ8W9COKhKX+yT/JsbS4agPalAWAgq0Go9WOP3RF1RuzHL1/Twfhb1NYiWnyHTV97NVRg59dnhMnGmBgXkg61GiZUapX50LNQZxzNuqugyEHjxA5KJfat5xMGdWLkGemJFwfcuskXJeWXikIhyV4hgdXzZRgUxRslgMfjuxQLvmkV0Mo5xvsDp3TMwkg1PqkGEkXlV7AWFN3s01uhJ6yDjC2Uk+o6z0pfnoqwyJnVgeA3WjXxaVtR7egPbF8hfxxAfwKF0smjTHmboF0kgivS8DFJLv75/Ucq8fbYoMgadUoqnmcGJcHbqo0es+Dhi9Xl7vCUVADTnhnuyVi1432m6bM6IF6ay6xJCRKjLVmAyEooegmNImVU5uh5FGxvOAAyQ706a+NJGgUoH7knQYOL8ZyXPTJSWiqNEpr26wUb1hbclv6Q2iY+PTf2KneMpMOoIfJjCN562NYCeMp+soVRGxbyic3Kw5CfLkEZdmVFpFlp/gH3QYYfvp+catQcIPNavc4lWGp4i1Acl01RNbxphG4D/KBQ3D1souHPwVFKgaiC/gateFzhTrPRF19IEVRm4h1rGFjVw/0h0O8psDwOIUQVE6yWSmoOaMX56IjcaA3jEE/TStAQxNE3H5qejiTpfdUTzQdTRY2T0u2OiDdBC+f2dMXvjCGFtQItlBJQTQB+5X1HSQO3L/UBu2N2IH1WfCIaagjNUmrY+DpLuPswrx3pnRiwB+IZ/JH4x1YV5Frju6HvMEaEbV1VMA5+tyOcIbe18oGEX+GHKGb55evhOtLwWOEnIgwWtNtEu2k6sjlmeiYYIZe94CyD85JcA8jNtrGoOwPIzuOXQDjq2FyAZB4Y5oHWAFNbhuA7DjS3b/0r9FnVkB4st7jizbrNtR1TYyoGS7+lR4A7b7ya/FmVLMgKfAbohRbBjWWpfltG3mUVMQRTTI/F+Gsw0zwd0Y+OeqznaCusBQw2Lc8QWFJAylqaDK8ZRwDapfYycfqT1j2SxjIEL8FScDvLoaovVia3yBwMh2po02KhuyvIc1A3KVHZPpgD8RYU4+EhZCPky3ziajZKg3rOGHuBzw1A6g9gXNpN0pbWWS/gEyhMSGxg4AofPhtBYp1z5l/jmMREAJ0qnNpj3vXsXXaAKjNDjJel7acD0GUPaHRhsKq28WwZ/UuD+ILPjHDtblSZDMWR2a+tNY6kQm47emyolszCZo2fsc25fjpIzGXs0cetux4Dqz
v03nH6XMdZwhuPqcrOixrdEEkqRPadaaRgfJymbY1HncC6myfR3zrPkEA3PLdw1x3ljW9+iZ1sPUeatW0c/nNggFNkllR4SxeOrOqgg18rwG8dPbaeun3WPVrvoIgd7oO/fUQJKSujMTWHZa8opUxebo6CahujaYmcGMC9h1ndg7r3k11sdoudFbd/0Ywwispr0E8h8XOUD2mVPlPesHFkAvglILLJ4WXRjSSnNcOdhwVilZTFgIlnDdimNm0JVz8Iz3WoP0LCp0+1sIDtpcuFVFUX6xd94DWvwsU0GstZp/Bd6TxbxfLluEDEZiy3YUJOsXSGBH8bcTJ1aZyu1woCqm4f4ogQG/otHbC0UKoYwwxNemdnuEdPF9Z0JMYKKo08Z53vle3XWrbODlTX+i9FAN5t6qe5RRgWuI6I68LMMB2ZBWGCwu8i2DinS1I9L4KgG0j0u7nA0vqcuJS/WCcLKXrgG20fgdRQWgYrhA0KHO5dqfeyNvSeU4h0pU754wmk58W07CK8z5cg/zzXGMuD8W+4PJtt+49U180vl5GWVWaUQZJ+MOOITDDV9UuA4tJ83c9vlnUzNeroPXVrPkt7kOA57ZNe2AkyfX4AR+q8paq/U8PwhABf1pXN79ERqG289WZdsb9waoaMeiyNYpvlwdD6gjoUZgOO3DdHKlYHSCeG87Wg9f0EMUkCS85KP9YETYylYArRn2Cy4CusH20VtLfLkP88l3VxQXV/PkPu0iwC3bFscgTVpUONTRqRoMCM2yI7MuLC9YPQTZsbFI4lMkpYCPKXaCra5lirPqucwJs1GqoCydQc+/2JuzNW3ZtuRYudz6zh1i3d9+alM6DMbSG08ao8AZwLW/KHbKBv5egSSHqsYB45H+aWJ96wANB3XanS5FvmKYQmO/mpndEk/N7geQJ6Eagc102vAM4FeOJRnmSoS58qGiPhsU5iDV4DwWJhZvFGSIslchcwjhuwOkvFRqlUfM8aQo3LWSXaXOS8oyPS86HHDIQtfeRAENPZ3OwnjkLG65FuhlIkOaCG4R3oThoPTsfTknjxf5z1JBhRBRCum8JRTWqHEYyOMHsnjicS/DH9zRCkEHdzRfkLOZoitOdpZsNaAYEprChcMLrLDZooVOj0vBgN2kEU4o569PY1x02SsaBWTffTVZvVnOc1j90s6YN5C754ZTMM04+J2B9V7j3BfM9YxQ+SSEcSO/QheRAjs1Fha1KUPNXg8lFuUJlOIQnWxvrupG1DPukFu0EYJLCUAhnBAL5/tXKdLHTKyRbm1BaDJSdDLcPSOngECoQH0C7UAOju6rz02WfgME7XNGRhmVdPLjfkdKle8Iw0cVwA7Zp2b5Qo4N99oOs61z6Y4GQy08B6xmWmlNccFSceTbSsTO+d6OfrK7pp/iqX4DE/RSSLhU/tvBcNlrUJXrr4tRK2L6hB8qn/JXI/jBPaO3TNKmVx4g4M4u27svPZfb/XozPSwBbi3qgsU3Q5+E1eZ3rKKolbViC+/LrGjZy1mm+Kvt7Ht9bd90kJ95+HJB2DhKY8jpzvUmXaCE8I6tnBvvao24ClQhwtFoKfsdVT1p80/vVBVyUNAb1sRPpTjjgvUr10ktSB1o0Kn7324E32nKDNkULKlBTRdNNAaAYDnQP/Eo7qukPuOjyR8oRJ2HU3f8ZvHTFXy9FYM+gxZj+wSDYkEch2ckTifXLHwDQaNSrIsGYKzaeJ2jAWsDHy1nStpusbz/O2FH46bUG94YJTVc9Wi7NkKNwt/PbmUJEds2/mP6wp3Z3sMRodI9NG7LZElsya61htUDAq8ucyuzD18z7n0wyJqKxSHJPOpvR9eAoSGrZJrhs9X0LKDrW/AuHAZ/cXQ1no/qjPeoZSJR33jTb8+4dUW8VX2Q7kBpO9ZrTTiYg3LXBPmKOGG6ODoKIeJ70obE3SBEB4AXJLHZ/4pmCvCAgABV+JB5v8dgAz+tjn/HSzi6oBGOCxqYVz7lg2JDM8d2LOQO7Z43ITYVfauEvbDjLM60S8t51edi33fD7bXGFKV/NmYy8yQTzsRnqx4vDblg4KV+/jxFSUE0x0M3RkOTy2FaedY7h1TIL/vViyJ2NO6eI92dcWLq3HQGio2RPh0MkEXRb2xtN7WkqE/MmGSlceDK5OEn5SnD11iI5y/VYbJfJj0jAIR6d4E5/oDAKKwJeufLHmIZZSKLSZ2VfCeHAEC5sktoU2na+vC1aAePQ9NeoQRegyDxHc60XsoW6iFXMkPu0FZ9Qfv3ZdKFHYHXn0G5pmYERvVgbctB4r7GJ6o4+eJxZDm+GkiC4Jw8wbZ5htmKiMAfhnWwGd9v/ihBblCIVp1nuFcnVQjJB915PyxvUE9wZC15lFUxk0Epa7YjhGUtVtjBdPIC4x3nIZJ8icVEUAAgwa7gBJ7alchWR04DXnJCyTVfaJWBNJz6q5oHv7qUMHd9VyfMdj6NTum63Nir/p7n3uLoJ0PwPwYbS83a1KNzAvyhdV9ypHLxGeDbP9LpOnmtUkC/TWOtdFKaZRR2dypMKeGE+0GZg/e5tjPvt6bhQeLl6KdYNIKCmMLx0rUp//YkkiGpJJtEPPNcLX6u7G2JYRRQPSzba9Xp+0S87BFdtHqHCvdxvkTvbOpg9xh5xQ4kBgwoXBCPlGlWplP0H+UQwB5D8lKIciWtdmVZLszP447LqeIrCKf/IoCUZei9vdI+bTHi1PthbG7xIDKO6+YJryCS0xA/4chsPnxVBRwjIJINQ9nuO41273xGQKmDdFEBs6vAVobitbvn1ZH23uZMESZX96o9XDuamK6Kf/KgpQfcj6tBtSX5QEV1CZkrzojQUO/+PHA6gUv2ZjqMOxyDZevUuOjUTo1gtGsY+w+kJvIUYSNg9w6lCqVcKA6tA1fNhvXf438haETgbYt4y07KqSsVXc8tKr3VTMQPJ1JLGuoa48AQJhRdy+BvR3rOvBSV++ZTsCkgDwb80VpzJAOSbqkCj5hu7EHLmfbvkbHZ4VSVluMY+LHxGZyowi3vqaabEBNDdXD3cGPO1m6mbdm0vxmwQhCgyHCcT1FaL//4wxyROIcOKe3YCpknT2YjYtcaX68pc+RC/MyEkAJoBr7uGkVxGJ3ZDGgecqQ2cjIqnH1ZUV0U5cEVx2L5jaTxw04k5xI0Ber8m+rJ7saZuPN+ygx6lHrMJenixAQmg/7N2VQuqmpHDxAMve+Anxp9hosyVm768h0LjWSUpdT29yGfMyfHKKWFjn4beQvUPYntPtGNp8G1nIJbzL8Fdp01C9GrnZjWCJZvK1sZB70onSsrWY585T8zSSyn6N6betbDnr88jIz0ssSwQ1UDROSfzqf1sV8+aKNFiMHkupDbFGZ7FZ1ndTgCHNaFIlU6Dwy8ZJO6J+QFNE5U+KfJfWA944DwusC/BEpsdDWiiFk0ECYn6t54z/BVFEeSrRsq4TAXYTfnXV6yA5URbVIzbIVyIAzZELWFWSTd57L3290vRBSCJSjkji0hpFHM3sCNZPpIrN6oK3YFycolTa4TCJrc801u+kuTZTVKS2Y
/XmXvECwCMWnWsDhJwOahZi5cmlV5EcIpGq92gtZk+w8cmFm2I8RGs16IO63GvzA/RBiucWo5n9aBlcnZ2uKaIzEcrD7fMFvxQ00PGRkOCxASoZZxphcSWPfHFBgS9TEb2lNWwmxq6UQoIztgE30WGcJdoN04rJLKowps50xNxzhXkne/rIf7U5NAyy7ZSME/CH3/RvchIFNPQKqv0cxEPB8lDpN/NO1p0cjjqvqEa7xaZakbe50pY00WW6kK43s8cXUp3zYpFirE/K7944O11A1RbitQ+c9TCD8UUfB31YWkpgrelvnK/DhlQEfouj3IM8ouVMaC1DivFeY4XBJk7bVzOYwacQbil2pS2w+ij09l94qjEHXN6VpKFnOEao1GFXR76vaUak+CvKG3I2XjlkQPnwU1rs/y+6bIAUCQ27Ek6wcg5y1l55ay17DnSXh7FvItOONpLqpY5Sezkd+NaKM8tK8MaBqb/mmfCbkKtOLLoFs4x5Ndnjqomiz0iWzc7HMxdH7hZVWYyo/UXAovQOKSUtzk8GFe+HTMsI4mWgC190MpH8CEEXG0GI1MfREdtr+BaHQeR+Xv38xiiTFOUOyQLIiQ16eEGnISsTegDKImVYs0BtwslhG/taCz0lr3rTm/FAB6u0aw/e0gKi/RySn65kjWqtcqRzQT5LyKEdWGJI6Z7+OpOS9hUWruXezmdJbmVLng8yol9gTsm+cfK9PaCWQIFue3fB+NZn7cCkyWqUvTaJuyTc2ESuyn8rXB6k2RhDiDTZ3Q8YOnjuzTwOcThJYthMT+yS/9atI8q0QiJWu5Y3e4BhkZu/nOmri519zbCWxBbt0U2oHW+oMAmhowXCKbOt/tdM3Y1tgUUCflonBKpHvU6U3ZCdq7SwJ7jDPufsvbJPOTcGiLpTCjA7Ou4g0G1NtYus3jjworABGLURfulDRHiVblBdEaB4dX6N8+CmTZ/NhT79735JRrAzlBaHsxRQYIX2hCe51OqzlsNpBjPaDi/gL8le4JHoAdUS1XNNkmoMsNnYYrBquQebob3HpYiChA7uTe6SCWTyi9aLN1x28YiEAT7JZj7UU21eXn0IlvDbhy+0gh3tGGKOWvnOweVdwaxlBshDG8NAyvaQ6MSAZgjLoDaenXpyKC5quYWpJxKByw5Sb5cycLfU+wf5cHiF1dlDiFwt42l+k7G7zuxVOhHGNW87pg4j68HXzg+e4LUwKStit7LA0odvycHWXSW402BVqAsK5TW46hg2A2jT3kjzp6icPZP1xjUUmPxqAusg16PSiJTA56zccK8b1xHdvpj+pO6rvPsHjiT2lUD8DucY3fdiPxgotEjcBwVTeLDWKzp1K8s7m1hHVKtnrRqcRlgzy91296MNte85PlYV1iiFiATvAJsh7qLhI6DsnqBhsQJgmbdOHtgrBnpcRHXg2tgVxbKV7LE+ybuJB84LDlnZhQ+ZtbbAZlgU1VWjtG3HwrSrADO8v6JyE8EPdbMWYBfRQppTmmEXbGhbtx/h3UAWaTvtOjbazFqWbvk8qY1zmD6c/NeFCDeHxslrGlKc0SfvXUA7yHlUqVZ8x14WmG6NXiQLNfi/Wom23SGMkv92EQ8uG/bieilLbrEq6U3m2N9q3hyb+EBrfLmAfPHGK/2QbYa1R7tRuFn4XlI7Xz9PuO2TidNgpe2vfqEoP5T2XDsKg+XuQO+Gm4g4VX4sSFUzpduyVFWShH6AH3MvW7Gg+6EkZoMkr3zzuXLHw860t0RU7AaRdG/eoyrWgNkOmxvTANm9ClEx6BrHCfWvDnSkwB7euqrpJV8fLIaBSlCznaszkqy4244x7Yb9ysS/o9L6DEHRZVWfj737HblwDYCV8qzH5gU/kXI3i9+HnmmdcHPpHnb/fR3mF/be1ZHF317MXD0yZdB3Kuw0P9W27dsdXqlw1O7lVP+YlFMch/sn9mwLqOGLjdmT2AuXUo8Yqv2s6Q1T1suwnwjBE3NY8u82vqh5G2M++POyDG3X33xu0HmTrGKIWtr1OAD3VuFvfoUAqHwGwLYctVDYm/siP+7gDohJ+6faIE92xYZZp89KUfWftRoBkogf5NmSNZqF/l+9aXKrnepP4nDEs9PbP6cHQPg9I2/0EXIReLK32XKE67BT8MWt1CtVQXOx7/9wzaXzl2pd/t6TkN7bWcaOkg6AN0D3Ld8CtuLS/Gxv4Vo3RS1aorr1j0BF2OQLslpR42mJYOjHNd0S4Jd5mZYovju3fVlegfMAxEQAyt7bY379E0RRwnLeZUXgOCd1qdTqPr8lDhJ7PiFx/UttYlIythOYvoJoyUaPeezq0H/PPZCStqDXNbnga0eobIl1sNQ2K+pEKejL4h5MF9VZiC+QE5JLOoUbC+MZC0tFiLIpdNbjBbd4csPI3ht5jR9DVpbN2KlHlSiMf5oiY0e3CnqS0JZRgaHn+3MZBVPmd7yh7otO8nme5NYX0cKFzSVChjicW+FegIwGXBIQDqnuWr5zPbC33m0Gh6rZiZPF3ebb3Z1FbEtNNC1PmCkMYEUP29ybzDX5quuNfqL2H+hlgkIvNtYzGCv1hnPRhRCcjD+iMI5SXMYQq+9BB0jTURw12evhhSdrMuoidAsvl5a+T8Yr7rYDApt4T81l/cork/EbuGrznAXC903Zb/228hAYsjcB91mHRaOrd0edavZ99CNZdIyhcu8x+L9vXp+pqc7Yl24olI/61LymCfx7+OqCQwJ9GJ731kVt6X+K4GC5aiDyhRrXD2cJ0ygRICzZpd67TegiihiuWKOcY3JdQ/rlYjvs1yUjVbMgDfBndtWAzO2nTzLhqiJsRbo3OyVvfaoLdSgliLH/nnorb3pe3vwsgowu9MwbZ1HzEhu+njq/3TuEahhv9N5uMYBDrF3FasqnMyWaRSp9SO0dXU2ZiuLzAJtdMPi1dXYKRJGVP6AfXcGud94sD+qkkWN5dR5273gGJKc1Wk/KPd/NouhGRe8pL6u4G7xC1MvI0IID8xj5hfCWb9i8PlBfSd0I6qTfXKeP81glBC1pZVKb5ClKYCJpYff6nR7/v29ydsYYxqs5YOWYxLG05oifisQ7gyIumKjdUllSGpcF3hCSacTaVhFlF66dJ6seKWtnnogsRwEGy4loiNKzs4kJ0pqO0IPjDjUr5MTwvMugA0jHEJyowYOHCv1Ga4GmIPPmpl4lvyLcqt3ANXcTJ7ok6+2JQ92FiHKY+fOqb6u/y+KnNDUV6FvYtVxXSAGRTBjUhorqguaHUJq7kyndn/lpdao6K+HhE2qhYH9l+u//AWgmkbnjkENL7jymP4RRUOKRVPOs5O2ZdTkctUWDTOLncJHE4we7i6APEzfbQQs+ai0tRRMFz82YuZ09MkViJz3qW9E55L4Cild0sRnLA5sjddNollpJXaR+IylOW84+42K0YilrkLLQRIam11NYU8tKltkrKWdK6gCduHt/M3YFlYxRRjLSD2j1FqE8ApLKrqRrEksa7S9+3VNHbkn//3NsOL9JaxHnpkrtQQ9xFfBITwGQ0rQ1I+YZspVFuBHR
k8kAlck2070qWuECNZff23TE4926Ky/nTzvemhoHBRhbFNO4ElF8bLWybKvPWLSCBGYqRngBOPjdWpMNQihKS8V6mOXX8THI5RYaFt2kOJdaeudP4gDdGmrR3NiY+H+K4HyRQDoi43sr7Va8HCfUCgc6slfMbbXn/igWx3KJPJjyBytreFPHhVm5UM6x/LwgqkQKLZGEMyLRpjILHoylLNtPsZ+xsk5qjuYdnTmG755KtPbQ923NYCy9R6eY4zfGaucmL4eMt68uZ5CqfvLjkAWr6/epBXwyBOZXxIkQWnZeVxmGF01tSBoj7jG42jkIggFp5BR/Qq1ltV5UA/ZQXYTL1BqNQgV2l5zTtZNOOOEVK2wedW1tsonQEQ4XiRzKr0fkZSqR3BOj7ACXfDMGmwS4X59kQystTaMInfuFWzia4uYSONvZcQtQOwOcPL3MwYWE7Ig+/MlmApvOZv4vZZmNEBxSnrMF2jP4UfP8w4SjcqOcZGMxEp5LT1Lfa15joK/jKMF0t3IZ8Iwd0DOFCZ3QxoxpdrHZyBbX1FfOcUdAQZgFqnuzDtlTsuw01FBWSNUD30qRKrmNpTLGhEBp36trunJZL2ahIl7zyL8WAxJmUN1/FF3qkFPf7dETEJX9f22kJOjKaP42cE3JPXxjmvlPZ9YzHCyeaDzp+NTdB2GefCCamNR4JV3HFVcFMXNqJ4Hh9yStLAczsX2YZ4JKJOQ5SLIUfrRcLrqmO/Wj3kTPDa3rfH/9NDmEeNnnJb/Wee/hBiCnyryAYZgu9ugc1qY26lQLewY+GMOkGbGz5HfdzfByxVgaCm4oAoZJUsvG8vI3pVZcFRULP6hgPzmfHt7zQ9+ck0/NFKGtgAx6wVaIuUTJOgEfgvCjikFSydff+GfCsogXsPmpiBAwEdp1UutKr5fQcjAWrhMthbQK18NuR7lwjVTpqhGrgRFxkQcNX8tSBHuW9BFOL2B8ks835oPih9Cxp7sKvD8YMERqlIs0BGiQCp65e5Yz49lITMSorlfy+KcUpVuAMYLdbpOp4msTYEPIf0l8TJVXGbc6Yr9AWoz8xxzxIylXl6VxF4WkhtH0U5Zj7Gba13dgtVRbX4vsphAV5RyzgRKW1wMjYeK9kkEQ+squ21bgz5HeRHQiP3159yYwokmAO3EGeAQWD+VYR87qlneZxaRdyoV38b94fr5vH+v0ZXrgC6RsWMslZaQGqN1xEzCKvcXrIFIQqciCV8d1OmTYJFw71kn3cBzZDA5l4rt7FVKxqrvYBFllfU9dIwCVZZ4YiRLYwSpZm7yjAVCN/ecaP5Zzy6CGDpGttWrsKmxB6g1AChFtFmKFBhkKIXdED7UmRXNJeCbp61zdqvf5APXIR2eWM+kEEK65AV34oD32e6gL7bFc2Ljsafj7B30aRYMdzy7IjfhiQVQBf2CTNQCd8PWx53uzQxnNPrl/VKkJM/L9z1bsMYcHwjN1bk+q/Wa4bq9siqjU0ZsKDSWU7i1nXL52l5JycaRTo7tvUai+2QFS2Dn4F9uIqGK3kUMCNYYBsPNemxBqEI5POVFr6ajMyS9F+4T9w9wXptBztb+lFPFaWQ4Ma/q6L12VVOh3h30B8h0AL3evKu809yU4b+U7i6Bscs/UREDWraoEs7UetLSffkTB3ZG9OH25gUtXU4dBp+CiM2QSBOBFAF9K+BvTgM/O74+7PheIZKsBWUY6emam1McPMdYLlLzIl6BdHAy5uLoXLE7NyZw9bSJKbrXUs+Akd+j1TMytjC9ke7YBcUb9+OuUVOxg8HXqHX6SdRef8ERFu/wNHXvglBeUAMMC7dYA6AJfFt7VzPxpv3tdvULTm+Px3UUjxp+JV5ubNUQO5+6chjYeIyWan4/T76SLU7tI2an6o1AR8iUzgapMAN65pJFScW7Mv0aBI2Bou7G2NxXbMkusrxWKi9umbLCjW4cZb9WzGUY8XcVwRlnqqmYKwY+Zfw1smlVfyBasNI4kxPIOC+kpurhAdujUKGGTHWmStRPCcG14SrwwUAst3FHyZKvsQGo59GHkS5jur4vPTQus9lWtHAtSwBbY2N2WbOeEL8T9x+FZdvQ1Lw/g/UzTWho1IE4PeZXlRp2Pm0j3dLSN91ao9YmpjtwRS1QwGzrhBkDk18PWvbkRBhwvW8YMXedJSISHnKvCzCAynoYZCT2uBdlAy0bRSUj/3DT7NZ28iRd6b0E7xWiqlO1JiFZg3eE9tQmfDhYaTQLTyJ7fb9AbO4O1XCgxIQVmB4KABTOgPxF1Hvk0wab16UoW8W6uLUXboJDtapCP6ZhGMpJgumHruxmrb+jMcLyx8/4u4f8Xe+oROFTCkahOkYe29VDRgnN8vVB9KTa1Sl5cazTY+ImVrZP7E2T461mGb6Nu7M6s/YLlVKXu16+nkzwQ2x6vYZ7VWLKaMux/yhkqLC6lOoz4srWBGT3DQUCiX16pyn2iXg6+3TbLj7k6fsvqP59ro6No0AuQZq55ksvot6+33KXo2M4/QQsCvAroHsimaeweOgc1C9xEd4tYMMKOEamF+khyICDXCLeh9G2p0lFmiuuGRr3nZziwzNC2Ei2Cg1VU78nob+rgLEXFa+V3pXgzuK2NbkWb6TkUePlYhAbfeaDze4ZjBG9gRKQrxhLa1mWWGjymxP7Kunke9O6i9QkbYRxbKZgDsz87Jtbz3EZeF2zs95mGm1/RRjy7E1o8/XyzB6rczulg2jqA6DpIDKVhKVFSRPB+OQHFGuv61WEuhVvOepHs1yiYWS/yTLv+exbRPi9UJdO1KazmfzECtNchfyJPJhdQpoSPrhBmSboZNkJaQpIzswCTgQrOo3Y6pjBWIRFbHFnpBpd3Vc8mFFh2EVyM++e2LNEOnH2IOjNQF0V2odyuJTX+csT49srzDDlOg5MaWqAPCD9LBecRjAj2VuoGzGOKTLs65v1WoD9eFF98K/8CLdryzpFuD1b3sX4KFM+MaITq3q36IEDdmfp2uSe2KFT3hPojt50ztGIPBFTtHHQ76AI6pLbiQg3JGt7BlNdPE2CkRuXl+GN0UWhvJ4DzMZe0KcveQlNwP4GOoUutW0fww9GxHd3SZBm+SnTHaKiIVLkNVGUfkcmopFvjMJS0FiSoiohe5PTXnCVJIre8F4oaG4ecNMiQF4tSjmUwRTDIIKWApBb4LE+xq9mtrT5aTyYcYKkGAQ7cRjyPSUDbQMYcgTRJkUFaV90ehHpvJR4c1Fah9AAqRqapZCakmCXxUVzkIIJbnXNWkqjcBOb/iSRhJHziVl058/KUjMNIVXOcdAjmiRq6dl58U7E3s2YLTurZ/0MzUypt/SJ9h+PaNqeYtd+1HDWKB/4ZaZvK4fHqLoJXnAf9ZHb9eFCN9piLm+Pehexkbg6eeAyuMF1GiPxXjnEMT6Y/eLuiaviYDYuNYm7A7LAQzLClp86B3TUmnhCIPJAKG3CS1xnDXT4Z7UShZBKCvTPZ8Mep5Lpz7zlaecFMSy8JXbADWHGfMa97EoND3A8oT6KEqqrEsxVWWd4PGza8HsfoY+kZZx0
wSYOJjKtzBsU2JVMn/jamqK6Ee2R/dNxsT6JYPObjcEwycMGU+m+JLapmLo9I8Ugiz7a8p8pRZkx/Ct3hTzd5Nu8KOFpF+AugO/Ew3niJxELy3m5nDSihJS1vpay6KiNuXzr/RUho4R7HWJvp1sfaSaepbJXhZrvHATx7j50AIIZXywXZd6aoyojGg9X74p7ytRhxKzgMFwBC78gA0PLHLydMnKA2d8519etnZDxGTtPKABOxKgcn7KeSJKMmPdeCXiSumXxW56rmdxyrJJbP3PwThuTYodXd3RSGG5NZgNpQM/jIKmQl/5XOY5QW3U4Ib98S0HbN07YA8obDHSLlBuFdwOWlqqGM2OkvI1e7qnFPd5fenqJxRSr3Ob7vqaz2IbV/aGMcETUFTt+znfOS99QjnRo5NtqzM91I0Le/p6awFyWpohg3PAZf0hAO4HjouiVTiw2u01HykyfmCVs6wP+j/nhM5Rmo5t8T4y9lfYxl0/hgTHVXAisE34+dZsI2cMSi3F3OUzJ7srM7vy/NYTaIa3aeGd73LFk3lXr4LEQ7/qXzu/XqnyrSyqIBpKN6UYtQnkrDuiVBswefzgfEdOBebm+591Ri10PjLMWChEOIQF2s/ZfNXsl9LZAWzo68kIMG1yz2HHtnYn2dwEMC6k6nfRNUMrqAeGewxmzGEwj3nmk5q7GBxaKACSt+6XnCgQ1ZzQGnmU0gKhvyYXV0Zd3R6xUULhL8WKgr4TB1fiT1lSbV4+rVueupSKj29Pktx5gUu1bc4my3DemwZcIJuVPtjHS0SEziOq+21hub2eTp9nksjW2cMeJTznzaW/7JKDOJR3hnxclbori4J3Ci8y2k/oXOIBPaXiDtOE8TYeOoMG6DqmyksyqtLgSh05qD0+ATM6jNSzS9NJbO5dTnTfJYmgM0+bW1aDGWgIpe7P9rYyOJg9wrHk8h5Ev840HbNdGXSAp4SdMy7fwHIIknZNf9iw74gRbLP/GGVPWhHDGBPF0O0q9ca7jLqUCE/4iGpgvWx07Rr4os2rM46nR/5l5J0RIrbYnsXZ8atMOp67RFI2UOkE1YVImt+Nz8TtPmL8yS+9JMLn3NsF3UbiuVIUasG2bM/JeKNm8wzdUdngTII1A2guwY2xc/3oOhF5AMC5wHx1XvC41GOdUjc1/BRXpQb++MlDKll6GFfaxsk3LUgILXvXMFVg011a35XSP+zrCA8arwZrev42kOeRxQ9C5qIRb80Ck0j8SqmO+urRAMmmGdswc7CfaR+wFTIZU+MI09e0pPdB+xB3eaIsdisiWCP3/gNfXiEx3OTTlwaKSIZwIZdxbuHe76pGlaclDtLmRyhjzrLPRw25NxHdaVu4K7ugxAn8ZhMgaslx+j9DYTS3ML98h+cQFFTMhcAmC1Rvgaf9YnOTI5f3IBYFMAdVPIGl2A3GoIofQUfDeWPQDZ0EXhnXeOExBc0CZyLlXNpKyAp+H6wA+32NomAMFfRxDp9G7obRW/44s+bhNbxmSPSV8INiJu7eNufd3+4fIljPaDMmmt0b21ja/f11Y0cvvJTCtilv79jjKQyd/sw74KikH6viV+LjPStSZQtomYyldg3iA2PMRUEFMDU09ohawT6UDM4nsxwQKEz4+hdMrweuIrODJbf9asuUUWumkR90pqVwNMIQpMv4t69TXxenhhETEM0wf5UY2ezxeOydRAQM4ryh2+L7zTBkeqD/ZXRU+1YcJG610XeguBEJ7oKlE192z/bVbJsB6tPMpXjfUBbiLQ8hNxFil9jbiwz7Idgjpb6tfyKKVcNdTGEqWOsGst0eR4i1+6gIrwSl/OL7YQ7fhVVR9M3nc/YDwqV2Fs3l+Uti18niZ3iCqsXUPWecYJefeDOQukhSH9Ll+SbcJJht14I96KYoF4i4qrztbztBC40erk/jyM+hsDentEtVDOqLg/ecdLnrTJAEnWnxpZvdymEFZLk66S83CZzMnv2FS7IOFJAu8+7+UrEU7S/CUu+aZH+wCuHixWK6iq5Y0U3yr0V0CxiTkQWGPs91JkCxQlf01PuNAzXQDjzM17bCWK+LYsgkIEJYxpv+ohbyvT8DbXDSLNZGkxdHxYiC+ZITaGg5DCRnD3bcEBX6zbH3nqKnCOD2V3RedhErCxQt6vdXXLTFrvXNLgYvm65VmAf6pVH+gutSjEei6URst4JHEJUh+pMsMC6yHKbXO84dI2NXIm5hi93lcbwwAhYzgSic0rfLbG/svYhNTnUTxgiEidg59bmvvvCS1eUPI8YMUT9SEnhvXMl4nKzkH0jcOVP4ddckKrWXNDW9uJQuD8wOpOShNRJN/0G43JIhbGTf+alU5m7CJMfnDmsvWyHiUNL+is5PlMpTYGqg+cvYTiy8XeVKFL9xOZTu7/SLD1ZYO145fdjsWLKZnTBPZdIUs4+wcRd/cUKTaXa3URejWWfdvNG1T8JG+MhELsys4WVGndlQbp8ChxhrxpyDHm8d1sz6zTMqIN3EXYr/SZDGMwGIUywqimIPn/fOaJ27Welpq3cMW5i3g9vWc850Jah6wN/d6duqlJJU6BL6BvsLOjuZbxtOa5+B/DWZ19tXLmO7uB3nexcCYxHGmW33Zi4KDbEiIGWCIgNQcc0WVfzUjoLy8wIja0+52s4JJ1sYfBY3cfCtGIPvxnCkSUmhp2sAoNTGIM4WDO8atzF4x7tNhHrUwJkY42tT4kZrsx1YjTZoTU6UPwQQ3Am75yOZdDpd3AdAp1ppTskW+2nXsN2KSGKuhJHGeJfuVz9KS0igByrMCh+CXzgbMODtv9Afki9Y7hGEBpVhwsxHYmg6AszAmuIxg/llxfKG9VUHGRFB+K2+HsWZGAbnJXkbkvosgJrFRgDsS3rKZ5YQ7cs9ZD0/KjTvECcZZjoVKp2093IfjQEGjelaJWEVscqpAzZRRjnaBrBtQl5FqV3bZMP8J5Vu8DO8fXXhsfhHkf9lYUJDExFfwq75yzMLD+kG9kPdwLA9Rdi+arPyEXkXvAtipHNNgTMedvB+GthKAAhd5OZXYWI2Vj9wA0HKulx4wMO7mCLyYjxcw2WIIOvvUSkxQ7pTZfajgK/gdPn7OU042OYbFHDMYX8BC8LKM3GiL+KOLEIt7bgDxklmZ9Y4I5KzphsGiyp0GErOQx2bdkEYq/f60xwIteDGNxbPN419D9f/BcwkhrBLkLkzMm1qYlMzWyCdCvFtm4iaX/T4HLxoxgLmRokbzAy6vhEQbMkG34bj098rqc6KsK6+mkjdcEVQ5GcySlrQ1fHD3+S/aKcALXbkSoxsqTzfiBIivCP9mKWYm3VSij1eMjICKR4iTtn1iYRtBVx8qIBH88aFUVHkUphHGClQfjSMXVKgjtAk3f1355uSvWekMEtnJjOUYKNB6fuC1ElhOWVNQV3GwaljRjL/iCKC+r3eE5+B5Hd1AVdRbug7cU0Nz/DQ45J71QIc5BKGarQJwBCe6pXfuDfOaDLbi8SrG60u0p3w3uhBhnSFgIErUb95VJHJt0sskFT1LsCBS9flYgkya/CgxAmr
U5gp+D0A25T+osY00jUnrwQYql/DlM2Bu1oY5Mre1TaAHvwcL2wDcFs4q5K0ByAHCwVlzM3EgUd0uS+r9n6aAzlR4Vp4XUz4p6Wx2lBUVHh0QDwbZ4HU3wV27YPclcJTMCv0NDrWDYqsw+Z8Ccom1cp97YZof1dFpvrRnONadgd+nEnY1ytib9lVIgSUKLSUf+yBoDy3L0FEsRc5zzwG6t/8ma8fRu4/vDTw33qm5RoSviozctV9EaJDb4OumsxIcbBBpcoF8G/U8stWyTTKn9Glmd8bhHR4E0yW+UnLOfEnnFeM8+v/oAeU3faTzg6W0ATLq0DpyPjuh9TX99ICVgFse0HnfUhqIvaZG0J4dssMg7gjmyC2FcJgJLVtm7TND+yyhAoEFiPOT++FYU3op+McXRGVNRFfDPQRGT95LgphZCJnsh1zZkGtgr9odAWpa+82RBd653N1UqjdwBOOGzUWRKT2JqIn5W4dNV+n9OZ22O9PfFd6Lh2fh28bM3S8eH/+/+RxaTUywKAKo1a0zLP0Z+N/b5/l4YbZhm6Vxn4rev+WVn0osaDGIr63/kKsyhP109KPAGU40IKHfBLasX7/Ue6PRiEggAAAM22bf5s27Zt27Zt27Zt27Zt1w1xgzzbafIr7vcXRhIlw2vQ8eiQX6SFhMBen9mqE7cxdYlRMPng2nGmsUCijTUAoV+j7bvt4oIXKpGRs63FUjuzgb8Pq2T/nYsVlJ3c01Rq29XGPtCWuZ1LdOzk78yrR7RGyNhJPYV2WQtk7sMAxPWEJykovzhI+d+r890INfzM1Uyn+VH2t56HUh9Imow0OU/9RwwRzMYS1++P4ELAjfeT7u7Bi5J5/pR4qiqTgspy6BPJ0D6vylIKaIMTQ2ODmeY3wBDDP68fLEMEYoskJ9s8ftsGAin0zBsFXoIa0Wu6eCvQjOzwY6wJL+WO8UHoL+R+C+U0t8BtK4fllGmAx4zxCTQNrfE9WKXohq365/GVYeXXSIpNjSIRlwiS4tV7nGlLlfwsmMlxTKtqT94BhCmoGG3+27W5EOaZjPUE5gknbBRfmMRcUNnjEIb1wdSOB77v48dxm7E5mhKnmAga8x4FLS09f4zZ1rjc5QDo95ZvL2GI+bGRUwdwCYfmczBAbpZSbDxxdnzzEEvbneFryhmtQ7NW4aDmaC6BVDGf0n89J9/oXORnDm5FPA9yHgVfyK0/EgN5XpJMuESPSfVOnAYYq//AuzZbf1yns5mHAbb+dBmkVdr77afy1VIghRLfgEJoEx7EWN0fQUyE3B2ENvd/98t4gKlX9Q1jb1EAx4V80WQvY1Dk5KRX1uc6Lsba/Y9lMvqwdhfPO0bhb7yCTFvuQRlnT61I1GxrA3c7pFTyjs+HslcXgCcNjnWRFj9viPB+Op8hGX9CCquKGIibgTBPZwOdmy8JqS3whB/SlG33ANpQYjwJk7KR0tWuqEDR1qn1AxemgfIORjf3srmQBP1ggpbrZ+Y4hxRX4MMp3ho1R0Ec7S8JwZfyH1VDWIdXLI+AZ2/WTtx63SjYO107Oix7lFC0Zj2+AMqrGluR0MG4i//C3zda7Bd1tr6d5n6dhaFtrNWCTOBZamPmMfe5QFnq6bXYXMzrPTn/XJLexZOiLu+cZ842qIKInMLxuPkA9As4yPY4ErPBEmRD64s58/ZDdx2qjHRzy+UVXuxefh2KWIzO8RxIWal8PSgUN3pAyderL0+qKfojrImsoN4qNv+RuCH7I1I8iASwMAhdSDqq3PtmplWOgxpjtbx9wn+yq/wqYJTheiPnqatfo6Rlrrf4yT+47VhM2BOAqQj2pHaksnercP4ZzXVL5ry0If/SZt4JQU/jTnjCbEzfHD6GhE9ucsLSBD2ZwnIGTNq6a4Oxc7DVPLelT5aeV3x9KnuhramvmqrjMw3U9oe/WMSSgfWKREnm7+90hTXpxGWtB8b6Q4X3uIX2yLynvKN1pz3yyacHBQ5pylV0sZVjPXe0gJrIEz4uNbQf8p3x+Plc/tVCwR0ECCYr03n+GebaOK9vH9vVIOwaB1sTAfPhm9osq9EW3aHLOG9wNEUH8GJB7MFm7Xt71dkj8LO/0CuV2OYaDphVlf4xXeRWSiwI6/m69qllFC4XZ97zjo1wl2dwqT5e4IsFKYGK13PWLjjN6YosUV5lsx+BEFCfRGQgeMVQAteMzk2ylElkDZNRHU1PJVshLnkPQh5msSb0/FQFQzvw25dhWFWT54yts4Z2Y5vSzae7O64F367D7OTvMQtXMHxgfduMtKj3OqKDTT4VdnowYzBFTyNi0lLLrbZ0+TSs1vbA/2yqYgpMDFZ3gfOJTy4x7eeDK9YWOhmAAr1Q9yUhVbqZ2G9X1o1HSJrgHuBCY87DVgMiagFkwlYnA269hrb5CQPWlGfrG1SyBgK54psQTVXAFyo7gpTzVIyx1jkYCGXhZ1IsWLSAgqR0QSqLjHV8XJopOqSGujAXJc6tFY/5yLUt0zaGCdGSIFnrmaAoUd2/OGwJUlsif+X4QjVx4OpDq2yALw6SRJXn2UQ+2yzurvexVROHBorWs2ft40uZw/z9iwqwRHF5Ll2sodcs1LehXOeFurDQ17wuuadF4LWi+TP7tgFyvL3ZRAZiqRGFov8dsvd05TzXbyF7406ZBC4U4Y8vceoMW6ZjhxexHRmJqwmGbNuLApgiX1ln/7KAiSn1Q/uuUg1piKjwJhRXpDidBN5th4T9mGy9VL8aaJaAXJPE+QjQId99yQxLT2GbRzb7hDtA0TN/mYCT2JvfITa2ZbB1Cefw9C0GIXbdW40FP7kzYR5AZ3wQOpdGuJ44OvwdNX7ivJ0UVKBfOGgm2ZkTJd+RW3L3v70CizA7dJVBs2ECjanjyKmI/EemGzUbdDKl4YMV1C3gCj0j/Ko5PNabMdfkoJOwx8pTcSx/i8izlwfFGyNS2F+AW2S/LbXm/NqxjX97NJ9gj70gCoA3KFaF0sWf6qDVhJgis/RTxYgzvkNcmiJMp2A9JeWG/vDFzXZR2kpxFBG4yKS3Rh6snQU/3+hiG8JGtYGjdfqmy/dPpD5eZaaK6zT+8gSSlhekuOiPuNB1kLRl73mZ6laN8brtuPcRLWp3TyMpgJmxGtjSNwP1KiGZvWrhJwFCdIUopPKiK1wEj4EtEjXieXcqeZoAGRE0V6dADyY4a14N4/BFVg1fo2C9b+l1U4hHF3F2ur9qZMzQ+qyzmTaikfAtZBiSguZE5On7CvM4fWuJsByBHveqZ7gu5YQw1u2bJpG80c1c1oGg9GXauG14F80Jsan45QHsG3/4G7uv7vfcZdt2xClQh0svSSiaBEDCm7gLT0cPR4flSab9EFaOFcKcs3I1nvT6qn6wRZjImTUPcrWN39Q/IhTXKZmgHLtJP3xJezOgwSLP6evlh+Pkl9Zl3n88stFXWQMFWPrV+seGzBEB/05vgEdpvIud0Ay6Y3KjvDJd95oh7SJwVZtieNbzsU0rYrQ4zleyHxj8r/SQhRVXMyXiIIAH2TG0eZnhef5z2yVFgke13BqxBDlQMS0T
z2XYC/reUSNH23SgnR+wSnNx6JAxWwCkjNjSmMi6773dvVfg+aPa20dIJGy32HzmZWp8GyiG4mhU3t5mbNfv4cnLbz22ySdjz5QmYh1x+peg4O+DoubJ1nE1KThHBRNekDa6UUm2F3e0NQgzSxFYo5M6DICpdmZJuOJ5q578XenJwOA3JPe2zjQz9SpPRCjDqzNFDWfHQd2ypGhilfeS0L1OsHHSMwKbfa8lDmiVOBXDbFoGRwYEE+Vw0bMbwFrft6s09eBJ9FnlPFHMBG5GQRQ+gyqFnuMEDEGsuIPCMZZTi56GeVi2XwoEkpsqaCkjIL+UPyG14rD9xL+MnJzgYCiJwJBb6HRbEzMlJZ6wHfgC7iq1CxLNKYhyltw9qYQFNHXJNxUtGVHfdrFv7wNxVMoZSnUqueq0duf4iYr0e7JtG72xikIwYZhJQ05Zy9r9gnoIOiCioeqz89AAwZPcO8ulMY1395MxZONqlSAMEFYuXOSr5rj9GXsC6K+afw7vy/VSnC9ozIkVcRQ2JDXU8R69FosfeFtP8P6L+C1GG7q3PFYGDCw+tc1abldPLZnpwQyz/UThcGZ+gM/6cbGKwG2aoqUvi2jBYkT1wcNkYrDPZ8xgJUhAkSkjgikEbQ+s/MEUXAd8Qapj+28Uul4yMDF7JQQ9w/vkFGG6w+pMiKaUqloNMWthDlx473wLlRmk1nwxGTuJipsP9iCOrlTmO71NQS3wRo6qZ7TH8my6TvMLWHnRyf+Jttx42vPfWOqFjx6zSDzzs6lcQ/CL7ymEz+o2UwihaziYsKeKyxCkOr8QHrvfUwVnQvLNjweg8dFisas2K22E2mKJ2+zgW7kAlMi+wg5gKw1Zw9yztr0D8DxTKCYBRJhuYZbGMFxisE2FlZVLnRUEnqv9R9R0YYQgT1uhpWlTpHhiZocVEvgRR5Ce74LOeGjXWr3hnYfNWYUr6x6oUtFqOZ7jrpW5emqzkVZLbnCzxavcB9SdPTEC2Xd+n0BqYlC6q7X5I8cac04SQ5P70hjpVhUAKrNjykPsJoqlab0lLeMbUi93XGKWnOod19sXkovyqDnS+Z3uW8l1MZrwND3mM47dq7Zk/nFbXlMlaIlEK3xyXwPiSHLlio122veWm/JaeUslWXzGT/JWrKuHhVgYykrX7DIvflkv+Ldvs5dv8eNm1KB0jaclikEuwY51ChcJ2/Ie87pjlNJuD3Wa5j1Dg/T8X2fzm9pmuNIsoLKNR5mPOUpsLK2+s4TKaec4qUi6qJvGcfcT71GX5roK07IFhAuF/x7nFmggmQESK4JgFGbyNZjjEUqgDqMct3DH7kR/iPhHseNSMZ4iYaQNItxFtIn0UCJBa16epzQlJWmnuSKRYClnd3xrGf1zic24shNFnxqyXoDYptQCDRma8VEkuRYTbpzfRAqax7mRpTZg1OH3DaICXTwCVkoy6MfO6yetcDjk6JCEOKlmUMX7voFBDjNRJm/8rfeo8dbTa4NRzif7yHRiOYvSYbDy6nEU8lMK4ZJvjzfsMuyCbsJfJLU0BzeaBo+ZBmL0UNK6ceTSc7WaBm67QuafmiUA8kqgAXdGxC53i7XW1Skmvad086jFqNfJe+HANhmABxgBlD6RxxVxd+ziIJln5+9n/18IdeV4rGS6sHxfYgtjwSP7utZINmlZrooE83OX+S6EZ4LYRGR4KVB1hp6uFxSmKsC9tVihqJ66v9y3nj9Q6B9Ywsx6hNy51Co83Y+EYFjQOA/LyjAPt3erc5EECn3zhXUoejsTE5JS8cJxugLm2KYkPbuUhgtQOD7Y2F2nW1XalyC0MlTLTl69AEQDv73GqKMNN1GaqRpfs7zxQKw7TLTZC2rn9fc6QwE9vUZ1+M75HKqcS6U423a75VzCSWPS7sE/kw0bMP+Sdr2WmV8U0Vw1NiuiZC1FSokOXK+4XigsnhFp4mn+Us+ji+FlPLgsS6DIiBz7hH3tUZSxTIacAQSnu9R/n+gn3mzRAIzCFJqeD+oLL/rvcLz8Ek5A4P5AfXvxsFkSm87xS8OCesDfOsx7+2dFXtbilC4kQXLeokIxKxvS34RcjxiCr5tTvjsGxpwB1qOvEKTHsWpIT1PYjBuHbR12FHm8egBZXkXB25XiguZq90R/G8dqAHeXpS52TPIohOapMqHvw1qrbBoq6XTGLsPqmQBfyo10oAnWcUgGDdviwWdGTEVi1Usi0J2inGe89VQ65GShzrX0VRJre1krPrxrUVmz3vlwI7cNxDDw0/WTUIEl/SyNPt8ezOELDzrQROnHrEYC+xbxfgaAeOcfL15M6dBvDbgymsYCE69szmDGvCmiEYpCucM8zAloBjVw0/cjseu3svAGMEYWeqZgPDGqztjwAszAeLF4F+dj1zg4iBYBA7Y/3cNBkY4EvmDKu+TamnMW/2p8/bREtVh4NubTgOJXnbnsjdPJR/qc6rMHy2Ub7Ew91Zoer38eIdlEOd1dEN7MJTSJuvdK+GMl2MTRdJsTJUaZ7nZvpa1Mndk7mnmWLpJIDlEIunXwh5hn+2f5g79q3mI4GLj9TqE5lTCk+4fy8cu8TKbR70mLJSP2JKxtzsj4+er0455kyIEqABzvI8WKKYPF5CwHL1n/IeJ4sKm3EVN7FepSvTXJ/+2Gu2K5u2qS0Yt8S3EiNJWY8rrvCXWid1AHGuZXdmuMCJYg1Nx28nY5EL8dn9bsKRB3YYZFOw8Svkta3M9mE2YMg8rs1kQEwwQttRJ8W/qfFosWj4IO+gn+zvfMFf7zq/lNIcPEo7Sa8WYnCY5CjPgMQ+m035VYujwOBQ4JA/PZtdFWOdObQmksvlsGx9GcZ4R084YqgAbamwOU8iXV7y6/yMRJ3Y1I8Ii0bBXL75eK11t3R+RhICx95L1QJW4YReId26IPVXYKh7/+TbwDlCGa3oPu3mJGqPMKlDe4NpTZKukP3uD9lx3EG7OuN3uOtb0l1lpzJdwMaXAb6IA3n+DfZm8770BIJ0xed/5FJI8xK4qODcY4dtHK0tUg52lgQi7l7AkNhJKR/hh3aTtPP6nk/J1XubAGmogfat+D/L3+KDyHZADU0V3ZlXNGN1vLa39xq7RATWDbsbGR+6Zbetc8BvXIjJ87PpCwTsY5/w9CDX6GonXWpzho97vRWxjeB4KPp8Ea9cDBBB2sA6vG8tTZ7wV1RalfhdHo+ofW5tRZhDn7fioQOWjAkn6HICMFyMLCS826dZviJp/OPN1UdDgOq/Pu6cvdeYDrktG6Duc5bLCSWilmn5zNHJq6CHvekWzv1IIqWDNvzmA0Ptvs1qJwhypvcDCkWNN8cBZ+D1T1TTXW36156XPNOgd76PmO9+Gm9yXV6QJ/lP5tAWUiB8b7QrkXnKRWUSDxB4tIoOPUozK8Ot5fpl7i6FwMCV054o+N/PITviMNK/+Ai7sUDleB1IDsYaJBagGNDGefG3qK1cmpHK/XUrNGCujoPCWHramFbTNSs/FIJKVqkO65yx1t+5vVB93j3kZqaQ9VPhf42LuxJApfZAWFrqrGQvfI+KvS3K+RNL+
LVLP8I+OWrp+y2dJyOcCuO242tjz3GHkPQh+slq5QxcnZWoU0ism+PiGGF3dZaOvfb7yCuJSGV4OfydMt0nHE5DeDhIEyOkwu/rxKAcyeXbUeIX+6JYVXeTVqx0NnZB5Jy574+CXCq1dUzjybrSIUQbkQd2I00T93u9BCPCkiQgvuPseD/RwanN4vGhzpvfv87/upUvS3bdz5XIUZa8qUOrLRDEeO5HdaYM1aue49t0cqLNIqIeSPvfTcJ7IhE7R5fpCHaN/4/BRT/PHDMQ3IfG7KkcJ6EGj8NI+6HxDzXtbgdguUqXRDUZKJW/4d75OMBSfRn3F6oCBB1go3cDGMDM7WWkh5omChowUdDE1ntjby7hKqmRstYS47PHS+6X2bB/lvqSbZnIvubfLzYjmh6KTPLgzac2+z8796SQW7fyrIOCooM+oeCUuQo6PolCctT3zmkFwQWoDZhYYb9p89l5BLOO2TadHleeKaFBXy1HVKObxNPYWXPc7wquUW0OlGzY/qbHcv+fHhRKdlJsYn1BYxo506sYtUA+OsRsTVz8IliKjdPVNxLskOnWNUgKcmySCcImDOmiEtBh42+vTzOw9wz2UFEh4MexKUp63ENEpm8q5aip86Im7oAT8FBLWSN41DdmGe3SF69q+JCk+jrtl5cDgiZ8O4VX7vt460OepuqQqly9Vt3GsFYboMvhw+j73whFuW2QkZIDqK+V1amwP2n/V+hZhFQp2nL8F4EBmbgu6h/DAl13ycDv6VnEUEwbBuGTFFIWcJEQ9fwE/SpOuzDtNN1N3YaTfhO08/H8vyasYUqXqea8E35lwDmDvtJMpuwa0p1ggsCGp2piY31ZS0xpjbgwA+LP7YXZGrLjo0NC0ghArthpRh3LzP61fOvHDalyMSxa9n+CGfUQyaTVuBvryQTwFKaR7k4jnJghFCRuJ7PCvdebN1vmFyFi2dUcXIj12UOCT3v+hNa54HCUwbFM2g/HCkBZDsHag9em3WpYI9u5dpQs1MbK1UhXEDC5ODg3u73E2Za0fmKUl6a+3busXd+V0yMoPm4OWcLjdJu5NONdcwn3F16otq4rqSdn1Qk8crOxuLu4irkVt3caOnpwygg0N+3r3YxaY4Wvx8PpNXSGBC0UAY7isPSgAvWaaGm+kuokMOxMIZAVBACgVQDCCRIjGRnfVgP/gHJVJ3nUvyRCBtPjrcdgKlKznuX9d+2mpA8A6RpSvsGmmboNqD2aFI/RuFWIl1obx/SS1WIOJKm4x6gj1YdJKgDCYMC467ovFdfMnu/CKr3AebJE6YqPzslUFqRDMdiB4TJ5zAqd0fBzIhQYpPwCG3S3xsJmq/EbHSs6MqbyhxSj9XqaXlLy/HV32IzXqhv2qe6rHV/g9PvxiAgIMZkGvRJh+dJoyUGeAYL4m4O7QiL23D53vu65CwYuOXwEdRdi5RKedH7Yoj6dJVQOsNQPGLH9Zlu0uV7HOfim++Ru9JmYpzyiDaq5r2vqOBlmEgV8rlV+HNbPxl/FSVi3Iu07lVDvqOylbOfSYqZSLALixfVUl5pNjNDNkT8LywXAysZLVKIR5Ww1n0MOGdIkq7/ZWSboFJjmLgyxKZdvJUf211+9mEI4X2Xkiok7yIXW6U0/1QO8rkgteAfA6/PVOdpJpwhwRk3w+13rNWycD6ND4pjr2oBn0YPlDmx0PslCVEPF6soyetvGSD/vnWOHKnEySRq77ewbNaoB68oJHTMvIKIMmBgwsgi4I9XVJAPs0NwiaIOOMkjMBDKUCjzKLfd+DDpWgHdOLMKFqmuG/h73/F2vl1EOtlxI6reX/3W1yYIpvlfWKBLOkPKOnrKThYv4WCBY/SzTb3EGzpNQ5W39ayRBEkpsmS/YM9tAu1ToA3UHNTbhjN6buJK68ZVEIGyaTzYQap62b7VeqtjvNdEJBynRJi3YuVYOeN1553E077jlrgQbP7ZGyCLNIkXTQEWwPfigPcAX0/cf6V+rXWtN0fMW7tapp5XbAfkLw438pcsuoo+KlrWbTh7uG4bxLlcBwjUXdFcSulDl95ZwRQxu9nUSue+DgtcGfk/RcKB48RvVxWZpcYlA6ZWh3x3HYbXTZTU5x3frodv9qXQ1X77OBVy0ucGiu8nb2/xgEWafP9MMmbVZE2G6t7tPpq6aDK4iqqMfu3btRZAW6hj6JFF/Xs5p9I0Xdq6rEH1n3wLQGPOmdK+1HnQVD1c93Yx0MW0Wdgr7reCn30NqGdeKcTR4t4XTj8tOAuXrtNoX44ZnAYfCgkd9yCE7xBNgqvAqA9TTWKzSSTlppdrPk9YEEOZJekQDh2ioG2UqdjUT22oThNIjczBH5PgcL631X7vguYzGlOC4sMPZ1XcvU5dA510uWAAxkJphCXLlfg+Ccie1eyHNjwOZ6lz2k+7P3tYpTvRX8s0Q3WfOz6ltuiz9bgSm3okJ2Egr8iZoYGM5rydXmeJ8zT3yT8o7lzwBcj2e1Oo57+Nrda/jVdGdg3vdSrJLZYXlc1VGYakjsaHFMJdKQrxoCL4aC7ygxQXYooT1utyQKta7dz1eCZvjxtdQImb7FzbGxKpuWjpFj37hfoezNGnxvsWsedYBRtHzD9BiHWWfeXQ4dYnE3d4/5EHWG+O9q2WCgg7MUbniapTDmsQgq2JbkrlG/m2EzYnSeGSctg1e8YmqM3MdIxmjpLsKSlhL7WxHnYDzZ5dfWXUCvQtXsj6j3fb3oThHmBnW+7MWYasZsLz9mnWbiJeXUpWG+1W0qb5qz65cFGcl3Qsv/y2h9KjlBFG0vFblOisY2vRd4V1EHDgUGL1vbMjSNgG8lpSYM3V4lbp6Je5RAmwp4e5Z63bJ38taR8WQu0CUNtqCDgAJ4Rzt1C4kcajPimANDd+9eucw4LWgXJf1PxA2sQQY7TnGDbt6hs2HE9O7YIlZcIF+zP1x3Nb2Ksfom1Fp5d1au3l0fls5GKPau0/DZ5Kq50SNQ4TQLOCNQp1K1PL9+Ujf/F0jYbAN3hegwcAw0o4wA1VMqmPTB5J1r37lWeaNmFFEI73szCtcs2ur/xPmMcjLLg9gGyPHIOiT2jXTSDRIMsayiuVWXTYgOJeDT20169w5r+lF3a44i2aMHXQ61OjehVM8dUyyPfQUyWmGzV+8AXiqNQ0Ix6gvq23Je7dRKz9JMN3hrEyI2/2cczeaJtANIsd92BmIDs0OHH7eSsp6ObiS8V4K6xMXfsUudNv4GeQatNjkHgU8KcO0AjgNr4OHFavLzHOAmcgiYzKx3Vi8OqMihS5ehcBtHpjzzNP0ILwsEEhEwdWbIQP9GAsyhXY2vT2diKSvFbFo3LAePNJrhFOz3bqk2VHrwjp93pEeArfwoPV8ACe2tHNwkCYKKSwDPOWrZCr97t3ItateTpF5Cb6r+VuoFqXVEmT4aABk079MRSfodN0uu4ZhsUtS4dJwJspjRGPZzQ54BvIhN9CslAxsjWuqNhFuFNUtxMOJdhDE+QaSWAb12qhnH8fnIB8f1RGJm0d1O41yBrpJc4Wvc2xQaHOJuv/iTiq6UTAN
OK/DR2S6iL+7lVr1dxdldI5NAfWkuQast08F1g1ZkGRiuhPqZg8GljmJrRaSybM3R2BzEViXTg+I4ltejFg9OWXsp5gl2WMSbxcOddqdjb1vveZrlqd9VHRfFDCSTmKvHVvLzBXNOSWPlX5mpCzlYN2i8cSat1X3IwKGGEjT+3zgEv9zU48gg2SpMIaeL/fM3qEqLWxIM+MKxh2+7q02a1x+hSdKzDCcoEUC/nXkXijQeYEMxO68RWka5F02IpwEA7eV+Ov58mfmitNxSkSMT6Q2cqD/J4bfM/zdisPPup4b0O4/mMckxkLWy4eq6s4DMbLcy634Jf7tSL3tcGWf6IlQR9A1aiwUENswfpEkEVWqmJ7ZOmy6PyRufhqgNheTMsBwa8mF8kNBOrQWXcNMyw10HIF7ezQLe7tqFEYtW/9YCT1xBa0STv1oH1Pyr5smWN7PPinV744ualmrJSo/9y+bNTfqNsVWwWolaVn93XVNvIfJgVCvVJmGv5iAC7gNGJtllMh7xJP5lypOskTe4fliqmy/k1B9FMZh1q938lFC+rgjLhhFN3V7jWqZkRbJoT0Ub7boB0WOV9yEjHb+ISrjTdwIu7h1UE5MJ6B8Dn1Z7DiYHyuK5D6GUZg7PYk0XDpjmys0/WYzBmZifPG/lHGV5k1ErquV+DvW7aVr/ikdTROT/Zz796bzfhtNKdfsrb9MvBLc24YoXwPn2mo/FhIu0DKIY/W8f2IqKym36FDrNiv4O85C8xBIH1oKqD/6+BmbSLCY/myt5H65Qi5QqKjE0w0zq5aB5MMeQJWIB2BkyXkLk//xgporo1jeufa/vhVl3w6XW5jQE4o1FTt2EQZhG+bm1p8TYA7RUKfsbmRJfVvilKeKAU9VRTGndQbA5I+WtDu3IxUPmqXptXxSsjBzsKowF703GttPFjodNCzXYi8CARuoc8+QelVx1qxLIPrIWSDQuJPfQWV/zoY+EMbRR28XG0btJU3SyAdUFA55fEqxBegSwBtCQ+UGa8IoD3lfTAIxHo+5N8Pasaz+Rtw1ry0ciWQYSGO70pYyjlo3RMOVxJIM8XfqaB+zC0JapFLD/3XS7ZOhd0fNe+QinMCvEicJv7KanS/JvCMenjRPW0QPezskK3G9s4BUvjk9sEVLl/r8bk2Ihl11caG176TLs7dDWzno6VafsMkPt7ED/XKdfonFYRlX1gPfAmkkw/pMX8DWNyL9dINASpl7xQavgOptQaJhJFm4yWbputYnFdBuW8L3DWzDURkLetnWz/QfSlZ2lNK+EV2MnSTtMD6VV0cISh7wk/bCpUHD8HZUOXaM4jIYn8WxJrtg/zgyFqOQGzG5ki/l7Ct5bnOgd9w9ISa1JftGkmJA4VzBLXgSjEX1PcFxJ5I1ym7qC8RFQyDA61Y68tbhO/z9+5VR7OawI9WYw06Ma9NV8WWbWSV5d6sOLkyq8FEAwswzdWMXRZJDraoNEDd/tooo8Ts0TWwaIXc9oG5AcMt4cJ1yzr+9tljTW8XvK7+COtwdGVP0svid9Uevv4M0PAtdQi3KxSMJjUpCBYulQABZUiEB/ju4yuN47c28rqlELCciGGAg6aMNwXOZEs7/WXNMM8JzVTqgq3iLjmltAMNf48VyX+B9JLELbtlqEZGe8806rvznPbyPlZXfgdPtRJIl24r6kw5WdYmSSo9j6bh/knad6ydWBn8QR7hrwR/hLWBKlbZLxLPozbQSPDpb1UIZWD/m/V1ocZ+5RKW+nNcN5SSk7fzF77EpJdOyZXtfPC0/3c+ssFZSo6Wz6DN3oOgfzD5s1lQ9ZTc+0Rtsaj3Q/RvvpLHVOIb1zrNPhe38BLeani3yX8kVUT6bl97NW7yjSsbx6RuxZcIzH/ojOdKZ5b67J5BBd0unmQXDOqJ7ia8dn6HFB4t6cmGeWBWwLhTnaEXQXWY1Lsae6ly003qMbmZTi975KzmabIxCVkgdIXuoV4xNUbT2hs08kKlOsNlM48FF2NHBYO7Jghp5csPJglf2plIu1wJLRVzRCq51i4Rwma+XVDpficotSEqUe3u3KpLRmxo9N3TG3R7h8GkgZOyKsgc398cKmg3GT1p/zsxAAUl1kC4HIA2ZCjQ21jV9SCwWSVcxAOrm/EnBdf4hhTJldgmkwOlVw4cVFqoEojrelphprrguOSTYl+etS3FTaSJ4YeTTRI2fz3ygYUuTqISoteHC+lu/EotUKGWa5wiUtYycCWeUZ+dUUq9pbpuga3hMptsUvmhZyaz+W8jjuycJ0e6gP5As8+YuF1FuftKH+VmwZmJhxXkHpD4P3vGlBlGlYCHTm6Co7K5+4LcCfCuxQSqAgvl8rT3UC0yPwi+rNnguXsJXcYBNOnYqjj7E6KdYpYbTal5I0ckSebS8x5WoS3/mGjr07YYwN71xB6E/bCrY/EuuqCVNnY9qwNAaOu4uxcc+yDNv1+mWxmxUaKENu6RigQn0nL75vSWx80U+Gt9k9F66rDxZf8DzYg+8EPydAVS0V+isH0tFcm8b5gq4EZ3Dlfjq26v7DZ3kj+IxfmnMbojxTRj7KRLM1VhVk1jH6x34FtJzWa9KjqocTimV3gFcupbFoOfI4PhdEC254dElookY9QPyQJoV/u7HKYxq7ViWo77EnWDGWjcvEzh4q6UJh1q8G7mvXIBY1QiwwA/9p5VCat0jbRyVxRi4s2sqLQMuKNb2GTunKkONJ0GNpWu9aWd/KpGDz4POjGkT6jkwiJ7re7r1Pq5V3QsV2/Wm5+ne17FK68cj3dIh5zMCA1mILzcHZNpTGqAs++aU9OsXOx+m9iIxUDPvFKOaxzpDiR7UKxr2gB6c3LFwbJHyUZZomMX86DH60F6uhxDyF56304Li1PchggmUIKZxONUJ7kN56r9le5K4ScGc4sh8Cu7pZOzQlhm5N0qFLHT0SCFSY+pXpFe2yW3VXhakVWUMb7kfiGMUveN2XQ/YZC+WZ3LbjqSDpzh5VR7UuqEDV8ifJycO7/wH57fwmYt2uLUQ6nj3nAXhw/0LwCNb6s+s4FMaiJj85JZjl4WAV5wb/SA0/OAeG43BhEISqh7Us1mClMfKefInIFvoZDbnRepEDNXAUiMsEzwtsH/ubPqvMSTkto98MCbqKgVyD0LUDlq7xpJFX9ZqandYDfo/y3e5IjIY3QpK2N/E/Ge8apBGF4l4h6xbUZqpJP4FNhkn4fFV8iXnE/xS0/8cXtE7Q/vd++5TGyqqmJ3JtYv77b9C8B3eI4WUU44CVnlSIZgmHn6rhzX7U9gK1qSrliPiIuxXV7htT9b2ao+1rs7bLcKO5uYL8j+1rcEZQriungMiefCtN+FAtqXcu2D4OHUlgoMJyiCBy1MPvmLCoNrTz94+ClUUAavw8nJyvbYBuJMWH/he/Z/NW/d8HvkUtK45CYnaNq3LfxSH7nLRQKnhdhSRvxle7jo5ysAHxOC1jDiJbT5ncHqNc7BWeByDiL8vEqiCW8iV/M0JAcWaARjg9UHjLPrPyI0DmwMdGd4nnR4ZIL9JYLaf6F/s6Gvk8Jp
J4A5bCQTVEO7DKq+jl7scu1CnwP0mKn8ZSqni47pH2ZIGSJ8+m/BjQjENkpiZz4kk8oEzmzsGth5nRJIZ9M7T/StoIDSqf2UiH06Sog74HvOQigUyUk0BZQSZqcckTtHdq2BruYyBzP2cDGbLwPw9fw5LYKH/8ppGH8EFuBPR/hvf559OAGLxn9fqClwaEmNJkSa+lYXMd4CCoGb43r+2c25MwBA40yiLfZf6Jza6xF6NVKbXRGOOFeUpL9M5SWEhddnnC7TxwlsFOgU4TPlvDLzkkpMq4DVJ+2vJ5tQetm841CSyRUfDtXUFFWikQEnqEfuIUFt3NPK7VqubaWKHij9SnpKuwFrKUHDKr3DD/yRQLLaKmYINtzwGPvWJ5t8B3D/S9q8vB1suBIO1K/0lyM28vffGU8mPwdgANsi4ZqvwVyL+7qLeq43qbVk1+CjcSIKAVfgk6Rgbr9PxdjkX9vYyuWIpL3prqrATB2gk492X00rXEzD0Th2OqbRcH4pFP8M7qsLEvK5pbpgfDNoF8eXeUg/tvWc9AK3WfL/GSbZs56J4gjspESSeQ9mYegcKuDSHfYVdZDfdf/5X5xFXAelDituy/e0z0BluxxS7CWyeEX+PkXngu8uDilq0YqGmhVH4a/BjTWtTWbckpmPTRZCy3uSuUVxpu6Upk82/c/Yw60EhiEGTo1pvyJas3fpHRp8dlDBK8aoge+PhJbZXSyqgP7zOm6BhhtveH3sMUdyQFEeG7brw5BYd88hmeKOFTf6VBlZ/M5LSu0Us7vYgBhiML/nL8kYqgtlxg7qRadkPkGeUgWM5CkyEnDgyH9PZKuTeFDBiTXe4PT9EbEWPAUAS0gNZakQ04xp6kMIOHzkdKc1LSMv2nUMOYp/I1BiCZSa8Ejk3ME4oxVrTdS44W399eBBcvJXEWkUAJkabl6ejWwVooLAMR/pBjXSUrioC39EU/Zkpy/dbAXMfMz5B/3ahhmAWlKNOmYQw62MjYWwWMozRFy2zKzlDutbxk8OZfHQQQYLKLL0ZMvxdFWSI5nTaMC+T37+hy42/PoferUFZ7ZCVIdQ4OVSTiTrj1GnciMdKTAvNhgHFq5Drd8qG9K9PeyCu8U4OpPG9vVi5FDixv0qD1Lc8P/a8F6gxOT3KS17rkRSc2lPjIcaTuDGitwzv/xCDwDh0GL3EebxkVsrpmb9MqqDI6FpjacYIX9qeOF8fwNQqhM7WZHj2ufTQu3SPMUZLHiCx9YGZw9YhpiOcoe4OWw6u4YmiNGoWBH9fbiaPwbXKhugv3WbEe1CN/yjJeWbn9qancET0Meg1edIESPEz5Hyudj4G3A8whuNpFNzjyfwIDDtxWePduttKxC3QdOzLMum4xL5cudEQR506HEqkpDX2WRMRwbX3I4gmDcpObEZ64u4uiqqpGZVG+GscSFLvu6cE2E2II90pVEQxtylyHh3kXZA0z1h4V0QiwlJ7r+veZO7ZGvhQyi2qwKQfjow37ycbKtxlKm6Wk3SUi6yl19z0jp0oe/bGkO8cdB+uSmUXcwiLaNSpmDCfjKh0QTyuFweCiZl92m3OE4/q4tFEY6bLxGUq7qimQK5Nt/QyQRdfknWtEP4Uo7lm/3uddPLIb5E1redVeRC0SK70YWJQlKDT8qPidO5KTkcPvQ0QZLYxyr4pGol3NjGnfCiH2ZkyUqU0xj6MoYkXy2QS8mFm8xx8WCHH2rKc/RIOw9g/KD3Wc66GPgnH5AYs8rOlSbbOqOfAZkUxia5kv4TLx/Q238WiSyjtzYg0q445zQ9PZQ53iCR6sWU6c66VgYYKKKIQhcmY/Jdmizr0l7UoNUP65RSFppD2n4/i5CcZZ8XAzpqEcNcuBEqH2xwQv/RfGXMEjsjUnFaYwNI+vRtEGeCtvdKyxXhielEBoz7hJmZT0PWtfUBUVwXIYg3b5Qn5Qh2L6pjm4YSXW3ALVjPM5vUcSelWlPt9oTayfe4ULjoaiPVVypUk26pXVp6EWkuSubMrK2N4tsE7fhJib8aG7AFROvQcS/W/iJxpNVyVxFIEuqvL9hb4R/deTKRYzAAiFSgop7nFqqkefi8CZH2EQiYjybqJ/R5C1zltR6yzV6O18wPo+CcujeHd8c+cePPR+TCWStgnAgsTQdY1wlz2AYTuFIIFWz6o6CuNPHepfkFX2gGV2mi7ImxZonJTEhiGFdZKE8zW28hPFQ1mZxvpckhEgKGZWUxK2hb1XakkSyzu5Puc7TjuSn39Z7xSb5KA0Phtp0t6ntnoXG4lI7c6e+tfj2YFSTGpWTwxvQ5me5JGefKina6MdSCv/RNvVJMos3oDBfiwX8JMH3JlfFyf+OoWUnN7LY+QvmdhpWei1i8zpBH/8pojv605ayIQk0+LGgXxSU6vclH8ZotpcLxNxFVwNMkjmhExGZLgi8Zd0XG9D8dny4Nph7n+p8NfPrLSdfnI0UOMUF62/HJXJXbb1cm51nMR6D1OA3rvjA+jmDNOYYX7v03g7LER8dxHYsZCZDqDk6nWcHXxFOl7TyKELlXPybsv7tIfGNyozAKHsSck8QwFXfohsxGyNL3YniGOUi6bUNx3DJsZxbtNQzmPNcYGDQVcRmZ42chj8TumaUtPKy4vrO8RWt1v+wtJon39uJZhIVT3trtjEJXJPi4VuDPDO6ojCCGy3Oq11ylwyk5PJbt80hya5b/juQWL/eOTyapZ2/82CGs2M9Sw7CsEke3IH8QFaGxfPgshPZxSMLhKIYLYqf8w654G6ZG1wKv+Oh9SH/5wwB9cy339TzDC9d148OzqDDwwSFmkDUSgIxj+Uy/2KcswMbjWKOl/e+IGrDrY55W1BibtJ8B8XQqe5JaEX5CsrugJGCnrttwx1vQg5khD/4o9LzvZ8sypGXYeGafZd388rryEScdLm3PQCvH9vOF/pXVuDEHX5Hg1zERzU/oi4lVjIzRs2D8zMf2JlC88DF3GFnJ591MOLx6p6cdqPD+4W1p33/Aiznz54r2JfJ0wIPYOg7TKqf0hQucqkYLaxZqsdhKFFH4bhOMwiDVQ9U1zudHf+7Dfny9qB+8bwTrizOHXhby8T1locQSR9tj+dUu1Hsj26U6XpXAhOLMkAm2Wo7xO52oeH4U04qB2571Y8Wf3zbSviedp0MM5uHuRuU2gCtNpJKzGXdGrl8jU9il6NPEZ6FA4YmWi7TaDkZEEkrrsAIfL906ANFH3nr0wnfqNmUuyCs0B/htBDk4ljvC1G4yuZaAS/svXQ+qZBZJWKC8yrPeWoHRpzG+OYk7ncvNITzhh45dh7qHWSnK5f1skLVLViWX7Vwsxs29OZQxApyfjoTTaqvb/0OFfTvZbQD7ulgkZ8FprJ1vG5TCZ/pg73UsNbIPoTIyspDzNxKpvBIu9XUG4UxQO0B6iPUINnaxUS4srT2wUFAnrKzoZUsKpcYDCRrYYAp9uzNO80IYtlz1l/WacJCfRnCUhbmzEYxGX4Y+0khpc91LjY7DiMnbIcz+XXd8IMYCo+zEUX+yNth/EMnTMZ9kOlAWg3MV6FgCxbx
j8Z/Gk6qWl8YCNPtgskBicc8eJGK+K4ZOD8wcWTywNX8VNWaqRrxdFQRTPVEaXRLEolUVCQAmObZolq2jD9lb2muNi5K78HVgnzUS1vO1HRO1cVNpLKASqK/hFWBPv2YtE3MoIZ9u9+8rsTORgH2+mCxprPQN71pXyReqe0YM7dRTuT50QhLTdfFYamwZjMiRhTfhaZBOcDDf9BYoOPTkw6RhhtI9543qWEtVrw32A5n2vwcSxLGnxaX1PQjqbrvRYmcIGEFsNmr57a9uQYLQUP4veNbmb3mrfow9SKxLdaK2ItE37QDGxzhgYKjGaHp5AT4LoUT5cYy1v/IS/KpNk9Zsdy2pegMFzVKUG533PElm6ZuccuMwzO9RiWlCE7xWLdSLY1PQYSw6SZxzTg7/3UxwdtLbezzWMGkHf5segxKlVQiKWGdNoX2HcZ0NcN8RNn14uZ+TIYRv2UjEi+n6UMTMknjePw7vtu7ljIh6tO+CGxZpQJ2BmwywMPjT918xjS+riKV+xNhRdgk3G6p/LSiG8fjBm0LQH7ydW+iKUhKDhJxTgmFT3PJZ1jnEZgiDmdAyQkSRS/uru5pWT+poTF/+1IwuGoBpKjBAgqvO4ubTm6I0H5Lsq88h6thWfzbO/+Dt40Ojn/aALt0+2y864K6GbSfDXnauhzNj+42e73Zgl0AHOvgjPgaX1LxrobsqHrLZqgG+/xRFulv2q010dIA4gRJSZHHh456SG6fCr5pfSdFAJKRsuPwHQX/dMMTpOyB3zQjMrrH2wvjDG0Pfu4xUkZN8h7w+gp3+VpUDHlfGFyXNiO6OLhq7d2bIYjaQxm3ddQ8h32O1a7vTlFNjdjwbJSAMBVGRzbCIzR3hhPCjJbHeAuyQIbF5rA7LjKuWX2GoV5IbZ9Hf3bbqUST9mrKy81EkciVcHCW8zqVLswuviz1ngEk/7U0nW3+T1WXNulHAA2XLJk/gGasNyGefKFnHt2QyNwP2pfaq8VTalsOKu/JcmL13oFJ2R5EPbvpisKCT/cbuuLC8VG3y5rtuNwTks2g7cT/cIvlmfnfBD3oEL86KviRLQ2oC+zDoE92jSubQShX+CfVvvyp80w/7o1WJmCZzYy3muzyvwSkscfsNgwKjfrxWhVuTB5sdi2/mK/UZ2xDoULgl/hsgRipttRiaI4YXnJuXOk/qcudXyjhQaC3shqpTeqgZs8jMWQPKalhnyKoWuKg00MrLWaOPMuadjkRHsjHtv++gHn9bgKE/BKxdRoGIcZ3/QXDeBRsYrD1le9tg2W8V82uBfpJ5vTUIgwbineTl1C1k/6fwDYfjdhJaQDjmo2nGbT5ouIzVlNSDnLvAZEEdf+Wn4L1xp74CRyo1aUSHFJbm4xWKCrNSE/gwQwjS5MbQPLkns01AfTrjuF4XBJEI9Sgj88G974PzLr1bXd76C8DHIfupErS6I/wwKkVqZktkkf1tPtOqxduw/+hRe8SemEogFfi/ae24WSbjizCOBTXnhmxJ7Q8uuXVHQSZkvtG0AEgAfVVkmgqQD7QsamQ8dbOBpi3HGTDoDXsncYru/4R72v6E97lDc7v+qxftXcr6QyVVRRrYL09QbCkoTOkb2YnpvuaRh/ho2Vj4MmUZRfaaWvUnhM54kMovEZHzQxFiJhQOwSv3eSFTouZLbWPPCSUKdMA9krCLQmvg2m7q14gxeFd5C8WDdpJL7T4cqvGauyWCuRrK2MUqfh17btZbxG+1N9Q6h/rFGVf12slm/OK0u9ym7diNcK8ozdez+lTwzfD8zrwUcq5ai2b9bQ2AtzzuIdJMvaVGSr5QIvXqMpTtH/4NFoRu1oqFQ5zgJXRIp7IK6PhrmPU6hrbmaoIEvko/jxBnJxlOwDL/Zl7NfbvHzraFFFgknALk/NYo4BhDZpboel6uCu+sFL3iszfqfwRZG9K1HY5RMQT38vslz97ubSM7O+AXEF78PTtszkGytetrlub1JT9SvHF73/UKNoThFKAkWz3z0mAjEDR/MZWrinxqsJJBuKYx8hUvL0XdfdMuS31jld9w/+hR5RrZUr9DQAi0AK8YOmiurjEKEdZkwmyBIJFo1hO7j3/py2x45KSwg4sqXAZG3Ql85EA2YxiN73buUp9tbvBP4xNZtbV8J8Ryt1QBiASO87Z+pFhs5Ly6rpLbJdQF7L1UV5R0k7+GClLpHtfFKA3FGFkStWoQaXdySW0xI3Nz7KOi5EwnC8ISstGeO/ScaA4+rZ1rz2gxhecDB0tmcMnNgxUWcXpo/K2llA6AdYmL2+/TSgSFjhRaTxIPIqM8hpp+8v8eFEBBeld9B6fktKVdWukn4lu0AnchLGy7ZF8kgsK7JnoHGE56X9Bky/kKRE2G/l1rrvMFHvQdC5pajX95gGdAj8J8ZMJpsktWFLEEiKXGIRnKPv8jFly0imD56+DQHJpZtMgvu9dansFjcZwDV8FqnGguEWpKzkMApQdJ/jgkyv+2/XXEs41u6VnrlEQ9PDfpvV5rPOuhB3HLLd3bHkmw/+JrO+fCxI9290pU41WdNuac8BJhB5QyEuk5vDtqcBo7/pngXkCPNtD7sqzSBNpYVM1jr+cChOVvlVwyoWECsXc9+thRxwbmSmtEpRsjVfSJmUPsfA4t5QjjGT+W02jubzEedZmSG1redqRdfKb2QT4z3OPYC97Ac2uauviGudnTQT17WuaNsRtAAPu1QxJ1tcuX6aZuDjBtsQ5QE8ZyPUJIYP0nydy4Q5Duk+0zZBZ1DiQv/OTLkJnaCLGjrw2O9REDXv7e1hhP6gqufbmeSRXwIqtEUK1ALOc2KcgWn/WQhJbwUNReUZ+jrz5vRdM/ElqAp0xv0if1sFuVDdAAaDoPp+Qobq5lcn4uZ59J/2hclKZoDN6N09cuAql/VQOGFIWk59blCNT1VJ/6DxGMdN/7kdTHKxStlUQLtHDsgroKXRnFwT4LWf8we+wrq0WqnIG55yndF/LXn75OOp+HM5++UHuHfOnEqLNfv6rVqPwe3/IxYP3UNuSqvfbzgUJApuXwI8X/eu0mP9iOk+ZuT5SIYqUDWWaoo2kFlG2vldQdCWXAnuzrphZr8vsmUB9B75JSM4L+uwPdeng8rrNttLDff9TrIkkYiRSzCPA1XPaP0NxBq2LRX+3H/RwGG54KZzpVlu44az6KlfB+YwkkWc6d5VccUDlh5PFY+qRrRoNAR8at/LBmhzfCIqTzQhCllwgbYVNJWDhY/0wcZ5HyNHCxYbo60zpVKLJypoE+ndW24eh4AStGJjw2jKxkuzvxEzqSo/xEhiW9EfAAe1XqXuReojsYCdh8Hoe4f59cfO53rvz0CBoDhBc4p2VG5uC4iMORNV15RWHesgAKS65PT2GUfOi+HFiT/qi8oNqs6ehv05xfAIk0lpQZuOamT53L4it7kpyPfxeU1+Witp18JZQ2QBnY08puo4Vlzw/RMzTTGAh9JNvupGRWjanTlu388udqDF9KEBinUv+avMdeAOZpzGCii6ckJZ2ePFL3LXng4PE5Q43lIeEzfYVbDRkoXK
jUufxzrBy4flAsoJsdWaVgqnFZDHhltICqN+fTO65vNSNU2cvBwf7+81hRWZ+MT4vvMtQ0UMlc8D9fJeHMQNXYR9UNpKO1RsVnv40PyG1fQ7beKA6RQd8RWCWKfjSKM4OocWFgQqLSTIW9f7sI8A5JzQmIzTSclKJhf56GQWZtVXU3WZ/cEJwhVW2QV/sJXIsoEim2KvYjyVbnV2VgGKiUOm8hthVAq6en3vsu3NWMf9Yr+cLwjYOJsOLE5xgBpSmxCm+5n7lLjkuC80efZG/0nhYvelytxPaGjZ2OUYVZLnFGLomsgveivTsJfyuMOXlHiOh8Bi7vjGA0LggSe3Ki3BOkVl6nd5KeFKoTE+7S/YUNMhn+6FTaH3OOk19X7BtcDtxAj4taq869VVvw/OXWCA8yVndRTTZP8XlLVs9C/TlXs5lgeiUsNJrkSsIHv+T/YxCfP8MYZqSh05SZ5tJXkNZoUfZEEkz9L7j4LNw/MoX8K/zqaVMzlr6rymWd6d1iz9pfOxEOoiBzyzJ/TRplUuTzcZch7Nhs3RG8A+SuJxhaDEWac1tstqFoK13N5ub7g903hrfX6EyCt+hlBklq9sHBBxF8b4zKk8ZWpaCwTv8GxRGCfICyo/4lccWD32px4SkapTShzXkeHm/o8PU3R0fSsNDIObaIbkCCKj2Kk7D+lxfO3BAbjUNTjFlRsDixS8wDQVDsIiaHuUM2gJv1Jg59gMuQTFglv0cAHB9xMapi1oEQA6x0gK5KoXqZb+iVd6PYbThdoSHIOUiClYh2xiTXRSn386etbnqw87pa8Sq7y0f+FxHju024zX8x2XVyy8H2JM4mrLFSpNg2bvUX7Q0WPm1oMT8rY5PX5E6SUkRaFjydgJZ59yCvRQO46dqjkkE+SjWcFexAk454f+0JXyUu6eRmOrSWEQBOKS4iVVROvTttilWSOaG7AhGOWgJEpm/DHsgAFkgdlri/lJpWkd0w2j4O7xdFeNEwfot1P6jLBu3TAW6l8GSPltv/tP3dyopYMqw6WeIu3Dr076d6da9J/eFhEZaHEXnp5hyxnUOV30SKfOhJNvKcRsl3iiAtcepVV0RL3D5BJiyLqV/RB9p/JZEcAnRYNL+jWESWZ4eQvznhGfhqsyfFg6plwFzKaQetvp+Q0BAgcXDY61syL6tNeQBEBg9UAFvgW9kTH6f+iZOf211DuZAPsvd8WTeLLGYwiKxXSikCKR8QCre+9ZsVoHB0BfrWfT1nNPZ00ocjuhHAaEuQFfZJAOvRHc0pbqR8SfLATvMDvIJBIiFjOrO3AkQCBcqMgifodMAQNJthdlCR9nfIAzhqNg+oniKXK2zm6WWqeTOZXH2Oztk+jPrBSSWgU29U7keMmCB+NQrTKYOm84vuPCFv0fNbP46dY78DvH5S72aZNvq9g7sztMPGDOhdHNHOJj5jEckT616rYKAz/nPClSxSy6Pi7mcBJpbiUbM0BbrsHAMDNu2m9+1N2wi2KINHJAVtCR2LBWVTHcMGjYxthRMGs2AumLemtjgQNFNKQ4oTqQNVbW1t/kd+mIdXeOuqc3ZGwDChJ3MNeB53Oz1bhGb++qkKZzMPEeYTSrzmGZ8DTodo6owz1rgoxn0IO18OC1ieZQT2+hhxo71Y2lt2MHOWLwg08LvyYeWJwHWJ7VASvP2QGGPUvLMf/K6p5SBUnWYxKlsxl7By9NiaQr/RcvJCIBKL3NqPqPtYEEoxcYrXmpKy2whBbxT+IpmnpQj5WE40IhJnKNWLB2KyS8nHKu/TEQaYeKgXAh1LgOb+6YWqM5CZtH3OkF90MuQezecF4E/+ZPaiGVoQWYQSR+VUPEiM/Qa/rrKJTj5+Qy4+17poRfut50kPKaeIFWhKBBcw0NLZtR82qeZ9YNVYpYdufTq+RLWny0ZyD+a1rt4N4IOrU0QnT1wvPmlQmkTii9RKCCz9a1IpZ8xt98HgAm38Lvf2LsbK3pN1gI0Q8DfMUtTiuhkSyLR6Aj0RpuXYstUMn/GLsgAb7XyXjKcooEdBfFJ7YBJHEYEisIxWM6uJQtwyk9+fRXn73uYMBbGEL+cu6F1n+vzGXwjlcRD9RrwFt3oWIfCz5Tlq5ujBQqSj8d7qpETN2iZrrltlQHN7w8yfHbIG5kO+0hppm9TP+N42ad+pTArfqOcIIiYqrcGv8XD0xRSmSX6j610Y8azeObQLEplrblKuS7NQubXhFw0DIhvAEow7P1RZkXWZA/jTmATYcRzo2mH/SRP+Wl6i0v7BRoORyp+nxgLY2pziDzA+Zwutl779L3REoPFsHBinJRJCkBa1KxjP3kC0Q8zu06KcHsV018PrOh+fhuAcWQVORJcKyDLQyGTSg+SjtanEbh26l0s444MAg+Octp/Rfpwq3mi81CVw9MIYPpk3y7v+c3ISx4IASSyBjj8giKewiEkXG+/9f//+p2P+KR3xy788924Vd6IMcB4RMmE3Z4WHnXunFFHj7YRM6mkyScVXVTrsYQRxUHq1vyZxS5k3EwzkT5yegQIlkki5/a5Touu8JLPgLcgHFfkZCuSgaEr6sNdJLghmgKMlj+8VtEzUnVFLw0TQk8zXBpwmbIrdrG8rJTSkXxZj3/qZIzyvidYQzU0L1wY1b/F/VKIwg2uQUldMtPEvSpj4yr8bLzarv549WBrTY0KozmE/4Ev0b2Wy3t0yufkjY85U276GOHFr7IR2q1c79d4Fbw9oD9dLUJtM7aid+5xa1WQwrsk7yvoFJBLddX59AVgP/YczQOtlv/HqT4xwecwAe8q/nR0i9pnFJ6auxPKtay/OkrkzqiW8BoWsKE2L7Ajih8UMJ52r3m6eNdlFWxZQH3vWh2eptIkDsf6Tbg2EgBgAAwBgf27Zt27Zt27Zt27Zt27ZtdYgOcpGjX3kekvrL+AirgGBI4XDXVn58TFT4xFUMcZRZZTdETojgvIxisZ0nyxdFPrLQWDR6esJ9wV52jVJ0LPLRQ9j7qgHzsuHLrpVDSGJ7Yjzqw13w5BBSmOKw1vKVajR1rwTrTLnQOqB3Kq53MNr2d1V3jOOMQIZ6Uogkc4C6tkcqphhtCtenvNqnoHXpH7ybFvC9zPe7An8hW1O2FuWfAKhlTC6SecLz0jtzWdeEVr9eJVbzDGPW6lXcDX1FtNOj/fN6zBFLgKClRr9avdra02Gn0Awi5wEgoy3azNmZmNe5pM8h30r9oS0CGX5WBHh9Oljw1BGBPEMZuNMlThly/afZ8owlgIaA+8w2BIh4bwK/spVESZFCYmU4z4K5Kiy5lfirCRPPEPy8p0EO3BkRqmuqyxW+Z52UpNlLmbY9TDdDmZh1T2uzjrspip5AYDOBWt4FQJ6HPU/rjKYRQnLZtpnrgDdSLiHO0kv7qtVJC7KyBNvBfqi5a52nBaAomjRXUj4P319mRJSYBzWsr7IZN28tva1TYQZ1IFIIAASXACyqZUReBTbbS/aooBJh+Yn4GmNyborwwhEaBpY7310G3N7DBjm0vo1tMaXd+6ZLlDt9N9O9wu6alxHSklQRRqLmax7t
VT8hYnJI39aWT7jLf8E/ezrgOH8PUNgjSaBbO556e9k+PF6ybwjsNYhMbPEf+Bcer6ViiM6UWS1mQCy23P5/fxH/ck6YmlRI2tRdcJJD77d581LK9l49eqBEJPkBLKPsk3+YSJycKKrH5po6ORp6TCWOuJN6rz5JS0Vi6+atdARVfeYFLxMdJVgE2uyQI8hlPeSqzG/RxdOccD6b72vK567ptJMLNnEM5icdMREaSvqsfMLPBTHJI1tWukoIYZWWqPTAfAcD1F40vTPorT36XQ0MJnApTMvYMxY9miNN9DrhMNdZH2btgCTcCByX33WF5aIVr21s57fYaWulNwuZH9sYFQK5k0oORrLA/6Fadkg/rM5J15R9zMDK7SP1wKPQhisctEi8HE9FwOPGLze+AKRVQhF+7oFbMD6D6GZ5Zp+vdqKF+fMucClUEIfGfusK32+WYusS9Pv8fdSxvUbdnfaeHYiBlFYGc4ev8Q/C358sxaN0hSBY+HqcA2p95xg+0d4unKnMbkwlAWWNwb7bke/ZI7AKN+pgYKGyztYPazRgBGkT08IIpJBor8ZCjdd9HERhvfjLQLoQcXYi0jsX7BMx2UhQXFsx+2y1ENkQwj/tE5FJYIkyGRdZgFEDYmoSQsuMkvJq8haprtO/xez7hDkVxf2DYe+cHg6XiIdQZ17E6tsW3P2EBCn2YL3M5RfY8KWRBIKQf3MWYhWK2spT+HBLZJkUZT3uYj2lcy8tRVFSYC1jZUP6jTrHjmbWr5BqFZTz9t+9kXb15v0LBZP7TYQA0UsrLBCLPNtuyuR7x4d7J5ZQnTapKmZ1kqNw2ooDxHZ1bBsTXnqnGf3eWKfGDuJKwOJ4154IaYqwAKW95YsCUA1rJskJypTgRhHQUHYHj5/C/U04HH3ugoDL4hZIm+N5HMk3k0YiHcKgAZusSLoXykCJqvpMdDdc6m+47EHsYkX4fRkVwFKPipf44VNXmiOr8SR7lOeXPdzo18B0I0z8xFSjjulLnfWR2KDNHX8c/OQo06wwCq3vX0JCQ7n5uL1218y0550oV8Jc1+6tBH6MyLQ/fjaokkszmDAWvfR02fVdMiT20j7vOGsWHBKiy8u/dmbRhbbNXP4Nsb1PbE9adUWrgqA+yZ8FVMwXrLtGKcdqdVXW2cw6sPkCDNLb7ADfebdEGv1VCGPP9BbKAh68/2ytSPs0MRZlRdU2fjcw2/wmhrcHkRcS+1yMql44cAr24wEEXmmVtGlCXDQCZJjiB7xUPN6vziCtPc1z7K0KqdzSJQvadhlEW8J6Bgdg8i4IMpOeV5X++q5rgxzkD0SDyxnHWIHbvDG2RvpHq33vvjPPAnwcr+qg0Sqmgu5ZZdaW8sfTRkSrsZadjh41Q8riJ852DGxjL20t6S9tIkKS7I4hn9NpQuxaTZ0Vhn0A8Ylk6ZRmGpbpeq2/FlZd0CbsafZety6RDEnMOHJtDfHCLkE5l0qT/9ZIDldkrhYBAdnOLuEdRlAzJYl0Zp1Cs/av96zZ+4serMApgd0x3RYxC9EqwZXMRge6IKewCpo5pxTMgo+BRdPAfkYUFIiIell9sCeLKy3bax9K9nlYeC9k5RVngJyTSF856OD9grlpEH6RemnIIhG/f9e5EQZfsdBGRPsYUJyXAYUT1TW7h+KW76qQMQXlOHmUxhC+EYyEBOfYg0N4NUNWwsx+K1R0e8Qx1x9ATEdLEF531mFBxc/Pjq4a5scgjRDkCp4L78AdRfIPMTE6bYZHNuyY6bEFdFtltOjR/1vtPePg/d7ND5gM9OmKWAV7+5KsOHIXC9shFcZYy2TmW5kdxk+xdaKkKQzmcAB39UbW1PStcfu6NYo47Rm0H1sD6Nvx3hw4+Q07Q2IkYpdUW52TVP+KrOStV0WZUaNrjTC9Kx70Y5fyXIZNXNgLTKRprP+0Ivk02snmDFBpjtSzIf1IG+9dtQRpl384abwbKex1R3FqOLOb06YrJ9TYNbqxUSH8TJR5a6N6SqZMeE6y403ML9/A1drw2cKCCp5OHKEXlvEOWTUm8BzIUfWmxo5b7s8Pmu5FEO7TuHkZxtvxuVPVFvyIDNxwi1PglWIUI470p/B43ncUaXclFvk2+JjgUv2kQ+t2NX1d7LUUpiHkNW3o70eIgkz88APThrjZ4hCm1l64nbmF1OLn0RPRpYgUZgEB32Ok8yokrrVE75Xn617cILo3nrSS+dP3Gr5h14vUnv7ZWGj5eXZXNYd5C11WUKBZUhxlKSnGc3PK2ezwbJA1aMC7H4VrJsq16CpRJ2QEQx6LmrVJzYFYzmxG3f4PRy+GE3XcY9gAwZqlOecDKXq3M5LPouLzfQSaNEBYCRib3w3uQWIWR5Pds6LuZHHJEvsXf5pzt9gDIc9qn3wn90LrqjQgtO7pDJ6FKQJZMPoRKumCX9RypYNI3tyZNEcBxun3ELUE+Khstb7QTHMLB8Obr1ZGKEi7tV7oZVkC8iV+iRUBhDu5UVMa/Yt3UAGV2ZfBsP3+xs55Bq5NYftqOwJB9qSMTlU21yfUqWxW564itqklSM5G7YdGbcc6fq9uciNYdhbk57nKLkP+C1dIejaliMsjYc2Di1B0ngT807RnTjJeZwpiVcXH4O3BessNSztpG/KDn4p9I+JXDqW/LdASYW4Umfn4vKJxgzpXn+k+WgbJECpw5KXjX8FDSjbSw4FBBWjRjCY4BeEbBR//bO5GO4PH5th4CAUtaIShbOjRDkvi67JWQ+HVP8vhlICJLax9pyKRto/a0lrQjVG5cU0CddaHxwyfLMjleWOnEW3RGXow+3UdOysVhsrxTRA++HyRAwlcAnlBbyw/YCqzRvo8sP12UzSp/gbCC7Dm2G+Qot0nQ2N7dPg1ZXFhTM48hGPvzOBN1Y1PZ7HMLIlZmSXWyXEf1ArI3p/HolLphmDK8/d4XYONKyE934Ii2eXjowuYYiPgHRCMC1PRDCw0onbe/mGxHNZXlHU3xs+U8q2vE65+UGu0qHq4yvgxoDPLr4a7tOl0GR541Dk/LBjsD2nSHCf2tumat3qrVDQQ1hqPSBqlvJaUVTg9503/lWt7d/FMPCev8HT+Ys1KImTT+RxH3tVLpPJQpfeIWF9tsM1eNb0MCEF0L/ruJsR030J0HgNmQYiwcJCWsw18xJOY7MuGHznJaLZjmneT+pYEwPz88brl6Z26rAZp58X+l+NxEL7tzTIbIPsgBc7odPnfjbej1g4Hd8c9U+MxAjMGYEng82suD3D2ROVCupxBhbgGyy4Abutw80P2QA2K1DoDU2Z3UqFNSZXi8T/+KFZrHvGvhkzNJrTPlrDooIPAgOU+JIhIjfyW5QYcMlL5FPFtoTftIIWj/Pif0RTHOjwPL2Tu+qFY63Qs4/yxAitBF3a1wBNFGAUl5FX9Rfo1aLPI41ykaLlqIYKSZLaM+Vt0os/4GTRF0hmvbJbWgjuriCl+Jtf9kg4+lNZhq8TLI6RLmpH5DoNCQ+awCIw5XZKLWnYtel4BoK2skD2F3+a9XuX+JPGywjaNd/BVg6dbC8Ff3hPKJgw
Bnj/uJP/69NqPdYwM+pCWC+ofN1wVPjwwLf/OC/z4v0oknQ94XQvvqiyuTINOiS2wL7LBCZCWPpu72Qqq4cZQh2Rbn+lBIVbUL6AKkM2His47waVSqOrSoH+mj6VBnokLya5JWETUBq/j/G8r61r+eYXw/aUmAHBiDsFZuKGThc0i1w4rEu9n7jv1eq144/lqL0z3K0ESH9ZHjRQTi4YSQzqjjmC0eN+DGxCgfuehEy4fuIwG2BMbi0kUlX94Bjed3N92dHv0Qmz4WlX02Kla8rhydN/VuwA7iF4kIAdEmnZ49+4IkglgVqjCNxIttCj/4F8A4383JXH/1Kq3D2fjRv+2Gzjhbt/UTeRmLnZxqPpjO1SIzS2NYMoFM/vlD9zutxJwOvO0FinOwCN0GzvJ0EbM6mlqTsT5F3/ecqmK+IFEfblvSfH7q5DnwJJQ1BrhKF6gV49l3SUt3DUJBd06cj+NbC3WvLf11wkA5hzurOfQUCDm4aTRx6qlNaS79+jSxXsxgokKWc2HpyuG0utEwzqV0+6daXo8hWvguUepEw7hf2azwCdDWNW9NluemZTjrRW2/PkYkAjSY7zYLn3GKDmhZB0FyJG7cRGZ0C55sYdvjvQmW5awk6aZoBREWKydr6Qv398mHipyMHTlNz+Mmd7/+e4U0s5wVLCS66pOCFOdS+wAcyndNygGub87dbDYOsabqswHuxj9pk/647uQb7mQCnkmd7ash8moeHlP7v8JyyWP6WuPC2FAO8HNBD2Tk5BytagaN7JPDyc474CVgQqgxbp2b9B4tU8cWUMNzF2OfM0urBPRnPy0BtX01s48GHo9n9uofO1JVtQ8wQZFCx3ToSXPwgaPbd8kAqTHe2HQAjz5X+NL2wjY2gNmAZkbcBDMefoMtURfHWQcjnJyBpcZFFid9B7mlGzOwXGshjZ4rqBz+tDDYf0Aaqsp40FUWGkICS3eBfbhd9nCrjoZZY9YuujRLrs7WXongoEN5xeMJAtaqxfrHFuQ6Y2hNehaLcSCc3ayXPv6BUt9IhLWkSSPrO3AvRDsK9/o0B4EmL3UxFcFmed2u1LspCw39dB92ESgJ64brfKUJa0mFu9/8x0osDeUxrtGKcmrYzHO1FSnBmVLRKrJCKyC81XHIdaZPJUQNpNy9AHEwlE5lrR75nfXwKmWKP+Mx/CBOf32r1odNVWRyQ2BXR2qA2SuX0X5PUK4N8RFuL2QAKZEnbhCvHMatf0V0D8C6jfNcZT5vJBQnaUFb63P1gzXiekf42PL9GRnjNzk098LvOC8ir95IV3jO8pEtPY+SqwlIZPmlVM8BynEThnVngypGnhFnQIwsuixVZEX9vpkEyPHGb/b9xlAtgPQaS42z1HdiVutWLAryPZF6Mr3awGgK+cG1jKWkNIz8ql2LdRtzzvgNkBzzCmdq3E153mPIHJUjwP23Cv+Y15ozB/Ff3xv8Bfp243n3zlIIJiP6siWA3+/ehVRSLvMUL7J3H9m4croh38jOX9ZHWHSv9sr5Xz0GnAyNEpaAwA/Ow/JyEVnbNG+SuTm6ywcSyGlxcuDjKyK+LCI9Jpr+fJa/4MzzfJ0Y4JmuVN6evrqr9vAwzl/ozNhcaMASTDFaWWG9cGuiGnvppN/USPnnkOIFbZzSNTczQJyaakQWwSw1DRYRREsdHBWi+C3bOH/HM/zXjwiR+RAWkFjEpukqgheac/zcGGpeu9yBshV+NkGrobDYDuhwgnnd/IDuMntjkukO7ooXU8TvIUouLk70h9qb43w3lwh7TI32YRkFMy3J79Q7S59uYihs4aucwQqyvd/IxaKKYvVkk/aC2FfTcfMc5/oo3IPZx/XprLkUJNv0hzKwIY7eKjscI+ybj2G+Enc3WWQ2gqNLRrGF6Tn4/gbsjcL5UOH4sdhZ/FbkQV/8QPoJ65OqIpb9r0DHfz0j0XpbDnj4bBdin4lgFYmbSq50RDN2LRu7/C3KFrrIJ28+N2iTSVZBIAP+/XnDb3EZsupDLNQovsBjskW9FegtmehGGLRKh/d/osTfYec4nt+WL3JTFVKDyW2WS2LyZeZI0efOoPUrxeciowwxBdTRpO0ksBzl+zpcmJNdzfEb1WOFknMnMg46FXNk/sdYzxacylvld9b6e/HdqeUTUtJQuFHq/wxRqA9RDbKPjxgiKWj1b8wDfxxGc1Jw2hNghr3J+d9FiSUD3nylKQQbKt+TOkClm0WSLtm24na4K2iKyQjwnj3jaxX0t8oEEuoLFOYgF81gSe9NiauE7au5SCkGR+56gAfyiPrUt45bUPSfjAZVRp72VImAeyuL8blHnXLLMWQHFiIDIIC3ORrbrEOuJBA37jAoWjpHU4JWlWKxWeG9MnJJfyp88d4ZHHOwb//luUuyfVVVB8BSKXWVDZJQNLLetNbEsULOXi6Z/Iwyk+F6fV9h1j5EJmsft/S2grauTj6rKuye9KmNxPW8Xa4txN1vqp7frpQpgfrtgmuIHsEKrXtRQGbnAZnx9MwXVM1y+XEKldK0IbEmqCScv27B8dOCxMbp0zwl4TbCYAtQN3dr6R1hW9AuQro1MrGmMMYN9fNZ7O7ZzmvSc3fE2J1Jcjg93Z3msSTZJ0GGRNRXIJ7GxGhzx85jfaFCPQhMo2jB+swncKwiASzqS1RbROAcMqBMUZeP99xnxj8M2FfVhGuXpgow3XtpZKFx7wRA4qwoLyS3Tnfzb+Sk86KrftlKiuJaK6mCMM4sS32NDDLiHN6MIem1XZqC4/n4W7bSRAQPjNb5koeNl7USLcL1DvqSz6+0tWKB+SK3xWfEVGptDzq1NwkD3ANuHxpFpdwm28kpxMJ7CsXyPjYTv7ZvdK3gFTDqtY+O+ByCnlK7siT7I5y8cnJ818ukIvNkRaNF/OdLX8jGp8gjHXRliDC3gfhadp7+M5rQ4yD7xMLkZJ7ZIm6PP8UEoBxnk2tbi3l6W9FULOgOV6qZHB0g45UWjmI0N7bMhgo5NAMtarxZ3MiuhSadFOGeCtGxh4lhAdjNdc9aKSTV56DnFkF/zuuUeIpb1VYbyWUXN67zNgkRkcnxM9vDLTswQ3pkfW4DU/JTMcbkVVqt9uBQ8rPbIz1jL/KU8iSfqcqdUYIkGMxwkHW9RJ+KrZcysUlJ+EAqmudgkFT5Y/DMBq2cGgzjAFz2xU38xEZQKIVDPnoP4ZtlEz4YPb/N3pviXs+Yqav06qMQ1RbprIKKPD79ie8D+kEMI+QpHGMqpgUGEo341WY5Ti/VGE0nLvR7mleqBPYNkEwnioJgqT5VCjjbsyGaA7zIj/feywPSnrHcoZWPOXycVQA97MB1KuDQ8W7EMVmeEybEgUkff6eeItUxTLqXA4q5BWURo+/vgCAgK6BsJSPRCD1huUexASjhr+wk2YUsp2mHdqj2FUPLwC/O6nzTFJ9hB0v1YqDfvhFDAY1LZVoGa8/iLaivIrkf6H4QJ7IfRdI2Sg019t3KeOLM6MGh0yhScH2UCFAeyA9y5Sv1+gckVg4pxEj8PzNM6WMA336W6ZktIcg74AFjC0czB
/UwACRoo+lQ4ypWI4ZucbeFdqnOsTqX4cRe4OTFuqk3y9NEZE5cWRQjCWP1m9wrYFqkiT5WXGpPH78ylPbhCuReDkNHAOBdb/p1x6tCPVWY+GigRVMeLP1+bE46E2+E7xD70CJkr/bprizexo4zexIW+Ye/GSasuXnA9S1+SrH0MQr60sFbg9RSZj/KJKODJYHvxluopkTa+ERayLKCy/vDRrGKjCB4XoEINwUjmc84Q67PQlxaWNLI7c+1i/j2uta6t0miOBKLN7lP3kTHbR2clEMSvfrrz1bgtMYAbBzyj8i9An3wuKhQ9aHjoIsYEzcfqW2alN5uke51tO5vYmQp34RLy8q9F6YidN9eJfa3hbFBjN9VB3Q4f2SgmlvEJZVDGs7PMIrCWv9tp7zXC+cokeuQ5du5NO02E3l5VkUvtL2NuHW8+Evx4ic0G36AGM8vRPBrVt4fPbve8CgWoyIhsvtwTpFeSLMPtPTzrh/zrNBV9O5l52KjjMxyc55hjQqXUwCcS8aQyO8yroiI8VGra7ZWb54oqxAV+wAT+2TxkrMVlO/5aEPSNP0g/6wVdeq0B3fFrwUx6TDbWTNbSiOZrseO6EGTK3LdAWxFGLe8NVFg3gSQC+fvGqfyzdL2kE/ZGgTOAK/7rwtptBf8DNSt2hxN5ver7b8Qkiyo5x2zlEAycSdRuOf+CmkZF7Vh/tQA8P2LqhDVcmY/kIoYRDXTXQnbQoo1VrmnmdFS5RW9CjpgvvB55dYYJqceQltfZDppZCYVRF8Yy7h/NRVsaLvFPF4FDrKQ9DpJfe5aF5NSXuUv2L4amCwUyew+QodgchZcSW6/eK4l+UCczuk3O2AVKwTp8/mW77hwa4ZUa58v5UpY9opIPbdKKtbbWbMpW3qD8mVeA9g9oqnUkB0RBGSvt8TcXdVQkBTAuexIXdEymd9Vii7WebEU9WRmCAzd2UT9pBSikeyGLnvBnLZSOKqdFmjCyZNQ9u8tkfmQoDmlYL+wS+9fm+iz9MlNJxoVaHbREvcozS+URCPnSvTkGgcECrDmMP4nmylZ141om6xPbrCrAaIuYcHscT1kBpcp4bM9Z+khs18/z8iG+cwqfbN9wc85qu02wO6dBTeZ8awFrET3xZSVAd+mvZ4bfs0yg8rRwCqNZ8H4q5pZfRHtNf1sV6Gg4whnwj+HlNbS5tbQNnhDAXQ8cJElkvn4AwtAA5ioYGdUBQOiOmZ4HSM1QhvaLXnzJ6Ze4YU37B/M8yaEFTIiG3xbzbQvKGlI+nO7f8k774kwBl+MoMFQw2kZkWTf/6E8AxpBJAkBxoc8ovxhKGopRCVDpoGZHnfWwNE8AzySwrTGf3OKoXibGxOFdKIKh0FOitBWFTVHEXb2n7dZ98ztgwtiMx3psHttn8XXe9tKVD0qsE2AJTMF6YwHAwzY5RXzEpdJt6XiCWzpqE9v77IKzDz+y9thumJk1MdHPMSGDMKIzG5Hus87qgemGEYwv85DFYuBcq17RF9y7WtSFHA5zysnuFfYXMSA6OQnH5aHc6QR9q4IuWFgHmeJFbaeD2GQyTwlTXVu+H/09qdNjRCmjdGx0aErvFP3OJw6NVkcfBMtdudJDnzI1KnE6uMlgh3RydHp181dFG20TwDCFbbUSzIIHophU1vvvHVEioi5DIcuf9+WyWQcbRzWvR/wTh+01XSv97GSmVsViP3BhatQ5QWr1W9sbtd6xt0gIHCAdafeKcwit+Bmq4NHr1orxYR5ykTigZX/3zDKH9Tfj9HCe7OrocX9dW5jqpSiBi2vo+Vzg4ldNu8S7fjn/USZiwr80q4tmkttS2OOSY2Ac+knSWE69wkRAUMBhW9JkwZLByK2Ufx/Qf4eTGZo+plTHxpJ+Tr4JHeoFMO6HjXV/c0tAQTfhYidI5AfhRZqPdYTXQ57VoOCKz8LEaWnekZYxeiy7gqhY98VWGFLQUUn4XkJVbb1XTV+D2CMlWOg024T0B9GMtvnh/xGjgrrlG7bj4xKnuuRuOR66olSib2BPoiWTdyXsDREl6tEYsiQllzufSIuuxfGxQLncCFmvOg319D0usyLLt2SyzPX43KFmZt8SnruNHmB0ptumvffz+lEZ1vfWGzXC3zHO+FBac0/Oc36gsec+CFXgU6p80pazTbXOY1s5P/O8NRkW0UptZT0EdP4ihuyZcfebj6h3Z63HpJwN/0LSVcFOIylhJ80Gm1kRSGwFEJd0QKOw5wyglCgMhzo2apx7L3+GHe9GZPzYFStKnifDjo37bjP6pdB0/EV1g1vYBh4EmkuLU4pFFVjingfdbm4IMqOxBzfDpA4wFL3M6H4z6M12E+1B379uiFUW+255LE0Lzy+EWgR4u1XNC0Pr5CJjVxwWOC4Jjnc9r9FMUmF9sK+JIWSmMw8QFKBsEOyJkvW29RCO2EQ1rPRmhLB6l8F/rLmbpr6hBRoAOVB+WCK+Ma6jpQe4w/x5sBDG3G2LBc+JN/I17S62xPIHs9h5qdglyuHOeLwFAmFGwBsucz+NlhbIh/4pFjw2dG07lQraMCG3E+/lksviVR32IPv4oIXr4Ppq1Bw5nCKIQAGpwQ6pyA6LIBdoBlRaR+qScpz6lFbweNFPBBj/ew49g317/09hpJaDN1Cz3T669Vl6c7vkzSHu1m0ylmUAAZ7+DC74elfg+/PFM8SoJwktN2M0f+ImaTof2UIdxoZfwg1KwiJHy0Iv9kwwyIJD1uW4933uYshkMpaOv72SjIvNL0Sd6h7aa1Vx8mwSmvx0Q0b6T5I+Paewn5pHkBZamLzQw7AxHo0We7Mlk9R71tZuB1K8lB30JOJY7CcQo8nAczqzQDMT0NkI5Emb8Y2j2AyXrtty/MCJX04hqyYco2IsPdiUVaXxAMyuXqaMjDiAsqTpWbumxMXvBCfv/BlfwYZ4ILvN50YNy5MoQgE1cSzKqvui6SQSGjk6kauKReIV19HJU43oRbySC2kNN9Jlgb9jhLDkeqjyAaOZmuGQIBByLr4YevXLGNB7Xm7AUpLf/gp1XZcG6qa4nEmXspyHdIKIUeI29PYPBTlEeWbFZXh3J9y8mpWXax2ZCtCN3m2w440CDvZ4vGyazQtMbTEiHOPknbPNPEkwStW5SJ7PP+GPWNI2cJlU6rSd129wVppbdLqrXCLbJBsgDWmT13e5qopieOwibXNihXBVyDK13ASCaHvvjiduQxX6fHrwMIdhmVfpEJGf2jdjzej0YotdD2e/hy8PvanTij2gmq4ouEc2TgWt/MoVqucSHvXpz0SCxz1LOGzeZSocKBeLBqf12WiTEpPihp9W53+DXQLuBCnQd1CH1snV1uZVhftqeEIM7Cd5rPVv8JQvrvMi2UjkkLWGMEU+uO+dc/38WkYgrz73mPrqm5cfY668E0nnvWp7aWCfaoIIueOxqBQD1+tZmuCj4kwgbtBoPftrN5a8eJTK/79RBJpHZxwUk3fHiFaunHhZNRAe/dutmPGBJQ4ZO2+ahJSjuh9/2ed79qNydyIFQBhUlK2yEZ4IpBMHfaPukE6DkADNcVQ
skvR/Z0W/0xSUNHdlezS1q3mh9SMWrlGWD78sl7nikw7Yyz1cl06g59yvHfIzHAzTeW033NsaMDJze2tzlrr2mG31oLqiu8XYQNOO+JM9KAWAwmGes5jTgFerHaUNxkrmh+kPWfCAkEF255wMOpvWNO4bEKqSti8jOrRB1GBxVxQP1bhdTWg4CtHuOCIaw1OfEIgP+GXJusvPLACd3MHepHO/9invYXzsPefJGpo3zvafkM6XAKecHxOnFYD366ZzFxNBHmUx4Tez8h/sUo9uEbd9f0OmX00OXrR/oBoKU4QS4lW7XLmsJyZ6gkbpkXlcgh1MVBbS496Uhik73hber2oVS2rw+ElsXCySwxErbAnp15BvxA2jeV4ui9zi4IYPxfqkb5eRa3ZY7isdltgGt7YLWy4xJfgbfhZSP7GTWs0agMdSHC6KBuookBq+Z+YtuObEZOzaA5U9yHnFuUjlumL5ozSqEWMWQGT+fO9Vlcu3Ur7VNeJGI1rC2smBMS+dnkoNmKmXMIcAlBxW2Ixe2Vu567WFjBCybau9drVuRmcVYZK5Jv/eyhkKnz7jZKzlGottffbcg6W3H2E4NmbjJ6gVMVtPbuXGPlducaAK9vrGYAJ0vWDog3tgF0Ss25cx2lE2jFRDoQKeyUSdeBAvIqOPMj+dMtsGlI2c8alnqaDHRbvmTLPpssKl18375c5wPpANzXSU68EmIHIblh/B8xD4UJorzGRGFRoHxq6VOiqNJJrB+Ujz8e70OWKgS2rZ94twA6AAk1WIvDZNOfkj0o/5Gqb1iPWAb49mVbFoSCXvvz7kxEKQDw7F7UgqTi5YNO9f2GFFP+F+AQUolqLdk3QjjRSmW2/bU02sDqYgfTEe0gwPLYFytJk39COA16DXWsaCAvKiWTaU90ahtUiEsZUB7cFRvD6z3rLM7hCPbTq/S075wGRBV7Hlet3B7WIi6iiD9VgeZkPd8dtbEC1BrIzV0igUZcAT+TxU3CbZOD9i8JoJEKkrSoriP578fShKoeU5KMCFlsv2q92GG1WTmZwQX/MpcycSCY6Gw2XYqoWyat2W2JL8oix2AuN3P0FZAhjVsGIYxAiscsdaa6J7LI1gDAVqbDdl3f4j/dyoqfwVWhaG4RVEYyuDb+JhWQvHhZjDx47zg8YUjW62bPiqiJij25ctTzWlwgaALukJjlMWFNy+jFu4gm+Qp6+5USLyGWN2PwnN1loTaDaKB+eTRJxHIPj3rLa7qnBYReKmMuFg/DIEe9vTpIaRKMu1CLy7ueiPJV4XzHkTeE4xLGFCH4CUYjbqJHxOb6ILba8XrWOun77Ne8CUIqMamCHytLcqfewInC5QJlrAAuqpG1DWkELOvl97lmhbnBWzVQNzb19omLLgaMMFP2qDijXkQrd2f1qqyefFb1FipPnuXwB5Er6RhHWoT5tXThuzyyjbTinU6QCkzluUaee34SXJ11NNkFeKxEFh5FhPLANIqu3Gl2STAYviO2BycPt0RJhXF/jCgl+e5aVcYcZ3Hbaw5w4wqGvlsWSckFGhKzVsGW71PjEzkLMgfSZCkkDfpw6WoaTrfYDL3leTjQOgRnDOtQvalULP2+ZYTVFr5ZB8pb8lcxnCSRtUYfkVudrSere+6Tp99+p299WtZ9ABIwH2XD5YM+VF5XCwf4mG5GlQ6Ya2uRTp0j/a/kY9K+4izV+FgBlBUTuDX6bTEHoL2iJqQB2jnNMAyUKgIOe0Z/OfxZZ6tCmNHSn8QU6o1ulNH4LtysQUTEJElr4HeinKYPNPnWg20jGt7sq7lECXRuxjJon74P9omLLOWUq7c+SZzxki+IJStMJO2UZvgLaPxzq+ATNMfOwAgCYvS5as7J0ELuu8PnY+OtijRBXIIXg4s+1/maakxEtgJcjJK20FvXV635JIm9cR7jgkpLkSXSMOI5T/FcbvjhWqRbqJJbjFgyC3cG06bfrLZtDRDV7Lna4v58jOv1h+R3QtYmLvZD0JVQ9JnPfmZWjZBVUKgI2D335ch/51kShJPWzDPiqImGnCTeD4ZsRvsVoMEqKOSqzFRdjE88b7Kg5Ddy3nRdPaKS9zWJSAb6ZpsGyT5I8Vc6tg/QAbijL4ZQ0oeKg/PTTRwmYMDJh+s8tq/BfImGHllrHqAHCT6jxIWz34yIrc0PSsT2Yen5IJ4Qgl9/I2ArXzjFeveTI1JwY+JZs5WRxeDPjcxKD+vgLkl9O+nFF2ih0HT+cVxaZvHCctcGjlA7UUvbiB2q1wwaUD4VGl4ttfCpwy3IS4vMVnwEGjyXDmMi3UqEW1iDcpRf4dOQBK/MbK90h0c2ZevEQ68t/bLhIKWNpFP1PhBr59jkaX24lxIld7za7xRx9JhfVEjP1HiUDRv6z86xeUCKFtgY8mZqrfpQ2R3udklgS/+If9RYXLdEPRw9zqiJmXkXTkhJTh442idcr5MXJH2w2w06LShSh512peMeqHvfPh+9yHPsdMQrAl4wmI0w34R/RwrjQ8T7uwPwB/uKmQuH9JQkW2iYMdHoM8xFGKwlJy1BpmdqdwOYbVEFgv2bNdeOggjPS5vo9/3lupJ2k22zAIvETpZP+wtaerHkvpbb3J0Cczl0hMkQKRml6eZ74xx4cOTrixXRK3v4EHXMa2I/HMoZzBUz9iCXYsIb+QFc6rTKH70tScw7HAg1g/EPNvfXeUsBhI2IFgwv6niTQucuL0r4fa+tpIYnlC4mYkUGjfio76OA3lgGtbcv4McyInvVKOtuYEugikZPs1mRI+OChIQhJe4BCeKdTBfFOJT+HR7HFD4Do9qwQWxm2Ni9xAqtSBSlf/di6mjYyyRDWGOCatlF6iDt+X9yUaxlF+lthzvAcqwzff0mzZT4wXTiAbubYOvsnrsZjn4aufwbKok/+udL5TqEizyZ6tuIR0Obt8bo5O6PW8r09G30wT2147aOFykFFNNY4U+0NNChZK9F7qW114AAoyXSdBS4WdoJbeKV6317RFFCFcaN9r24I88Af1wiyjM21q+IdFKvcAUaZrbgVh58uCo3nKZcjcq5/hblaVqvMj5C+Zo4EKHvouctuvYMDteLg05l9j0qIBwVn4X66q8ABcqE0hKVGduxH+09JtpxVPKdhQM6N2+1YhJ2iV4+QMa7xtEzO8v/Xk1Bjm+qtv0rMOLads82LIOpqTnIeUgDMEGEPeG/M/kD/taVaSjgsuPy4NkJVqNbUsQhhN0Badh/ALWARyomIxZzSehSusjjn+9p+ItilOk/9u0D9ZBlmMhG93XTJVsSn24ZUk8Os+ILCOpODYJ7S9+9oMXhyByJT2bkSPjrjQjXggAG2fPAqjBTKsUHkklOM9qUODPu5nRO8xRSLKWylBSzYcgruAmVC1zQt1HnaejeX+Y620Id/4ok31I6Za7JKuPtexi6Om0C07cWasc7f9l/Sw5AWBS2eCaSYYnKJ1Jqm2HDK2WdcNArX+jddxW+yR/bFBvUeHb7GYjosW88pk6jxJPuv38OFHzPw+oTaUlUhIeCvHkctDm9yXpq+CxLhhAtDR0cRL6l
gFgBn/yIlKP609yA3nO3bbziMIpKFhMvBU/QDuaeBky5AGKUxRach1GO2E7sRDOLVbR9hkqjrxTE2vUKtRKa+QaZz6OLyMIi4OuIlKR05i2pnzeMIskhrEg/nIxi6y4BFLcX8RVMuKEV/RpRQjij4XyBhl9N5UU5KIyCFJ4OAIn5CYjZrMqjaa1bJHwnTnc7lFDoA+D2LDEmtWUdik6vNXJCuTn5shWfYuJa2RdBekWCfqQOdPEv+SOB3HZnp+9+Rj5bIqq6qSdVxmj9FbN3C+7RCspMFLtAtvASHAqgT6VaTTYNtF24fDyvJDFuxjUxDenJiGczQRY2zhU164W6xzKwGpbLH/AptyEmNLy/rPL5LvNCYJz3hFAjI1Q15SmdDPdkdiahjsP8Ejeu0yOcgtPSEoMwKLb8hSG1zyjCPDKA+V/PG7fPsVImPnueyvgHXGNeujO9nldtPljC7us3ZysCzlcd+RnbL2gpQXuqtM37LRw4cKj/WqYk6/HvJZdYm7F6+9qSr5h0OawpTDMMlFgPHWwnjcmr3PEhNaEFtXnNcxCw+XVryZ8E32lkajCYxN4CiGR2hKXRqnqS50PK6Sdk0wkNHqXMt7O3IKtpbjQXw5wajXgryEAggvJt6pvJIqqjcaDtLJH0He4lsA9+wqMBoKp4pp7ze+/1O4mwHmWQwwKj4E+K/DsZI8+SRTHuILynuMcJtCe9wB5yQBuUpySrPLNab9YqrElNJq5pGccbUh2mn80dAY87G4XMRGQSrV7QBELCkv0K2dk3Kt1K3g1L19MaR/wd9G0y0KleN9iGU0vJMRRjEsrWSu+R+/n9aG9BHiVaImAEkRa9oj14Ck7oYHPuztqIjaL1RTdosSjzVcJmJT8VAPdXIcxGhVJInW41Iu+ReM8QorO1FuC3stT1+q1drgt/gZjks6QyFrbv+yw9sCG9fHVL0Fx95rynwHyJP6IQZCzRyIPnpxLEQs8sk2MgN0FlwmrqsNaVn/wT96T4JB3sQUuQOdoP9lsTi/S41Ibjfi1s2hF1D+ky/G1UxI/4rP7wGPpcthSznF82SB8FBbaVjpl8c9JHZ7hKLOsoYCjQ/rWvFEJtyVbgqriI+wHoTK8q7N3kvwz3ErOSY/NbEWpsrpIkR4iEOYgk0IGN+bld9I3P+RhuPQs8MOyoVKGfP4pBaCIHezX6nZUtkuokQKwU4ROdgI2HOhS1Hl8ewrQ75gbWXeQVCEhBUgnIhEEL0NZeXCrxlEwCTwAPdWDA0j6VjhWKxsc2gb60+s3mCBA92hSkypHGCGf1OnlXG18gtXi+2vJexEC2jjUQY8RzpPKL1+NZ4LdO5hqxKrB/6obB++L+QCXwEg1ml4K7OUwOWTyg8SDzoiUi33wBBx9A61EU7D+uLkRuPMvW6dEbHOz9RtgoV2o6suZpqDuzfXhcj5kDtlhOhV8B4VuQPZ9hvxrJq1rYmhZr+KrBs3YyfOsb9XVvGKKtuNruQVdBLajJFTpukwVl/KEff7WcldbccqFT2VB8rjZo7F3hoYgnyKUodid2mR6TjVHJK2TiWPEjCrTduAT4fVty+XsxwSTJUoRws8/UfX+FOfWLUtEcOqUJMvJ6/KswI//GYGPf5hv59qz1KxvQ4gxfRKfthxxhRHvcJ0XJh4IAGLvLtY+zP8f11NtrwtvvnAhjp7yhg0bYjqgI/kHBmLsErUq6VAkqJHU6F11zvBl5D3NUSyf30abSRylkPuuSsvZE8AyYP+e1xKuaZfw2tp+HmJZzz3tnIsSIAL4jRdCtX7Cpe/YUOGyQYKrsiPbozCJRfZzmQa2+pyqGBHK8yRdVXD0609tVyFSYTCipxHbJ6OvIcXPOzAhjz/xCcoBvid1y0iPvgMU4SoXRH1uqOKWtPH/sowG+PAZvFWpyQS4VQgiArCqXfdfcS4KN13j3p8/zwhe8MZ03qLcl/6nPtT5/PR2/EqCBQidj/RRRGJd5Tuthp2bCtiYPkHkjZKuyt5umh9zxRvZag8D3dA5xysg3CybUSfA/MJdB6P/5P/JqELfcvMmzLFKKCs0tME4q93ddn/PaKdkZfGvR97dRs0P6smNJ7jWwBl74UgZV7q8nVlCJlYfEWUv3XopsrpNUxAS2VnT+K1OLQmmuAtmSplElssbTImSYf3zw/JQL5Fy56C9UBb22pRieep9ilkqxtrgSdTjkB543a5t5sCdyZzwiew8wEmm/PNKrg0MZthZ4Tl5OgLvueGn8psRrlwhnL56ispmjd6MmGuUxippooU6qGE0VO5jhR7piDIroyfiTM8c+r3WfOjcs+rgiuYipZXv2hMO5hcDKKoPC894RF5vu2WldHMjFQTgTEQ0ypKOBGB78rlicP7jjXgMohUjRJZim/DN1V9FF2HjOL73QE9du9/KM6egHASZbDa6lyrIxqGFGUoDZ5Dl0fxVNVDTdIJvayy2dpUHh3DbVRxSHx/ftLnU0iXVbblGsfePiPKiEtVupiJ57TxJvZe6soWcnjDx78nx/QXR+kxaK9ekJnFm6N5xPOdcfmJYiIRYR16up+EoflsFGYQsvhMZyH5qpODJ4UxJczep+OHZ7HVb3f69v7VAL76Wzaxnyy6WFpvWxUTTsXW89iC4nC8w2XbOP+vEIzxnttKK8CDS6Gk+qa5puP5TQ0pTUeO9h1K3BgJxAoO/4ubkUoXb8WApizhN0ISJNd3y9DQC7qNQlZstWsuMrw8UJ2bC0OYn0h7u4bGyXzjDPRtslDZSIbPjA9uUrxoo+9Wc/JOTd2sQ3EdBn4WoIR8lUlbQqHa+HaT7boJSOco74fhC4/t3j5GnUyAn/0sORBT5IPRu7SHDIRKNOb3SfzhmwuihzRUi4qlotGpjfbczuzaCfxRlsfJ6Hwp9ThAdX/XOcpkp3WpMAlqIqrCBEi/qBY+pRwY9Bk6CDZgyMncegBS3yLIspPvXp+rPtBdYFb65+Xn7iDQ8Jh7ft/8kFOIUqzrQHG+j8V797eryB0mkcmfuWPduGQYdCfIbF06ZxjwU6fqBK1lzN4hG6cy/w4RomDnQEpdEtCy9meVA7HkEArzyJypJQ6+A1kozP4UOaDSrm6KxId8b7VaeCArdPeFv2WLHg4jpSfH62fuhTxihtCdVEt737yGZLYkPcJa9M1MOp0ILco3VrZ4FoLdoeDqy6JUV5li20Ta5LCh9FbFosZfYzRTpNbxHlTCATP33nbuESpCzWh3pjYTjjsUH8LwURdehZ89QAfUAcg62HU2nKZcRbj6A6gX4ArTGeTRtaAiOJFWHL0jCDqUk4t9RHwjqdfq9/NlYxBIjWYCkSVeXFs2KdAMBQy1nB6O16tAG89xh96MfLO0qUEq3bOcc1XKIKt4T56PhPpVnjzU4HFs8cVF5nt7M5MWjxZOK4d9g8xd5BrxkwPn6tu+SgufBd4bRel7gDCJCq9CtVYiqsX1oxCu51See91Vs5QLE2PAS4vDnLUyHykuAGI2+3G3y1c1utJePXi8gS6rvYp4cHOPqppW2v9FjoK2jlFlBSi8oTxh2IekgZp5mGgkg
VTkInbWH8IITMXrB3xNm2MlWgOAydVpsTgej+PcP4CGcaW22G1I55npfH0asUdhvEFASB2wr6oCoUCE1W8u2X5Yq2G4Y8jG4/osi79BBxd8Dr35mpjygj+lR+92yAcosS5oyUay1UiS4w62PD+DbTw+IqmbBWKfdTXiG3ne8/1Q0A5gDYt2DzZS4eHgzO7DT3JWS2yBeN6jtk9E24ZHJ+cWs4UkKkiuE6EuXV/khh57CWdxag5obhJZW8tXNPmX1+tBaeCIcZSj7D+JqHsGi7gE8P03Otb3OCUjqT8g/CqeOn2/1LVGpExmKoS/PERV90zEkOJpjX8vfpeZ/rGiMjqTn1mVKK4OvsbEydfjn+k9HQRBy3a1IcN24bGCpbeW6Zq/u4EZ/54iWLglC7lhRJi6VFAJiXEaFKOutEVXpt6LaCpHtMOgjAnG4K0LG8pWTQiuK5TCf9jgPp9Ddt1vusgtTfa78Ktf7aLHEaVL4POGlplyeADLf1uHd01B3ScBv3KaVN6LYofYjGVdjlnfi7EQ05/xZ59kvKtJ0Fk0Y4ImDTBEVqUcB8hJUStuDMKzx5jKmVvg7AjQPfSEMCbdx0L9rOWVLy36LfyGV4uVe1uPRicZ6yV0fiE+B5glaN3l57sxMBBtuVkCsER7K78kVPN6Oa8OnFcLYxYumifE6UGC52rJOZ3Wz7gViNUBsR8hw9+L/fRQsAx4H7c3RhjAu7m3nfjjpmLdlSWic5oM2QTB/p86wpuUfM8BLmm3ypbfLVVxddBme5IB4iPcegk8XJ89KELkPIiaMhCc1tvAlH6Ihh1jmQtX9+LBTQGvQ2x2GAWRd7qlN0Ck1qpmhxyYubcCxAwJKXZzqgyZvmWUT9ABo7EqSj1qG8yGzyS+WXG+0d3xWFUMpzVAHgXJIWruuyYdRmd3FLCousj703bi6Y+sVevDRNI6entvsQWzRtDda0gLOoiLvLF7CzO3s2VZAhzratsIDCLWy97VpiLlmP61bSn2HXVXxiz3wE+QjCfrJKW5n4vlI8TKc/dE0QF/lAC90cEO0jwZwHQROTyktzRB3uDP1mLBYAo1NANFH4G3fR1GDqPePDew9KjYkW6T7oX52tNc2zsmbwJSxT5p2g4BiQ54VYQRQOpMIOvA7SgTHfg7kRrF8zpcyXG5zgh4i7oDrQtVEcZhOwS5pXuHwXkJVS1wS0WQ8SJS41DBEp8FMM9Z10feW/mLcDq22saaKf3pvPxjUaPhrB9AS8gG8ctSRhBjrp0oXoFdZiU1oxbNPzYzESj7e95L2QaeYcCLojxEMHi6fpiBwcp+nNHFzRTzsPAy4t8YuTVq0DvffVLOrD2GO6ugRSrDokZEok/KCeD8iOA3dYhmL1Q/TcDjg+V/blwoMBdkavKuFbFYlpiuJYOoyiEWJOXGQ6LzYyyTnzRm/QKHE3jcPl2OM1d/2DzNCd2W18b05Fqzfw3PXjRdaVD/5lVXKSD9o9OEGUpYHxthRYSGP6qi9/UQPHc5MusgRiQaqBhk2utsZ/mulYmseNC8aZWWyQl/VHm10WBcak4P+sAyVNBq+2JVmpN93Ea6oDewJSWcMTife0/g4AL6sB6rTApkOMTDIZhM9Vl/SycI90rXfkwHuHwRMl8OUSJRq8ZouSpz+RmfnY1PareqnAz6v5UQMZxFxQs2wXmErcTfpmutk2vvNudLA/bVvtK0lW7Tll1IEJGKHKcWuKKKJBX0b6D5Wua/w02dB4bafttnRZ9TP4JB52hg7iinoGVE3hcb1egqzW3yehbVK+QLch4tif64d9w8nmuPaCq9+eGDLMfq0d6uMqkoAJ6GUumUEXXzydCr7KR7xrxDwZPUwHL100dP0D0Pkmr8hQGbseN5/cdC9v5THN2U3A9oqwmwRPa04xo8BZCtXtD/ZEb1MvfWLPeqVQFHGFqoLw+uQ/UbmuCA2WBPAV098EK8ZeJSAIDp5x2LPmSIFfYQ6P07YBlWqO4AId42R/68nEC8mb2xLjzMLRxB13ivcTe2f5iY7ptPaRsobzJmbFo5bPeTrm7gIsQhiXwRThMDO1aTPBfalSjGz1guwpFqK1ZYc7PRQEReqJ2m6fdiJYJS5sJU7eKv4JDiGQ8CePOVdIRBJL8Sy9EuhdChtOcRt1SPyl/EGIhX7qj9hhzrhAzqYkkcvPXwt5D8C4fEDKmqXwxdM9wQctTG1jLxhZhlBVYZQAmefzY5TsWseF6hROBEfvjiUTqf7Yd2yA+Ic5LjrDMdHn2lj6n8XqeX6an8Dv+t49/3453Yo/l03wktyrOE2kePJ3mThNGo7KNqSXXz9E8KTaUrQDuK5YDWBtSnQsWOkr+RRk48Z9qqArY/7rrKfyL9E3GXOLHocWGfn6rEMXHrR11Hj1I6eF0mResNtrq/Bptb0lk+ZT9VQ7iGUFB2odKJUdkFOXs0Swi0tiulwSBi7tUBjazpuEGlHCBhpdqy5BmOsng1ITB5ntsx+8XcF3HgDMfMxgJfTPyJlfcf+moNEURWyRVJXd8eV3QrhRnjdxNnKKAuZmRMYAbU3qVPJkNUdZQRFC1gRGLBj/hLYoxMsE4PMIjjUgzfI0+v6rS1Uqt3n3y1CTKw6dEyrsCgrNgjQvCNVppVuM4mkoKzvm3XoFLuTDQHuSB2i6PBXOkRfCDO94mv/qFHEJC/sx6DyscdreaDKOvZvy5WNIdTYK3ANynpvV9fQ/vJzJzoRnj1leFRUSrY9HRF6EYNDPyjWhCcoSZmrMnZAYIlnCu+7EogFXkWYxaYT0M7UDjMnK6K+OCgEVFYdkfPJH1oa761BapNBDgLgQlt3fsc56XDSkSgYvo1K3a2tYWlNyZpaBS+oq+/zrIONsyKYMDiEnuLmYftwDAYr+6SFeaJVMaLSs5d6iTkIktE+kVd6/4Zf0XxXOYC4WABJGWr/8v/4fdxzujhW6Z0MZFa6mioXKKrGFxbU87lJCDI0rRwigD6P27oJNtYUZho5TQr2sqk8XUPTMzzAl38sMext7AHz2R+vskgXjXmp06rYSWPg7vis22PD1aj1eFabWX0dA7uOY8Yl26JX9FFTMLwLORLPtFzZjlJ4AowiSn77eXWy/p6t0O/yfysLjOdfcTpUZvAnD8B+UfDSIYocU2lS34m/Bh+9DubLPkNkQcGhQAWp1tzyoV8SWr/nbKRc28rHXkMiF+n0Ggm2trAkE0ndo+p6f9goAuXFeCEfC0ZeynsyWV7svOABl3S8CJozhnyuykb+pvO8xGA2aEFDPtZM6rRaN7a9VBd+loEgG1TZhtntxQGe0tfS5c6gq2/gyNB/Y6kST7wX6Gr9SumNrl0CTiACX6ob/Qx/o4EU9NLV7XIV4zxcRfsvkW0uW94lgMDnQmzQzktS3hFYuFXyUAa8btkfI8FheMs941IUQ+hhBO17WZNKGHTwB9sZVzH+k2wNCLQoCANBs27Zt62fbtm3b5su2bdu2brZds4hZyFF4ftiXpbMTQ189epD4zuxvIJoY2oHaW+w0DbQXo5MxFWFEcoH7UFIqbWdgTjJYuQ5IPIT2TC
T96CZS6PTbGPiQpIGcYXy/fmz4AHQ2ZSS2sfYMKGwYTiMG88fhyA4WDgrzYUDMZMfTSZ3TPoTy7NkJTzd0Qvwc37MqzoAMOs2d2RKZj+zzw7eEDpw7q0eKeW+MH0BT1lH3caE1KQvfmZnyeyJ1Ulura1JohQQOGpKMFEQcY0OoOOsvl+FkEioQLtGnO+ds4AYuqm8bB1rAJF0n1ILNEfNrPac2a8zjZaPIahcwUm+ufa5swfOmbmtQkim+JAHbWTKNv3Q0FRYT6E5YVA78sOdBcfyuNY7jpLjgqyubnhUNeAy5igonifyiu3HqtE6k7lgjxh1Vnzb8/cGEemHhAJbrSFFdV1NgBEegZzrW6TMcxg9Q9P0nuFn31ft7PvxFVCvyttqMX9Yz2ey8fmFhp3gC/x0sv94cqU5zRXr8N/XijMNJIM/LcB8wxb8VmwiTmue4SWMtt5m8PD2OOPduKETaucGIF5/C7Qwfvs0V9oB1lKIW7rHf5SvtD+eOuBN0ZPkB/5uozWM6Mm4LpHKA2DpD5VxY0/yApUd02M86l7Fbz5+AbU+qXz+d0xKgnIZ8ZolEpVYRUobT3ZRjDeYWSZUAp21+O01JWLAeSYgjIYBWiJcLoE8BsU12k9h0lx9i/zxjH6xd6mTL9bwizK9IOJTc6jdFDaezpw9t2GiOvfp5CmNicXZzxD7z71x3HlcPIkA1QrP2iAWzgGNMaoB5toT/jJljS3Ty0+yKzN3WouIwF+lrxjq4ZlUFid41z6T8WGmKgKnKwGf0119caMqQ5A+5W4i94MX0CTkKcPXeOyKEsFe3YIYakw+itSxdg8OswZmzAmhapmyAUxzpTF5jMalBn9CL+XV9XgKtbEem+Tw9g2XbLZkkpmjrsckqVPWH9+XhokMS/hqcarwWXfK23AkDmx84Jv5mF4wQLs9PdsJI9orXsuawZMP7nbvgg3Rbfwzm3VsQdPeW+7VhfIbtM9QKoleqiupc2EWLTTmU5kJFf5wlLLRbAsVr+ld8VJg8bldq9PluLXbP55oVwTmfsK7x7vrk8gJzWTjkQ4+o++p6hthRhCZCKjP9i8a1NphfKalnt9B+sQ5bGJvGOK644uM7kfN4jmEIyfiw6wWJ19W4WK5MVZo54sAxbh/ogNHZ34nxzFuppezbDx+7dTWboOVScAT5lG+ryPa766DoumAu70ISI6TRLrMTGAYo7oA/76JkcAeeSAQsNUrpfZpIaD/0bO4ib96/cYapFrPHSCdUDKP2LjUhG1uLwGOrm9SAGa2vEzHQ9vBH0jBhJ4gliDMOUFEAtEAc58cY4iz+vUWEk0Vw76/6kgfxOVeKkwDOoVYRRVrUmSQNyvvB2C/npuL7uNJe52R62u+lukR9KjvCwuHBa0L6pRL8GyDTIGtWv0Yj3jTrK9sKb5VkwwLiDKN9CI+9/tm+qZ9+IdY8k4hfjr0034E2I7ktvFOZcxqk605nPlrhiSnD32IK/6OCZGU1i0Pv+eQxcwTTxyyjFHOkcnmC0aEPcsEcCnfGTwzJcZdSvj6OJBMzVu2ySMZJEnfB2rEd6/6qoBQIl3FJmMBmjAUbBvdllQyh1i8hCrxR2PpxBHzicl2lVXkXqoEfCOXEJRoZ/yVCff5EFh3qHJ30mAhGFeOPoW6P9gyyR7xT5trnrTPEJ3vVstmcNDuqGSO7iO+75PbP5JOvBaP4lidlWjQcSjF4z9w7TFUbXhjIrCx1LzsupJmACSesZg3E20lrvvH7w0rOwBRB6xrlXxkIrUvc7VZKRNdVnVv9LA9U4xUxYbysOONyBUDOrIkEUDTD2tw5nWc1cgMxRNDGFU7JCrzbZpsvgTtfSc9DdcvhKIZj6csdJgELFgmFG1CCMCuaez453O+wKJSLqaQAEZA629w1KTw7Lb2kEvhJTlC7MGvv53lTCmPogLdeFHMG0VV4TAicx1owqBg0T+tzYGJFYDZLMUEo+k4FTF39fngjyO9BK9ueigeoTiJsBFWr+YJ74SSIhWno5DlZ+WASUdt6OUYnKam8b0mzfYbsFs0rCTji8CiPxhDRhYBzoRSRTtU6b2rI389Fa/mTnVjA6sLOos3ffmfy+U9TYCM3cah57enWQHdmMVERYo/zrtUWqOd4iuNQwPjcIQHF1mnVlY1rS4L7oyvFunOmZUyCI5lt+Ciqk2qZnbaTnZNgoZ3eCN4opY74U9BhcdWANnuLLMyclxDoJmW1NsuxshFfeAv9t5fkbSyaKlcphV3JesdiC9G4843RhI1mUWjh2/zH+eiN9slWP9PVYehc+HgjZ8rsJoRAcgV/9lDb9DJXz/Kg8qyNvjbexoedAbT097Pi39AwnDdjbgVbfhGxETlBlrgMtVIgpyqxchgNbS7Cr7sIsiPototgQLGjsn/OHC1i/WGrgDGt0nAz6K97fBb1g8SSLVkBp/mKVYWGekIuh4TJgMD+jD/VBzG/Fi0bGqTGipyHbbJSxVOjmTtFjpjwvZYyM49GLNKNDnobgUilQKANDLWeM8J+M39iSAy+p0MyHPylnZJYDx4aid7lUkDkQHkk49AMC79zYnDZJJ1XsouSM5ISxbzbf//HOhrUb3RKLaIRDlsm4RcgdYR5hZ8HAl/pcjaxNPmmqPSVTmScKrzR/af+uMDQP/26I04M7qAhWrDQekR4WGf8O712++dvTWRKO1dV4fQiLiXVMxSUc2DsoiDWws/CEMwnmXlJ2YnHtYADgmizu0wAHQ2S6AbfJ8CxMSkobXCgsa9HDjVuhD+EL0zJ/Svg+3FhAYWyP90AoiYnNk0y85+Tr+Gupsi5QJtYtkhr25qcnpFEdGQK5p47UQAX5bKXxXk9iNDMpDDd75ig00iJJoJG1Bd+hfSY8EUT/2D7wg4dfaGMAjvTwu8w8jVigxogdDwgKxH7DOdko5QvCbwUn0ZD0PHirGRWYwYVImXvLYq2Yn5a91+iSntqpPeWyfq5IQX7gmF52RxmGiJycU99t8JWNhO0v/cpYtRm8C0NPo/bmmp8DxLIqqWIamymW7/B1GqlStvPMonAnZp4mlZyVzvCbS00fh01OZuMQc/Xjnap2c+71gf49o2Ah+lA/Ch2CUEpF2sZLMhTENXBqKCdMmG3sSej8kZuZCkEd4K1AIfGrYDELbgDpzHqmxcJBNTC/qIuHIJFHJHC6w2ma876vxDGrH0Q+ALMvrJevL6KZyqGZjpFHrOCOiaMHKFfy4zOEff7hRwVwJrCEonOykirr1b7VwO2sjHz2Sf/39A0Opv3GB+SdS9kvLYOPDguZ4RN32hZzZ+guVzCQWwSoSYF5n/dkO16tXQ9biVvZXGjjyeLIMvO/Sk1g8kKG9xJ9hAqELWdFmNK3DblV2Ks0xo46gHz/L1fa0fVymmnCMUwzHs9PoxKtoXX2d3TdcvOlbDLR2PKjYEZt6GZ7FsEb17c31a2YBCGKQbPdFGZRIUihfxe3avS5xbUuSkRXJTMqqIHc5RX77yayd33h7Nr6R6tkgHP4Ug5i42uiWuwGktRhiT2KuUC9/FydYe1l3WHQWd/uoI7Vt8gX
my/XZJMuiJ83yKzdjosC87cIjEjKTK61HOMk9kFe347sk+8z5hZuaNTKI2nNgH9qCfA+Zsv0xUjG5e7iigKCj9MXlK0PHkyLbY9V6OgdekNtGuFr3uPKo+WXfeGjAweGpL/yZYryKcd0SD9W0ZffS2Z+Z3PNFbif3zcVyzsmlAZsZxvBC5zTUK9s8wqj7sPzQxaUc44NUxmEJblwuMINHXz9u0a99RDrrqqT3CRvI+Rm1E1V39jZaUyQrQweHqy0JlygjeTNSGRa9EEIr/bpbiPqY2I26HGeZQytl4gfO4fIlbnfarOP9effNmEgEgK2wc6YPfjFnzg/8uX9El3Fnyi+y/LtvzX9cToFUNkSrQpP6CSFEfKPiw2lJWWefkIgAvzo6AitDhgbWdqfDPLZUAVXGPiJ17lfmPYzlFlv3QBOkXjtFRFDdwcSPVBrj4qOmU/oZJeTS1iE+3GRyN8uHlZ1LYuIZ3fPpi9I4HmuLeOzfFPKXaZlWjbrQuzbhUTZMIkObVidC3ReGf25Po1+Fycwe96qZEoy/9qy1LZKLAHJ5DQgZOK6qLNZKWJzIdeDN8SBozTUJx1HnhHcrJ3Og/pgCzL8MO8aQW/+D+rZ/1s2k6ome48bO/k5rPMyf4lbgPKLIhAkWI8JoVclXqWwmiHj93K2Ke9sTmRyRKqNIOgZb54HTYhGOjcxvQQn6hl6TaJ77EDyS1jgyg8Xf5LXOhKmRXKREt/YB2nsabhJCrRtfcFoAep85EUaglQ5CBWKIsStiFayuPAimxztrZwVibtc5oUo6igB7lxIapuHvNjluAEEPRpy6K13tz3DPHSzKR/6FcU5rYO4+W2N0LTX3GAXSc1RuB2fqVA9SRApA8TcdtGKfTAE/HAF4s3whtIVqTxsdD6GzXdeCDj3s0R5AxYLl1KecAIv5UmzKyTHhLI6oaGmDMn6VpaOtQ1wOGMVopLPe8Ow1Olz4zd5PAEDI2zQiYZS/5vI+am8jOJrMQRfbWvdowtJVe6ZQf+RhtQzQw8UJY2FHlMxbam+xIJnF6YajoBcgLlx+9AB0WZrQI6fMmYZi1iez4bFPsZSS9Ci3iVF49w9SeyQmaqZ2i8XYOKu+ONLMo8R0YqZryKdiEyonZNBDS2fpN/eNEHISvBBDL/XTs6/PDTT+CeGtLyuxrUKZDBNXfTl14D2P38V8dbHnDaBcKJZVdqE+uDq50jSUJbAkNdBRTqd16vCfSlNn2XoJ1KMSEi2rS86e+VG/PxrpeYktEZG6KHVO44WPcXLh4YS+9KAW2+HLfdYuLeNalvXRVFqSfEs9wKiEDuixu57d3rUAzhY1AmptSVaJPpxUAkYpeoQ0yTnXavzKDRKQT6VrkbGaJzojr+7UshD6hiHYLXpd/+b+5Y73kE9pSFgcq70uaMI5tyQTlyuGaLPXki0jAKuzE+j4O4jPOowEfHCabDUJgHmW9XhJ5UAImJErJJuQIW/5kqzewjoq90o650IOI0QxaQiQl6tBDC25crjnKnAUjzadgjxHDswaaplYyYbBZIDSqgbtih1MIBAp7i5xhNBogIIYlcUK4/sgqgtLosHrHzKpt94szBEqCM9NEyDiZFs9b2r7i81h7BccAlzQ2SZH4AgUE+YAMQ5K7v9gC7Jy6Q2VRuXB7+MNpkptDOGPs5Q3Q8Wulc/m6hT6R+C0aNrlU9PJC9aIQhjxQpjV+5aPn5Ic94ydrR9u6N3B2S49rWQlhSoQSuyBx/etTEX9ap2Sw9+fvRpKIdt10/HdbRvfQ9ScPMwuV5TNHbN/cic9FdvB9SaY7dX3Q6NDyXZnAcMlhF1+FfcCecCdztw00Gtv5oYb0gGtSxV8HbTSH8txv+/Bz4Ho6KWg0UfC1MOYWIelhnIVIYL5Q+hpURiFVDZk8cks4R6vJtmA8ezx17JUZo90O73OjGww+pHI1lAHwIV/avc8o981Y1NaFRvvKCmfEIUw0+bYrzkkrokCygTcM4QX2VmPq5D4cyNWU47GSlLo0bBaHWM4GZgB59uR2wEhci0QwimHBMmdmw8vOdyUwiv1dK8SaR7mmDRYeOjbIDGEkeJm+Ew2gp7bfhb8BoKCAYy8B/102WCMegmwOOV9pstPAxAVkZkkHTiFjrs0QMHqQo07GVtECHcKTAY//G4+Q9M+5aM+6/COtGx/DkY9QdyEAJOrUmkBI0gu6jyuVPu06cN4bHEaHQAJ/gLJPsIdA3TeM2YV32OFTNuHEG2U7Es0IHDJs2XqM1+twgu+exKZgxyYbXQgfkkaLo3EeOpJHIqE5+k3/4U0r2UWqYs/+BeVMlpze748BjqVpt2MBunMbnnGf9zqASS3j/ezq0/80V2eyBbE01uSNgYYS6EmqOZPN7oGiUCbQSyW+3MpltlGPHJSwBXu1Fxg0eV5GEX9mUrQQpgk6FXo62gU6PnWN1cFmweQiyvoRa3alKP29CT1N4CGU/x+D149HXllkPgY6+FHACEfy+MPuvHINSI2D/R6zfueVx/UJdOkKFFfRwcIS43CIDw4l8ZxFrJwlDieP6CWo0Gc2Rury8z+8mPTov2Ozxnnf5JJqrRcmzGA3+3WZP7ljO+XFrzR3jGRe3MmuBFSYpQ/qR+E/hsRbnxJrduK8hOaF412uMwdprsZtRF+On5UpNmctPRRF0ynQRwA8ykHiDjKdBw/s3xBYKPbxf+focuq5N+6z08VDyZC1o44P5y/XT58HTP2LlqrS/34hN02VfAV8Yw3orSVqZcaI85OfTxJ/TOb5DPt9CmnIcfXe2RDNcYrDKj1ipZJbS/+w4nWbk7sGvxljuh+NYICuO/Y8bl6lLiTzXpRf5nt9p4P6U4kMprHPHzaTO3V82W/XAb8q0U3lpkiAqLbkfnevqxv7eRHzKutirBlI+uiACsXsAaAp26dgsD14uUdxUC7IRyD824GhNdpKhjG7epsDW6lWqJRk++Kg0PbPwluS4H24P/PCWUYOQvB0FacZ01d8KN3ASaNiO8Ryxoy82kEHBH8eyEh9KMmkTBDv37Mb/1/ttFUtUW5DZxdy5gD5aJeDdAYwTqqVcRrOY5QYYZjKZUsJh/k14ySIkpapf5qkhAoOR6rjCDTP/WNN0s4o3MwVfGhdyWWX6bttQ9jDVzahes3M+LplkQVKed4UVi/lmf4x5PpKs1XvQP3RoX9YcjE7QngTLDZ9ALmMpCgH56IBFN1FuwwwvvqNI2p4jrKr7NTbbAhNqYDgKkTfgYPskSHO/0JnoopskLCzKFbILo0JOD403569P1nNhaU6bCiZG7a1kfGV3nnu7upQ4yHnYWkUxkkWce2Z3Q+JaRPOMftZJCAwtyZudtFU7fbOob9lRvT1OlKBOsx5KZsYbf0nmE7e612mFTg5iAPRDTKKWkdJgU9CGTYE+NVlNcVORtPa23CIF8+b5BO14JCtG8whA8zSNHbRSuf7DHd4bZm2QvmCd5A9CpJGStcu6nkLkUysX1QBMPxyiEchQjB4rSzTo9G9rQeW/dThw0DQo28KH0pJ+sPd0udCSCzobWrpvvu7mnq6JfPNZ
isFvdJrfqV8ZHE42sEun5Y9xMzhhloUo6vdcWj+o/2QFqRE0recff07G73iKGkxcmrmT6lieQjv2tZO5CEPId+btN+QVU8FlOGz//bRFI8EwH3FoH0hTywqudb9kSOgz7FAsSrHiyKuYWW4Co8EB7CsWpG3N9AUp2Ounqw6v1sapaZX+u+Xv+MjL+93XqxD6KidIQxR01gtS8NJModRw8uP1dveT3VG25poMpzleqRZA1PCpWrjCjoHLjhPyJYZq1+m4PG6OfqLbQ15n+UlKt3/EaA3Ey+PvLSVrA+GxFJ9OicDcckFc0My7KUZNYqgFkCj7wclW7eqO4cYr96UXpY2GZMjLMVqfSPSGolxrUtWdK9/kDW3vl8ypKMEHVrF0fmvNydA6CBeCvLUvKXdsEOeWWtXpNZJZ0RysGjx1QRWch2sA3Ylyzb5E3PA+Ap0Ih01U3UTGGAZwoP/42uiov3RJhORX1ip2ZttEdajs1SrbROA7hkUrFdrsKfHgu2F1XuQiZwP6RGA6W33Mw0smdXG0aTP5ZJzNmLgcrawKEwwjjGnkzTvfIaJGIqqqGfA60RCTa4JxTwPFVOL6UzBzQ5RKU+NMMluJbJQFjYE5fLmrDhLnoNhxc7qYmCPt/+4urQ15gGko4PzmcHjUyp3iTCpCq+T1I3Wd9HM6N+xB1/CjKGjJS9hIAcWJiSV3fusQ/UBZhVB1bTFl4biPSkTH8Qm6s7RPNSOhdcQhSeFG2tQZkneH6asBWkWcgi5X5+W+Y3C6I7LWERDP6jIuINZ/l052vFSsRzcOYzLBdzBKDKKb2D4Gfqx7UXn0YQlt0XsA5wlPo5gCfCYuLvFBdNl3UpJwAFDpnxcGzicW0xcKBytdm7HDtlFj9iGputBjXAs6r0wstzlykLVdkBcbLHErj97Ab1uN+sgl8D/N/CvG9esc0FFcU/uQL1uPX46s0F2Wj+PUEkT+qJfKiLoVIOA1GDlpjDZjss8jlGrBam49ImkqCj10I1jm+cd7us1ku6rUocW5q3iCGn7iDwW1iTUAn+Z6Dpy9a/T7EaZ+dvsvw0CSUQd0Z40B+p636O3LMqf7Xz8sDcrpQM/f3k/wLxve5GwCcYHlW8ycEvVAYmifMiDOMubZ9TSx3RnU8Nh4f07/2RA/gsu/ccIbUDqp1ttZzb1h5E7kyI9yZkZxW2cHz6VOAwzGuLaa/qOCf3XcWEc2uHPVt4vXRXgMhOwfUuTwxTZ27qonOLMH5VtmNDFNNbQJEILQzJiG7EL6G3HFkB4s86vM6iEjFyuNeLMBiNOqxkKNIxTPpX9ZgECq6yfNd+3L2uyRpesST000JklAQY95Uuxm9/uVSROheJ7L/qeRc2VtRw/swH+HiXM3xWe6tIgXD4s7iesg7BJmET0axyWRJdTKbK7rkigVMmWQ9s43xZQusTttEjMHiXX5/rd+FLpn4nHkV5f/jDGL8BLJcNxD8A0MiP1nDwear9TewfxfJr9bS1nR0yRPgS9NoEWtbiNVX0XD2d/oZ+8hxaCFcsauJy+NlXGy4arvYTcIwrRbQuOTqBTp6F8eRCsipwKtoS26psX+a5J37vnY5aIJ2Z0/ySqxsNGF5HGrsXyCzvOTaAPjk+I/3biokM4T7sW9XXlITLGtZo9g89HKyfOyMOWICpp4veZq8+8+lM3CjxL1N4ZAgp1UONUzMVGbfRwHK+oQbhWFF2dPcQzipKOrHxakkeyCbgUFbnEcfAh6u7xs51kZQ6kTr3z8up8Kuakj+cEZPfh4FEV2OSQH0Te3knWdqtYcqT3uGGgUXB7zZMfRcyqZYXPNCVSXROltPe3b42iNACNfRP8we22gcKO/rTdivZ3c6mWCm+Cl7C/H1wxM86CnbMSAN2V6ez1Uz2jXvWkBblCnJLGiUGdgoqF7C8tOsokeJV3Lx3GoUoGfpK2AS/FGqCAV5R/958kRbqgB0uWMqf9S3FwPhRr/avg/33HbmiClP+eXfhtdb91LYWdVh6I9qi2v25VpSOjXRKEQjy21CtpKsU8alZw5rNG264xz7mwXBKOuIsv7r7MX7rt5Bq5Z9pvU+NHwyBhsdrjOjXuoMee/zctwKo4P6GanFH/E/ZQU7oxp8mTse7MOd0+BnTtLnmD3rjNC9A45cn+7+tNabXHkRXMm4fmHFY3C30qmlSyvNvqxJpCmowA63Xc3SujVXh4vBGue8OJ+D21zB8dlcGJvJKQ4GVZxtbQrMisHJ79DMIR5AvwodchU9bLTUeSPXv6j7tWW/X+2kSNBonw7D4rW7M/fT0U+/FcVIfqdqeQDSzGNJpUwFGKQvwYj3TTpEqBGZYW6jOJ4mP77WKmkoivF7OdeqayGC965GZetazLrkxWh/kgnBhL6K7+XGnSzNQeLia3+MMw3xkR7vcp1L3OuKUDB8dpzoDudS6abvynYKJ3vqYc90VtsTWg/34we14N2uu7u/sz2mYq/U93LRLdbMyjByz/E0EMONrMSRPqD8OkdEDvFeoMyVcH8L1jCKCMFcaLiWVw27oKVWUgaZVlX4qxdZuaUvk5kRMwpY4aLVchrUjB0MimlIYJXy5AQvzNpg9PuO9szPq8XCU9NiQhR4QO4DxmoFTzX5wW2638Gzhp6asXejPaNHiyC3tC+g9l2lt1X+0WBWHUAyCNxM9OIVtD617H9rmw3CIctXpFI1nunQ5jeX3hezc5meeSmLN/N97LDDG/ybdPGeT9duWoYPoXDCW083W+5wKQ4btw14rSkc2JE+NHieKoCTQ2mWVqeobF0fW6xMNzcmLD5CCsgfIw4SktqzWkVWX0Goz1+wJWLyd03LJ6/wdk9TQJGSkvmWBHIpKJxJwxfPAPKjuAIM3PAcTJJLWfFmR5G3SgHiuBdcLsvb8PiOHBSt+oFBwtPwH+FA9Cpw35DU2pk4NHcYNXBzX+YwdRwJlaKp4SpFXoCZEfn8Jk2EBntZ263d7Wx3XUEU5/KBEeftV6TCyA7RYduydf3INZ31JLFAUtHY3c7VhkXyhAmwWBuVeEOQwgz88wGzUZMj7h5561Q/5ZgYFD98GXl3xS0l9ykr7wx7/v8b/Xnuyh2YYWltmh8lrz5ZQivp7DzFTR37nG4/314FM/zuO8M0yaEHYvbzWeLysBn2jAftzZ3oiTpNlaZsUQ2Y2X6ZMh70nXfwjT1ec4nSMmiAAl9paTH2sn4r0HiGQbBO1GEVsXTm1TwIsHgJ+Ya8w2HbJ7SnCfVKKEyWhxn4to00NtuptP+kYCy5+norLt7Ge0veUh4jdcglpw1Ni1Y/tCRA3N3H3LnxCAPkFnGmtlSk456ySzQZlYKpOAova4sZuuNUwyZEMG4h6CmGtIv6xinjOfYPx6LeOatsqIBcPjgY1vPfeJ1wEII9C+X28gNwBxc9aXc8adiUc25kkMxIZ+LBj2OeaFe9Ue1xkH6lMBHv6PyYzsFJKXRh0+7WdKL8g8WMLUm3g9X76HNrmoyF1J7eAJNOUUTsVxKDTOk7GJngBywrK+rEi/n0jNy7DuJ3wB/6enW60TrCidos1i/Nv64QQO
6rVqwVOO7vVmS8H/7XMqn7gS9+iY4AJz703qhfg6yktrD3/FUzKbmmhVCvm6NSkO2eIg2dm3s7tHIGlcGINudtjJIZxEZNIj2xo3gKPyDNueWuEj9tjPU8+G0z3nJuj63/W/uzFl2PjuNRFAyhvRLz6jaGbAkNL4eDhqGS/d8GKEQXHav5RJhxklCYgDRzLwc2s7STwrRQFAsMjbyekGL3m4+RCsB4m943r0VTS+Qmt/5HS8N5uqOm8/9x6ECm6GVjbki9Bcckjikjsib5cE7WDCBv+l+A1YvUM6w8pNlVAdIiRlKpGKhZgS56jQPfy1VgZQD0ZJaxDP1JVXEECYM5hYbxXTxzxGCEeRL8hiQJ2mmPoZZxlb0HtLZIAysvVrmRG1yughL1Y4zMWToC9A1Hf2H7BJrUQQmR9ZrOk1qht7Dxjeed2cd046ERZsE95fLMcZtdrcfr596aQBb0Sp7UpcHcu7ht99QpujPS2IXpbGHkBP8CS1rvpGnnmF82I9cRPNiSs41pCfYg+DGSxQOblRtSrFtsCUap7C0BzPj2tgosKZ5mYi/cDUnnscgjNJzYaaEGBTlw3O9cth+r7HyukFUqefjjtCyPu9OyqlIIXOVKsyD56UXNwznFQIa+3SUuhUfWXDgIAgtq/4n90RTuVsgW/TCO0mfZu5WtUme2MnN6NaUjzspJVEZ/VHFN8/5q5wml1mHuX3agYQYE3t/WGAwnxpQg2PSILRuAmEhNfxPd+B3bTw00mHz2QsAtSwon/Qx9T8KvSZlPpy8STDie7Co6yAa/tOvCHfp9Z+jAgU0z6cdilUD8teeIFr68d4YMY/l0SGrq/t3HjxRxNt0fgXIMeLaNVJx0UsTiIFgUQkenzeoyrz/vok6jEcw3BqFcAos4qQrJjQ5HsIDk+gs2/WILNTBsucT1ZsE8MZag2bo1d9/vV5z6U2l8akvgaigFZ55tL8sDcqvpEb4gUEZ5/qTYBBOEsl/a0L80MNwCcRenC0T1xX3SSbj9RnC/xXaZZp2QSuUTaNcIHg1GdpXPHWwe/JsGu1cusR/WFT0u7iO3szHGafKZ/4wc0SaKGorMSBVnKVkt6quwACjFLaTTVWJLwCqR9zTL+7kVJ1qAh654hk+f0Ju+blMJPLlX0l88CbfJJq5OhW/lVITr4VYmXIj4x9NElwWmPyAU+bJ66lFdIKvUOi1JTll5QY2A42ycBxf6Bxqq472A67saaZGvADpATCS4HUc1SIRfI87HQ00YIWOvvmlb26XyFPLZLp0t1wQ/XmkQWiq3jdu3YO1GnWveF5iny/FHUOayO9fBPNKnQUGQatJ+/rj1QEHAqm/lM9VXfD3e9SUHh1ZS8GwjlWAHwnD4P9g1EI0vXG9z1H/cSKLJl7MDx96evgIZ8zq36YWYl4z/qIsutyTWFrWTDBkLYsh8nwcPfrIVTiqRzaiO8FKFNofm7495UesMVYCLExnzm0k1wJt/y1NQ4hIR9s3MarUZ8snA1xDtNHb+VkPl0BIC6xiWFqrri3He484cVX9lU03W+ZBwc/7Vzjrj/60lJfCC/37+QshSarteEF+VlDG7TNEqDjX+/+DUiQv73C4rQG5sqr4jOe7TKhej99AGI4woLt2f/CH++/sxGN4G+VRFM/pnQwXzbs3jw+v+HMVzeaEeetscmA1XIE+ZsPbVKNqBwurfBnUjgTXQlBRovds1Y5wRSHIWhQQNRQwZ0xq/d632b+WDWS7t0LtAFuGWL5pO1BA9ElbuFXBsK189cj7MrHLErBhKQR32V4t3Y24jSbnkXUXolAOzrnTYU+zmzH13wvMJ03uHlte1sV88IeVYOGNvgwbFJyKeLryqyJGA0u5GEqFi2/qssl/gkcFyu+O+K2WGvUQh4eP6ZDANZWMwDTc4X8tWhtbbGpRSbUskRrVbUpLCcCcBnm1xzfaPMmfBSFB1trJLfZ+DDu+s6HZCU9ylQO41CQKDBQhuLetOK1Fe0rmd5rTKhzBVw+J8FJXYH23E3ZiGErAeNVUHspwygJ1CzqugpDB2rjbYTPcH/ko7X8qIZdae4riYhtf7LLZGLFrfLVPvRyZEL2tlM6ykIuBJ2tbbkzTtVQu23YYL8ZahpEe3RfqlZBcj5xDlbH5Gp01FA+muWwHgiYXzwL/JeeQZzzndSrLcj8PpMPeKY67QIYFzJfm0zvcyQnMhvYwadcsEni3Up0Wn37nzNdkSXD7ItWdELbUa2NxKn+lhf6h1HXYKYpqHoMRx6MQ04r5fHU4tG4S8Fwu9mZ7vPhty/uFeTMX0k+LojW67cmXczFvQobZfm6esixLJ2L5HoGOkSPbCkONIxmwRzd0cNNoBZD9/N2y2/5at99uXRuPnXQbblihnjl5HBC0C9lNoO8meIxC7SvNUNLLjbjQIPqjEqe1BJVRETzVGdAvu/knriniMws3W6StmmhD/QZVFKj+a1VLdNa9zptqwHiYdCn3y+lSWMCql3Tmnww/AWtl9dCE6wG5bbyxeB/xPfAFC/juZE0iE0S0tQ8RLj8OW9H261HHmhlx93egpcQc0nb24VMbv1ztzKw3H867QMG7h7dijFNHkMLJhELcNLGMkdF+i0PlddC64e3z/Tk0y2qIgkuS97u7sET8tshWzIOtRtCSKVxQXiIyoWLk0Ffn6O0LdfZKHA9oPIIXzXmqnD9MFc83RLwvYg2OxcSNsYZesjiyJVnYYFeQb4vK144NYZTdcswsGYGPNtp/UTvLDcUgrT63Z/Qm+Cs25cj7sEIPYvme+IaflcdDEZUAww7OZucsIn0Zj8zkzy/jKS7YhflgXv8Ds7DE1E8c7NoiMUkEkcKxPnplql5sfhnq5S2EmqSwWUO6i05lvraYvBxv4qXbcUlr9nR7+ZHWvZzdfQzQyec2pTjKGv0DTcSz3nyIhh/FpmORJFbGXm+XeFWmdFuF412+VctDeo6pPFUvLIlcv3T02DJ+qSa8rzeEnwRsUgU3ucUNAJr0TGfjmT9Y6DvLaMN+9BJXKn3x8RQOo5EgAhMkXi/FSEaEIg2HyG3MjaLfDwJBo/hqTIk5YczMQmUTaabN51PglhiAe/TISkoVF/ye5b602uIB3PyfjDbU/yxXZbmnm1CFJn7WDQwpM7Um7LzBu2dY2RENZjVKMu1G3be4xAKs34IX87Ltg6rcuFEv+Wi8DeQuR8F4gzYiQxpKN7IQdVanIPwrxTJDroFAGNKNZgvT728PQmdvlzKhcj1c+DdOdy4XCGU9QIq+qg1ZD0ZvCABn8BKwIQRQWBpFQbOJFAmlFJ0mF53XbLSEapsTgZ/YjWlVqNrFQghTP0n167AkrBr6dtBxrmK/m4vgvcZXLs9Ilx6pwD2W+iwaiFH+A/evxKV/VP6TJbvAIsh+fW9XFo/OP+SyQ88PNwbW2JlJnyzf8eF3zlLR1F2vU4Q98/BqvZymYplTKGUzLQbrFkwfeEK031Mwtfdkftg+6wX9JP3SnmuKfDb4a4U59OSAE3a58/6NtV6my/6QCtjkmBPDexE/JwDlM8VVguhNPngJLdEAca
3EZ+RjxFxit0U9rsyUz4/MzGYfolVRwasGuZ21MvmUSibOGpH9o542Bfi20qqZ9qD4jRuBalK0Etr+zi5ij/GO9UuQUpSo/5ctEAvFPMxkhLhLBfwZTs9lT1qg+pXqEjSuJXQMIahDL2aB3mrA5RGlGv+6WBeBsJm6HmlPno08+Nd/G/3gHGJbI/FgYhcFysDNvuqV9EXw9mKRGSvnnIt1+34y5zt25RETxApy6rf6ldkPzJLDgGNpeQussnhGP8ak3ZxbmL9jstdpAnids+E1Dt6pLDzFTB7gr8edMcdE+YW1OI8pl85zk14ZMTO0J9CNLsnZ6jMwRj6RNrHQIa6VBJVPTP0EacR/vlxOHoq4a8XWx/BytwwyBNDBSkibLF8vdJad1jMeNiDBw5C5HgrvHqrS5b3CAWr8prdc/PkrKRO5+BIhfvMs9RWTgGWY/w3e36HkCh0cizQgIqWJShnTrwU8z0jFO2+u2yLKmLhwbtpQvgWsN9spAOChxQ8gnqIPVzULGOdwkdW3rjdCwX9L4XNZeH/sLAltMdzgL2ZTZjOFIxUHbdAxGeFo+NEyXL4c40Kbe7fHEck9DboiipM3bk31Xa2/fvsUPKTGhXW+fX4f7FmCbwsyfPOboGN7oy0or8U1aliNkkYbmI2ZnscxN+EUilDHqRFVZpY3FG79C75gxOKIeUub38TpIlL6wIglbqebMVKf0Z+PevTHe9z3g5j8+2pxEgreuW6uVG8FjEcCI8FU7Qe4+oXyrfl1PvCf4nMneIyhyKRfneFuN0XOVrag8TncUbMLCX7yvqs83AcO8eMOivA2ZF3rM8ALptciNcgp9PCRNMgekeV/cSZXCIBdgTDY3pO+3WTymK6eaAl3H9/BbZkVM8Jdj319oEF6aUIMLIb25c2jXAbxoksOrIBe932CbGBlP6s8DAZ07bZ/ObHL71pQWthzlfMDVjgsja20Hkc7wgu4yq0jPponTpote864ZIp9uGaq2UXr8+DC+KVDQ9XFlvyvl9i/kngSZXizV5Pjp+BFRpPn/LxTQkX/vf+D9Fypv7PFqhM5oohL4Ts49F9hpOYSKb2wvLGlIVmllnzn5BAlTP6qsVH+A02pkslksABCWrSacjN8B5N7VvHeLaP3J9S903p8WPSt33XszUmaPF3/Fsu7dtn0y8TURhv0wtDlD6bTw3DRTiGanlF022qnTGsoG3r6oU/5jh+FwHF9f2m6XBt8jDK+aeMRYNCCw5w01wJ3T/ch3UfVgEuc9NbwaZINcdaMIrB6qm/acYcEuYyJMAP9C7cCMXIwfum0GQkwipwvVqDL3Mtz8d4bRNk7sE9lrqxLIW2wDRVxwSsWckkgVnj39HLggXxKJxxUhMoRlBcBnLNb+SdO1gZg91PGK6uG4cqUkRGut0RNLmGjGLShtgcAL1oYHTQjepSGasbsla3ekPBzfsONrOxpD+NvE/NHW9+VivHHoVU6PpIwI7HJ5sK2stlAnpXyXstnL7N0MsweW/ndKeAq+x4XLY97t3vmJYOx0LY06TcTnISTd+ejNOgCxWOYKBAaYB4u+nbLh8O/EY3tAIrfn61MTF7HXA5q1hYb36AstI464FaSGZCRvsZcbeUh6fI/o2cPltFVWcakwrwiRjMTQoKR7LKv2VuNU99RmnWyze8WPJutSSPPhP75CGDZ7GST/OAZ5H2kd8OV3g5GUbXAO5MOXc983XHaFGDk/R9xlkWqaWCUfNRktzcRwGSjOPtybxV1nqWI2NqgXNcWFABqyXTL8gU6Q1lcbPJ8MvPMAMwpEAzEfmQbV0BSvoi6GQ0WnvgjqjbESdMLnZqP1SlgwRwQvfvN8OUJGvwfS/vUi9YgF602L2QYbP5esANWoPOLGnlt6pSl6bYypqXD1BAICS5V97kaIk2gRarb9DRitYRhhSNzx3vwab9Hc1D/56yRLr6vqQkeXxuZ2m4eZDaevudZSB/My/y5aUvY8/htB9dxntmAl/OAgeKdnItUVJLQILGrvP5W59PjO7QAYQZJrmLUgbBRJ5JzFzv9Jxgsod0K81DO+xNbOUDv5CecOU/4CBN6Ovb7282uwGPlHx5/7GEsg4ZaNITQ4tg9NZCq6uPoc/t6V7eXjMJtdj5XNwBG6pr7KUtTrF4EE1sO/9p+eLry7Q/CWJJ9wYVt0IZ+xabi1GTqiUlbMi7n/QbdKffmZ3VRKuZ21VZF0qIvEAVnMwHuLyWtiZPwh2qGou5WVrAimA74ao/IIsuWz43H75Fk2FUIz43B9ttWZeE4EHDoMHiIBV7X4yR5bE7yx7CucFRKbuF+oEG62OWStD49KcJ3EKv48eXM2JcHri6RoXtkZtIzW4U/dHFUqkMqmDLW4xA/ut7smZ2kS+xHuklzAA3fN7VaJNBRA9maeqVJUHev9kT1t3+aqE657GAsZ/fmZhZri4g2QJP3M5XBnGE0JNTvLlHW2kNKjsPMGfzYHYpQSJPRGAYfjJVbBqaTDEh5bLLprO5Oq6LCb9A7N6OYcjgFGbVRNUbQ7lfCOmVB0w0kGnUMdiSdjSvXOv+AwM7DLpRhbjcPPixEAMXfC6SWGyRROK0OoP+qusHAIyAujvpDRBYbjR/17gnzssmlIeumk43tVRj1JP0B1a4kov+g1ekKwUjvxNKxo9lb2+ru/7W6V98UAEgi+uGFquuGV4ZLyaKSnG9qdmY7qQyfV74YGzXeV1K2Hx0pRmZexMYg5c6HohKRO1VobY7RMvluTNK+xnP0fFcC2yFHSVGS+vEDRf1KUwaYVfSKp+HwNvT4n4AO6V3PLE5h3SisiSIVf4OvTt6x1g4hqK1y3mwzyUY5ZWy6r63kCWMBebhqvFCkktjdGRd2k9ZWFy4qDWWQ8eiYEhDp0ocUuHLe9xY/geSEbS4XwasvM/73KWGk2jN4WniiE70x4CM77pRlk+s+6D8GXo9+iFgu546Eiuvi2rWUDVBWmXIN0O3tsVA3Ar8+P+9sizVGTMJ7lQFQ7/zmc6cloFivcwc9uff8A9g8cxTmOgMRWfqV8F2ejt0g6wYXKFUv3/CMQjeE6uwAAOyC9gHibaBgr7jsan/VGzyFF4hEqTla4imTVNZriQUTKCEzt7PUT1juVuLlHjAezZaB1LXX0pcewrVtXuBWee4xhZ+gNvQfmLBumjuuZZM+FrQwieVMAhXnuxVWIlD0Zv+lNGpc8KFDpgSpDV16/Hc+wz5xjH3QhU6B29xNVvB4LT2SpECcxO3GkC4LDC9qnvyDu6vmzSFsHB0rem0k5+oW3yhQZy5LaqgfXWCkcz5tF5oxSKa924CLa21LeipjZx7byMR+hot/kTB7YTfy+RLwB+anUfDnP3YOhUeEZHfJfFz5yCcOXrOKQ97ikDMo8ABgYPsNoUVgyOnt/HJuOSqdKARQqd6p8/H5WtQ+lbpKkLNNlHvPnTRTi4IWltWiK7fZn0lwD9hGP8DNOSWBlotcvCv71algKYmO3suLyrax87Xr09jcYAUFgpTwRyrVH4SimI7EfzH91BRFNF5WQDbA6qU+uOeJ9fSyG/veD
K61rRiwprprRc/vrWEZ6PMdF+AVxln9cGcTy6q7Z75pur59KvvKO0tuSGM9703YaKHehiX+8ejB26ZhHcXswdYITN0mDsX1KXUXhllu/v92i5301WnD2huokFc3ztNgiwc/tVxZcURUqluZchZTU9mTr6mPDUSrP0wGnWxg0LsU102p6RK8WWY7y9/9gaqwbecopR5tdxt343lQ06iMq+vQtNceFmZBuLAzxVbVDnvOqT/rI0MTO/2Z4xQwj+4UyAchDBLPanYb/psrZl7apMTp2BwYzagu39sXwPi8Snu18u7Da22m14P1Srg1RUvXPFdEaQq9lxS81TgkijshWcQsqYuHxK5DW+ylHDGUe/oHZ7BCvggRSG/DVr9Mdox9MiGRc8w3YQgkrs+SvEq/8AdhQx3ewPlXDmfy2o1rZUPr5lIOKpdDmW7g/4+DEAGonjMG8LQ5l4N+SwwBzB2ZHk5s6+Lta8Ar0BBpWclHEv2j9g0v+DodePukJbzvVFusqWPnPciJRUs62rSaS1u+13Pala8VoRjobdZ/QW9NN27utg+z4AiwZtPKIUy3DIVta6jzDGwUpFYyqOzbUH4wWOZwkh3fZ6n68oJOerJGXdDOTn48gO1ykIMjU2XXqlFkYrZrJWJndhjGSHht1Ufd1nEtyqKAoPfJOJ8OqUmwYG1mz0paFJM7W/CmsiTmgbtSvfEIH7shzJYgy0Ka1UreXvi4TdPyT1bB8FjKNHlidhCvta9YaIZGk6vUszya8SxYi70R/IbbCOnqY2b+2WP3uqAFKnYOzUwnEfvE28Omrglhm5jgvpv84YG7U7RI7P4tJ7TnJ7AZ6G8Bsubs7/Z9rqQxFT0d/B3l77bmOfwvJ6VdQz+6Qj18o0iOeBgbCx+s6H8mdjK6gnTffdqwLfBvfpRGvKh9PrbVk/3AfnS0KQGBdB7ljUdtrum5zs4cjSNAtzNoIEx2g5jriEwH2+LcLlBNUrTFP4ELxGUvRRtZPGf1RPxARUvvFUCgtQLH/fCCWL36qHTMvFmCWSjO/gsWC4FDGwtXoQFZ/jB+qrbLaNXIdYR0hxKOw/eG0N08D3W2OhWgln+IpsU99vpiZUVec2hxbDNtihXUzvie9HFva3sSSdm+INNwqq2qB/kW7MU9wjM+rA5gUqMkx4ErenK924MwSan9GDciIj+CGTfLIMAD8IOFZgnxdhJWiUD2oiEtjgO4iQa+mDqQEm0c5gdZ2it1oumeU2yG10xwIG4fIRDWBVj6opWPpFr5Pu179trNWVdYKfhbXrUOK9xiAALqN1evBoqMAQ0gFEbCcnz+KVik9qlBF1MPeGdYbv8K3SrKKKIxlOw4bYcZy5mctUAgOyTIFbFUdr2UDQBR2S/rKs6dJC+uIu53GB23NJNMsWLHFeKU1XYgk8tFj4+qmDeAZIpw6yYmDd7tob8mYR12Rncal2gBQWpJidplfJ/JB+BZlegIYooikDiToNOaJXUgyDtcByHS8KHh8c7u0XiOu7TjocEmzBvpEKbYdPtcyFem+GXcP15YForor1T3DgCpeXNQBsqzzXltMkRyfAfuaTmuBVj56b2dE5nZDp7FtgsK4WKojhkeWflaOQw350vKt+H6sq0sxWg8J7I5lcy7kecD6vr5HIH1I8DryS3y2KjaewZgb5RNCF2Ribn3qwunn3BuA+VQ4XLdVcN8T0n8Mn9YtGOzEK6zlanfD89MMCmAuAUsUtXUuqB5C+UQqfJGu5pLrgj9dG5G1EoxZAAol1rlPNbVcUHGwdGfAWsHvgrBFgPOl4cNS+WsVkb4URLjHcPF0gGyCfVpK9JTuWkznTLBYrpyjag+629YYf8hfIUg/+Yll7+kNlodL5WBel5PIgSo3wpx444iQ11WICQWSQk3M5l0YozAScQAENV6uv5AdcpehWps5K3VX2SzCJLFzKMW5Lf9On6SzWHY8H4KybuPGi3l5Ej+IKmIvZItEiNuoZiq0w1TG4O7r0lXWFA9+Ju88LUaKryzgW/SHntoakwfRhXSigIfOuY8nrsc0iQYuRPK34a6GmyMsVzvuJi3EmUHNj1MTXUu1LZ4MtAgc7y6LZYNvb9YNEEX3Z/kJiFX8QX7vV5ucXJo1PSA7H0c/MC7nNyZDCbHKFGYJCmzJ71UTAqVWpoa0IxoVE11OcXixjfiosANmhAk9o79LR+VQv8QNtxeM+3Ah7MESN9VrqnCI/wj+wtP67e77ayER1+GIAqyPMBfDQwZKxiV0OYDZJesdjtXdTGByKTH+zKW2MAXSHfC3BhzU/gWI2786b7OKPNdXCMCqNa4d/9OsX2RViBj8fXxf29DK30b1bFCk8IW9XBbDbm/hpR3Mxw+OivBYtAJh5YPIFOGo8VWRJh2Usan2ELnQbTx/MAo2CFsat7MfUDu64xJPb4q9LMRzBt5NUWvB6s1M/1mlHBYoP16n7pjplK/7T8VSiv7IuMJ9Z4EdyXsAMdMGvqZ7k/BZv7IcfTpYavh8GrcEnT4+4GSiXlFNvf0N0FY07WoU16hhwRZ+28vQs8aQlahRQg9hD92pzONJ6/7goPc0VmXx31LK9edoZclJOvPxQREgECtyqIwhWmcueQn3gayUMwY1hkHY0XbgpAHBrZghBfnY4EbUXJDQ+hTI4/uhbQ5oWPWIGHNpwO1z2f7QkVu41lTZ93NXEN4qhNwV2c0tP9ECEJm3wQL5q/Y8wgFi0913ZmFavJj0zLKbhSZ8iLZM78Y9/lWj9losYlYFPlT4eKJWM//b2UJW6+ud3AdmPjrpipLzJK2IOd52UW+7tNY1fa3SSYILZPE0xn5da1xh/LzctwJzciEs2P0g22pi9FNUg7YbRqoWWc3H5UBXU7IWSyC6icGTpcJVlAdGxEnmeUTaFGBZHKAkjwmchIr9Y22HRuQmOEmdz9514ghvdtClgj8W7DAHFhXBWhgHI4gIsOT3u0vg6P52VK1aunkDnAuYjcZ89vJR1quBzO2Y2YhQmFZovq7ag+0G1PUJeLnGa060y/o2AMS8HqgKMdsGM8rvSxQigP01IB92+n1z4TjHakVhWpa/KEkvGLOeFUgyXkkV7IyZC5H9ht422UbvmY1nK8s+mbi3HuGUdO6ixXT+QnCSU5Y5VAKcAi0VdB7evXEvJUNTFr64n7UsTdq3zzQjv+1d26r8VVvf5+tV0xm/DZcKXsnELcPwTEacSRa4FQu0rK4ApKd24CtoCZwk0B9hEcCzZFVO+Z1nAeB3JbyyWROYXIoISCnRLyGqVSwnGPD6Y8iihTN64cT6E1LqxBZqUvffeEkn2QZxSQj9eFeFPC/HrWPNY8pGdAX9dgu22lLkViRVmCwWs+kYBDCrN/jI0lcB9nZz7nZ2Md4nZ4w/WRkkpUETmVKSZfnh0MJU+n8k3Ou0UmvY3FmonG5OFJxu2ayWmcbJ+A3FUlJvc+gS8DuGC+qi/9BGyL/yhtJaCj3F2ah6FOrHJgRpby0jItcTJp1IRGojZ+BffDit0cxi52xkl8MN3iX0A7b8nyxEp0SUd6ivTHNO6OsZWK3bJFG
DA0pUpzVpxKCvPh0ReH+OY3leupmn6tAYLWLMMT9jVWQtdjQhJ8bRiFFQwLe3Ch/rpaK48i/re7AfSUqXFcr9dnN5qW8fdabT8zqPnP8TFG12T0ArJiyjIHdhyeFIyI0qWTD/UHefXTEHwxWp0uRj7b0K2RXPLHJL+n5+3Os6jzYtwETbm8kN7jx/VGCqNZm4FNFW2r8EfTY3P9lDDskWN2kbjjos7HCkDFGwE4MKyiIaC/nYCcrHifSxOrDYpE2dtlWqoLbIof1SKZ/a5n1fCFnPoiwTozNxbNry8q7Uxy7WbhJDyxbFTczgWZhKRg4w1OVXVO+aW8mGPoEUIDjwzutnodfZpo2joU72D/H1D7V8Abj70gzBLmbC9X+R8EhaSsUOCZmxyihyd1aK14FBDf80irlE8b2wl2IsfgYR2vPs/qi5JmBOX38NNfgXVXJYQwzjvBhTuTuxJ3wgA2bNiT3j23nBKXOryR21wngLzPRYoHNDAAujCsRpnWfhCHRBWCSRY9zGjDmsL/sGpqLrtxJHf24hd/hJ+SfJUhgzPblgYCxBz+o/v/o4Nb7kcZonxbWr78YjEjepDsfK4kVDjat4pZMmcxTL1wgtecGfLe8v+BT3yyLpKTKA5bkj4umQM1TUNkkxt4AMDCUPMDOXR63NJ4PEe893hwV4cXaLmtQtqWx5MriEfrL2hYPW7x94tFmI6vHks2PuBnIuLF/F/74BOgFvWZ2wL8AeW4+HDPuqDqmbR1Y40S/35r0gJ5/bSimr430PsWvrAvSqhqAm854z1iLia2KswxbsSvsaC5vN1K62TEEO9MaK1qOSZQiodX25stUdhPBXAU+R9STYMXWkJAZikurUIUEWCJZwd16H45hB9qmkGCKsU78B9rnvyi5H6vhEz7NsQ2Q+ShiyJPkEnUshBifzg8XFbiZmCn5FGvjbrLyClLIDeoS7zvgKUgaQbTr543U2zOrJLxVvKCu1vf2ZwKovQsAVjA438nTPq4U1WgKP4u/10tKPbz4wzvhntLSYEhzbqnxwYzLfiBzcfS3Yv0c1voZ/UJ1vb1QjaIGb/8FWSUAIo8sAN9gk3IfbOtoFUWHC3fCwNjQ9DqLuObyiXfvo/8HRUd4pRN/tO6vfPdNztbxLp3S532qdTF7h/ldDamNlx4mjN3lWZ4RyO0jlnz9+cJAkrJepQy3VOV9YocqQAfaOsB1t9aMiHA53swJW1S0kePRwI/GViblVIujWlJOEOUoNfhGSvZVkslWuyumNfxEFqmfR2yHWEiHRl/5TT/4o9OKNmndNxc74WFSfgmND+r/8vdduLIy4IQRRC4SxRjC3GCJi9/ypf3kNqLEvnu+4QMhMo6yM4MxNomqBOrz+JWacRpkMof0HZeLqNPsA6aIobz9OdNrrj1uJWMSUI8z7DrrPnLjCeqPmKnR1x0t50kEizv1mg3UrS5yv94SJUGTMk8jTQxTBgeEGSeHTLnQwaZwWmYLadcnnhC+gbCHfQ24EblHPRHlpJXm2LutKYyVsdcKzI4ED6yacYttUULoL1iSHTft4jnZKU2h29On3//L0t34Vg1w4XrX1fvfhwu+cmOGIjLdC502BO5VsJF+nzKyjjTWtQry09IQQpaGaJiMsy0RLv/+3WKtqXAVXO7q6umpWXq4tOxFHZNk6ydfhFZTyKQmAH5J5tSszKimVizeB6mvhwHkr46uKr3SfJc3AVVrIxJYrrc4G6/rHGhOXZnbOE2TzK4MTo2NotKhdj7MbLGM60+5nZeLd2NYky21Q/DoHa9jT39izxyt/AfkmNlH03Xv+aTS1epjZMbUnmZ+6S7e0rlc4wIvI4jG+2fe/u2dL/mPOwk4ks26w2U204nUD4PWvgSeS6edYysrMjonzSSEZs6laQ+7yQ8lDVa5omHTQzjbNICJgY4GGtAZx7njoNMP25kDwurnz/RJDaQPs6O9fkUDnbrIb3DHRoEiOvto6/3rsyRI5Qq3uhLojwQCmhIoH+fdYhjKZDjJ/EKSSwHD/2BxbmBzTZx0IsDXVK5SpofjQE0BU1L2uiJtkpGSYvHKRZUExOVWt2/5306xgoaeUZ71OeQdncXQo6nx7aQ4VOC8/u+MEjy55bBdDRFpi9g5gNBtAOEEmdBOD9KQ3HzJnM/o90ezAMxAAAABjbtm3btm3btm07H9u2bdu27Q7RQe5NSfNo4eUGbWEU+4flgJo+7YuIzMsfP6x/OHNAFWof3urYApiI4E/VfqY/o1RqcGetZYlow2VqXNDTaQtkT5HhISP41WQKa9fKxeVw7pVc8QtNvDnqAH0Hf9UvmGs+K6l/kxVCPyOjUucrgG3m+YasR4UWa+SXpSXCUHjUoyRy/RWYgjSEn1uL5CV3H+F5UWCjK/Ex+wiLMF7W1ILbz+kZLa/NEGf1Si8enTspFI42FRJduYYQm7SkiBOiM6HLLRclrKZtAgjA1phRbHC34BswyuOpXRj9PdmI1fkoLPeP1Nwl9+DWzOBmXjh5Td5UcjqFTkdhqQ9Z/nXVDrjO9i6L2nO6py99+S7VEWbmXOwc2rLV/UzyITvdVmA6zApxt8GmemTy1n5ho3xdUqMwp/07UsaPVHBrA0E8o82+RrGIzXF/oFRg2MTbI2guz92jLIZid+gICvQ3jXI3hqOLYeFlLouLIN6NI7JXo8aRxcg2q6Wkkf4NMg7p8U8DILwpBPGPpFqNW5F/4CdPuPrPRYtQEOq6utKkLjojd7yH95iMhSh50+KJ6kNdhoOkka/rYT7VrJWMu78UMKA5yooXwx81YDCgs+KyWnodUwwk0HagVDPmN9czEa/L/gFE+uQtfXLW8UMdTKIxRsCr1Uosrmmi3aVZsdYTcPCg1KkKGwL5Y663nZoASmUwpk9P+dTkxA3PLrfInhFA6ff2qQ7IyeHY+OfptZVEha+s9lACHXnsUjEWKl1ocEyMAIRrKLk1VORRgxL0u2ejdzQ5olQkrwRQ658PCkXm7eoNhsv9YqcfNRZawL6QJFE/FQqbpDw1b549lIBIUHdIz7UbDvXwD+1AYY6Tfr4RtfjJWyoOTKowwO3C2JvoRmhbk+O5mIStpzBY8b58Ub19yWykutj9aU5wCasX5eh7mUjrpKa6o0EO5HZK7czRZYJPHw4o3OEwq26UlpGfGLt8KDWTcVBd9ae5GAOxYB2459vRcmddc1Q39L5wY2V3he9UBrdbDXd4YcHya3y8I2myETfC7IpZFV2NicB71T/UmNEUeh/sDcYmfRjnC3K+ufl+w/DN9Oga9hhHILE66qm2erPxT2A3bfE8Nmdt0QEeY3nGGzya81ZOd1wk3w83FPZS2S65Mrp9OWNEoUniNUsKqf2ra3F9DtpPEHw6xFrHVPX0v0eQXjijTQ3UX2CdioM1uqb1ssSBdDvYLfIAae7R/s06U+c3DAqBTL/SluJfWQKFnM5XfucHX63NeuS318s4rSVo38o8SyPwhS52KmYpHGc3EnchZ+IuHJt3sMAAJ4HBFq5zqkEWFTIjE4W7TuTxeYV8uGlvBPpmRMVv63kos+WBY
eKZGxdjd44yGm1NTw28JoU7e5Z5RsXGnn1D79Eir9oTAopiQuJ2lizmW0Da2M0cmno+T/NVTzgDHRGIn9vq24zABikyCNoK4Nubi3BqiuAUlRVdmQ5ZozP2vOvprH1H3uBU7AHDoIGwLRQM1Y81WKMso072o2AQYGCj1+tlLVkqBMf2UMkIsuuGFEL3YVaS9hlnkVaWJhmw+nO4PhXbCW/J8+v7y84uxvQRo+Nb08fd5k1O7PQEunsMr7lWO/RsfE6NblrzJY7irNAuSDdXnZZDbuXkqGlb6HBsDMnz8kWKgh3PhKauHPKxzaPHWTalL/5xWt2rW7zRuLXm8+hOpR8blp4t9CHC0ty9Q92EDNQSem554Xv6LiCM8k523/i+2H242Nlr/EkB8PV741lYtDHNxbZcGBdC/vKwGPQKxS2PD4UkeFNPDTU241tsD7Ib2aqzDk9rnXywBWN6CVp/+KNkmgcrrQOQ27KySxxSBOdzT6T2jBhMtH/0e0IIVM4+A5M5LX2kNxVRTxxnCJGd5bkZSyjqGqIKQQJpWYcRUhFOPJSoYMzB4EJb2Z0d1FNQMmEGuJG9Flrc0FQMEERdApy+QI7VMocgHu85tNHbaDFtRTyWv5ipyWkmzHzTXYYXKwE1XDrpZKtJAOj4OSPeB0su7198XZMOZUm0Mt3cEMrPA0VKMn6ZSfeIKBTPPeRbhmtnn+AlEjzxJKYvjp59k3nnaNfwD/E/gWH05bSTw0kEpDXF2rDdcsZu9GzjLB0TCHDr1EUmBmCuDJv14zopYC0kAVi16geaF7rsRpsxmWvfsqiNw9G+N78xBwRc4G08Hykbq9lOEUChWY5vKo42WOBacyOEV3Wopis6XO0+ItEOujWn1SPjWWpEFu7VKZM7COBvhEqbeIPvE8jrUaqBverYU1LqIXbmLPjFTtk0edZtANABqZG5pz/kmKlNAipk2+pBEH55wqzy/txivkww1YuNFkgcXIeY0lhXlwMtROpL8WyX5lIxkP7VMVDG3F6LKcMbvgug5uCwDuN1WkTKwhSgXYQEFf+GvLe429zz2KiB/xn5hvTsufLT6gAwmfPDxQrF8L+/wTG8nRSrS9HGpqA7ca9CQQf4GI4Oi5VTfP74reN0mwStH0SMq/d+zjE7TxVDvkSFBamw+sCEhbS7Ivdolj6xqGiWX7OpxeByXMjftrQBZEcIN4+tT2RRBHkV8UmtwfEeZzZTCirCFukLsMyG85Bs2sUUu6chJiAXXERVryKYVMkmxQITKOM8Rh4kWc+hdalifzCQA1Dsk2nhuh5Sv9iDmsFqAwmIv3jtBR/npaCFNCVeOqTVBKCwPjgIO51o9+fnICoMnYnidFrCPX94JgVoWcDmyEXgqwFdZwOKwZIcNDhMSUnJgBqrJQPKyF4PgrMEjblz3yLbtHyJpHKRqnFymehpo9EdYlOxYnziuyLS9EYeH5+jT4vABw5GqB5XseNzKL3TGGGdZozG0T3gWDsIoBXQ7cdMiZHzKVx/eJDx4az36Gk3DKx9qAIgSNknAjLqMAxkrOTBHVcW36CxASpTxtxh9BRMBp/u8xWRjACV0eDnGk7qDjF/fchBdQfa26HOBWNo2fx4HCXlsjlxtj9dGJydDVygCPEqB+6a2G2GrSfieX4+hI6cvV7ela/phjGfuj9NkrGDFnV5yj5WMtwSaQ50aAzTH1ETdsi07MuCOiQZwhbyFG+3ntNVdOgWW22MgcOUUW/tqyCjWmn7njMydzlm+SaHNyr3UsTGR5KrjrKTPuvytvNQFhVGYLZT+TNtcpcbce71x/ULhDufM2CA6Gag84/PY3AC0U9Xb2NUfOi64TU60PvY1TJjjltW6494kAK3tb6bkTDBGtFxT1KFTErxZnl/w7hNLLar3RuKCB3icCcMhgrJBhWw2UvjN9To8jFsGcj/QLQ0ddg0sVjeBcvDrarVY5iDGBLf04dj/o2eafrl0ReLThuAVJeGK2+Ks1Ni0D0zaOWDJjDgU7blLzup//2KuJjoNJG00Mk50nzEBXWVjYuNHc98CzptoUohvjnKdtFb/b0mqDfb+Iimrrr9flTFe2CpBxGflfE4v6NhfH+oYUziIkhXhe5HxOWpU82BtC4SPcnZa2zWAOE1oC05+5GJ2rVkcPuro0GOm8+zacnHzvyGgzPLMvwRklZzBBqXR50YxhiuUWjq8TYa7eHcGDgT2JGVzYbeWKDb8yJtouLBbjp3mDRd/2g2EPxDddLWbUKchI83FCoUklcdGkrvjkGJNIo+KMREHWx0qUKs6BP3rNSKFFZCi0hIh6eyAoYAWIVPeBpj/5Dpm3/eQxe5TJ6slZCuZ5UspBXdj8Swzp0oxDRhYSFLxd33OzRfK6kegtFvJ+WjAXKSIv9Qea1MDkfzsUXs4AY+vUC3xUrMTEtGo8hpbP+T5QU12rxrny65b68CqnoUUJmIe9+MyATingxnYd5JnjIzlz5GeFU7ocgWO1eHW1W+vKqXqirnVbV83QhMlwkucK8DAnNDisX6FfGjuhIgwjQ+NdDhX1SMiXlyujy9KW2WPY01NQeBg3YKKWQRvTd2x8pJRmgcD1jKgfLwZSJTRW1ua7gsqaxz+2g25898mFNOH5E/b7WCE5bs2LpYWYTcJDfu/7nYlxcOXlMrzfh5mIh39MzSqaW/M83TimyzK4GzewDLGodDHQb7ZMq0B5n43hYIIpuVvkSowuvA9ALj7+Oc8Jrh+hHH5N3YMc9S95YZjL0KZViue4YCD7smWXK/UqBzfaS1JEo7hWo5u66/MfgXSlDDdW5BWgdGuiNh9wwRCl1pogAt8/er3r+M+ZdRBLhZJ2dWrhpCJOTtIU32V+RoJUlZtDK2f2ywBkXXlv45RHg7xmsNVT+tWMeQ94rykkpT4Z8DP2nBG/uv4JSVH1CYwlBhNcYTpnHuGumuS4kr4XhCrHgiLiijp0+xsSkGZZTr2+t6+Kw7IKUZDvNyPlz43I+a+CvcAbCc/i95Ofr2nYazadcV4SV5dVxKMyEs24GxxrOlmWbq7HNmrANRZTVlQSV1bb089DlzVCseUTds0WeRDo56Bu9eKCdns5Ev2JUxBc3KXyLrbbJVBD12oWAbUN56SaC/EJO+m1ghmN0U6cex5v7ElW0HbFsLKYA3cKq3tVs4ev0tR+SO83LLm4fFOQoc+x5DIdwIeHACtVB6P8vZVIi38rH2AjNM14n60SgeXsmrQFdBdk7qiA2gGwdC5hAyPseKGCegq8VHp0e0YIgF4GEA2OBJQRf1fDaewuCxHL+H+dO7XCKtU5ykEHcfeOuyMyVcpkCJflU8Q7ewQLnuTOPgUQ3Fr3pkw9AFV4FoMCjzfNppIQFiU3QHr5B+nk0o0A71/CaDXI2SdU+5b2L6aUNgfiqLn81W3Ir+CVlqGVYFF/8Fwp0bTuH+D1YMZdG+TnDrFIeIN4qIJr10dv5fjgnssFRz29U8h1P+u5LtqWyqSsw/jNQqrvIeAbld7VGlnYKRAvrHfakHlWOTtkRwLpe8oDJ4fLdNrMPb/kb4A0HABkN4it1E
zhuEQkyliua+uiFsB8Gpt3ikZSwAGaGJw4aFB+k0LKzM061K8/AFSZ9D+UuopsViDx+2J/v2ZoWwyU7MyXJW6UO8bwZb9AXJy+uv6HNB8OsGPrYd0Jpdce7FobpwgDVl7IDsUqdYvmV0dMGOcoRNG+UJB8Zl5+8L0RP5n6reNT+CFbqqVxrQg8Omr5sUcEcKND7YoeiQBtw/rV3y/gbpmNFi8cV1d1lg/VqvPKzztLXE9K09l0X6F7HZTD8jPOxsXeCakux1aO+jZU/louq/+qZG+wrvSjw20KGwChk7fXnNi/vwWHHdCsIOHRbOO2aDmKUVUHwkfln3+I95ZoGXMOjtdrPpxMKYz1Wr18SHXTqA8tj4StiroQ4eZ9VYYEc0BWvN62Zd3bx65ijmM0pbohwUSkq7W6qpQ22zO7fWI4iPe901VnpluNjh5P5Xx+jiwhM1skf7zgcS8Cmj+hxlL/e5I9vmDYHas4hCel/O+byV0hCUxRknXzsKa3+VYsuw3+3UIlh5KRXqE5hiIv+wjHyZi7VFyBtALJpeFttkjy4GsmpbJi4/4t97yjzRi/zaMV06vuOYxWe0Kej9+/0xSWC1gl03N+6IRETtZldFfteto9UHmYeNkPq9fcuAyaclCvhHeVVNGeoLnaphszkZtqwWHEGbNVpqP/TDMydf+0xPWRL5itwn2NneU0/fZplSzJ5rU/8uDL6taD4+rAlyB5qL6NyZzlJDx7IkQzGZU0iibaRnuCY5arDsangcUdrlS9Ebyujziq8hZElpf1HwtuT3POxLjWlWRc9wxzF4oAZ4p5JeQ3eSFXaggRBDS3CWVoV8rUG8SWD8RMh7ZrcD4QosWTX6vRiyqOCOlH7ydf07zKgkWFgA46WQ4w/SbGzRTXU4Wlof/IdQ4KOFXCJuadtuokezmtQuXyfLcMhMJU+vKni3esIlAMl6A2txwtNNJGFKYTnxIkjz2ETTUehn/zg7OAO5vetLJorAVrwTvyfhKvOtLOPx0JdgaANHqh1sGbMM+5IbyhY0d6AQ9HwWuQaEhtxnVMaDyeDaqpRbN5A7eSQ7nwWtRY7j2OtPyHdSGGkJ7AIcslgP0s6uf/6CsCvqH0p8SOGB+dQ9et2Hit0bTAqVN0KyAEnvgklxYlH6tommzX8Gq9Wmz+I0f/O3HnmfROJsYX5X0PXZyp5Kbu8KHMrBqvjh9JcUfwcj29HwRsx0hvYzRGnzO+6lm9d9nCVqz8iG3qmRJ4ELSaFM4Bcb2NHIWCGpL+yOZow2WsDm7H0f+TP4lAw0Km2FpTHJcpkkHAfTD4o7rAUoFHlHoMwPLdWAbBYVoRO9ElD6yMQdu0M1sZiu/2ApPzq799oxIFhp9S2GamnTSGwWLUr4dYa1e1aiXl1IXsmjnvALgHRfEeLoaw1JFRuFuGzlpGSCMctLhLDv0+1k4Uch1qbXcXbhd8wqH04+3D3JEXe+J2rPhrPrWJeH2XUhdq6L+xghs4ZtCvEKdpPtYR5f+yAKdy3Na1SmA4I3z0BXf0eftVl5JcHGgNCm++HhzR2aYzsWC7qOP9jyjCkVpeShF/AZGQ7XnXPVpcIRP/+hQr0+fBWWDjXkk64xdjlnWHhbE8CvUaozFqG54AWbGuuFcohnnVQ5vENjcGK6amScSI8yjxvbcvuQvlpTT+DTkNocKS2WAei4JPmNldfNv51Dc2OcHCof6u+bCr/wMP5IvR0nxjLbQ4fcqiqskzdnO3efL3sNgofRupci3NRa3OupvJuhcxssf3p9eGTPIW9A6OZVNhMO7OcvGAy7y4BP1iADCZGy8ZYMRmCns7w+Tjg9eJKGE1aTlKZtCbtS3rg/Wcb5ZMGBn4g+JdUj2ocmn5kugewYOI0lxycAAbrPObPmEZqbHpOfUzApvaulEO35LXYcSYrk5l+IlaWXTndgHPvNOgjsdI0JX14V13zriHdjpUQdEzPYIUQXrwQVHsphT6ZKbYlx8POru03Ft6WJNqAz8Ikqml5sdJHOu6tfuHUd2f9ml2FgGm+bUGxTwwvgS3WmmOXBELnzNyYbmslScpSXoCN+fpsUZk4GGi/rpQ2zVcoIszX5cKODccgK/QjIicwPpnBqDmpKygY2hLbQV+fsp/fC85T0ShFFBAcBg6Tnqx7fO9izyCFbJaCij8hPjdD7doS2eg7d4xwzqTOgl1GU03mMJ6J632rkRtewW0Z7Fo+luGPULItwe+uySguejTZ8KF753AmD+7EM3NjsKNcmVVRQGhX01GTGA+tcmiViwf7GHUGOy2xs/3Wfdx2C+/0OcUrtwd2zH5UEkJnMOC7+cKK98KMFaj1nzjmESeJpqHV7VeD2D+tCdLgpSkBBU8vfwO4QRyAF2MBP71X4NzVTuYQT1pq4b2djpDIyzxRg0Hxp9dEDD4xOhonGxNoXhZzX30hXSA9KsbPUwU+EHQXNy8/UgO5xvvxwoRiuXGA8l6eR5ORpEPK6COirG/RkhJ4nyYzN4EAIxVl8JpuxetNHJkPo6VOPLufPvKx0stai+HDsGlQLzmGJPEHxcM+UzZj7Wylp60I841l+WAuBPJyI9gzL9XtmOFp5m7rWk+pLDkGs/eS39bIGCB9yvfztfNj1kDgbD8odjZIPvnB8iASkIKXJCIQfdk7Aj6yH14wyKRlKJ7Q9R0tRltqtdcqKcltwrpVUEQ9ac/7wuwvMbACBi5wTfwHS0LYKtJ1awFMYCvlPvvgfyreVlUoM1Yct77Mc1uu4yqlomlhniZyaVW9uNXRjvKj3LS8eQ4x5ARIosSMlqpy6Yi1iP/4iCLJvPC3sTxjT5FotUeqbGOxbZOlIkUtmKwX7vBY48eU6adHmuVT0/9TZPyu1Kzf3JWrofvtlCWKQl4NqDHhrZL2UUjOkbUrtbmN84qAEQO0NvmlFVxNecomC8sZe0H7YhoOerKpoz076bn78YietSXe77ybqZWDbyuq3nKj2EHFlWzMF5cWh54wnQO+wE7GaRVlvFJQh48VdNvj2uVy9ZvhEQuk6BgckJ+YTuV+/mdrE7SgW/ewHCI+sXZBvd/6scexN1VgLs9i6q1omkGdAq6tRlZPeJgUQD7dsEkaBUja1o+0l4hXBMWrc7Kugtsfa0a1vh6NMfnUFN4iBPgQfTfgt5at2/h7CPW2bjNSe8l/p4tr17JwUMOQwGRragmwuKaR/GVKo7bDbLIb9UNxQm31m/RuSuONAGBY6Mhzf3nNTN6/He9oa9N74fLrpxC3q6ysW4VD60CUybUc0H4cjz4e2Yn6m5IZxG8SL6n+U5LQzC1TPmuWgHfeSTn62kh0SYZXlbW8vAevcAEIr+k04ptPi84ksCSGIocitD5BqOq5aiW6USKPPewcAn5+d0oECzKndf/4FNFD8IGnSCGX9aCnjIniNrqJEIMXWOaqLZW+EIBNq3F7LCWFBbjJcTIjfZkZ2qP9M29+foJ+HF+3M7HDo0bMDLdiVLmA4TdBFvKpf6BqCQSXN1iaSr+/dogUGoeNxiqkEK4QitEL3TqPV3zCOJk4kwhbwxXMsE/oZuzULeho
6Cq8CKMku29Rneulz6e+bs4OYJ8M/GJbyhNBDdavbh4XmPktmKtGjGwgsLBZFsoCqYcYTOHZZl6o4hTLjTHXCXtVN0f71eWkocNA6zxOwluIz23V1v0lsohywSzYOzzpgtzoijX49HN4O8urQuAw4v3YziIB74ekNk37g9svg5xDn6v/CzvsS3smmfdqaLjAGxCq9GD/l/gwBD5swBHAYzVF9WQZOJWUZFqMHzzkxU8fxgXZ96H44rMZ+NBIHP4d5BGqpRcudZcr59wygbRJltcRpzSAoC2DWT/U1i/Y6qITN6+RzNYgX7Bkac53lj2aiSa4zeiaBI2so/DgsDYTqKkvA74VvjXpofNnXId12syA07RwbIdZ5telXRUERYsVPh0HoB40CkkyR0xFx5CwPrUiUhm+vZ4Dkw/UZy+y0BHTcXziiMmiDqLjhM6hpCY6562rSKOTrZNVjY2PheFYxd4yjxJh/zwwjA8bwPjYTPTckuk/m5x9Zw9uvv77PCrZ4cXDd9rpAkXicZvH+NKgOcwXYCPNMdFwYkvKr4M4icLrIOah4TNiYX/yThGxKO1shtJVLX/PQQ8eXx07mwdPXuCjt3TC98gE9WLL5zVAusl/3x4bBhO337XCkhxHZYFLf1Gr7k9o4KfSLM3OPLMTKvSf+Vmkzbvguj0MlyJPuJ5udC0YqrjI7KVjqwuDVSRR8eSeE4w3dC0S6SjtfKxPvbWOkWXiA+BY1mFvwdVqN1ACegbAWX5gQFZjezr2VuIYjK6QRdSWCBF/BGXTg9sDFTMTqoTks8eGf7iJMvTpvtV6vK+piwHOyMCttHtLfdL0m/vo48uVyH17gGR+O1fsFCc2k+6t+wbHsbZmEmn9AJ5DmmfI4ZA/kLYb4HiQ/xFzp7CH8WBuxahtBPfoTy0/+pQV470eLYlMG+aQhmujB16sCNzU6qHDLI6O1K3NeKc9KelZ+W7FFeTgXlt73Em5YyliFavkjF8vz3U/xPH66zfMIvdXowSenGhN3g0Ct+WkngdasrUSh0l3mLb/OhdAwaEUyQKksFevnnXfBq423eH+cf4MCH3FpB5rXp4zIGYnzUPmVRJ3uCr+c6fIJFLyb+Qu+EobBBVuLoRyYGiy7veQM1Pymob5TUu7b8mQremFZF07J3IVjGm1Yn1fG/L7UJ1Gv2oJhAzU2p+Pwwg7pFgrdGumM2Z/vaRYLpOY7CXJZe9uSaSvmgc1jorsERSQfqUexX6x1nuHV/7G9LBV+zzJDCkbqL/B4XL1pPRhGfnV0Mq9UVgOS2rnPblC42kQZkuaPDrbWSnEcQo+RdLSZh1FG/qoO3d4UfDjNpLzvjd8P6Y0ErzBKhpMqKLg5PhntPFHGxHG/QBD3+CfFW1Lgd/EbdaJ+M+UZZeafvb4wwB0YiKN2/IyPQ2uEc8zWUTDRvzqWyDj81JuYuRhsiFIZzAduHXWd9TxiOcU2TkgppnJzidIY0TDiJc0q4kSKaUYfjOUkV+KvqaSAjKrl3iO5I5YcQ9W+jo4TLGqDRHMGCITBwjWVfWLBXESnKQM89E8bE3FcAQnZw97Yba8ojeCgtxxXIh1KobeOn+THgL6vX4Qps5heeOJaDzDzVcEyoyWxK8sg8v42DuHoQL7j70reF9shBRF6CVg2d3ea/vU6ReMNdTPws/HrTmFBXj7YW2gTJH7sLyt92LQ9abjsufUL7gBTL2U2ldbnJ8x16iIHzx8rsj6SVTD3lLKIsN+9lAPn+1+JsCIds78bYc81yxo4QOVGDHsVWM3xfKzv6Y1lzg0vj/X4HAJ+XOMf5/x4JodoOF3FWrVrLOLu18PwTYgy+OA/mDNcj4N5Bk5Pr8zqawH2QMDgdjQWImNJttgSpxQXqkIsasmMKU4wJA1C/m8zm1DBglID09Or8bWog97djMShUAV14v3W7YXJ2h56m3YrMLAY30DRPsKdJFcVHJv+iASTc7QbXnsfs+BqHpREXngXSc2cDkEmIbHSvGJlf6g3TDHUKfcSTCbgNsOrfIA2EfQTBORHIQx0/rgrOGJucKI9poLkpW2Y42zKIpx4Eo2azUf+ZxpMwPcNtmHDgG//xSPBlx1JGVueYjq5PxKX4HJo3sqxHNL0uHcWL/jkAzciMuU96GJbxMZ8iAe3XM3UR1LpMpn0mlq9hkfrRynopKIgXjkA+IH4JE/8ttzgZgLAYpo15CPA8t7LaGx71DsWs0CurOkOiH8JCe2eFkPM8NmxRAOl1gn6zi2JXmiyEA2cDfOpuqqadsVVEJRfKUBCwSS6flBRpaMO5Ngl3M2C87Gx1KCkJHZrZfi/WRkYTNdOtAGd5KuNDOu28KWhojSjlNh4N+qqS6WDxj1zulAdUMtbFJsopzupiTxtmtC4TrSHuo/gUHHooBstYbJCsuc1gf6wF4/n95cxcIzgJsQzo9yq7Tcuv7RvTUMJGX0lRE10iqQ9HQGBMwK9rrxdSTEm8h/l4UPBz2FUNcN6UCHKWcTwoGrWQju1AbWSnmAbUxX2uQYQ2sA4O7S5Ts0ivXzoZ2lsgBbHMmMAOUc7artTQHNnyc9914o3yR72AKSTUcE5h5qa9+w398xwJFtqqPAsDk/vEb8P5gMG7Ns2FstoLGEqhXySrGBZfHHWUFfhA2ar8zzUo6MPfZZy4loujryI3a6l51pMz8PX8HtjR2+YX2xeuG+mv1P6NOI5c5PPrDfke1TIdzUELH5JkfgoVh3Jg5bVyKi3g2+5ctUOYcHLdQDWvHWl3Fyf9PHJ8gAZAEX50go3f+RGzMyhvqMYRlF8Yvqrhbo4MiFftDkC0A57zXE+A48Dg19IMkad+/KAuIw8FQa8SqjuQFmLCWqkA9ZchOO00PWn2p8Xgha9cujQ8W3gHUi2ORpwi1lwZzEnI2YUgcgjP+RltlPC/fvpoXv0CDu/SLaT1fvEMnajP0SL1QVlV4AaXyPjj+DuZDDphuw7Q8tSqBLjPxB4LfrxQoUWCbSp4XoSN1U8mkDvd6k/a43d8mMIvxb1AyOOhHBoH4+McxG9zicubV71CqZSxBmypxxFjoLhjwHV2XkffIQcB1L2jTiPXu/z3MaLsRRcmSF3fM2Dk7zfZbJKW+RzeXZs8+E+mw1Y5mHE3W4wUgrvEgcvom93kVUg0LidNd62qkaRJgmDm5U72GcLdR7p4mzdgwouey3rYb+aeW+UuTH2FBvv3x3E0PAvP7xCXh7iQEKATcUSJLwlT52RwD29hfUBGIWsFpdtA41oYyN0u+nButjAhnBKvMMGoebDT8PehhwvW01nlI59Ud2oZssqxA2vyfF/YeFNlbA2tKzeumk7iYarqDdPE20URxS3qDnpF6qEIKym6St55AeEA60C+5/30d5HClpqwFINAI1mV9vDTFcf6jglwf74VyFgDH6ahhALvgjArhHmp0kWGGh33wlDbfBetFSsQhyFNAfBYnQpWsLaqqmlO1Jk3XpVBOL0V/kOt/O/ymBrsRrIlwJHt7/MMN3oXrPFDkbisfDmxtRnDuwW95kw65TdH92V74/zNvp/beoIpFHDheDpXY
7/PlmolojzkWaSCmHeJ7ZCFRdDQ124oOixYVsG39mL0lGtChJ1o2mn/ibvejiZk4C0qZ/Ozk6+1zCNoY7tpYNmuvutUF7ELHo7lcAIWYgB1RGmy4QGpiinxrxSzH8uCkLFh5He9ruKhATlpv1HLjQy3D/a5SI9DGco7G/k/ZSiXcrUIGkfavuyx+vZWX4EQkokWA87ospkdyiS8GrxyI+6eJQg8Wh8n/VCYB1H8HooKIs3RSi6NYiPP9PYHdTngOsSrhtGUvdy58mAJgipWNndq/uS2YcVZQ1g6OfvGGN0wS2pUTdDbsICGd2e9MJ+AM5wdci+RGIRRCqGD3USLqh7gITeOZi32xH2spPZL/vOjq+1iuhCE0tnnj2SPX8TgX92tKgsuCDokXuPj4pn9OPheMuJxjpxxDckRJLz3zBqK1+k4+0xHwLv5gdD03S3A87UAp317+oQAOX3BlkUKRuZqBcSQGu0bC7XDjatC46qAVtvhTolDrH0bv9c9+vtMe4ga5HxjMTJXpUHh+p0NfezRfybGGA24oHtrPbZyGZUAVQrsOSylbtLVVFGVEiJENCS6KibqO3d8EKIE4xVAxJQWtBUawUDWQtVo1v1Y7SdQSCHuenDHQ+Qx6Fvj8GkDhXJAakmdlmUCJISqvv16dQDhgxz46qyqNgiK11zSeQZ46MMjFcaGHL1ZPQcbvaqg726f7GmC/uyfk9vB4LgXsKsmnt+6XJBlIlZuIqJYrCQa39PUHe0ELsdR3SqvLmg9Ki6QS8pAB6PKrPiMlx0uNLqwYaAkJSFvA/pmcvHeZ7+VqKHJEfWvPH+/TULMzcQtf+jClndCGh4ZCevBR95oSF2FIFgML+JF2Gkw9njihxN6A3lvRYnZwNWvIFNYLSoEnPrOGvZbVPYIEpQ3dIX+x4TbaHBD6IozgM/BZp2c1KSkyETAZXmuCvzanHAuOPQi7RC/5cScOZ8UJZcJNMghUlE6u8cy813tf9PMefFawn8I+aGeCo9brnH2XbyIsFiFZjDNBK6M6G8UPsCtLINZ4rhyJWwKg8wWerf/KpdQO8nhxBuDUXM060mgynULaetld9oEzFKHLjh0CrGe51oBYmyC0hmT67pyrlB3nPGnv3xa5nFWRN34sbQKXPc42J72MP5lb2MgvMXw23MuOTjBhGsqXgnx5vcc4+L6ANwnaRUqmeu1bYkaZPJ3rniIlu+7dKX03BP4/TZCtM/H63z8I4DpPQIMcGyEmKZLdzRgFFF6dLj88w6HaGjCu1Rmlz27MchyVueKDNLeaVghm+A6tVohuKRRjC/ySs+R4Z5Ax2JrMis5UsJf41MDYPk77EV02bPBR3/s7NyiR9SBxVPw2PV83U/ZR9aD8a9fuvp+qJvCbyxdbRN6eeIAmidTnZIns1ahBkTx1hPaUXZoyA94u+YQCTRMNA4iF8ICwtIcMzioOYJVU+m4ZUbn4fBLiH/+1OSg7xlmL9t4MG5qo1cKmgHpZn+FZm3CnErgXspkOEgfl3IiIKAYF7XYxE0272Ne7nOaAbzBvXUq23gAGngOgvB9QI4CjdgQwlrbnpiPCIQwcLrMjn9foHYWBJCAsy3NnF4FF6X/pjcv+XWQkp662yoCWCrZW2HKD0S0B2pe2ROW9oJQfThjro2Bs00w0MA97P1HyP2l52Qgy6wZjXwv0a8aKzbY9BjA0FLKPtJZsyjNTtRSpSnXiKA1lLeqwVMsHjUCEaCWqykjgOP2JakjBc6SwFbZKgXwhoeYSNhyUucA1NzkKEM9pTf4OA3Mfax9RYVJRjme3Q3To3woYLrfiWXIKaGHWwX4ybwqyQA9Oa/+9Ve1LyDnJ5hWyHbOzluL3AO5TeaufncoAe1gDB/TBzYfSyWVrsp+DuRHXDMsj6G43U2VS/CQzaxsMRavW9cIAgtv8tZ3CyE03kavul4pb/GQSao5X4wGiTc05GNmvfZ2e4CivIzpgadnMD7yxUL6qWDn1Kf/UwujqGzSFjC9dXdKb1rwtsL707L5lv/L2COnk6RRsj8tXKZhHrN7pqgUnaY0MX0zDB1/aXHlBwFTAF3UDriRYEP40KAcTEuKlV+2dQ1xLyTBSeJozRZSoDlzoAdEv248pzQFLIY/+D+ojYiBvxncoLo9kfW3GiDIOkR/XrN7swrThjZwGy4759DxVQzY2O7A77OmBRnBiEgPOO2woQpdnyJPRNDRfvMESLINgMIeRgNZ+kQ6+CBqmS+nl+pXXXg1vnwYhuwXROaYTjGmkLIcDICiBg0BnJ5OntJUpWtYOub191UfMdjG9q3ZmvTigvz2QHLnAIUWUk+igmJawPSNSNg5M3xib8vUgsUID/IkFF8VuEhqAXP54pG4MWzUs/wu1JEtBs4oNhof2ULWAJbgLtwgTqpMteGHyJimKET0DJpcLg11mYAQXkjC6i8A3UA7iQephW67lmov6WfsWS58Z/9sGfZNNyAEDFSh8AJu7SUorjdkD/Knen1mcaQDEOslHXm9Fzl16pzH81D+Syx46CXnQ4xU0TXW5ng2g140cw0LlgI5s0LfNiX1zu9jYOsWZN+7UeGtGZPZgo1Ypn2OgYji6S3GZJfekS2VUoIDe/gIRFueuBC4YxuEAt/TJt6NxIDwPEWQ88Vv9xrX6a7vOCW7/DGZKTw08yILzp/gKztGVw2k4po4pdh475nql8pSbWQVw73vZGMwN8uaVHns1NVgpHmR8gu8ug6a+2FSIdcvccT5tnWlu0qozn6hAMaVTN/jwH3fXrMb5HaKu8rVdVP+m8rbY4vs8qJ525RS9jFTTkGlzDt/d+1PBgUA7u2p8tBl5CYFE5F1NQaq9MUvDcBCP4ECtSGP0NyaHe7Jj1fC2jcvEdyA52KRhY7VXhLRjYuEAyCsEG39RYUCQeQPzBcnBPAjjIulucfGzrcyr/5bu9LzX9eTvVs7Nv42CmDIMzT384v/Ibdtb2meug4FKaWxABeJ9a0fKDL13AM6p5aSixyH2IVpXp4aEm7rENcw9T+NVt+VfHUVP61YefW00soghAUjW6Ghfxe/0Idc7BysqSR5O9QbueHy7VwEhc9inGdNREWriJzgUrYFkT9hX1iI3Zn4ynYYoDxcdmLfjz1pb8eXsPa8GWS7RKpnLQZ0bwBabSzZMRMgm4i49Lq2L+OES/0fNn5PWPNu0SwyyrSKi7utjS1m6qRl/ckKWCiQ0AKyG6RaXD//cPCqnLX0suRg9sgX06nH3LRoxgvu4gp83Mbry2RYwjW91whxg09mOZLgBfqyXkkmba7Eb2145kjXz3TfdpztR9N0cys2nhMWSGN/a1uRNpX9tXM8NNKF8M06tKNfauu0v+Gg1RKDvqnpAUrpHVot02Hm7ERGR0vSIpJiLSDeOvRfeFBOsMU+wifE/jzyOt2ObLMkVU6k8txQyeyUqeYW2R8JkQt9vscYx+mGAs+lOnDFZyiLlzRnrSCO9n0jmaI51cH178HRaW8AvWK9iMa9Jjtbleh5ZBp0afy5nSZr8+KyFni4sz928ig60FOsrSivTrysuqdgFp8MB4+G
eC1BYwAZUU1yyKgBuaT2FERDo6wTv5Iwl3j7EHXH+haj3/P6SBHk0dQXA5K7Ug/FOLv2gW7CILWk4YjRTMNWZRsOJl0Dyo5e33KwWinj5t2k1Wjo/CADNCNZKm7OOozUItU9T6MeZ+D0D9nQkBY5kQrbfIF6GNhb85UDASF5kVXfuwafeobncLc+9BnvOJYtw2sW4RYtE6OFGZ9JClvExbMjaqiUF1PDgEQ6mGXJYHKtw98bCNqvJ1R0dKRsaurC+mwBNseoJxlagUIFN/hYchV2xf2MYNTokRUD7XQdYH1Gk3ocWS31sbCY6FSNfweSKkasDZLhSqGm8tYKFE0iNJ5nIK7gVb727yvU5B13jHuHroMhiODDJio4P3yuXrJe5+77BQI7h9SqE++q31YDkn9zMzdeZgL/j2adFztYbhxdXZpBH32xzcR4L9PN037A+oo8ZFLmyk4MvsNojRfOjBw9nQ1S2ib9a/uybkDoDtJR1t6NxmWbDogRUh328cLuW7XhkviG8Uf+ZrAmgz0CEzyPeGt7wBh5R+GzBHuJRrnR0WrzTJK0Ts+BAjaYITXm2qliDFuaTzwgcrQGuSbLhKF5FahYq7lILK85JLa/G87Wygpty3xU/nw27ADx5DsZhZP2QsMUawMMC0wRi5ZJsIHFPsvrZu+OztkoXS0WqGlbaf0aGuiXMW4ynBTz0ip/PSgP1esHNF5DrERzKLkuiGfF7Wt50hOXUMAqIQjUOT5WouLC9lVt19IAXei5FQDvogllkVfgnryH6+U+fAPq79e6x6t8rOY5Jz/2dMqVY5XX3lkY0fstowe0DzRNbRwtcC1yObJoZp8B9yz9nibg3dVpdRu5OMtVUQwFVYGgrIS+1FlvEP3KORSahKhBjS5ibUSg93uI4P6orLkmZi4NHbvvv0R7Mp1Gc/czm7/QhDBTvtRijO28WfrLXSZR2fHMXBgyZHGhpuOHjv7gR41miUEBxBL0stEprpd0pDSIi50la1JtvINkmbl5LwKihE47gILv2P4q/eJsgrY/aEzMdJUc7z0xXdSvMTkO/eydM4SeI0Ijp4vvucLIGu+8r+pVLB1gooC5c2ad7HBQD74OVBQn8k0vELELbjr2rWzkBs1R2ExdKO2SX0/gWLSxVriC0ETokf3x6teMFiQRuqHy3MbmR5kTBBf45w2L7WUGNo9m4wCfFSSl17H4En7YpONz5wJ5gFxuQqINJr0bSLaPwrKL9I94UVNml7gebuSDSKZP4eNso/YaCpNOEg/AScDp0dL45p/WBGMZGBFqOFM0QDcBJvaLMxgVWsZsBc3srAsvohlE0tqyKz/uC0WkhIXV+7GktDrh/KctTk35TArLpfjbNOimohvbOUzpX/mkcwPK7ykAQdDVZmULlJ/Szop+8SzvOf6mOtAFE8u89PE+g95/xKHrsV/FJsKuzpgkYPevMnVMtoIAUkdX181DyfCd8xKK/Hf4mZHTObsT/c4mZkPj67a7hrsx/sglhbRB7b3lAXZ9E6XpO2DzwX5g2o2EcZ9LN0BTuErt+mSV66oixODIhXwsiQoiWzu/802rJbHpdT9gkfP4D+AkH1UYa6SQWQLFNn0pUAvsgBsieZlYaqfAlSSRGHFEGhQp/vT4ny7pqZhLmlUDjfyLcQZbyoOJumU2WpCyGHXRb7oD8ev0kN+GM0ih/pipNkleSoYXiqZK/iU7xlCfRXt5nQPw87NhkYRfydj893a63UIqmonO81BjDdg6C1Gi2bSt56j2uPDXexG1oWQlmlRhAEQv90hIxyyUca4EL0d7B7J3O3k01u802CCemFHmmTXJuxTH2ZjMsNlU8lu0Lv+ug67n4C8W9ORSioJ4OxtTuboIkHmGRYXn7whFaXlFa4wmM0gCsd5EktN1d6M93k10AwMl/J3q5K/DghdEuutLEWUSLE7mCS7EMWkE/1jbYJq9n1cGYJZFj/RlQGJyoAUkdJZ7WnLMCEiUxaCn+diXAynnbudWRaKFQC/yUnyOWqog/iCW6oGMVugj6jM4DiOXyEmhDaOZtBaAuWBI+VsUMeGV/s2MrJWUC4Cv07CkpZQ8zOZNmcoAs9l4oHnM/WvH2vmPcURWFAHNFcSndLlpWCLUYkUa/UTtacK+pSx2ns08w+Ly0naYG2Gn3pDg9Mn++crfODdI+3zYbyEHITv+U61llR/Eo2zJiR6DBDSGhZR9xSVbrllYEEMTMA7MuBK6+wy4wqoXLLJ540JYKrIPuGuAe7AfJd+6iyzYRq3peaFTwfZmB/grCPHVsKzbtqf3/Ck7qCAn5CejW7kUyGXzzYjox9z2972J6e8AipysfGL5PHn/LDx0UEjK3agI435BRKHbpYZkCjtQULRr1cmsMVtKpayFEwNERbAD5/1AXAz1Oy+gUwx+jZPpmQyjuGTeCft2iPoUoMf0arHriTpD2bgibdyXyGaYBr8KkypnP9JQU60hgkghQIuYNmA6fi3dDK1M9pHtGbIbCSRvfQHVOKt4q+fimFsAP+m2eYbnoqsEblOcu1BK29nRaSl4H+hu5K+EbF67W4wj9Iae29tQgfhKtLNIbbsnqNDriDA0DB4Hb3dU3CbYibFA5xSIQKjXLqdyT0oPTE1EnskZiJ7AY3+V8aZ/7YefJWnVId5+by+QpF4H8dQHOXBG+V/dRMC/SXxLFsuSgEl4bTAYYtHIQvalsPMftWaXSnhIPVrj4YnmeotOa3mzEhsHqu3MX3vMYEIVR+leVscuyMXpZOUR5WPo08FeIL2pRhWoRRgbtijpBUxl3Y2CQ0cF1CroIvXLnybmFd39adcD3aYGJWy868YsGwMSIwhMERcILq88ERzZHMbsOhCjQFtg2mXnbb6XjL3K4SYI8IOPzXZtZRi2m7ArMkaZl6/ul8RLpmT3z4HHWfyTDvI/5Z7ubk96xHOhw0ubG9xrzP+U+1lgyjfpvI9qCqUL2wqOPGNZq+DgkZxsQK+iSF3ByKfSCGSOoXAlLyNu0cp/nxBmvuu8S8h1BHGTrNW14cwqAj5grydvEKADcXv0J2jFlhRVvQz+iZe+FFuqNmuPqMnm4NbPfanYTyqpfOySsgUsoxDfOjW1B/NP9tVqxmQZhKpv2bPq2WOVm+aVefyehbKYVjQaF+AuY9/1Sv+p2UGuPo82yGyRrzHwpva50t2lFKoE1epuiLqX4qEv4Hf4iI7rLoWXcqtHPa90IWeLDZe41vN3aPu/AOs3Ya7eyd6hGoCgT3WuGeqBWTbiOXyv2qa5l7p+/FHfSD9RcrS7IVqhTYDC/2dArlY201SH7sxfqKMlfyURz+C37baAWrzoHUAZ7Yo/rD9hhnOXrTt9dT5EL5ov8ndJiXBIX5k4PUKpSIC5xf4xtOVcjhJ2lTZD7IZ4RJpKkb+P9fiR+ip2oU7J36qfVZHqWfHDbF1J1VzSjqry9cwYoNTY/vJIvjy+3P9syDcSA92NxmkhP07I+mmyO5k2AsxLEYmr6INBaDM18UeViXnT1QD33hm/8WHhUeBBYFeNpgW11vSlX1+ZzMtPLNQHPKRJ3JH
ihylx1gP0i1By/kAWOl5QPgNPkS9uuqBncZF56SXkk3SztUry0Eqk9fEjTzywjwkNJCShxHE8u0/tmb3XQ842GznYY/uZA8kQ2L/NmIJDSxSbt47bvEqBpiHyboVz/aKLTZFmlW+0t5IZC9RWoGAVL/fAo7xEQ3LJ+XmPuEGg5w3QA/JuEBAZVR/x2mG0/aNHI36/A5CXDaoTlGii/c+71tMb2uI5ECGW7gTvGHOAQ93s/TGHCedNi9hVOwJ6D8IUDBQ6LzYobrZ7k9Gua0/PXnbP6OuFSkvYUqQevv4/l5jNSWLJ0VyxeiItynehKoI0LsoFPJGM6FP31G39Lqf433ZDMTCb4WVyY9eYEfDxpHB0Yore45K2yIOChnScYcCC7uelb4lSNHHLqD9lhMtdKH+FZKVkJ/BCpuFvmBNw3Y5PoSH3ww6bOchA7nmY1yAk9PiNEGsYJhpWbGcLW8VMGKUZDoCzMubLNltQlseAn3fzKUH5y8WO/37eUyrzFLXCImBGCqq5jrzhsntctqD3B5TepBNEu/lJlftMhdshqUq0qB1HSe2L9KroJH+a3QOXKc1XD+ndZp/kbQixRD8uVcVG9RIHNN4KscI8On7smNJFZmGfjKJCU3xpYxzGz8qxerim+YW36QBcfAAexvSYwXQveRYZPG9pc3Jti6vNgbtuhFycbWSUUl6JRNX5qne8Yjsv9AkedxPBwbegaGweCwJO7ody8FBuWlcQHdQIdfk+x7lIN8xn70y+i+gLRflBZAyl0EZ/S5U8Ad3VnNzhJ68Q3YBz4CgpuILBs28rCeJHJhAOMIud2xrIpobKG3p6NBrsOEV03CRj4b1KPXF5BIZvpwbP6PsEoJeWb8hKOe+TC0U0IdKIiVZWB4H+RetKGCk57JHi4AF5oOlCsFnEKXy5ywakZKHbPzy/mRy57DDNkedTLWQqKEssG2jB8HuU9mdUVXf1p/jumHp+ojEAD5GF5EBjGTBC24SA/PoGd3IWzV9IYSIoYvQ44qAfI5aXMPamqtql7zq1lp6VVA8k/a91GPqXV8V2LJlTHD1huNLbwQ/pQJupkQwFMYvIzeeGbofdv9Qnskd93H5zsSSZ17kUBuN7MPbbqFnV3EKdJvlbDdXzzezBqkSARcqqT29iwTubAf9+rYKuIwiZdqa/yW9eWvWXYm9wayCFUmYcZ5EIVeY4m+Rx8Ytqg30FSrINuiDbnONzq5NoOXcRmPPlGvweZ0e2ewqqDmiNPuFUPB6BpyPfvAskhYTa1BYtIg5WBZGObJg6mv8hEvqQTJPLciRJ2Z2vaK5ZNQZdNlG2Phml2kocORmlr8m4R2EQo8WoKavOdOxwTT9Gun9E1CAgQDlPOJ/GvBO8rW0o3oECfQvJ3gCMHk0g56d9zVPc9Y4A+w67vn7bKItty7k/eqGrw6zuxLGy+PutD1ocLTFNO3EUxiEI1/u8kPIBJE9TVMfXxEKze/Evnkirp7LJCL39osevQEUOpJr84g6y34rQzSdupMvOCg1ST8huRVwWB6VRY8tdj8WA72qjteBazt77Fc8BCXKW/yEjgy7fFoupCbEjFp9iCY9K0FFeNH+YusfH1RpuI0q0L7QjzpSF2mY+GHJytCa1b7RcucEqE5e75hF4MfMMVe76jvnpMRyLqmM+RTWZSZERKbFnE3C40XS9bPLA6cJu0j2R06TOxuKC2WCwTGmABkDr9nTBns07qMbx+7+FgAtiqBj9i+VYSiGTwdXuZNDy8GMgt+tbsWdEY8XT7Ka/IE7Df7Rj4awKk9XYmHQ/b/+P4cTs1xKXMLh8yYqB2fUgFCJJRJU1h8YYuo7c7waP2Nmp10eXkp2R11DHkT60eBUMEGZ7M3GZPy0108PePlbM5eOB5TJSHUdHkro23JlgqoYZomshC+U9V5Y78vbUgryGgD/PdHUxr6lZ/NQoJ1+oGpfooTHwdtMkZQlLDBijMlvAIU4ibJnuk2gq52/V7l9tezmeJJzRzkEvSNWOb5VtgmJ/XVbW8J+sGR25vg3PXqh3DUgQVbUfOGQYTbvG0iz9/ywUGauGDW8FvXLj49kp5jZERitICGShUAfs7Aqx+dUbCJ4FsZeF1umjpqa+5ZA0w7yFLhl0Mu5sR+vNYKNzMdGW/qdscyJBII2hwaTjMWRS4kwRlVU8xJvd50FGAJPbAoU2VGSbJm+kcXDeJb62ENwfi7IapDK6AY/px36a8bhHTSYZLRj8OyUUPyiGGst1uDo+hldSQY0IJhq5wFZ8jB1FE4LKkJtZuYcqxSuAIlYISWjxcj2PFa/WuyfNgHp98FFuZo5TM1xizmY6WQDlCgMw9hKJuIAyXat1rWY7Iy0GjVUqojqCGNbtbBmNql7kr/IQlZ1gTUlLbSRXhcIPk2ZSla1bqjzYON9UTQ6AFMhUnVdEgevtr42LKdZ1FjTYA/6QgcsDgkqsxauw0CKL7WutSKqsiPa3idMomNS+OWdYiOWeg1ifaZ7lMsx3TYKQfzGzAZQqbQvgIbbL3XNGg+AJwMCAP7SyYoU+D0uDc6nDQEyPMUYS6e9r9TBFdv6wiT41Fej1ELJzUYBVdSIoLo9P3WBdHhsUJXvRFqPH01q7TYjAtMe3J5UFJF5N4LZBNcQ2BO3H0wf6EV2dazCXmE64FMpN05bhmGVUdrTU67hSYTzKcS02ayt63wbN+STNO+ydFdMHdyCrgNjC6NaMLsjSPd/pNuDYSAGAADA2LZt27Zt27Zt27Zt27btj+10iA5ynP474yk1O6EwA/vqyXS2DZMngX6U9A1ODcpeLFg7JxG+ERmcxOjsuIh1KiEYLv+sBHHd7fuLkFEiPSoRTaTjcJ0Ph18eztKTBhCKOjEK2FpwafLMZkRUkw4E1njUlRze8MLJPRhQ54yI+/HoxYb7EwuAL615fR9eq8TzGmLBAo6C5u8bn98Yjyz1/polG3JnQDaHYVip1zgCoNxsC18ZhcJ3CLD7IQ1U0YC3qSCj/BlMAudJYN8gFo21rhbgrFKxVwatVs7r1re5AhoLkq1CP7er5GIU7KUMUVkAxJRMpTW64OCKcHs2OoFjZK4gANrEQLpOTHOVMZkHJX6NGFrIsaSF1wnbCnOvTzOMnp8M1NGhcRfp92MP9Clw48C6Ew8luaITV2a0CDkICSo1T7Ow7pusrCTAazeLtt5wH43EFJV6+I5kTC8y+5KJQ4sqWj+3TZe8hY5ze/Cai8s7qrpssBnnewIxsVBMJeikddlp9LaWElxuSemZz7B3juQD+n0a84e4rmf1d+EbIJJxE4rpXVBzyIXH6R6S+qUsLjjSK6ygONMX536KRwNbvs6C3XPtR0jCTvBIVOrnWCvg8iS7TZhJPv6NQxfmPdQDDAjsUime6nTFSdD3cXjFRdXhEnNjNoabdSi6EM7gCj24DxYkwwPt21BJntQuk2+r9T0dg7GrD42CWBFlgMZ3Y8JTwXSfx4vGiXoyQZt9hLdXSSIPF0sZAp6MCC599vX6ROgl0iytz4T5sFw0ImyDOqNidPBCA2zDTvHSLt6uUG7ADAIx9dv
s74RHlF5IlzIZsJOVWqhve9fkx2wHQPkN/Stl735lkPKeuMCaQZ2edT3GJFQmoBakwlBk93hHZJR8hJnBEM6IFtq9DtsZm6xmaw6sLZhy0asutr4n9qNgUM+q+oITYit+jJwJZBkGyiLjeQ7F4KDMyGIjKc2bR4LE7ae1rri5cf+IRVRwzCmbG9ykWqyZEnciIXKwIEKI7m23MxD93G1HaeDc8ojwsdcOPiEeeKyKnh4lIWiyn8SuPHMqRJoroNuAw2FkUPgJBqoJoldJIWqcEikN5ZY6vrj2md5P/QawsQ33l7IHOZgBX1toYW7qolCCux7lzTNVumqSArBJB+MnA3VBUrAb2gY88qUm9b9hbQ0lU8HW0Qv8aiwu9UKTKp1Vj6cxWbollZNzrpkEZaGlWjUc5uDgKoGxujyoC9o8reDQPXz+VEFPKwBl3iyuElNz0eAFH2keFmn7i2mLF1ihqci6LNHOvDiEzR5Dh7DHC/+DDISixSs06ynpnDutTXSmJomlFaPFKQorIomj3QhB+VnWPXHYUhTNNNL2JB2OKdGLcsoCq3WoBHfHbofMRI4PIPiQfFNbPD55EmOlg0PsIrDh3v2NEXIkk8p5SO6oBvpQPJ1TQZndtG26YikMkmxXA8N2sWfzWGilaKqH0BJsEuwrGlrnGNyWtgS4c17XApUya6GNkwWel6Lxv2p2nJXNpcAwLM2DewlIXKPmcUj0eUBNGOq0GaEK4cJbijC32cJGvIV+iREWLvBmbuGog9TicVAW5jgV+jOPTKCGGWxodTeeEaZXmM1asoXHv3vEHfP4SXjF9vExBjQ8uLsVBgXNXLWZs1LHRsIG9j6WDBJy9djWPlW0H+D9J8MQ8zlYte4pi+692QFGh0kRRaqoeZtLtK1K2hzVWAZtgvrur7ItFujLbH0Z2A5adBR9be78Y+6jdGACnIIIJbYTuu+DzB+7UhRnhpE5pRv0o6IjIte2MZtuSg8PTzq5S01ifbUe8FI4Np9xMX7PkuLXWFcB9UyuqOc/gX82CTOJSlRZJw55kJZMW6S8uFUsvevvbHtpNX2EV3AU/vE30AKHZ7RPyJ8midaii1HBWmZZlrYQFdO/tqKnLlCVD0X4DMfmCPee/ttP/aZ8S0vvGiLVJvH4h0j5AAi62cmTbjVa/VVkSdrM6ABc1WLC5qa0kCsfQDkOCqzX/qADSPA4A0ypy/qG2PLOikw3kVbkZjPYcw9pnGB7PdF34/hogbHywfqcmMyUUs4vpee+HX4uEDbr43yXDKQyI2LsKk6I0pCmKehHAIjZWJMLyv/GShYHP6BwsOst5s4dEBy7rDrqEz+WB6CNxZD/pAs8OQyPxSMNtsSR5Br4LgV0LdKZjtIUTARRBkmChXEmMOB3wGC97oK+I2YSSXRt7/tvm5L69nBwin4C5iuKPLB+oyHSQBWV2juDWpk2ZuPnnWPcocOnUBy4IgTRdpPCpWbMwHZ31zCuQfkiQ+d6662GpGmwPEKOIzFwrNNpKWKCxTVJoeFS7ILJUX0HY4O30rv+W2aXfWbXCEN75md/lmRkMaI2DORSP1HKEydAhh58ZiAOLvyqPOh+YPyT+av6EMzQlOTD7rVtAGgAUBxVpiI6ZP4LNUnJdVUTm8t0lotJthPccfkaBFkN6ueXC0HL+1IBIHaH07JrRxheMXRkJvRA0C+HDt22SzAcGdEuVlvezFLgqQ+hyuJij43XUEpNCsi46lJHa94m7/s6D90BeC3rX0zrS9znUyo1erqe+ZaNjvTyAjCxP1ZFWCp+1c7yljhp4owTu0qQik3gyAsPysLGjl51amk8e55FZgkxd0EvvehA+Ojbhxa9c7SenBiUVtm2q6efXHU0RtP63F1SJ45qeBJBhh908Lgfw80V1/k072gd5oS3qivqwGqekbEY1zNDhTeW/8n9fkNXnR2KGcn+tlslCgwK5GsLzOmDBNHenjskRVqX+YU4krIJf3WvPQkkvGolQp8u1qTE9nxjQOstsQOq0NU7WxY8V1X/UsCJ7eHfDnyo2mTrwCM2grvYjEtChBDU6pHTmvOvKYp+AUioklSo+kfubiOfvBV4bSBmjqL+X4mXP7ip06QM4DIuI48+GZeglXCEfuS+2gdr3Ut0C7CbJGYslm45CNlPityopQwGx6RQOCEDoZ4atwP9yKd1NNb6b4KxmeMPKwf3s7OXea2Ui72P5PgWyCoOUiEjc9WaAr8jd7rmd3g7jmd2rgXlQPFQA3TUOUE1muhO7RDJfI2a+bbqFaKqxlPKbN8ZPibRGwdN6evTUr3jv4rvtQMgA95FbP39cbPRP2uzKPT6nQxY2UKjdBxg6qU/JCNpw+XcOzNQtBeZbn/HZKxFd2lx1fNqrTg0C9Z8y8kRzFzHnV0ELmSlNLrrLtOJfoP8C24Nc8gRarl1jeWvt0xkROcyvuTh8gKIuDH0CgjGPLtNCc2AhLKVWp12TebYXnwhCXBlRVueplFWSWqnkAyH8qBoCRPeQeFOsytDrINbZM+7nmlDI8WL8Nv8WWvjTK7hpu6jb3ZnKA6lLLOMEQ6Nb6YIEjcQTyn1B/l8EAMuh9WHIucrbNpDZvhfEcHzCR2+92F6hdIW7f4GxVmWqutz9pKg2JBRaiH1YEAUHj1uvZWrv/qaxHoirZeR/nHNuD3QpciJZPkxLTGTdg5qSBRbnqEr5YVbTqguMw8qGJpBbcTAmBR3fikCok2C91OapTVwMrwucexmV4Kp4iY2ExwyG1Gk0cXesMhHpBpJTl7F7M/MG1kOqxvBl2jslhk8UbHMWq6LGzttII0pTW22sH6Zp/BjAbBvRjl4oDH2W3sFo9vsVnNibYAzWUUzCbvCsi+5D/yKP32SnqtTNvD2OPSem6Rar5kgHqlazqgpNYVm3qULxQzdeBw7t5cYLLcbktMcgd/VgbhOoCq/lAiduBq3C0ngF3x/lelucpJ4Qx1bnVrHGTRUUom2VTUTaHcigMqGexHZIJp7qafmUk1Z9K9hc2ig6cHg38yBP4qRYLCDZK43omKXmoY8f+bgjtlIxyz0cdYaHf0WbN23hWhe84/Qc6TwH4y63nam6fdqYufEMSRFDkJgOr1Mz4Rfi/kE6lWJ9CRbzBNJG+FhqlOHxLXywGun8ayDsRxA4j1PTTlByhwsRL+5UJP+rog5EEp/OhS/2QIffaREHpTN0pl8oWr2GCtA3AAkZhBWq201qajnJYqb3COMwQ+bTl6Mi+Z+T/ukJroYE2mkuCTqTvCyWCb+EQjGq1RTt1t6L17M/V02IKbnzxzQIY8MW23q3Ylu8z6tD4W89+4OJHRX/XpsWvElHHxY8B1K9aEh7w4ejZkNOKWmnJahBXaaTZGwoA4YpazUgjtPB7j4teSA6ZJuSOveaQ2Z0j76E8fneMNnLKHXQe1p1XDkHZH+DrfFVad1H3SHEUlK2cTMP798puE6kf23hywzzdWn7O61OREB54IkVdwl+H/bsGDqUAWs8H48SjfERNDOXeKGXP2clV/NZd12RJNOlXyPWIYKs+8MMthqJR4VZzKCfHLXhm
sZjy3qMKYLEhl/kGZ5QEK/QMDyX5ulQ/9yya4DRGKLpIBHxHPkRBGXF3cbtu/AXfMQf1koXDd7UXmWnjnQjpAEHXZSsFOSHVs7R9rqLncYoM/pDt5tUxZ9Tk2JfTyr5mDCh4BdTjgQxAIU2HkMraHD7glh1HGcnaF6nr0SL5geJrJosgJ6BhD1R7S5rT1nWvuZtLjYQBDcesJfQQcdfONPmlvmu4LvnKhCImZrVP9E/ezwcwtg9KL200NyjTfada99xhof8lN1FoW4ak832xgDXlwCo0GMVitQG9UJrZpkPTXC0Gcyu728ehNNdWrauzKZ9TiVPjXfaXXs9Eoy/yIu8ZcNsB9lwXd9ENj4NShR0Mu5gUXxBW/EzPh1vfLMNTOXLash7zbiz+DfzsTHRHJSsED+MWyRIJalpS4sNcRvHKzbnNHSMeiP0aLcWvITzsCzXYsMWIOPkWMYEPk5nE7+ZpvU6ylKrh/WwSxVcc665xaAYv1YD7HI2gD//ogYkKYwHt1W8+Sj3T1QZaj0IdT65Q/c1RagmGdUwfftXxgU7UhcyHnGuaKIo+EC42MyexUM/2otJnzglRbSrHbpajKCnIFGZmurnJ6fbSoacGzD0PiBHr8sg9/Q0yMvcZ225q6a30OgYaa52yrYt/KhPDqUEjv0I3NaBOlEHMxqZo8zxKMaVv1aC56Cl76xa9cVzk6W0BxJGV9JlAXF1hYyHNP7CQ1wJVDmX5t7rQz6ERZOJC+kDrkneCbzIofyIac9QVR5Pr7tyVsbhtjhiCLKfmZmP2l4D26IYjxjdw5W3/ZRMBEAaK6ldj8b1/YANGm7s7MMZXq/QWICmMRZlauwuOVLyPZvuIbTxpORBFl9yGpEzjjl8k45O+dcP2Do7X8Pjcn3RNcQT+G+VkssnUSBBfn1S3eG9JMpl5HIHp1T/GEQyqz+XGXC/dhwQ4a1rLBeiU0hDLREyZNCbhAJad5HQVA2aE3UWae3qs1MhxCUeL7sJzngCSJcB7yCKRM7zWn479oDhhlZx2t8s+ufTqmwIFjMU7VGnwsbA8430nCZvgULxM2dv6h6oInSi194lYt73p5goQ4y6bCmCpHfAnzBx4QPAbQAOlwWq5tWv5IEAWxDeIKhcSZplxh1+++2J59jn5LNGWrvxMW/hOL0amvJzE0SpODqmGD4t9QXGebsE4SJGCu8DtfBcfGLovjxN5oEqHrFnQs4ys1T+HnY6X8N4c7bxNwoxZ6ckQHy2pGGY6poZn+qLzFPgrklhtFNVdSmWZWfkq3K+w2Qt25iEUpcXNMIdSz3V+OmLMhLQu4dQGZ4vPmMb+6llKe1tjvo87jmUl36DSbymDoZ2r+EiYv7m9NhURg9RyHYYfeyLGyg1pwsyR64IdBGDMNeR4s285iDiXEOdi13j1VNoRZqL7+jevGZL9vDviw2riEDHkweFQXQ5wACfsGkpi2bzjINSYGKVadnA4TqDYM2k+QS0xS6CHVV5S4oeFg8W4KDuizYiYtSAZfhwG7o0o3e46C/CE//ZQt9yS0zvckV+omk6X7bVY3pfoOfH7SUtn/Ift8dLjya6pAeGuYWozEQeJffr5siZj52ogR3zegQMqcEIieeUxUMJ1GtjMa+pr9dWUFLePqJqzMgkk8FevS52iMRzfXZLCgjbGjZyGvejqMhu7u3kZ6tyKQTVodeECEacB0gI/52gTHpPWfBWxSdWHWr7lxA3m2NmAjPDajcEcb/9FdIu0FvdIukL3dYwnJwQP2lyWoPY73gxXca0vWoWKWueKKVFxLReuWIGubiNL/2hyxLDrNSsXpeoXNCwW3JIOOlstfY2eY966H0ms838fb+JDGz79AC3AiA3Q8aGP8hA+6y2gg2IGxFRMnNHCBIp4lpqnAazJl57uvfkO8S3CvrAnlRed7lSFkZFCuYSyt0msyjWp14nlELbHJsocJq60KLUus5YlRtejz+TLuh+Xs9+PQMKY4xiXaB6dxBi6NDrXkaMC+q/+A8EigtePtnJtmsxgEz2qeWbl4EvwnXVPwEFH1rG5lvTlC0puynpWAkRu/2CIgh45mmeYFvQSp63EJXrT5N2uyl1OH846hpNUxtTQADscJGi2cyDf1en1XNA3AXyHN85SIkugHAspJa99H/fd+FwPldGSV0/2dbMk35+aLDVBfgL/0aU/neb14G27wIlM1NC0ueSYGWW3Z+UX0oNcgDm1CwgZtNQElE9tk7JxRPf7Wpc/0vLFLc0kMAemra19ORJREkaXv7qe4fO3sK2zBlvm+K6TV8oH6jEDiw8hQdiHV1eZ1wI15PMrA61jGU7z8q67GaqQ2zPZ1+SY6M7GBd0LpNjVkfRusMaB83Qsi1iPU7bdJRDON5jKCkRsD+FofZvjBeyAjoadfjnozVeylkiY/sVhqzDQk77r7Uji+9AhfDAdNd7Fa7WtqJQwgtu7z++50IjALbhm+ZzDLb6oMATobZuohfgaosQBG09EipyI7nMiHAkGEjbaaQ2FruZWm7zThTm5v1e66zvgYOotGKOm6zFKR6a9/1rUHcklejwxHda1rmDGiqfkV+D/pjhMXYqcmN9OuiOsnkQF5hC4F5kJr61SyGc9Xe0Tq+CaBGBzYUZyaKoQU6VilmWt+e45Tgp8/XlvLuiX64FmdVrL+xEsiNSFDi1+jmHNqbwCUqOXWV4NsNl5h8pQGDQE4c8nWvXw7Q4pnL2O5s4g5mY8BZz5Z8K0QXSvVePgVzGZw5b+3E9b5V2tMNHVVj6LxoiKoIPX27Mlr1X+bZQO2Y4xnzKSiZraUBrsssT6BS/o4a5bi/SoddG5/q0Scj7SDTAinDuaEVQf8jyeXhOgpP4X+DMWKOf8Z3XdB3HjZrtvebb1xVyNFArJHrPUt8HENQupT3n3nqg7n1ewOqZvWLk9jMUlgXlvtyoQYL3cxdZTDLP/oiWCmsbm44Hc29A0lb1FZ4NFMKza1K5Ia6JI3Lv/oix2nZI9cy0tEzK9NCI4G194m5BdHpZnIeGVCS90GWf5gmeWGzvhVRbv9QBDdKn36o7IpZl2TdfNNNnclO5ORS0BE7pYW3FuIvIbZn+XsObrKcoqsjIrWIVpcDPazW1+XGM3L+mdsgUj3urUN5GaYv+M5XG5E9dxtwmzG27VQ/dKRGfgyVVBsDQtySMuxtnynKVKx4HWqJIYDnx+kAjPaTxLkFcGcEwA1Qx5xqyc+Do+AxP09PvGfUwKzmmMc/5HgF7oahLh/mucetaTQ1ABo/O71+SvXiAvcc1XnRY++2weXrNqGPpC6stjI7baXlOmCd68Yt4NVQZGErs3ZpGf2Fwzm0NamqG8j0wrMPLJMT01ZGyfgdm/hoKsBH+V+epWjz2lZiMrWElXzdJHCw5fcQfFRI1kDQzDHMPsbitUuHQnVkWOsMVUEbgAh2kPwELWEMVa8IWBVOxo9Wd8IF+tnt1lB56zSLG3NXPodZJ1ySI5DtLgEMVRqd2D7EkCkUzof+VrxeL4xn01hM9D/vL7swTNeGZO76ISjpDcoRVuPZO1UAXZnZdHkmc0ckV
DED2HUEGFY9Mr41IHiyfEFaPaLlpPcq297P6JW7PCXrjKUIBNNWq8wVOK63xuMJB4yJLIL5J1hwswbzr2ansUV7EjA2EO3OJUTasOi4eUZrtyx3wUzuHudGeVLzn74wY1NsmTo3gxas6MuswAj3dhQIUVdP6v4DKAHVVExw0QSraG+9Athq/0h90Rc6jg67KVooUeTa3wMJzu0rv4D4WwXxl50rIulnqD+5IeuISRjqFsc8+Fzdwhw6PTs2cGW0yT3XBTA/Z0Bdb4wl2a+uUQKx7lkYe8wfeEtgwyKe8rPrnjs8Yiv8CKPRTLdM+XVYfevZJDTQcMXa5qlbpPH3Q72z4gAPEbkzJK4CsROPy5MYOq8kF8zVA5N4uZdJaLjR+TVuDjtzRciUDZVsuspZd4gPqU/8lQ5+AdQl3dguQRe/+kDNU6o6TogCuCkfaUuTVA2RXbSmtkhwM07M1TkYpp1yYXkGuO2RsEV+i9FX1vOZVlbVsqdp3MPR32JPNngt/1a6ereb3m7uT2BYmoFClUU8Kp1F1DkHsw1hNl9AYjYlWtzOE70yQvjI3VyufY++oRY7FCJ4mfRC6oYmD9ScV2fL315W0qcgAY0/nIiwwcmyxkIt+fYBeTTmOXHS7uWzYURCQ+Y0D5HETOE5kbDqav56eFBYymJI7S8Cw5gpcbEC82iAgZLAZ1oHO3cViK069lCKuFsQa4SUBLtZVBOeNbzIF2hnY//23zwByHSWbJTJX5pFhx6efAPQ6AnvNG41o6k6XL+gZYQOEKm3IaYbB0PGHNiSYEilvoI4zd3OAfR9CvILzIlZ6ctXnjjnQBggGfu3yhm9xR5ChqdC5HnKwrGSF9xknB8QkXUVuueD8SfY4Cc/E2ZjU1nBzVW0TxBrtOgkWwo7BkOec/IkCnfucFm4fWshsyzoATjLXnF3nPZZ4wth+ClxCU3bbrmsUmAPNs56lYlWUOI5gWGJTESOz0GHcsEjYvo8w1uRyvfyC/Xrdn6+G9nUL/IUAlwa7eHwhUb+Wu3ePPfifTb/8KaXmT8kY+AgTBjbYBSQNX+cYYbPjtDfFQ+A0AcZelJpM8KFm6TV5tVxFJWAJ3Dh216kPXCpKNlxu71NPJaCXASrEuttO1lOsvucSzu3RKIFGiPJW/eGTLwaFmbIw1BTo1MnKMOqxnGU/Hk+W6CIkSSXW92BvUuJxG4x/ogiXHQh9lpiFltdUBnxRCyu0I8TeOqS+KvmNo28CwxB3/ZeSpi/FljvAtJQUT670TGnxRb9M2bDdxiCNd/fXaOpIIMWXq0zyvP8aYxwPQvjbjB9EIwkSWOrGgSwTb8/i1BRDir2xZFKz/iN/yZoDGbnsUXXEbnQFHeY6e7y/FPg7SKOxBZNBj6P7oWLgWaIL8MgP552pHtghBgE/ZW8bEBIomaVz3s8lZwUdPA284E7f3V6X3wZecwLjzMCkvzAfRA2jXWbnCv4ofOl4uzyxCcFcC4nR3sM6sJR6rbDDREvpMoUQgPYrMLKQDbCezMeQXm1uM3axhsjC8sWs95mCPRPDxEuhlVOJxCfPh3fQuFCwMv4WqR9Z6tUiASeBAGoYvBFYiCYaOENj1yxNhsIpcH0Rl5NOgxrrMXggejA+kgOn/XjLbtpEokF30Mu9/HRoLNC8Sr0zIyRea6BkS1QtevAOd4k4YeU3Edk4I54xyya65Lc16hRZYRpxQ+JEFML3JaHnwXORpED+ExEwfwfty0l0o6o6vBfhr5rCwTPutsc+QIaJBSdLwsC1w2q9Hyt5S/uQ0X3801LXOayb8GBT1njgF89zDPOH3vBJwF/PmJVor21DfEH9ph9b0qHYkCCfPGK+oZqOFo7MFc2ZX2z2I+jGRd4tl0YOWY/kp0y7TC8Bravhj11/mHJ5vwpJXJPLoTd/mNUt5Fr/cNiwgX/rv+EBpVwfk7uz4vkc16kdhWyWrcAWFr1mggwYZKxeuUD5/kssOX6sEMlKz6zG3pNHStxMgiY+UbWnrrfM23OTQwVT1640BRqdT6JguBcJh4mr6W9vAy57wfE1Bpw1II7DdCWHbDF0h3ZBI76pInPyeJ3B8Dtml1oWSBYX3mwReRnDIt7sjQO3zLvb5tIUNALmjgQdL1GUu7ucPFocU3Xaxftr5TnID98MPcFsu11/3tjh3MVIhXTu2WshdDJ926xFyynZOerfHh0v8gFq27PVNP156KqXVW+60cBuC6lioQz48IKjjPuItjrQ3VBEmM+b4mDRPgwAVFAHd2wE0EgZOGYlrc+P1zI0PDvb5DzdS8w0WRoAg7sOurH9eu/NbMIztjsifkMXnR0TBxj9ftJc4094qPAYuzuuksBdt7JjAoZr9wR7oYF8VR/6T/oAGcNDBF8zImf+oTCDyV7F2ovaoPWUZHSUHx0+MI75FVkrm4ZJVrQFY9jR0PGDMxvaMIUFg3f1eivfHGsWjZdpUUV3u/MbXTK8qCbSyhG9yA1OALn91R32qOV0vfrm5p5DRBpxFTO9T7XtMYyd8naj6aMpd4xM/BMFRt0ED1AQAiN5j4L6HPh1eFU6+/QWw1lMgBMLoBV2r0+nPsmWGijtVZK44HrZe7P3qmin6smjT794cgdgpCEt+J0f0BWWpfOK5xeYp8webLeAZ2VKPNH8wWSgHVnq25obx8/SfQUhcFPILFN4pPbVNe9q2d5p7AhfixXmDr3xgXt6qqxjHvkVumK4zqnbSB+UXVShEObdsmQ21qbVtXjEoD+i+ENEXRpOJrinGViFdS3n2bxH+gV8BclqGrH9jGFTidsiRuI58sT5M9LKd4ryVncFtS2+2Ww4tMsO6Mus0AqmXUeBjso9VPyIJE0U3qcniqI6CYWYwxbKp0tlCLW4Ef7tfyCK+WPMxuMdXFXL+EHR1Xc51ag/nfXjyLqm9vr3VAToQEVEd/3PBukCAQltTMo02hFoKGIXYV3DFjAYQSh9drzhjpBRQknThm9inu2aWCLzViQes2a9KV2mjR9zXhiw/3tYuZGfbBY/XthEm828QsLSTjPWhw0be3mKJNpZe/1uuR1dp5UFvjE0uXm16DQpYpfe2LIWl8/l8bW1ONaE0huVc5hH1hV7DJkPAUCtBJcV8KS9YnGlVwsUFEZV15P133AqAkrqdXI8W1bss6RqBWI5jvWJqfZAJzoRkjIJIwAwLBtShGBtxsI4R9pASIq3dRfWUGvCGgR49RdJa6dfK1inACSy6olLOv+CCLJV91OgvaIxFmOuI2gA78+6O9IJbFgpTLyQVpfB4M9URqfYBc0yXLl2zlnF6vKUe3jnCQbm7XCT8jqi2+8Jw0OU4re1FGw77K4AefogeDI8p5clDo82ysQqLPY1pO7QxwkDO9+N3XabtZ3UF+/v0qrTcSZURR+OfbPezpgwvgIkX9SVMw/6ZR9FrYtMN2eOMtfUqXWVAnUiHutFEciyTFDnJ/QeD80uyxOlexoXaposGHVAo87Y4fMT5biC0eVoXnwh+VuBeaL4Eq9CxZVApMQvoWuzGJ5x/Vlj8aorQ+ixMWlsxQFBEy590F+7zbRAGw1
1eEaSV/2oCXiokjjHk8w0Pom2RX4cP0lxR97X8EV2ZK20pRy3oCKMNW1wtoVOG9EuQyZN0Hr+fjBxtc3V7gVQA037y5/05g1m0BJ1srsErc7TMA1cXhO2AZkjnwJPaCB6FQfdub3FNvXyhgqpvVump80Nq6KfFPBFKjIjsRCj+tgGsg2b4mktkK/Y7VPRQJAex2Or5gPx+OO9UOrWnRA2ZHQ7gO8bWwGA05oi4Zg55VYfZntCbQRihNti9SLWP/SWvDVJr7AmijHMwUi0QmGAnSOdiMTvHgSqLzr/Fa+fatlKNmdsUtJi6ELq13K6WuSOIT9VrSaxL+HNULTJPeFIjfttajY1KYpRs1PteoqRncBOc9k+G5YlBkZ8cnqxdthvGekt+qu69+m0p+LbsDT3tbGHtYl3otiwhCg3Y1JegH+gKW2kD/FfrCePcSo3szw2Qu5ViCZfx2HIPLhoek4FdgMBaWmLuE2E9aoWFb5VdisEBj7ppgA+x+PePwcHwabZg8oMT5fsujtkDzHy2P/OD5Tkfyw45XY/efot1cIDrfgIkUKGJ+zUDs80zlsyuI6ZnyHLK1S7Ubv4totXxisB23x+4Lh3PrhymSDOCsUEWOZIUPEZe5dS02oAT671Fs49Ulpv9A7TphL+QSeEFUmumMdXwNfdRDD0BnAgbQTdjTKA6qbXe3AUbL7TwBIv4ndyvFQpGHDDbTfoMgV5PCBp8n15jgJJj9LaSWQOhxPTVo3848lqUFv6FTQkdnPUcs3ml+c6a7tPxueuzmcM60ifYHfYPZRZ8YIMAxYUk6XiaYkln7z/yLIOpdsAf41LMwcml/QdDRq0uPNxFCtAcCUuk51HFoRimk0GBPUvBkzvDjLpstRHmUxnwfJe+fC1/YVS2nw1LpcreqstU8Grus4gwEYbD5dU4CveMcQ6Ampa2XIrPo8NqwnrQkXpDhq9qkLRIB03w7toM/Tn/Dln1hFPlWiO3u3MdWMjEaWtCcFNgg8I9iqHJEmDcZ9tdLPBg1moU5244bBhA/PZ5DRLe8BscMYvn+TYESiQO6k6CY7vuyKTRA4y/JKHMIiYWZ2bR338n7yKnMn04EWkArUSuc99N0H3rREtCbOzxuYF8Hiu70OMBxqwxQA62BQXFn+nH7YNYKydT8JYKXUHrEgBP313a1s79d/eL7fd0idIforLKdqRKrzoy28naqT63bfpITjNCqBZkajWkAgRNeDUGpw8WF9UhiNaqj9gsjrvleWtZyk/VLKrMxYzsejZkz7aSPJWycPA6nyDm/6UNgOXLoYVfdWelu5zwTeRSqVL/J1hMJ2Zz8w8/eyF/wS2sOGb8AqH9vuN9eHNKMFBwMK3iVTDpMT5yq/hp1fBpVmtRBEccWXvINOD5P76gTTD23/fBl6pjJ3MZuWCrTS+ASMqRkXiKW3zijiMgjaoe0e+Hi6YMOT/mlFhNm7QJsSipJPlEUHvQHb5/CzcYfeRLVzC/pwy9JakV2QblrEoXnvZmIresXTr1l8hEQaBmy7V49elyYtq2Qw9zRZPOOhrYZ8y+vLdUqV0omgJq7rsoe/wqgecVH7fo3Edq5UuTg/Y9lCU+zIh4e3SHGILQrUoyHOrj6wYLOwxd3Hzurdz/D1Bk8rXO3t2AzpBH5DvVheKLb6uRZLka7FxJKOJ9gMqDksGDATZejVLy266fSCYkG/La2Fzzohd+6fh7/Xzysn5CQ9JYzu6ChncM0DBIOUa6keE4MDyLSYQSzdS5GjZAXH9ILUFSfInVwQw2OEqeSCKJTqJL6I6kw9UKs/EoFcCBVahN1ALeomwiB9meEFrwD+5Z5LMEF+iVT9jGiRud75uFXX+LnIg24pfyHuborO+uIuPmehQ0QvQBr+Stx8vnx9OHEnNGivfPi/MjHUoWMsmf6U+n1vaiUIvHgCA/VaK8TukiHZN8qGCAYI3ahyMmgOM5c1LQEeomhNdEv5Mff6Q9kcWmjGOUctqT6cxPZscg0aiLwa159FfITjTJblEgZFvPl0dba/ieqtaM7FbnnCnCsTGyS8yhmc0Q8AueYkK06KXpvXl85PKNlNz0c+usgmCB9mWfz4CZ1+JCwtXcswmBOOF8rvZhoUYpfyqq0Ydq3H296JN4Q1uD3p5BC4MarEvsObF9KvWWJFTHg5ywqfwupBibyE4+JOkY9Q0McmP07tBUpTVBVnMRaM/qOMY2TB9fVzcu1OVbyELHAFyO/FPFe7Zi5v3CflLM4R4whfEFkAC7fEzSUn+lF/vcsgp5xIXJVa2HDowJarEU5RfHIVQ/d1kRVZ2dMb2+hRuXEkucJcvao/7ZbKJKEK4aFhRDH98QFi7bGGovr4vCFL6/VaXuTl8jDOx7lgwTiO5nWrkYLQZaDfXRLSTZGieVaglnYRckd0Sevpl48vwJFA1pEZlVU/qAzlMlrLdGf95/1tUoAIM2nVlvwgFQAUPY+7X5Y1+im23x5a9Hd+qGFtvCzqY/Rv4LmxcHehEh+N1Wl+1poBOXk/RVwjevMiVByKD+OPsLc/sJF1gPIigxUVdhf7AMx5+hMkD0ihG/0UJcdeuNPd5Z04aLdXEDk6A1S+hsM7EdMw3lXJGZPhO7svIIiF1vdN/3IDfQvAnoeZ1x6PH0LSiBWhURZ5S6NBTINqrLV4Ag2Z1wiZe21IC+iPJi5I6MrmF+7rmOcuA/biAZ7rfokVvnngaHpNjqESEaIUPSX1eOGyYa1WKDf5EBJxd28m8BhVZN4onwBfPBfkK69qzFqgcC0HHap/HdyhVB0qlmh68mnMDkcphsFaJoP5bnh0oyE7Drv9qDh6Xmg0wuhaqFjApT9wdQywMKvDmRfpmgZPFLfYmDlH89GEsVK+WKqJ9S8rABQrlTMHzqZpsdQbC6WnW+GAwfcdviY32zk+m4q02IT4itACwfbbxbgqrjPfWGIHR0g5clqADo58Xs2Vy1RoL1SCxCl/iDEEnjAFTrkKPD3Pvwezf/QcDGCzZtbomWQnGdyH1hJe27plkelAYMSCibSG4PyLFyjKxvuboFqTdQ4hWto7KFaDV6t5/u7cZas1PRBRdIt2Q3rG8HFes0N0N0+fr0uBSh1zjbdB9yD9axTeybehbmjpiFW5rMDINDYqzC18SMRySo6UjxnsFRQQ/gTUg5S8V0gc27Z1aiXB1UqIfevSa5I+Auku1sJSRzhYD5GeqYvsS90nXAlKbodU6nDwN9WDlZyGGNfRU7kc0NrHHeRX483dqFelzEcS2Sdtgk409Ealw5qwra27rD7ClXgGsBkwVd0TTbi/fAlvB9dPHCn2ZVQUJy1SokR1SUYs9U3BuY4oEV9h0DwHz7EKornch9ngcduSwPf9yqF7VJsybfcbIYIZ011jSDsLBlIQsd5PFPGk5nkBzDg8SzsLKb4FaX3LGwfrKj+p63loJ4bkIhmH83abqNrWxJtC/wZ2+ktE7hY0P88Az/pGRnQbP3q9E5kMwHFlGUQcJ0SyakA9yBj3ztNulPHwunf7nbCa5U2DiUmJILHDYtkkXlro7sZqlK4VK4xBRBRLt/B
mjzb9DPI57rIzmrFpS9Hvc+SQzRLor+wbU5oBlNe446643J59ffBMfP0aqulWp5tFDVpAGUxVpwcz7eQ2S5GcbfxEBg+3QHfpkEXRoAF4RUDRxEckcHuoWOdcSXM1FY9xVEuUfmdi8p/brPHN37e/i9lkTxpu0flLa2hmM5Sb3jIWnIZs4mda1JNHHwPAzROmoWpHxbhx2wW2aAQpCuj9SRSDHsIKB4AiE8UknGeprBomtw/SP+YVinMIVA57FVhmWh0klX2yyi8RB5QXOxJ9Wp9B98WpOnaAakWpJN+8uGsxeq5RZGPkbclCqF6Zh5dg3F1RxW9Qf/AM9Vs4mUNv7EoXa+jcaIezrSxenqvLvg4/DN/fTFMizAcDNA43Ed7toewtQqSJlnLfcyY0xE0/1GhBYPAzNpDKkBqdtqJX7ABbeewghl7bdWJYfud61QMvbFrHB0XIlZ/XfztgJCfjc5JBbSKH4hacsSxdBgbXFcy86HmN9gHXb6AA0izXG0vB6FbFG2t11ynxb8k10XyJlY3iAWvNSQmYXmYrmf0VVj0EGNA4aBpmD5XPwubT+c45cIpyyyKB7y/wMmwblBzoOn8zXlyMfyRNoS2SIzmvazOoBnEMZLrc2Zia0XSGCDj+wiBqoWP+DeJdQnH7pQh919buhWTqQic3C985NJSYnAqXF0scW6HzXojd+SJdVigqemN5d856w+TQSFoHFWSvYC30DAzV9VyDQnqF7gBL+Tg3ws3akET8loBmxRjEAqor3MtbzZ1c4g1cEtVqxcjBX6LyhFJG4wdx5lNxFJIcdpy2qk4HxTBx7lQY/VT5HSk1dv7bDoASSACKq6wnUgNHxnUTbpdJDSyyd8HB2Qk/5izBcQOtrkr3ed7oJq6BFesGMaoPL5q4IOLGP7Q0UUJoYf2vp+8tTUB5LHR3h1KayfqwVF8FSWC3q+d3IqAqjo43EXrIo6xrVmuZvJbHQD8G+lPrZiQlSYiorQmwGwG1SOtRGOw1hQ9GzaNoLhwlJ13OXSoO/0Bh7acmRisEt1iN8KmvztnFaDX1dp3sjxldWo1u3hKI1FDwNBYtbJSZY+56pEfOMlmt9a/8lLFbyV+dTjy/N5FPJe0TUUr6T6ejNqQ+LBGUMFoQ7+1EjvGh2l6Bx8HzhQW13a0U5W1kyb2vWOoX/Hez+ongk+rEaoBwwBMdUG9F6wqQyL+oYlLJOwQKinnIYvqi4duDvDEgfqY+fsqBBhlLqxTa4ebU+d5Xek3DxhrWRi5cWJqQnKQ+Womo62y7hc0lADjBnnIyyVHna6lvjp+eSB0k/HmKbxNFGW+UbD55lRSYVX7huLNTheAo1jCX05K2trhQx/9wTzsuG/L3u8hQ6sDwBcABRtnYH6hKLk/Lo1lSaUWhxs6p/qL2yOw5Hja7WVOxCy/gosg9X5tvtlkp7YunYTkushm9fqtnUMPx3CDW3g7MUW+ygt6vqF03H6jKGDMUEGOZ/F/L1vyBBpS5hEuk0BuKjxigaWYmvYVEd2YspThq3NsWvwBkgcWUAaTgeG/faemXjc3XhDEs0esKRxpJF6hKhqT6rgysiSjtLTx1frkwE3SImGEvcAauZhX4C2Bessh/2rB/SSa25oVJqOtfewezAvwPPj+MzxvbdqD66eSvpjCV8UojAe58JftqAxWuPPciTmtXbEe0P8Vn8xD3J5JkeXn+gNE5s0CYAXr8TfdWMWR1R+Teyerxq5cFsFxyIprF/JerpTjnoIDo+zfFhLp3Wp9dQRf5kvOddJH8EhdTna/haJ41+lyH6nnjDZwgLCMTtsoB2RRliWvUToaDZzo22wJac40aJW7wylG8DDRVi+jc8EL9eBuOuwwoGOWbfMleN3OccIWQFHu4buR4FzqcwI/iLk/kkKM2TUOlMnp4rxTRyEvQ14avn6bcHXdlbTEgwNzadjl4M6fZ2I9fjZTpEkCucM8nsCB2G0GgbvwzaQouj3AvUiWujDJvX+YkPYdWNisPBSYCJK51u7X6hJWGUKYmyeRxtl5Om+uAtgyO/9jN8NNhPFXzQNSdQwLrX+aJwvTjwxRgHUN1lE8+pTeAVRe6B2bsvzFN+vGe6InbaF1HtUZZexQuVxqz9tgn0TLmHDwOzBft0NaA9PeRYxs4UZ2ZjgpdKi7abcIK3Co4W1DQ4yj/hR92pSFojXeIYUZpQSrGZSfKUmJVvHfwi/0Ip6W3mYK+ClwGYLxzw35LEyyzHTdh7yMwPM2nztX8QvY1/q2lzj0O/Yr91pxp/0bWHz4ps0Efrsn+1QaTMY3THDyj543dpmD0EkAT1ictd6/qLrcC8kVFlkUIaSAdLD0w3Oem2qUNjXUuBcNFk2ok4P92Ivf2zEQEREYAplWaXKCBnzqCjBAV6+OcQjm4Z7ENhObWkNgK7506zIC55ImKL8DNZtUv0KUtyg1YSJBZRkrYHHjW3fwNcJEmHojJFlXZ1bs+lHgBXsLMZiDXvOQ+pZPDWVJd0AOEKfVZmC5A9/44AsPga3XckQgJMzj8jIeU1k/Z4hpkH7xDngaWrXLT6lmiuZcL1hj4kezopjFzqn0w1mdPAyAPqknHR/CAChvDEhrBC4pS9xy968fyZOu5illnM5YLLLxPQhkJzgRDxX3nQQjqZK8FenM5HOJQhORjEdwmsqkxArc/IZUNwOt0IkkDTjDjuFE6rVwvEvOrwCzSqm24IJ042zSn2qHm4P9SKsnUGTuBlZv9Be8gpx/GMurI/BpmyHgNwxhkm8oG9p7TipycjY1nnt4ccy7Ci0f0jJNZzXhNAaHDxR0PgTQeLt0w+RA86RzeKInsxbPM5dizO3FmxRtLl+iBH9bpzFhclF4rBrgrrwVQx0lxFNBc8cU9w1QjG9dYh2GWFfjMMdSsWP2QwJGjGYV1+nlQb0WmK+JD810sTcis2XLUar8xmejj+bL5FvNjPElMPyL2LJ34kKJZ9p0KaT72Z7HLJTvbiP56Ew/5ibZ+otIeXkVDoxIlgtHuFNVQajWMu9bn7cASQLc4sPmigeYE3L4OuIcj3jk3BQkec6unYaNpYNFHv9tshpxK8fzdFOPTqCXMHtEqUwQLlOGxVinSG5V7VYJP0pO5FJq/lx0reJCogYgZXO9xyusSrTm0jYMMdERgnfazzp884mHE6VCvVsok4Y+4oKYY1UePg/jtX0mwFK8Jt69oG6VX4nD5pY1zhXnfNzt50CWVhQNGTZx5jYLCkvZ3kteozto3Kxa2jFkHtbx7pSkFRXd74TDjtcg2lXpagCGZ0c3doq3yqMzXKX/l3XZlRv8DAHaaufcGn9uR1jOYP70k7251NucNpI3h8/IB7ty1eM3cQpYgNugh/BkvbqViAt40GMNxAX090MdC+eARjkRVy2hMxtf2xIySNMro8O5h/7dRPj0kClJcFkKDA/YIvQ9dMjdfkcUwFZASUPdaewCPb4jhIT7K2xTe70S4GmDceK3Uk15D8Tu7dOqRi95lPCiQM+TAgldlFa3gyJgl7+P8d2N4M3hOcP8aaDgBHiY1WZz9oJHdqA+c40N/V+
QI3ZfNeGQCx7pKqFZuanZtkBmbm72/0sZnchxT69/T2b8d32mmGu+EICfhl62848YzH35J8ZDnyAIA1qOgWWfiaDkazyZJ1yldAb0wgWGDyf1Txz381xyfkaV0QWpvniyZaKmUTxo3DIR6K7htkn7jvV2mmomASk+1LtlojJjpygVRG3gnKW3HHR3d7eF9edL+w+8kLNo8T+RWUzM8p10bSH1gnCUtgF3h/yXBejTz9+Oo8OrCfsh9UqJkJnnVzXeix5EKapiwiXMW0VFCCjKAXDt4KrKYsJI9ozLokn3ZxY8XIb9cd1l/hY5zzjT9Hm/Yltysaf7MO3dZETNQ4Qa8o2DhyRYVgvkiRIqm+K9R3m5UdwgFj0hFpfRwhVPJ1BkRBtoN8tHBkreqogMxuocnQbsxcvIq0RGw2XJ6gXmzN5szK8qsKiZbSEoJLhMTz/Z2ZODPb+CXzygATWOaHRhf1/AEyaAjo3i71Ky7m8TkwiDT9LFIBuRn83L6903qbN/f5ZWiRaz2m06UAbasVQrIF9NhgZfKIK22wEXKQxvUiCgeIai2k/MtQ+VJrzvbUfLfw2gns0oSdMSVSTF1USgVY9EuXWwCXe6riWnUBLcYd2lnEjPOgf1qRHElq/UtyAfK9VgMxqzQ7BVCG7WZ9yTr+IG1UNyTitSa+SVAy9iwyT30YlyNu/nrCiJv8BWRaaOJYv7Q3KI3R2Y2jPZdyAfzH59PX8zbZPG5yBry2meFIGUMVGlwsPpBsfoStN/CJp7FgjmjkZ+KSAzL1tRqNVkwfZXjUSfUQ9bimKrj/qVR1+kagbxP7LTwgLbRN1o/O72TfBMhoBn3WQXtCSywpXheL5afid5lmbK3U3mfUGzmz7T+E90KGZ04rupjzTWqgrOgjaMGgmSstsAOWOztuIKF9sE7YQxCLqqV7pc3UzSUjpCb3aT/GoLEKeXnicbVeur7fBIe6JfSM8qY03gLlO6UArOmYlEDWgimunprxjLfLZ91eKciCnd1xZKex1XDyQJJQvvGxQ+/7+u2ECC8GtvaEzfOknXB+3pPfJaMPZNF2AFv+Hu07swhOLM8/sI/eos2j5xBkxxI+39mNaDnu2g9kmJjfLjgpMyIxI8lDSk8sw4+cNaTw5ppKfY4JIMYkgVUqMPkqI8/aOR7ev6axfL/us7mf9UX2i2c1L0DB8fVLCasvV0RUFDys+QsuuxgHaMcx1xr5/SCnx41Wh4ZkElK6yzDJTaS24Nmyc7lPj6jlcVQyR5xmOy7Vv7NLVaLLvsBm1PW+YreQcP/EtsAemAz0E3E0z9o/FNV3hO95dsraNWdF8F2QiupGWMEYMpwivvJRWdivO5+HZIdoQddzdIf2Pp4LrnC2SYHYfdlvscpiE0BzLtyI9TQfSmyhOhQF0twhNeWBFErs9XSSnF/xb2OrgZCFGZVfFSevi7pFcm/x6ymGwkVQRvluoMuir66fA0JWwUgkmWSNuK7zhnBjKEjpE7F9yVT8MWjaHHuE+pxOYdQdW+FT77y1fdP9dNilsrc7kRacRjoJigJ0MMjjTbIBW/JIeqOdPetR+UdYBCElbOpdBMxwg9dUs+9/6tzLFd+aPiyu6EsbVDIJDNK8DirkTSJTy7VP6JsMEjFQ13VpC2BYlLdqy/w1NrEBudKWH/H5o/lkmfUMH12jSr1Q+ATGfelG7l5ZD+bB4cYtW2+icUp+mB7NBIAFJK1Htso2ZqiIsIIWpzsgEXMkU+t51yBLCalN69xxEzglT7AxdzmwDb/U3c5M8Fl/FRd6lGz27tpUACfCW9ssjnwK1V/dHBldbO83L6Qq5KrgZWD6Spb3Eg3LLpoDLW0GBragw7lIEdt8AV6JROpwxERJPMIjAsFWuhQQgnfV0HOqcoO0gnTn1nQezU743cCDa8TGzAp47D0uDGx9UNX08Y6nokAPZHOMJo7JKf0OXTBAG7P57mH2010xqXgnBuyXgDSxOoZnZ7r9KS9CeNoad/Sjc1+JFCDZRb7eqmvnwxXkoyuyqmchYu7ZCYqtga2GX8WycpoXCVWFHTkUDmGCn/fwdcrvZOfUJwkMOL55Ss+WYhUCs/dNEPKY62uLTq2D2JMZ1S1d+nv8HmQVxwF4CQYNl2ZryBwRmAzgAJ+QrVpoEgpg7QQ0UV4Ju38SSHBMwTixqbcJV+n+qi6CPMZPG4SY86M2HrUDVeiHHFkWRuc2epTuRdDySABf0DYhcerznqLeW45rC/tNJM4B0q6AYUYNhzrBN2YthZHm057iyDrgzb0lvy//p9J6sb3VSdASGkHyCHBi6EhDamu01kk50ogNQN+KJZX1ySKTd2lDZ/+gWFgWOsaXx6TkBvGj5lCOM2JpD6t2ieEaZIZr8heEohimSwwAtYJ+I2aTyr2YBu/4ik4sCkxXXJ9lpyahSCn1VKLHqjtabuDLEqqt46zGtIYL73hMaH1mCAEejXT3KlWO96gk3HfRxKN3+Urt9Rm7mUSXYN+lW/5MZMlfW4wSFjCe8WtrOtzSCV8xDnlHCIzHIX/FMvLkekfJT03dwZ7qBTQazn8tTWO5aBh22GIdRweDpGSd3rqSYzwkFbabV8AWpqx1INosLY6Mq4rJCKOvbZfLn2dcsHvWVv/K1wzZ3cv8gLlacS/H6zWjSgdKCS87aXp1ohibVYs/cdeZoClJzJ74eNgTcPdX26bXO7LzPFRp+TtHATwXtKqsd4sazKBG2KXaca/eOM5NXuFT/yxmZHoOEoCpo+il7Ix7jIRFRu0J3qV54eEV9WM4O4RzKfNcyOGQmrFlfsDZXQX7ZPMpoiGDgg7mONu8YoKtg2FI0hYufelATbY5R0cbsfSTXrO2YDUoNa2AhTDzvC9DATeoaa62UQeknfvMi0uwH1uvcczghVGAMmvQ1lHUX/MxRvWuuJc9UgG/l3PuIIUmD8d+45n3tiEGxq3sDBcIPm65amtFalYoLNw4ApUbi73CBvNA4zTpQouPfXPneZJujIPTx8UM6OFjP0rKnFOE8MFg2/lje0c9VA+4MbWgzehrWfUKMnKfTvbSHqjwG7UzVXzoSzyCP/Xy1WKgbrns0M5qQxDY1ilJZmUSPrl0YKE2ArTJHKOZ7XL7hfRnzTZZcLm2sTwR+vcZ+oxedDhZvjr2q29Fksd47gtbipYoy1stG4gKD/3kui5+VLlkswrA14h+9o1JjbUVJZOEb8TxntsiVm6WSCp1GVv5Xf6u558sLH+RnrEtjHAidX9VNb7wHm80QRknK4UES2vlG723rEva3NV4BQaTIGWklac/0i3B4RaFAQAoNn2y7Zt27Zt27b1s93Ntm3btq1ZxCzkhLSKCy6cpHPPYplSe2dsMGZ15FtU4hyagh9U8WQgzFOFsylTd8+K47trQcmkV9l/ZFNny49orB0gQhi7ThkstU581c1deD7aKnAMNkaHF4VVV/0XIgw6zvi0RHynSE/O0S6Hp35p/vqzTIPRtrhyYp+ePIPjPZrcobx3AEtPvW7mQtTapnIxDL7CkAj8keOn6VvIqU7H1pnDKr5bxYq8quPwczZc9
jnnEweenzCHUE6lCxrJKGtEQSIIa/tjPT9skgizROIJo4iLDb3enaol9zEg9W/mlMBOaW/r5Pdk+3YYjadiYTIyPmbJ5QdcNh2I7VoJmi6CEhzadZAbL2y5ZQViZjBSCjj5CgAxuQCvN+q81K96vsioAjR1kUue6L9QdoKP4UprPnRMXlwAHHWb+t3tSiFw25cDcO3TzSYy3Lb4KD5jtypv+MAgW1dMpiawxSdSvDIb0h/Imqx31vIv6dY3v6ur9HhJqFh92dRy4xsLPBlHoENwfNFUSFenIB3p80mvk/pyuqgMwsAYrJcOp/lLvW3SEoe+R2HUzluK1SKvwkoFsUXxa9kyKfsjEDN6dOLvy9T9Ny6go3LoN3m+2TBBged+kuaH/tUy1QiJnirVM+e5sRz3XFwGoZe0aFZeGnHX9V6brHdKUzNifD7ttogsA2Er9CGMO5N17GCUjj385kgSdHVF+uOUYGMAknsbXG4fOrltuw96RCwiP8hHrBNmZO8OeqI0NUohI+FtSeKNwkb5lSd9MWvaOuZZ/pyxyGNXtUruMx23JcJcxFV2xhwv3/lXQnsnALFRBmdRnW72zykYTFt0DL9Hgu2OeuB8zTK8WCvFqvmS+n0VcOx/sJKhlljzVhpQAl6IAq50fxeYSBG1vqwztf5bzjItGNqLW8z+yYH4x8eKxmDlJZBl2O2l6VrmVI5+gua4pbJJmGjqHveANReGReFsaDqrbo88vBPHv9M/kxVCt7idNN4jRbCl9LJJNOJC9+XNThP3Ida07h7zDKCuFTEUiUz8MsVsO1s0vlaVfAihyLOy32HMd8C6eQivptJ0MsmjFcRIKadmw1gaWxjM8a6aCNhc915n29VLfsWLQ06/1agDfBgIGpvpxM2fvLzGufILj+XR5voT2bv9PKKxcfdB3A95tv/uIxG/tGomrfjQb2H15MU1yRIA474O9st3ZS0EuWm0yTFUixBs5LaRippWD+9g/Pq/NtthHTjKz5deiaXFJswC0x7CCRk9g5X6BrzdmLnUVQOPMhAxO87HNL09R74tAsvzBjVMXjaseaFkw/MN9J5MQryq5rmwEl4DSRajxBX+964JkWHGZ52QUK7vHm2yFHEObfgfvYqPQMgElOztMC1tybrhgBDfwH0uduuQm4b4+WT59bD7V8IpxbBSEJWx2VZLshbRNbjvp4bSwD1jsQekc3d/7x4t6Sznnmpn6r4fCxqZHno+hn/1IlNIC79QNAU+KkL6LRgCL9QWizO+zUnKeKo/p1Emf3cz7pICA7PpbAcCoyxseA+n/xwfE/DvNEkoalUEQIX+RJoVy8U9L9rAxVlfs5wgxQNhlk1C5rKfJ4xgkUBYlcnKG/2+JyJVqOTIbZx1Y9cb7mVeNuZtSIGK7n3JaU7GNSw44oKjfBd6+YZ5cn1xOjN2UQVz+bjdMfJiV0z1xDvOsX+0/huvJA8MJf/OAA8UBKxUaJJDU2sRVnkzH9CcX+3XykS44IvXsV/HttUbv+WrJC1j/EygP6Z0o5HIhB24Emyd2rugrYg+uJjpqZUIZTB4DETNqHgK86HeFS2QAQqdMmkt2UXULMOX9TbPr6i5HVBdlMc+oTNyqBl6iYejqOEPMr8MVv0uCdpkLYXFwj1krAhJR0bXezPvEP0AQ90E+STlTCwsEGPA7+uKTV3UJjdoVovKbbBOcNdEg4evimKWje4dLYtmAE6fBLV09fW2AAq88ht3azMwBoXtJb+/r0xJfm7nGLNGct75vEy+rlOthalspjwqX7cItAprZDveFgMvCoFnSQa9KWZXccEceP2d/6tZYaKndqX0IcA8NpU2mMgEIvrFA4nd+fkbGNwzr8PW7jJhHI/uMI+HN7bgse1BkfegFbYvcfJInCHkDiMsrbExBXooFAhkdCNm2BtveAx9WWxCXOvD68QJ29sw84bHmE1oUkoDPMy2jrORMz0aiO+7E3Bj32aVeb3dE5Uni53Wnjn8wZD8b74BcZeFWdJHX5DXj8Y758t3jRhE25sjoWO6z45ViOpkcAw2+g5Ytzqkhd/JzPtUKS+SVp2nBZMuApFGEYPt2bEQ71lANNOKN90xJ4eEah5kC5oGUZzzVuwfmMvYlOHctdiEXHpm1WMUFB1lHUYnrrmbFSrhqvI81Yi3JqjkiBRlvKlwqgVDjey3AbOdqh3gothiiXg/KPCyGVPX/soT/Ja2vK0pJZPXDUSNIIHIkAqSjBgaWKGauKj7ko+ABrgQsFQQB3bEQPHMpEvfScqL1CxMqaCLiUVKqu3PawZNXApLhOtzRV69962+uYQCHsZAQPHTnlSd5tEEWHdhcNOXqK/CH6+7a5Zy56o8UVfgTfu6u+tk1Nli6JkypRUp1fLC9FbQjLqHwxvPF1uxhmXTZststXhsKTv3ZwQHog/ITNd7/k5eNWyqUzvz7ZxgpANDfJUqOiiKKaHx98Mm4QpFJE+kKdQr044Dl+7x7dEb4FHAwO5b/zfvJ93jMuSRz8YzqQ+1xy6GWezjzBGhF4irQVXyg3nEij3pzjYMFGDWxE0Quzh0SQZXVCLTebTc2e4uNe+ni9g3OHaColfDYcJxDXjyiB+zhECSi3yNzAw4jK3ZU0+2sgypfUfJi94J+4sqfL75BnJeSYijq0KxZJNF0kjfWmp/ybjc73gSlFKb7s45HpNFr9Kta/S+OyIocrMlQMS3FX6C04ihDTBRFxmdX9scfvva61Y3SaOoQfqIIJR5PtZ0YmDHewsw2jytgcmCwAlOymeGxQwlg0OjTKbs6yuVKqZr9KetI9zvwYU+apUh+uhazcgu8n9+JNvUuwPC88r29oLYMJ/akx1CWC9NVb9Cw6c2nnDU3tFrKL83lgh5bWm7AJNdgHWmbzw43JxS7eJD68kb1Eg+CXfRr7hO9/C9dFlcZak2wURefd9bkIGANP3gtODoD52wYJ5pcW3861Ue7+Sjdtn2NFcnOtj+KLCu/FTb/6BG/e91Myi46jV93JE4C3Fpa7ibiTv7BlFq6O1lm9YB5PZQdrwmk/NQFWF+2o1iauCsmAbpZpT4UkAwHwRNEkTxPFfHDS4sz4KJpLl2lVDo6WHK0cqQivoD0lLrvU7EXaOb+vafFFyU6X9fcSQ2e8S4LHe5QPLdIy4cDOwVKmy/2JQ4SaC/b28hoQk88m43avGjiVpEIwKwro3cRM1ChQ6h7+Z0aiJ70KXq0C+ujCU0EWnNKnB0RDNK+QSh0h25MVU6h11mSpFiRLWTlSww8ohZHcyFyZBd6oxJ+/DOmkAuFN80FG7iSlAyIGQGt8miVSOLBSUfx057JQJkTbhzlyNrv53MmqitI5tPCAnPgVvBjP+RXAq70RNSlKOB9yLC/Uu+PGY338A28oEHA5e8ATkTwmo/ZYWYWSpE4VQtOpvhYpfUUEh+grdCTwbsNjLaHEZm3O42OpPLDa8SR/j0mqywdk+uPIZwzBb2Oplo5gh9Rrk9j7skaGjk9K+DSRDFWm1is95C1MERGdhS8NYvEEW7Rz0HkCq12ejyD8XWChZP6wJqzT3E/uPTjj+D
cTz71dgbJdSCMoehIBgCmbkE8+MBoiF29uuAAi1j59zE2xm185HzJnjQyWiPu8kh+fbLMaYZsljl7y17UkrBRAfZH5zAS24KtxQN+sRwr1O6a6zF48gLKvFiBNvkA3qANfHpuwxkV9+IxlCrmLtbcBy3+H032PF808zmO0oseRTAPElfJRyv/Rqh5fGz1jmvVmFn5O56fX45CC5mBdtZUz6rGOnJNaiprS1Vixlax3yRhITgy4At1dSvehu6hEfm+AI3YgHwfOBpo9PaeELl8DnYkLtY+zYDj8KcNX5cKVeEyCSWZ2Nyuxju48IjVSCRl569ZCoBEtWKi8SWtML/A8Ebtf2HSCIE3ziMoVQkh/lFRDxYJwmyo5lU1wZzxVi9vubvWphkyPy4xn1E8+VUuWiVRz9+4LUuliOah+uoaNKymjSmzcFpa09G4y1HNwKs/dnd5RYl+4oDBmDdcrx5en2q3ykGN9Im+geJKsXrzkwJCyJPzXExmL2FWokJOD9sRlmqqAAWh9I1QMEm2MqCUZ1RgIPO9LDLfMknNZLRTbnnxAP0bCmaEj02u/rfEvuAUPLGT6h6urPkXNKTMmHcnBNVO8uZGD4ESh0j4zqy21MO2U2ZDAliTLliLZ+HvKvV3E29m7zwHlnMLyOrYsxpxfD8G0W7Jrm7kYez1gqj2xvpwBYJjQ/cCGeftlK30DUdFxgA0ZeLCwsV5HOSJw2wonJIIAmR9wjtC7JMQ0YbTk+RVczIF7V3UugKd1OqBYo72P6HRDRWQfFOacbeZosXvKxclD2eO2x+wsqxqKHVTaKbZQjopFkN/UF8MI22WrP4a+LZP0y48fFyr8H0anfFU0lgIcd+8fYRqaRqbe1xw/nek67n02TH7ULX9JRYCHICx0+Evv7cjbUlIuMdK6bdw8pJegJOZS/PANOJkL0u/i6+lx+M2+k5SWb8KkIguXrK1uyO9LX/1D2/Em6OddxUZoelfJM9R1kkbnSFcweDta74NjveoGTQPl9WWApjsDhlYjyh5eSjKN482VlUuQm7FTr5bSLuWVKQ2/a0clks0DzVRmmylYdz2A8DdT7gxkvgw056gObHAu/QHYgHb4gOWIXeU/1qLnh3nxZjB3XrPGfVj8U9vdNhFmrLC3zFdU81w6VZFppUXKE5fOQgvbEhyRAllE2K3olJvGKkDQvuffAkYtTHW3qBfMTdwwxNDSE/xBfHfRuNIhiuYjzQlxbMH4JdsaX9CYSEK59zOFsY02cSAtfBNlEK6Fd6ssEkF3/yCTluczwrEeb/agEe5Awaoek57lipmuZ0dNqd87ICIqPrQYnpOjz5lyce4psgNF9DIrI2e+D5TnDcxbHW80h/WbUJqxaxkfwjcnpMhlr4RRpiAJ3T3WHUsXP8g2OyJdB48Fzi5VuLuZmEKp1FU13/I6pbvYZMcQQfzO7zcFHPkRuouZa9DvObXNODjABKzV5NDKMzBgqHLVsaf++QsGcrY8gp4WZzJHSYmhkZdmB1jd1TmFopjotT6WfGWtxy1Iqj8dTVy3XHfS/i8Y0XVfXu/VP/B3APwlfYaaB8EPas8EsZFjqi6ACGxpFyAdbRYnbft3F0865UXBJglb+a8rQXTxtpYyLhHgrY8T8hVlzMPyk6f4Y3oeLCdKDmNgpT611/j8b3RNImvKx/aRzbT5/U3NbUvCbm1LU7k0/JnyffNKKHsmhfNLeR+xGdXcf9J8D9TiyBLv1uQA1GtDGbhLyto7puyTTyYh1dCz0QLMCVG/3L9poMsxxBpTJKgixB3Kq/gO2kolKM9cXamxAFe6JxPIrHomiKkZU9qLZQHDx9A4epR1oRJ8eNxyfnGTb3I9e4CtRRBQXDywT2nPnpqL30OLsQ8Un0KLo52rQ4eI8mdNjH3pVXVLer6EpGwXb0c4QQgnFIYnZQfa+ZzDqZVhsPvc70M5WkKIzswMHDpakFD+fvlcC4CXm9QHIfsxGiQm8/obr7xQt9rQ1e+3AyQr88tcg3PVwkBwgBw06T2n2812hw0cD5y+coJHMyh0ztSjXOetTw73hBLZM2NDq+XeZxaObrfho7SGVSajLFg4BTxgaCZ7bP60zWUk2CMtvmW/ysEPjrr+garPzNVb1Q9u32/PGNcWdKBzLZS+AIuukZJUZF0Dk90Io8n25cldgqWIBKZNbdXSg3dQCqPalwam5utZZuBJI592qPdM1mxNXjLSzyhMRWVDBSDp+8NunV+jBl/TedKqqItMd9Ts2s9Z3FSppgrlAQbUX0CSydLFbQ2n2EatI3xAPXrg5W7dcan4jp+dufn21MlPHUE/DoZJTBogo0hNPgxOg3fU57UayxaeNp1LiDvkYPCT6X3ZUjpQc3vAGQy+Lo5x3livyqitlfmK7VLN+66CBx0sXhcWIzlo7pZz6y/swWqJGjEGeOePEI+8kVR5Vo6pg2+JtSbOwLeP9AYAaEFNGLaEnfRJXOEbzCFc4+xf/cy4XqO79xRwklpeHCwUj+2oaa3QRV6b+nAZ5DpiUG+SBXSVla3Ik6vGoqP1oS081cjsYaM2obC/y0wL74/9jaHUFRTdqOUBvH4iwEx0iVbMIooTrZguO95j9GQc3qQsGLwJYifhd8yuER+/WeIEPBb9RqQAoZdM8RSRHGVEmPTcjj6WX3SRFZdD2HjRTxsWa3m+h5lTxqjk6EXjm7GJNM0qItU74B36uKhbfYKP3yAy6KfSL51p4rlxs+cr58As2Ukx/838BIXw6sLafvM6TdFR8W+RKy12JP/lWOLrzNbQoj0cWmw7X3UiwW3U38i2nbzkAIOLOHq/kLOuuRpLOcteEub45Rt9d/UMxNAdxfgdgiT7IsuydRVgD0spweNHwR+IE0XSc4hlfkubOnEncYdUu7P5HfugetB0gPQPne9KFV7UwHF+PXR2DZ2q2oA8xBcXowMTcbKaspcSkEBjgxrUmhr+Mx2c0nCfcLZEKzoFe8PwiZ/zqXJAPzSO/oW5KUX94/fIwNC2HINEqYRWhmk6nv646AbNek6MpaakOo5NPy5Em4TMYJ8EVH7nq/CJIl/B8bUtkgWUgzhPFheEGxP9ppc6kDqcZhrlGZNiuBUbDv9Z+d0V61Q5kQ1YH8ojsjNUnJvURB/kNs0tOImQuvKEHJp1xSpZtZJK4bItpdNS149ihw9SFFMlIez8b04nenSKfqdUCArZo8TPB2O+VIStcxM54vCNDMk5KtB7vyC3FKmgn5FVhlcSc47YDLXY/WG6jCmQSfM9pKOe+JwNbM3jXWcHsqLfQwqxKLqb8ggWOuIcxMFRmqlo9ZehtwYHqhK9ssJ/ppMl8LeXXEY3ggyJx8yLONNoWCXCxWaqyiBFORhCdAGOLI25SAbc0TCT5+ZRJwY1j2EEc5q5IN9MlF3cfu+Xc/Ml0t5GFyYcBpP2zzQjOBKNFa/kqZ/p9C88j55p+wVafRmalI9MiHzjO0DTMQcXlUSlubFmFbuxvXegveI4akAoylkH/qiLipit3eTDOHpppi9M7XfMVgY6mN0Gq3SpDio20YdmKE7PnhYSAlEMe
Zsk6rY+mBEeOz0CV3lE1DricHGVhoWyo+939rffPxMx3EnHeBXRCc2Lc+2F9cpHgP193yXcnDvm98DIF3Ktq9a0NhHqTXm4KTuPjTjaiVAjewMM66wVVxAt61cOl7d8bX0XYbCeCy9olfLFIGCBjzEOJIy8zJt2PVuthbLc5kOXKQfxhfhZg638z7ES5C1fx2ghbNQuS1XjYs8q2k7MgPTtiEYQv/4rmQqx2UmkMhvSe9gh7PkTpnLsY7h0nZ/fFg4I/AssHehkdX5AH2ZGe/8k3kQiagO8lsIEQJdYd0OMimm5HIatyzYzuUZu7tgtO4Ty8Pvo3VoOkFd7t2DUTpaRwMxEiU1NMX31KKo3AGeISFoGlmYM13hxRrbTM1LFYzVoX5K/hZ8xbmtddgAp8y4L4zTGp0EJTcHMV1htNatSjpPlICdy19/xLCaGf9IkND6x46Usz9wU43J03QJSMB5EWWEdbPbapfEZa+5byyfx6rm4NgZxt6aU8kGmtOKxzH3qWNfV5wf6YX82J+vqyHZ+JQSkeOlwidDYvvxcZkliWhpW2OfrA/oUlxiCqmVU52rDlbejw5vLShUP5ouJrt+iiFYZdlHvDcL8aG3ptvLp6kmahVPFTLQ8m6GCsIUolPG//khVqz2OfYBInPrOKTA7k9SJXL3hcEXiv292nbKRHjQUgfTjQpNlvqI+pYyxxE/pWZvz4pAgOOety3J+pzalW/uDQuSapFSUyRomtWWb1AswO3jQKCyPkT8mrcd3VpjIF8CEn6Kd+fPn+/aqIzicuBHwIcmNUQMaK2WCV7D9ktjpfhyNQxaNBUv9dyoRyQLCaVObTACUpojhWqmDK3KCFyNVoslNHwO3xFTPcnBI5ohlqBwZHjK5q9h5tJ3nKSLh3uta+zTrwBjotsghcF/aHD8X5KzstYEU73YMCVJNMV8YRAAUmOZyP2DdjH3qdWy2RGU7fU/A2w7lwMFI8UGyzGPPwWklr/lbz9QGVxdHn2fiJZzuR5kzY6anKx6oIUjfJwE0mntzxdIH0xZPrYv2FKr1oUaX3LR+rhx7LddbzgiOHrUOUO8O5lqZEB4aaSbfzMyUaUS5hVML6jHfuvuRozCPmjvKBIdfRc1LYiHenRVy2XEysUATEpSYTU6OSld8YGQYWaPFCabPZb/DwVfC2ZQLF2ncgzd/Fv6vLKOH5y4UG/8Bkny/eXIrrN8EpwDHsTO8vYzioJklAi0+H+/Y5ji0MgdNNsaVlsaEMcaoNDF0i6zvkNfwr7sEtq78pvH5BY7tQkK1f5g1nj1DkhYQrq8x5/1BS+gIYoum8cA+10zdMCPypnsH0r1HgvaZXzTRh/EhdSR33b0uQJ3gv+vVYtJsgx4UIrlVvyKF4VdZemyhF0Jq8iCws6RaBpi5Fys6omV0gi65/lxnn1K5xyeRsvK5O13O2eRzXIoy3sU0fJoVQ5rD6e2QJqD23ubZufHnLz8LOyzk2I+OTCG0JMrePb0D1Me67en/iC+dz9T6rNfJpDGxR7O9XLbJd2+rXymZUdaQ17+rgc2AFoVgidfN+nVKTwKPrVuzQfgNpNGT9JBR33WCrs6cL0GCqz28NkzXW4FR2Q8kaLyQisjY5J7FonYDyZrIbLwEktcoDE48trx+zN3MSMnDInSWQ5rv+XHoeRzrtIjiY+sBiWMIvBagkBkSlakNKCpSEA5lbm2TPHdirO2wvmgk7NllPabrVn1Om7iVBc6Dohdf6Ydh8xQKhKIw669d5VmdE26cU/3000AeS/vkWVEH1KV2Ea4Fx7eEA4HDaKMdkPXmrec+eDXJTP6UUtIArvDZRQS3W+s00o2QMEW0tblqNvSFaPSdOdP+xp9sLRL/96VpobBaKX3PAI+DkOZOVooFlkiWuxHSvH/GpxMkIqiU61dI4OyNidIA/jTbzo5PIERcYy4oUIRbQNJjWe+wWXRCuTiKdB5BxeIEg3Qki7z1lOgZN/2szvloa43hxy0TJhUgIE5nudlEE3h8r4URnL+TTehyYEuDRchzHRNjBYZmOrmVegjNkoFBgzgYtXs4GOPeMGD1ItIqohEF9wW3TBEP2+rYQ+EzHftCYjH3dkHnm1k5qGcXBRL0ZgLU/UZTTI6tos2/BJo6haYf3yv1jWbTg7+pg4aeG0DZ/NId0ZDLQc3ESDczlXlP+KJzxald3KrDlp0qFh/OaQH38IdqNPTLfALEHCkH6YfnTFhORr5WAxt/5imByE9Oeeubo0PVEWJBfkm2x8EzClboryz0pv1WLKrFfyFuhRcQ4UJIqlbuSfsmd9x1VzjEDuwodljT42PdgnyAr7OWa8DzjrohyIIpQZrmEdvCpOcCwe/ZoGu8/B4RnbjL8vpHDKVfYCzDLFJ+xD4FATqviY6J4OtQxmCd32COlF09T8YQ/WNEC2vo2ljXLRWCS39yfzibK0myfC+Q8x/QDevPDK2GQtByPbO3IwAyb2xjDC75k8MeGU48FajRTUAwBp1FAHIgH4qNQT7EWUZzUxhu70lHIGtFKJOk6Ii68/rdtmIVf6rzVbksdYvmGxkymEUG97WHxCfVruBb60WDN2nrnrml5/Oa1EdMCKlAj4rPdywe112RyGRc0HH5labAwWC2hjrOxQxeJSab0ogaTX35Jq23zzCKr15734AOHOdncnx6sk+276KnhWzWRda5xIf1sG96MjuiLhupXESHuoXrssKHNCvVxY+/y3r2Q27r3Nf8kMLeM4bEEIOO6JgKwHKIzmBnImrVx8ebTNk+JcEqcEkStKXbX3vGsNmzKoELx08t62A+S3OltLUvvmkrhr+gKXSKu5OEcSGkho159sLsv2g2LsyrPr3i3ovU6/H3JxqEfAPguupEJcM6psGiOVDx3uq5qBPrV4iIMET8jPsy7pQd5VIccgSiDKSFhGgulrH+5fZElaSoELfTYmbfrFvZ8jcnqPPQSBvyMKzzifA9R5u6/bW1Q11vhFDtnT8Q00NFtby4MmUIOF8HP0GAjlWmLGDkyka5yHUIZlweE8CUr8o2Ve2FusiIsY24Tcot1SFAmHbcG6tzWhun7A8r/fB51i6lld9PJwk80EUKc1ei43NJQ8EeAkHZp71asSa6Az6QqpZoXPcW2HhuGSgBktHgRdtI3qI3+5PrwkkzdvKTjK0y4DYKMgUC5JWf5Mi3imJhv/Kxf4LuDObAZFMU4Otmu7IoCM4r/wOdB7AUWjw6LgsI9ekpF72Hl5aVqZbO2kuQJsrqfOPCpsLU0XMU/GS14rtr7zXVQBEVXwL2Uo5Ev+NnM0p1mV4EJ/e+txTNGB1cglR5ZMkwF0uvj4RDydMCka3cELIGswdeXIuySSnInPfu2WlBCL6V9RZfNSjIKvZWIQ1xtPnRcMEtZ//rLjSwGjsDnBlFMJeK6ZxsKaD80T8QXEIc9hLeX1/Vf50nZsSJZ6dn1sHojOGYQ9PhUs3VbJrHy4+exk8zgdWJ5P0Vp4AgK5h3RtTlPeOxL4qUxc+PL2iLsR2iwF5tK2/njPd4pa8EMvT5hGA71iV6xtIyMUZS4+4Mmd/M
/v6Da7BHJm/dZ1YPv13ErGIHkNtGUQwWL/rFeJIOUpeKwQzeslCMZc6Cj0JjfOD2ppgJdypm21WBZ6l14nAVmjaIcoA1F1ss5vlDEtwi5wJo0XrLVRB+cIYBIPHP80zsbtsPrlF2Jl0NK5ym2rk96WHGKtcNbuDzBxMHCO4dQ911BXY8GIINLw4vJhSBjJAcNJDg5uIMyE2O2GS7aVhz1BhGteaUbaZEefmrZHu1XuD3wL/pEuG0PnuIuUvaPI5gfyPxiilnYacSn8S4YvmcUCAQbdIzP1Xa7cYXbYEGZqrlOJjpXqVvfkH9SDywXThjBcxyhZZNPHxPGPXV36YgDaF59Wd6kFTd5/a2Y1BR/9gcapy1g8BgFC5PDkgdIhmKy6/zk/xgSPQCbHDbjFY7SSrhpZHlRMbTivmyQc8C2JJgjGWCTCmaOTQiyZxOn3FxLpAUuywxsMDr+1oK27FJm8yP3LNC/WewThaUyrrUhVmd4PIFOcbx+1auUKdbZ3/HGiV8NQeGTZ3NY6ENoVEzxPWRREZjjyMvtvLuWtD1fD+JYTqhpvsG13sUyYaIPWJ3Z/ld7EW1/JTSlt5ICE/deOzaF5BLzfuLOORUz1bjSEYYzQdH5XbCY7VwTf1JySEdFCbVv5di4NhOV8qXu40Ljr4r7jvN4e4/R9+/zRa5Xxux0Z+gb+VXXLCDx+U673rq+hull8ktPERhtV0wUvvlIAIwco4nVRhZoWMRtmIzUgwYBwTiF8miN8dihskRg3PxDu8/vcxuP5AjNAW6Yt0MF43MmBL2/LaDUjdmggwfEitGLfP5sdiG+80wzlo2Oq41oRlSIhbi4Qn9jr+2BYj6uC9sQdbce1jeeVrTiP3E0jNLBw2XQ/nYEgSY/zRhqZJgPUbGZtwiOnPwpbTf36UrsRUsBkmzzC2KyokHBPWB+b68BPELfum3KyBYsCs9rL1WHMuSiWL0a371CvdPoe1SUseEC0FDH+kswlRFMAP6sFA5LlMM/7ypojDiM/+2TQvEycnu6MvRfyPRav0CHQQNy/0aDCVP76VU369Mcg3Uz5tWdhXwJBg1vNuD1dWvutBdphbbe//ki0j5VmQCvXAi8lWuiWqiLuc19ZkQlYhfGPKVhWMGU0M0dg61NBuE9moATSMacM9bBgHsYH4l8fjjgJaFt/K5xrmX/ihcFxIEEKHcjPeIS4/5jjefSYu7JzFLEIM8ii+YbfIFwYDRDCxAg0DEkYdOBke0XWrcGRIqMGxcZdNae3rJzfIF1ffRPj0X/z4hZdMuwu1yr38yKc9QOfWQ0Lt330BQvSWmHa6EOHIWeqMVjQJMOOzgoL13uMxFtu14wa8tyJ8YOdmpT+Burrkd0FENWWoEfN1TuDE0bXuStJ683gOrJYVKYEH61VlHC+uAIjaMnhNkYHDVzQlplpYNa75NMPLufic5poEqngKl4KkUwW9HZnlaxPThI5+7tw2ElAQlseHE1PEuGuQc5aAbCM4lt3lCQWqEH+w2SQew0soda6IhWQXsDNe2CcHQE1sz0Ens0/QrejVXBBClj8KyfgbwLbjOaAU9cGP20P7P9cygjsO/YqgO/PKpVedhtBeom3pMEYcr1csU+X2P++ec7Is7fzqlLxdSqQyA5xAqzA5K4jv3effQhGIMeXwdOKU/BZTdGLIWhQfzFSvLK6uJZ6wSFpDxBSQ1VxJxqAvvMougrEhE70QoMwhnNEeHIa10fkrV8RMUbhb/uZUk6GN5KPQ/bMB0GcMpqcWHZxcWJ23/P+8QerQ6AdUmvsfS1DLwcvTvHMnP9dG76DmY8Rix/S0L+J1YeS+xki6fg3alJXasm/TgCfYxBfGTkg4mTUV3T8vMEqa13jIWhEjPXn45SXLH//aZhbkYqGTqvNbNlj6WDxbX3y85miYti3faSl+DJqYp29r+EsYwmNy8MUDMvzWiJtPVflOFZ8MaRRmLfnnVaEb8UI/kcpGN7qY6YJXrZBr8I99ReHQFUuOD+1NfU0zr72s7CDeHohSxNzCxmt+cg8/UYPUeK5skARkuqYdHH0Q5bB7m8nbHntXcoa4G6Yu+Z008eHq4XaJoh2hQrzlQqtYDk3n+BhZeRE6UFr6MTOCUgLsU/AT26MJWQaG+pNrW/wJy0gnpvpkaAJZE7+wJT71sRL/IudVhj/xDvFhpyDT/a8cKvaffEo53hTCtUmDNeXMw0AEXKrySaYC8stSrfboutnAT+USOYebW/SQgiO8hqW+xcpCOpVxJFie8x7k/bLl7Jov5o6gRR91rLcKnFdcd0AiNwFIZU3yJ/0IFbwDltHchQJ2GjL2iCyVXbNmU8QB6tjZDSQOul8cOdQhSkhSE9ysO56lTn9fhe0YNfSzlOgj/Be66M8VPIwWRPmtBc53cTFFflBq66Eo+f5tFtAHVcdeHwjk2TeDDZVVf3pwpaj1FhXNVU+U2OP/9XSkA44dOzUC0T1uLEG6F2hgaOISwVKscfLOrZbkkGzDrFOHXwKwosv6UjwbvTTVhKN3lVaU65WDJ4stWbDm0LMM2k2d7mNQfniOaYamjpq2jrI8w1D6PZcaGIHVdEq/LfnlwGI9BCtirOOGRmvjibY43AtoAwfN4iV/+JZrMgik4Wo9C8uHK0sojJst4/7bvKDmZ5/eybRhDWU77LE41OIrIY100GyaRToHMO+56UOAnIWpElPXC+NGIm3suRp3RmE4qsQDGJJe9QDJg07/FE1LwR7UWA2+daxTnvJ0PsId63bs+NaPvxmjswBWK0C7hO9c/wAYGegAzKV7JqgVLlT1K+DaWkvC8Ob7Hz5p8CAv4GzZWqx8AXuujmqOsEw2ZPT/t0JbuR/pmuDdO18OiwXago2AENiQ/dWliyy2VaLXEw9ToyY4CuQgLLjVv5Ni+rsBr6wtBVgdJUbUyiw7fDuPrybzf5YjulyvFj8jXKPQRMCexSoCw3FzKvrJp96kkkWK/lGHsVato989ghR4or43OuLFQF/Xsdu33z18lAcy8k2Zm5qOdqZOsqNV4ODnzHkKuVGAtsEsEv6QWr/IfCnfwhqYYeucttEqHO8B9zPJIyh4zDL3alwWTF4JZa7LF6WsvSkLyA5AVcogkohG8U6mzP/FDbC8L3l2AwPfkAzW+sJdDLLuIAF1dHA0N5wUfK8bnTAYLDqYKFaD6U2C80QSdBdo5EoEOmeiTq9aGMc0v8MgsgZoa2k/JRVkZ2HYnNHHQThSYgcLC4AFLE/wZ+ekXemnFPPjVX1RTgS+A2KAiSrQEcMRugD2fglXf8pJg/DOqx8IGdADBwa3FeUd8M5YyFnIoJzwwkqdNZveFfLnNVmpxWbGQWj+ITGf9FEbHx3a5cEVVs7nhUiPAAwiR0oHPbbQyrUwTyDxvbnrdlqs7vsizV+rL5qrYy/7mJmdgHNDo0W2BsLSrRpEBQPpEwC5XzKLlD2s4FwB4eLegtcZd1AzqJ113IE1969lnu6oWivxO7E8hSOCU31qBnhkLMmlYQrpGzB1UeumJvvreZ083TAKl2clwe6sr25r4HpDEp3Hvp+MVix9
Hi0CL6V+0H+URfoiFX6Fah6w0uxzOei6lOhesRut1l0KuYqaIVVWTvnPFv/kBo/mDTxNjl10jiUeyoVBeQXeKGieL/QM1i5DCsclyk3uX5rhpiO4vYqPbPjczrRY7ZUgdf4vpzJhUegHI5583trjG5+70VsoGnIo56AbrNPomKAui8pPNHyYZopTep1GgZQL32S6trRERIgk1AG0F2uwOTl/M1J6hvipOyPKhdRbvaUeFWtElavjedUyjkBtLDVPn+9537/HrHdENAacfjXrRSH/fINlYwsc3ywJb4yWZTbZ9lNoseKpxh2l3GFKGm+kfWlN599jUmH9BTliPPwERkfkNYpn6bry00TRoLNn+sMlg4ozK1Zm0Ar31wyf4WVD841mUU5inlKJblrNObaqo7HTh2pKKc6IFEeumI9vAL0/+vyoGln6F30eiY6TsETQl4v+US36zyX7UApeRek9CW5xe8E9Tpn1IcLSh+gbjwoceZGvD3v1xjKIWyW99oMqFju4OMrl0ynHg9XIz2ctuBUUt9MUN333uVQMFwbNvMmd1yavtxFOkVEvr+p6RuYBWPcARRlOpW3wl350bWDni7d7Rawz2g9BJnJhyLyfIG3iVE6OIHsW2plpc7hVySaf8rlDH8OvHu0OuNjssZv4s0IXrK/E16/6ot+4i1Io0D0FlOyOa9Ze/7s2ePa8mGmUcudiOaLR51C+PC/Q+2QwkrKJxNybtIZmQTJ7jMBKKIj/ZjiXm9ZKs2pbr+9FQltWh/1kU1E4OpJGrp6hUNbXj+B4ZDSle+2VT4GcC7yd8rgll8XVyZU7sMiDc42zpPrh9p7A3NktoCNLvwFhxb+fcxIsuKoJDhXWTswpL1f1/XgJ1F9e+EQRHb6+aeE4HdsoAhoaqJI/BsYyet0YABDJt0AOO+eloNTk9/8u4Spw+rKEqzsZ9MwwIHNRlyKw1Osf/P7LD/AIgvx04Usw0Y6Yy8kEYm/ctJXEPgPmT0nRkKWI4WpB3Z+Ysk8FULXNpRbLLzBUDk5wVbbIN7GdD6LMAK7MA1POrMI2+oVIVZnjEAD5ViYZZfNvolKQTJuEcOTMPzUQrJKCfdAbYIU8UiP4mmq33oWEJME89M80vxpjG3Ju9gM38bjcJDXAwe4HOACfWjCv9t4qLW6H8pwAUZij8Pz+G/9i6pfPXl/Ayd3XG1AqivE4MmPG6N1LJkQLxG4kNjCYoAYqDUoGCXa59TEuuP7ZlHhoBLaaZLRYrUm6uUdo1xHUnVCSqkcI75xUtJucPotWgIPqQFhnRBhyMtcRUbp+hhZlMoiY1+qrsqyepyfFO38LRV/SpDFE0BGHdhsKGyKvYdOAFxdCrujzJQe/N8zFz8v18a2NTJ3xiDq2iWUa+tNRZdUa1RrrisxhdGh4SxEo1tO881zRavl17NZGRQJLMpKlv2xpwCCFRxgtfz4cN/pLCODADXr8zGZvhybC6ilIz7+MlVjYmn5rILa2M2KEEOO0FUPvjkeQv52jLXgIufiraimklYUO6FPCSqTAbsnhwZSavbFR569CZVyHx10Z6eHklV33bZImbTDAsfzvxXEPYiIBpQuRpYW3X45tOZ4+OaCEQ2Sw7hKUyLQgtwwlndcnkf1P5VsJJY3s1JjFWYMwsTkrbinvS0Fa9FsLvMm/5LUBsuBFGcnbrF6ITREEp9ycTMuC2EsyRc+UjrIpzdeP7hnCUfDsFqTFNT6MCN9lpZHin9C45pRlMUs0nV0elbiS7+nvRyQn/BAuAqdGcRlYcvZ+IyAkeBSHF/x2rSRvJo/TLuKvtgf25lKCVrmGSs4nhbcwYgipaD/8B8XUIxzp2Zr+MYaaDb6OriYlwURNcEQjvkUlo+8pydMMgi/WQ5gHRha+LpPLvQ0g+zqOb0rV1FxZ2wQ0NNNLVlmVpa/fn8PWlzBpl0TrOSiuPle+eVkOl3K5Q6YhO8vqrdM/bheAhnwn6Uhxntx62WATi8gJHD+G9KjXsjf2glvhiN4N2MOfKJphd+8cfiv+iWt1xVJeA6mJSX8t8ItOoWB7bHBbrLJehRPOJm2Z3rDsDAFwOYmvybyOudHy9O64243nLQ5qDmVXLJzJNHhM5EoX2XOahOkePpDekKm8mgxQgHgdOawZJzEjf+r+eDDhZF84gBtdu6+20XidePxVLtpovK+VMl2Bu8oonjS6KyGSTt2pnG3YJ78fiBdyzULXzihTeSsG7VySciKva7GflVWDG1/4me/4iyIY2Kpcrw12LHkvwKiJfrPbpFCuZstJ2CSJAdDA7ZajexmbzvpMKeacUYMGn3w40omPPGvPgQhbI2nAMnMcljizX9/E3/mFNn5af7yzuqbUjic4weE5U9VsjhVet/41DWm6d3jkELuMy3kkNhaGrf7gyIcjhDYcIUqjpUviA/nWy/MK+njtvdTxPLyWR2g0Enrj/hFDQl0mJDV175IscIIZvOrGSVgJ2lLtZXtmuB/UkOZnaNKE+ih0ecT6m5k9JJ6L4Vo8Xo9pDtQBgYUiLwE/OI6JudO87oNys9WeHNn4wncon5/+ga99B+Z6UOmvqRPW9nx6q7G6QeEV6sqRYHYUJKSquMdC8JQYKqvVjlooRayzdWWUlKvYrcCMM9/AzYg8wDoU4lW3vh4OBiQ0hxnnCJ3bsE61+/+uBf55RnxDkZsU//HMvDwbh9Bo2SoMa+T7baUuv37n+3zq8n/LHf0eSEn7ZW7yIiAz59d1pS+hTMCni5amxOgtOIS1MxCvjDg8jU+KrpZaKXqEoPGZX/ixzqWvQSYU9vDHruwWbpWOphzC54W8BZXCOr8URSz/XufEak0sp7y6SWFR+ZGOvf0AhPVyEOqCXReSjqIAy9OQezvMM0r8czAdjDzJiAgSAai298G+oxRZ5hKECzXMSQkoQA2k6kGs47nr0XqiUq0sTuyCX0tKgNM6W+vkBeS1TFZGi8ZXtJbiQRetEuNCsO+5eunY72eIaPLJUjCIbr8QIt12cOzB8X09xxqTqsHXLaiKKjUR3UB3/dRLU68pSqzoi1YD7uvE7CtKQsISLYQD4K87MjJneILtHZPtMb8aWJXeP/SOdBDjVbnMJRafGG+3v1EvNrbH9RcGMvstMbNE7SR6POHQ8GxMiyDAjmZNGmzIzz087PFPBhCZ7gB0/izMSIUXiZcXJ9/P3LdyoIkh12tIyQbGvQu+xWns2VfxRp8cG07OyW0WlYMGiyECyyPHOuzhUEnxGkVz7WUeGXZozuvhSJyZRPwox+v2dAJV74MK5Sh9IdCwxYR3K3MMnUGA18InhRgncOfqYE2/9aIW0GS+/JvLjInmT4IvK8VJgswRiwPFn5oVRTMSujZLAGbxRMFvxCtzCFT1owI1k2jJ/qmDRkuciu2F4FSGef76PLo+uwt8CQ5MHNkXwF9YmyRNfvYGDmhJym/fVrxPN0sHhbS9J/MloUpp8JKl7NZ5k0Mc/QwfYNEuTuD838pAI3qD96vAGYM46oFYzkQEgRWg35SDiVlMne2kAd0EDg2CmrKqgbWiK7+vlC0EG9InRwQNSTM
aU5lLzHVBzpb92AZW7HZT0nw77M3BuYsegWEav7R+PZd5tC6oXKrdEc7+a49RG2fgPJ5H9yxqrWAxvgiJ/erNg1tqF1GYwADZU0VZByI9gTt6dAD3vTpbcV4KrareN47o/tVsbV0vcpvCcBjvlzympNBIfrxMp0GWDMFWxukqUdsRONnDTFOBjub8DadxMbqB1jthf3ZlGoaBBIezz739fau2ZXNH7MQDNSDg34wNu0zqPmLgRBOa7jBMxKq3qosYwCLsCa/2hu6/uIxgWCw8WNEKt9Efd+B7TIMdIxYo7OHgkb1v/EMsDB9K8yTLyugp4lZp4q6x941cYhtOIvt9q2xh0UfupeAvOuAj5E+7x80iPcN0SJuxBE5quQB6BQMk0B0EnyUAs/7Ht6wD0Df2vbBE7OxLeR06Wsm6ShiQS6u9NPj9BYqcImKjElwyA8cDkWKPjRp4PrRheJvJ0NwIaZ3hJQKXzAvFwahV5tTOOAvET0S2fpLyVZM2hvGFZ0GH9xNdhE0augBT5LjKTwzfvxRbraxYxJE9S4ewVT67kHwWh6P+pFXmeh6Om0060zJI+EyWAkZVE8WxFw1dh+B4D8pF53uCirmR0ge1JerfSwlCTGHKjLb+dZttjmFLVcIVEQDVNHiwIVO7uap0M8N5bzCQmfQBMfKbjDipKBO2WYOriJnIqEzKIFZYESgVfphmugo8wkFMrqazhlpzqeDtoZW4E9tao28CLH8hSgYQxc0bfi5VuylQRxXTCpZa0C244pNdP1fU8oVj32clZq5JyIRWlAGxdFw+5iM6OTX13w09VA/NnFinOzmQ5hjrS+LPqa/hhtsHTn9AY1LmQOsCP+ooK/S7HXufO3xCC0KRUvNZygjuN0qenFoqKbgMJMAvumBz5bZW0kk0Lvl2bTYTI5FgXhg7CtbZGGOEwxolqEkUuUIEOVGYBSf8+5ZKdTyBGokOx3EgPuNZ0FgzZE6nbPdXkV9UI5F7RF0a3Eu7rYyyxA6UWXVd+OyXDBoHLPB6+g7zQ4XAr3IcMxNH5mQw0zaBtH5f7H4kPqi89rJEAkiQJjvRIFKGYgG8U7d6gIddVAbka2Dpt5nhmOa3a3ZSciU6dhIZitepOvlZ5nYuILyP49vE/8FkK1Parf74hjzbcr3Z4yZTtQlrm5yISjayOEO0XD63qp+xVvnq/L3Kz97VsFhCv1qrqKz1M+tqykppyuOt6UnJ5cURXBAVxB4AYA/CnEUu67MdN/PO5d5wsjEoksgVZ/VncJvhI5e+INRF8fCRodkoEi7SlGXptZMJrE0ZYaIL+KLU9dpVxo2mCV5L5gDj+As68MwTpNBYcxioql8Sez1c62VJy+r9m5IR97fZvDcZvt285ESyg2EOylFNHRkyWT33zsW1Xy081lZoFFwxuc4YCnpkS48W8x4lbguCQki2hyEU8ojmkTUjinuUSwZIK6kFwmOZCw5dwW1DfAaIoqUg11hkh6riuh2fhQin6CD5KATwhHl+VwYIQEPAlZz7W1haUzusrxCduN/GV+DeiNUQy8yGYPuHHy5+0OpF44RIibYcug7Pzgsd0MNQ6QIY1t7QY86zYbv9AZBKwJgm4imfO1XIZcN2o0qqTRpPVn5ODdEj8onDMUUutW/xiq9q3goC4oypOkzwKg39MbCNKuI2bTv87P+yS068RboPyzgYlmn8V3N2KR+fzfy8hbxTXXS6RpmTwtzZxzkCOViLIxEp3FJ+pBcjrjRkuEMCu1hnH3wA1g9M1q7Jpy/OLkek8KLyfnTFmUS2nRkhE+b/HIwJW1olHHE9UFm/574bIG6eKIe7tIsoLkU1HslfMB8ZUq3vUoWm6d1WtuRd2gxWn/y6LABZeLpV/o3EA7Cvw1Y+iRV91Va35ggtqgEnuiG7CwG+3xfpf59KNcpl2Y0mcJQuR4WaRUVUJXkGndah9uIqUPGvA/wGt8xnCOuWi02BsMR1tuqU1QYZS6rhQQvCet95sHQRwyFKLqdV0fg7JoiQyddyKTuJ0lkfGBcMkysLk2/MXz5S3Y47Ykcc+ylMjzseYxZ8yvTljBUDaBQ/pV8Q+6fk80cSxE7tVxzh/bbYfWnQN3U/7LtWog+JiI3iHb8HcaMmHZvYcnHHQpXoOBJqgyNJITgvYgdzMqyAJzQhzAniJhRA5q17ziSQQ4oNKOc78NoEXnDw9dMvEqt8wOFsh0DxGDQTKut07c3Y0LZCzkKQz0w1FRz/En3K5s9sAEofyu3f0xgJl5JPR5QG3j0OcpijlD/uXv12XlvczylTpP7bMP0ZEaZzBeP0CdFgzjFjpRXbpfmJFdiKHqoxSkP6l7eeV6onmPwldbEDsujLUyx4oOFYzGh4YHRG4/bs3A3WA5y+6F2fRh4RdwzXpZGULIjwTUz5NMlNRoYpBxPlIrjNecRlE6FPgcj9kdvkJkiDR5MKijXxNcbgV/Sv98aEvNN7ik21EDnk+UqZX7N4TCqWZENhqBKq3z44OkHjkTV7aV4taqWRuUhoy+N8VHfERADR/bqxUd8Uft8CtEB7rLWmv3HapkWhvtSPXRoly3Lk/o9oppZlfw75RkIdhafdsE0Ld5j07sODFbh8Hm3/2tZQ1rjK+BCJU2yJu3m03QNHsbhG2WWzYX/njrebwZDVmRxlDZtzt739IeVqWgsLR+FKx77YRHp+zbLARFdEO4tH8osOY+lEHyf7iHgCPUzSZm8hVA5jhNo7SPzQA4cWHd1NFyf1DJwb/X4/QJ6V5Tj3cst+X/9f6tYHsKROf34IpvgIEl3z4jT1FS6n5ewAu9jXtOuTeAdhG+EHcTTvt3B5h7GiTle03BTrZ3kkQ7DE+x9K7ZTp9jJ+vn9RjBv5PGSgwvwM3pKaxS4e6/XIDIZ/5MbKH8LgdzARVs4Pj9YdYO1xvrv7f0J9Lb4xv2LlNdKT7q2cdEGQeKlB/dQ2eHOKTwjlXmQBR9Kp82jaHSMSM6jF1NG5EVUL7sv+mYhQlqQyqLTVXoFEYfS70ka8an6z/d/d8g8XpWTFG9+3bF8IzaILkSd+zvV5r0KFcXAmBxl7pKI6Pqmz10s6p5faMsL/y2S2oLMg0XoCQXjTN7XEgiw8KrjMP+HWnIid/O3X2PvSFPDIqPcuQsq6pV0+jgKpf/lwGZMTDux6ieviXGO2jg5+I5nmKW8/DHiUTMB1h/bYB5bQQgsNk+Pw7s6AD5262omiTF7Jv/mxZeNnBGhknLZtDPQZyucBev6NBlPrIniQuw4vyhG3l0akZ2CYw1AZWdOiOhWnTbKxO8j4a3awRP0BV8X0+tBueE2+ZwgIxPxl+t/5hHvAh45gnr6OU5OY+B0Mv+qecCtcM4ppZ+Jf07QAp4mrJC0/HIKhAhfe9TvRFJHnjvTIpCIDw1Qmt23kxmlWr0l9Z1BWWQ13klnd3FRpVDEgYPMitOEBRvjza0lCKbDBg7Xu6SoLWmLWIqQAs3ZgscgR47JSlUQc/ZL8Nc52lnE3uEZQ3w4PabG3NUgmW1k1mUw9mxEWY2XxznD0afCa40923HieGvkezS8ynBTrP7
HqoSBQ6M8SIJGtpciVr11uEmganAWvQ7GFuh6MgNoCCAHKr+6D1oRn9tql35fZp8ZuunWEldiswn4Ue1MRHpvPWzdzJa9cMWVJba3j37vMiJ402BHZoc72aNQBCPknEcfhvhIx+n2Cej75aEss5+MkgPFDlWvUO4udAu4xOQK4loQkeQ7M0Ubf0rALbyTeEiwRR886NgUXw6l3f1hzRtygswLkJ1AhA5zaHX6w/9mjsEUYsVotOdCf2NOPBep83KExu+fHhEgdTy76rcw1iw2MTJsgUBU1zYBuYtpZRdRFmdXXT74H+n2YBAKAgAANNu2bdu262fbtm3btm3btm2bN8QN8ti4qu07+d5RDPX61Au24eRRUb3TWXFOtYGu52g+a2WXLG8L6xdPMHgNdpNisT34+pOPTMFzOji/cyq2spfzZ/s3/6RZpcQrm78HfoY60yOApJVShlXzkrAvUXiCaS5DJvDWJF/FPfA5jRcj2Jto2/Xz7uK0/hiyvWwpCKd1tc4Nq8wRCbQCKjRE5Ssrjd/0VyP9xymg2wU8mFgqEePAiHHt8Ed9nUZKdjfhtSQthM+3oir+nltC52Gk8RPc6g+jtOC84txFKTUpZYLJrJAZCcS0ByvIsc+XWyeRKW50M052OoP+EtxkldcVv9s95sXdvSag110H+WHwBJMOvUzcfvVaMgU9GUPpTtUEYR1c1v3Qns0C8fhHJjJC3bLz9EkO7WMHAjXLjdmLSm+0y7JAzKiN7jpIlwN608gPa08wv2m0oJWNqZfnL08hgg4zdwBXRUb642IsyAhSBGr9TwMVlsWu0WhVSW5CPz7Zts7GQhEIdCqfIoMzfFaeLxMiHlEzeujNcxPQy8fwnBTCJ6EXSzNUUKoRmilMF/ewD9FI+bPoQ3h3z1tGZkZthXfDsB6axsxC34ri7e6838mA9DerxA8LldVjltqzy5PTshqOPYBLnx3aaKaDrzzV33Hvk82WcjfxNZOZwksCulnoGXq5BDxBDzSBovZnTpPvDmLsslajLaRewaivZ5ytGtMbH2gUov7GadPwja7wl0co7fPOuZzGNjrVYQIV9qm0+9HLye7hIROqCYwdccmHF0FQceugEoWo4al1eYrC2ncIUGzS9tOb76ZjjgfV3neURWcMJVUXzopLeVySxo7uSS4lp7u07o0CKARUxH5O1PIERg6sCOTc/QHj4XbNCg3JlQmE1TLKFk/S5+kZJDGAxCp+ezFA1vJmLZjEQa/IFT+vz/YMpAwLYZalBwAdx9LxrhH6CneV78E9FLPGTexgujlVs2Ridrfxg9+B8Gkw7hUjZR+E6fu1UwCr0L3z03lgTgD88uNEbFufYYQ4hbXerzozy6jzt5nVvAeu3npfGr8gFBf7Xj+4i8ZjPROAqIU5XCxH1W6Rup0oNs9SwPTNANHTt3a0xD1TbigZr212G9aDUevdbDgZD6fLgyUwDhD2j/YsGow23mkbSrzX2oh0N6jq3YquFL1RXQaabMGjjkHYmoa8zNX+mdDHrJv/GesKng9nz8QGYyeHRWCKukltmsDIINEMoy2UAZupZNOV4IZxDGZJwSixFrGVu7EOUkn62+Wxsf8wVMcJ94LWyWwjP8cSS9OwQXahOHv4dknHr7WQsc0SINuLq03uh9GlU3c9nA6n/oiWAZOclbDtRiFtv2aTY5g6dHR9JYD0PQStlfDyvXJOhfuQlOPw3KQDsvBsOn8zNdh4+TIu9APugZ8Z6cI5hEdnADkY5talgq9XNb4HT26/nZKPmXj1Ye4XKKw24PPojee2qBYGjzKcT4YhsnP2k92JCM8VwqWeKoKeJvHAHqKgyoNfR4uPRoL3lRdqnSW1i+9N1+pT3PhKVH35wSDlZOoM0TKX0XRWjvOIzELg2F7qjD9YCmSbeQr0CtDHfrErsfNU34Dy2NlTB0ZtEtEUI7X+UAkyoqMok2eslZZpOh/ByaGpgGmQ5HYXk8XUul3nK22KKJgTiJpvxkJGAjXn7G2OxaVpvBELynPRZnnj0YwrMbRi1aVmQm9PDhxS6I9W5PpTmPcax4wDClvMXsb3GUw8VJ0HTtZAUzkjpeVNjEsO9pcQf/HYOPrXw+wpQOzsgNpAoKxRA1B1T5SdMfu8bVavqGKXrDd21VIVe3R4vWh/LVib8UxhtOPeHxt0HsbSq+ZL2AR1aL8AuWNydN6ztQFw5BjP4rhHNwRtpPHs9uMREeewinam2RimINk8GzY7WatLEO1cDaHyK9SYRPNXgkHLBfcudKkeYR4cpGMf6LnXGpyMpJulMDokrQR07Yji2o2w+5TMTvDRE/EuNjBXZKYID2H5fR981zV4feRCmkPAxvwplSp6wpvaqhfNUbDdVOz+szlR6gF+Po6AiSyuZ8L4lxslhg0qg4ImfAnDvdzGSnDB6khZAOnUVWM/iBBLfMI6eBts4nxaY+rtDJX9q8iwV6srBd4PoNEgYouqHP9u/z4RtV9nIJRr6n7Awj9w6Yv1h3jV7hZQvCDiIQACTTx3OTF538c81soKouT5OVLycgwpG6RX00BYrf1mapT5UQgyxtm37/45pcS1KjqYcy6EjWXMj+2WNt/LWhIhPdAUwkltxft2xn3QZY1pD7EK61F8IX+VMF2uSaoFME84cFQrQb9d8y8g12bTPPZk2OtfB5QsL79A+kD47EQDbx428tzl0yN6tRpcGnugTaMO+KQuPviL59oqumnUi9uWRywluKx8cHUPTN3Yttl6xfuuzGJV7WtIyr0IKk+4cvEZKUW7PW5ErwJd3WZUIBVVKKfYvBm9WikXzDY3O1S0LiD/OhWJ84eeNykv2yvNkZSUqSw8WyOgnsrJ3GIdCklaNeSy35IaqV2U2UHgB7g/Ze7ZWc0Yz6UL5iJGgCEailU1JXo4TqnhpeFE0dbms6wc0yQ8KehURGsv00s10ETS5z1bECHKm/a0WDre+TCxOvobIURrscIJKioXQHg1gffOY6QcQxx5rEDlVOAJ4NiCZieBLhmf0mWtQFYMzZZ4gGSijN9llfyQzh1qKWAj3DAruFjBikbv5u39y1PFPIkGIDyyt504UxiTDdTaA5cE9yheOPST6+0Uh9lqglgiTPIvnHc5blp960UvD1XlNfgW/WlBjAPWPksTeXLQFoGgW1tobTp5OEHzbXNmrcIaIat+nq07Lh5wpGb25qx5KxPcooPqNDThE5hymC3ZFZAQx3RfvUvgiKMtxgTjOqHXrqLEX62OmU3g67RGyMNwYIMX3AfgtZy6O+pbHFGPY7SMQDtvKT7as7zcL8tk22EgazNVQXTB8MfAKOD5DGedpc8GzxOK+pCOdjPtKarbwkFmwyVbm3s3aihJthbfYfCwsXGxjBW/S1N13GwbkLPY8K5QB7zdYjoCmU3zlr+88Sj7Kf469E8ne0lCT+C+hsFlygSe2ZIqU+f6EDmgz7e9af/w/Y9CeX5Gf2M6xMJw+LhXbtRXB/xzXEgXRKOG4cL2aVWiGRmx9T4URyybTHKvaQF9GCmRjwKExWbhtJlk4yn7zfIPHs5BU8sqYa4Tio
tv9ifnJVo8Xdx5X6HsCExc+ryfJI4kf+LFCZ0C6loQdzWmx1fAHs7q4gkeBnBHQkEWk/mLYuZ2UckpkX85q7ETf7hc02WfL1xh2I539lroO1g+a/62AWGFLU9+Gm2ytlNf/cnkmUR6YRaDhu8XzMoQLbSZV05Cw+xEPCmIplARC9qPVfbNoBDxv3ZFZ0kNKh0LfOXyfbzN3xMI890CxuR2MBHMdL7ZRyI/l0gXRVl+pJFoEDYdgGZcYGFoUgauQIol5nzuf0w87WcjwLgsUmv+lIJJbNS27AjgeVwnWg0F9L+4DT2D+s7ndNw+CUn5EQQbTo1UtaWYEZ7B2RtYxPdVCZjvVlupjDSPRrvsrpE0INYXvLSwsuyKWRZxp+HYQywbU4JE6eQLks1auWMQ2aX5pyTMUI4zNFSJTDuz7RN2P6GhjFm4pZjyXrSm2yhDo1sNRgAl8tQmPAcHL2ii56SjVBhwVIZ760maCjtjl3Z0H4Sm9PGNuWMaUkgC1CUiXTFCynAMOeIwcQXps0P7FklojES1KlrRZWr0zEF5OcRaAR8NoSKbHlrA+wJwHLoFNS1jwOGfL6Jr2Ibs1SHUmGn0VC+ft4AgA9NN5XQNBjvKoykqC3AztDTExVsv50rUc+ZIyVdgY2GEf2kbvXXq0WoSR3vi9YfSqLm1GiW2mpsAy5jl3z5a4qbu3FinND/4avwqVxkULSfsNpRV8NsmpP+0piDS3fmZZCRuAb7e4uuRMAoZXmIO0iyUtj7O4ntaxhyGjhUcz11ygpAlfqyKeno1pbz+6fD7w5jR9zLSxj+0a9k/VeYq3AMK1/8LkZ7hYduGORxIwIdlTg5q5UFmKNam7hKdf95zbPprYJt88+3PXGaVlVW5d3E0dadtCDBTjLM1uF58CrY4jaJXriaSmsEXKN/cjZBk7rruufwNG78GiZ4QyPeAZ+R0xcNuAp/wkAFylwGzOXbjdqPJe8TPoMh5xc4cmU8jig7nk34huImGSr5Wyu61trOB24DKld0rgmpDh6cz79c5Kr+SGdTQeCpduhcjnTuLAOlNUauEO2IRmQ6WvJqViDmSkRFUaQGrcrUT+jrys8hwfZdtz5Ont7pJZ9LGgrZlslk+Jq4BVxAeSQ9PHj2WNQDSexWUq9hYOKRdtg5JTAHXucHpx1uIfaXMM69svpYvNBa/SWBZe70vdjPu31fwy8hol6Vm5RnwMARIW9Z8OLUhoEuRutm/YIZLKo/73ZAXFNOe1XqIvtdh4QdlcNdTDiXPjiJPCd8u8BSpa8JLAy0O9cepUBJHcoI7k+IWoXXBlVQ1keg6PHqU6SeViX4zJZdxzAP6fWKP3iwqlHaj8WNOZpFPYn8IeD6gXA+GgYVPX/u1+sirJwSXgHJ/Zyfs6tS1ghHbs0nGFIt/Rp0N861zxk32UPLTbOONx//nut5rUjbno/h6MQ9Ngspg2+xNFZziU6EJVfwEm2Q8YITmJB+VG6kwvDb8IA0rFRnsRsbM8GtZR/PXESmef46nT7Y6sAUwcTik+T7BwkF3NEPx8XdgcTzeU78uFJDUH2BaWkqbAwn6mwPQ22/nDGMoEsCGMA0w4n6/yh1qGYuzYnC41Q85nD/gRLYohy21lpMf4Xhq0Khg6sS6NAF9aB+6l9ZNQjYFslur2lg4FiyxfvO8+K09Mm5jO1JPbNFmKMxkXbUDgn/ia8zRU66y/WklANgI9G7Py3995irHYhDTUIxKngzSIN1WMWHzI1Prxr0tVg9gg4kZMLRmfmKlaleWTyHrVmFTCV9jrNOcxhtXlkDqtLom5HgCDcu8Qr2CUAv1wKRiYzFtp+OWRr/QDfEHr4wNZU8EeSjPz14+ihpHTzyVkvRToSCFJVxxcocx46jM3YeoLY4RKbHF4RUEeNB3eMUZRxM0uLOQjZyP17co2Jdp2EBwzwZ6LtwWb6uX+4IX7IsFn/roH8tUAdtYzs/5+xYQAlKeUZTBM46XpKhoIu7400kuIh/YP2DswWzmV1mV9suNlqlvFUdE53uph5gIjrImz2m0Q+LCuQhQmOviGeu7K45ymc1WSunlejNdSNaEFWdx4RRGiTGcg8ttj5ZOuX9CzKLvWzjlWnZ1T39epZQaupv3chsSE3M5mer/IoXP5hnumP3SnhacCEAZZHHzs/B3VWvMhDy94hJuToMZFmLqOalNdGeqQNNZSg94+zqjKgokN5+Xrkr6M5aBplbl0DGBZ9MHrpo8dQLHWBGW6zAU0enDUvBa6gGPw0C21OQo3Wk1dH0efP5tnjjHnNW5fRV3330GQNPPRvTGifXSoiOYOQqyfqMTj53cEoOZSq2yvzopW5fed0Akhci4Vx7VN0mmW3ByPB4lC9csoRS+rjLqSfbYz+2LGIpemFgoqkCfYwtQdIPUpm53+VgqfricaNliqNIiN2kPoihcdKNA8zZUqpHueKU5lOg0k38tgG7L7m0MaGlto0j7GnAXJxtrQ3YwdBsdwhAt2Cv0T5gnNgdwKVlwCDNd4ea7p/B2DXUYGB7eI1hszLK38CmpoJa8ZsGeQgsgdSsJnDddR+6c9TasAp2ePSMxF8VyTJIwC4axQP/Exkf61yGtkvKdUjn/4tYqBjUqWYpQJQZW6Y6DjHVIAOSsgS6RbsH1SzpMa5jass4PhpDhGzzNgLyaAT+4sFarpj7by8sUUiO8cEDZOh9gq/i3FIrwqww8NO+y4btYi2agp3aGKS/3wJ3ekDkoMn721KMyGfk1I36bmhIPBdFZ2WBbAKGZbCrSL/JuuzAQGAkYJKqA6iWrAlovaFToGj/le5zTtspSUzhKAuP4PXDDL590x6NjkRedRhwHng/9WHeO/AXeyvJ6nv1iukgUIyfmKAUJtgywVbd0tFIjVoIMautxU1+ot/MgEsvEuPMZkh18SwToVEyfPKZkgCj7t+bVdAbdpUYP7WXpyH354jgmrfM8l7cJHp3fvLOu52JjCTTTSKHypQ9uUxfLOxbkyFDeHHrjMVlreJAn4EA7KVuES7vKWy9F2cdHfW42V+6Y83k/AjMEXDNO1FXw4oWPrwqw1rfXwpLLIuSeSIlin7xx35Ui6GsVFJeNRfhw1OuVJsSFU12WfZG4vfCRn97KUEmH8Q9itnMPFNdMR6ycCbWThHM4vGah0a1mpZWBEwP/eQi8tbWTaOSqxmWEEUgoQCjAi3OXND45q7KS43COSwJj4w0XMdDJKL/ndo2Q3Qqv9/bk5hbMfSvbamBwLhQJWWXMO+g7cfE65ZSjEZ9WgDKZcpIv/94pPDy6SQUWRQEa08yan5jAGJzRtSO3UwpCi3BHEdhqxhNt07+OIY4CxAYmWRpBfUuPfA4zpeCerzrB1b5vGOwANR22pYo8FnTOuZVUTwCZtNcghx6EtLus+4cVq9i4ihzMZsobbZJe1j/XcPM22JoEg/OXDCJPIanv0OVYKJu/ICd2xWQ+yp/WCMGopxWehPNqBueK91kBat8qAxaTLqaB7OsI1L99KVlEJU1npW6VXwFzWA5qqZjUs9IRqp0lqlpatw7LAusg9Dw3oSgmHnM/g
4kjZ1y7f/V1065iv+TGYtX2q03iNzGv2m9bnr5Mwqgq2VJajTC/4/IBaWBlzWeyS0zC5Idp/YIJr17azxYp9EM+d0b2n3mNl8Hv0SWww30+RfzDUEIE32OBCyCo0qn+cBJfR0Wtui43nALFHm9bWRnUpBqcXW14an2lIcXI+INOQqJEjW51BH8Rd7sCcP2aMFqzVz+1D5odasUTaqUD6KU+RPGSf+JqqQY3Qp3Pk71f+R9bnyhTuLvuJJ/L1VJkAJZY28VvRvGlJH13PFNAB4k5469Gkn6dcs/xbJTwjUaqOc85XtHD9j84vv3gfKOrF4Ww5Io7JWdtuuMbxPNx7dqLw/WKn53RhGMfuxd4bRNVqAYp1NR9+ozlPnC1QRsCbwWvyfw3nyEdPRQMOqglBlcT0yzVaKfJZfB8zGNKV0smocP+2agwlwl3TWz+M01r6RxIP7FvTwAIlRn0AsHDfsHXa9pYOqTwa3CmX4ODY9OpKlPISwWhtw1G7pUAyCYqB/lZcg1IsFPQIeT4sg67tj4MJyYLCmlxLC9YRucoL1wCQXMMIhfwCBiO2NMujbBheRfVPotSs9ojr1sjiUYtTfSDmDSKPXrFKIEv9HQH9HwAgycWkjFOQRxJB6Gh7Y+5Gg7b1gng3A8tmldXWJ4tCCVVqGcPt6VbvldFHbyvxSMAvhuvVfNYEmHIyLa7rob5iIxyrEEh2PF09+IHgbcFKzBfelgeubDA3MXnKK7rulUJUw4G0DQoJenah8yVN62FtYA4UWZoVhNbL7r1NMWlPHUDCOy8kbqEN/fWMG+xxwZfexOxiXhPftj4gUUTlgSaKUtTiD7g0jLTQfnP3UTy4F4aksBTBe8bKCG8LhSLDcSAgcswdtoAd5Y/lLozpZUaTILHAktYeM/S2akJd0Gyio1IbtNQjqv+rfegeYX0eSSMlzIr1s6C2tPCDNVzJwRGiZPItFSqHFzWF4QZGHaqLoNmTP+2OQw8Qsyv/JIVjX/hE+gYI1aXhmpHZ83Vk3kvHH/xqDlwo1Z0Fy2lD6taPu2sKeAVzjxH3inKtjQUWNVg2AnvOY/KM68u4vorrVQGI5gieIATlfqnAMUqvlSKbeZoJm6EwMRC94jg/P6XH1erN8BxWc54Z6R7cxgpn4t/dxAxbOM75Zp7+AjnAtxbbRCkFUNBWe0q9Tk0zQNecTK7O4muTFlesWFQR/gOKDG9sFJ7XRlcUxtu4GH8txASmcihvu7zg2J/LTKF8d3/GtITcg5zATVfgZsPkDzzn7yQovWAeRaLJ3XU4HVSbC7s/R/9ZcGdVNlPbAPVY2FytcfV/cNbYPMMvvtKZ9Qm7b8JFV1dzKMZVGP5ddEL0Dfr9Gxtu8RkXIYpkI5oAlW0br1wKle308tmFzgnAs3c5G5aHvjH+a7z9wZNor7uWFkbbdz11tY0S2FNWbV3qHiNog0/LHR+7Cj71r5RnY3IbxjxNK1E2W4il8z0PTZYF3j+suuAK5yKwDJddin5vIgy5cE/QMMjWZ7h+Vtgzax2rzDpgggip6zTMf1BA6sZwF+I5p0v9B5RLjGMYLRKtlRisSKccKdAPYSE7ssvujvOD8Cnl1dFWzbrIdWy7WEBMMAJaQc2GZ3FX4IR68mVSBgH5rlJ0axt1fu3XksYHwW6iD3JBly409r9dKItFjkEzQY11tuWzNlG1spRSV8w2K6v32TtXL2vHFUe+NTnymDDAAeE6YCZdX+BbtkA1pWZrk3cGlCFEJDZlfisLDTNw55oafeMGaLKfbAsh4lf8mNQYEnRLR/qacaNwIQtoqfcRdzHYK7FWMgvFzlDR7/bDLNi6xvSwkAh4J/ac+LBE0XYIkAZoyaLjQiFKS3VoU/3gJTcpIwvt9yjrvhQPZShx2fJq+LP8VY2/A6nT8arKgcPRqJbeJpgwIxMfrUUWSmrsJk5NFyfpMJR79jKMmTescsJwXcF4o/PPx4ltmDsCHSpFpExSkaygfEifYcCpjXYW/h259UJSlg41Gl1QnOvuajunQk2tQLYDZ1fA7Wfa6OIdNuEoJ9Ei5ncznqCe+u1RdvPGNjObSDIxqiGWvNaXhFJ2dbREuvGrPMNBBKlQaEmVe101tiIaMq2fF0oY9B8SLgvTkv+yf8TZENmvUG97HcaZfwdfcLyM3RFK0PeNCmmE/46yKOXpGvSLKsD1UjpMQ/L5S8bCXvtSL5L37Iz7kVTVtvkLm3LHkSLwehDySUARe9cigMdA6k+A423xbzZm+NmNrqCgLqL8tcPb8ZFKKTivk39+BA0aVwU8rDYDlErNyo1P5L5nAtpY4FXIjz1On29RJSsRP7CqRKZxJyOABRVovyRTLu9VSWFlQ0vhC0yycvN13l5v9opNBHe4n/JmAR6S+bpuLpW1bGYMsjibXxS4UvygWMLPZgriC3oj4xQqT/G/DDkzewkWbc85VwbqDR2tQWOP2mgMvk10HA7J4BtxGjL31PiJntc2KmWTKNuPWKwIGD3RRE+66uAE3X23ZO5LtK+B71Km/Vkp61v0luuT//8CJrjJP34ok+iUtAUWXHCRPpdZprcF2QJXUSrUwasIo+WPp2ECB94UadAVibyxnrdKQaRRzTyZWlkfJnMtyy6z3Ac9fmeQDfBsE4Ud3rFp4Uv093EjWTgLndJ2rB0O0PPZms78bZZGtDm1ijFm4Xt6rDwB2xCqN7Zrlo+TCSzjpoLv/ytbJP2u6kGe57t/Nlg3Z1AwuM0xDu47XonJhAZ52+dnL+JaAe4/xDy9sjghiUYaUFfkuShQcvVpQ1YojI41v/eBplihE0l3Ro2ad9BtFfQAkhRlmzXXMe6x0tQZkrIry5iu81u+MLU4v4R9HGGa+Q8hQuNaXgn7pNo6wwHd6BiZyq9pOcY509eD/6bfahUVHMbJ047TV3OQOxbXrkjz8hYyzdqQj0ub92/LGTn3ZKvYPOQjoBM1JcDdcZMGaCwLgtlWqbNxs0D/zkJumiIbuMxIbeYThmAgIqRhdZQu0w6t5X8dvTZk1EsK+X3AxLLNNtCcHCezJFWVk8aaR24xIJ1PB49UTnR8FU0vqQS8NKIaoqMw8zXWMhj/InYQOTnjg2QNwckOmCUDFMRwrj1SkzjeIWSYSUX941pDKtimixGljjkJkxSiFNRpZ/Tit2QfDoC4LPgfAnGSZFrKSewBWb+aJunzV7ifPJDGOKeFyscQdjQV5QXwZLESN7MKrh2h1df+QILSOeiKPYvE3RPSby7lGnG0evJMEQZU5svWnZbcicQTcpaUCdvLMsdRWGZcWZNwDb4GHDnMG5qWyYroTvr4niIhV6zCgUHucQdVim6XO+b2O+4vgdiP2BNeSHqSwk18Y2B9PtRyIcIvbFaVQhKpTIrMFqn5axCjbvEANAHKzzQDQ5hA55tJ4ciKwJSiFCJXJfM4GGENcZpzJSlLaIf1GrwNFt0oHKNx7PU1xKoaiXK1W2anNnO8iNxdYle9e8nRfvn25mGQbK4xiQjrhcoiKrQDy2NN10VDxvjFp9zkl10cONdaBpfukfK8Y8AB2AnPXSExgeW
s5iDUb4LsmZVVQKvXZdTZNPnsxMbDvnNaa277qnB3fqcl62tMDjhmL3c6AybY7lzIgFCUv6OSo4R4LNet/tpNrS7ZTZQAm3Bb4uCwCj8ditsIqrVWfKsaUbohZ/t/dMW5JQv/meunw4QmyeEtK+al0CSPH3Rs/RJ3lSZmkdHzg2u7UhvnNq9QI3zL7U6V+Z79s1VtWxxRk/Z0/SY69Xd09dE+EED8at8uU8RqVRTzh2L5e06AfDZW846rx28ZoSlFfrKhiHDGv3Dvwvt7AgYqkMgd5vKIESM0bCgnNzs9sEHz9QeJE4IPIkln+Rfy2Uzd+tbHNrtN469Z2fyy4Q5Mun6Ks9wMnUYArROD8YTefCoJYjgrEtInG22Sv7lm2UWrYauvKBuhVib1/i6Od48ahOu45jOBxyvCWATaD8tpj6nPTwxoHNldOTbypa4o5+r39Q7bJy26cz792zYq4ps40nyY9qRqdI6kSkmMznSaDLvE+Tj8pJZAducKiSuYlK6rAv46/Ipwy3+2PCyAHNfTEA7kDLJSTLHHfUC/a6JsQll7HZH6iHlAo64ff+I1qxBIaCd08WIAJocwrUnnWVYr8Su6flnzjnBaTodtKVuH7VCjd6EzQ1ol9g5bbq9dXVP6MhtTUcMf6RcAKuZ84T9AxmzHewyEvBm7e1F4wEgC04FunY3Xe5wfBJMpsnx5ICHgokEYl8E71GMomMwUykFjBlTRcL5ynq2bTtfJafUfPpfpPRYC5f82W2E+d7LSWHlDKouJBwiuojbnnCzjlcdA7OvZzwbkJil9OkUUB2cNYJxtULUHzEWcVLipumLGvBiK0UFO+e25INOXCVVBLiGz4Dpkf6nv3qVL7VMg5SULJ8ldc7jzcpcWaFRYrIho9czojkV5KEPpDG2LNow7dYw3T4AwdqIkI7KJFzT0jrWfR8gnKOZaMqQvAmyxnekhwPQMXacA2atLck6zPwFlzAwQSvQxyH8mKCmnnnzPBmm9kHo6mXAAqEqO70MUW0+JQ3MtnJeOJBMyTD3ZsNrmGaOMVlhsOQNh97eHcor5LqjAltR6L1Io/mgz9vS1FTkX4PMALzq8nqmrzkz0drxrAZyHsUejWPFiBAM0E1g3Te5Scawy1JEfTpKFe6YsU4fOKktatPgwXa+U5aOPfJZBcKpIhM2e8xBo7UEf3uczfR8zaLzRgYL+cdSCFxM8+vWZojsqeR6DwY9ve9YpjePow9V0PEdenCl5xaOjQtR1HjaNadWQndj+oU6t1oR4P3pDIOxhpWWd3fJRWeteHHLejquBjHFY83QgbJ2+UjUuixLs3dH5mHwdnJ16zF4450JCQ0F/PAlIe7COV5bjjVUq6+gUPuZ7mlu8vcMqFYtirupzf5Zg7S8qH5xcb1ND0HnblUdaJwQlbk0NBBpdl8jtmWtXNntz1bJI/xjKhrsr+U9yxR20HEybFU0R+tp2nc2B9I1CA22polzIolQYqKMloCGkJrGqmuvkTTCxl1sjOKCvuHn20jnhmL9mVaAXZqxzBrIVmBdsia2tiuX9hJCpHOlcEPTe56fPfw81WF1NODApamS85zmpRJrhIgIEQgKn4Xb17elU/cJO5RKc2bICtQ8ejbANZcxghSZHJTta/Dv3s8tjzsdk4Lf8LMJFcYUTC/BbCs/+erduMiND+R7ByZoC0/GC98Qm7FWKpS8n59i0znSzelgH4APc+NFxaWaewQpt1y70Rud14YYOpqC0zLtyEIO7phI01Y/9Ev3zhAyq1XNCE9hXkch9t+TZoCjCpcl6gOTbahXjzawmab0xqSKEtlHPIuEkGkfiy+O/7+wGrU47c3Wfnf2MsBZfSwlJBT/Cg8jRtV28ViB++BHB22MRNhUQqzxE7YBNDg+r2rfwFj+jozrIEwrc57Yu76mhENHkSq5IjEIpvNfECDhud1flqCLng9NF7q6vrbrjZoSNtyLZFvDIMPRl1wfCHBPp99475bGNHwqp/IeYpOUEdrxJK0qWMFgdVHESORVB6BdEGLCSF4HLYZYlHmzVNH2e3RCnGomakhgVI+hFQuzBREcMyNPPk3irt5nYA4/hrC/HW6dGDCf34pRPK7W784a3Asx3MKIyyhiSTCh/BqF0ZIijTJ4zod1v7z3C8CTu5NmVDD1xquBKDQDNo1Oe/11ApgUpM8pbSdARJlt6qxKrV2UyI2+nPX3r2i5s4pzsH0sdOQ00vdr53Q/PBS65WPpBlaXY6to7/GihXniuoKvOQFTtSiT5XA6IZMwhanWe4Nx3ejrtIelMdSgJ/HyvNfFlkOBnFSA36oriIQHsLjXtlOItBGoDoSNySEtk9hbc1RrcMvj1tEQ7G9YJQo0PID+Bk805OI30bs59zlJk5Cys71mAZzDbMmvwlSES8w74j4NXzKhvQ2Kwd+fCuy+MHmFnNF+Q/QjA0eT5GAnbpgAmVrzN+kE10cIk/cRwRSRQtVPve7vHXD6U1QgKpHowbysBAKq95/84IpF6IojWkACMi5YpIgnV4wN6ICwFV+/u671AkcJZxGpcaKLa2u5+Ma+x5wkXTDIoazllr2s8lvpKdu8a+9NOAV5tjhzuIm+h+5mIWu29anvg7/K4KfBT6zd9pwc6hyusC33o1+tL4SV5aTN/MBdmNbwvPoIpWoPANJs6Vmrl9EaA5afHBimJ36/Hi3dAO3i4gOVHB0w7HKnGLWgXf1o9VuxjNn09cEXqqyCIxJOJT9APSe4/tOF6UmmM7GNL2nVYlWAYHknKPmLCQTf49js3kGYgpY8CpqU5A2dVrJUntIVBtrKVpuPNZTAlD9+HJ/YXhdZPV1LBCWiKPqqlvwmiFfHxZtHOBuDWovcSkpzH1DHR45mC8s/uBY7UzhJvAeqpSIdDbn7YNSLEsIalel2jH31MCutrcKrbSRQ7ZgSyy2OJMHEUl83BdoYn6UQQdnRuhdv1U1dnQcHNUuiJ0GyJlvpiqZNvGPvx3fQFtEJjBDjQZry46IL0C9pBQ1XF40S6vDhjEUgonWqgqDnaLjpPB6qm3chI65I5I7bXaKM2p2i0N9RRq8hHHOcLsiNZV662N+qBn//yQK2n2AInvI6APFLegZuBvrC4U9cva+hU2epTu+vVr2N5WHryBBK2L+dwRZU/InEs5LbdJGCWeSZReAZOGkyCOVu3U3LuZseD+sdv3jFtVCBmIDeuMzd2cBOokVg0cZNWc8zxu4tjXzEnO5TsV4jgRHEkDWB8yqE/XpVgiYGb03QlKH712Zxb+pRMEuuQdyjaMuPDxU2u2cPZB+Fejt6ROLdvOB0O3LrJZ5/7vedvhx+BHSAj7E9gPY3asFzB1BvhcL+oHvsARidqH+LkAl2wbg03mkBn3R8ukuLsfItCi3bLcO4KDf0Psd6CfJM6NlAC6nY6DIa3uow0gJfX/y3vANlYi0QCMtAYwbiHz4zZ0Y4JIk9nH+R1jRwQ419bz28AWGsqDWT3qheV+KFutdeOW8orYc+jWbUmj0wdOHAuSPNGXQVGmIbKybz7dCW3YBvo3nItX7IIheB+nwTkTxRE/C
hAuV2Zum9kTBi6X7VOXLT/Gw/hbQnDcC2TJmtH1N+OoeYr91k/AlnHJjoMuY8pAgyS9PyMkBlK+OUvj4VvBOcL921QwSO/P+2aDDT69JO10aNEeNHL7B1w2UHrpQWYNsgLRX9Q6iw3uEDlQUb28bGuVOlVAz9dOJumbrgnAQ87yuaaHSEqnraEKxgWnWqjHWOLzCadSVM9ZIc2Pc5EiUZhnwSBZFvzrEIXpxvqDivYJuAEfx+qJIZa0yQORb+CmJHpk9+2Zoezxf8ytu4TkPwel/w2m1J7Nzw+lWjdd6MHxnAoZEf2XTw9unG319jS82hI3ezEUuOnm6ThlNLuaB/oFh6WCdkUuCN+3ui84rqCdNcZrhqtU1Ocrp0A4EYzz+ZawPiPSv8mDbJZ/AQNwU9qnm/Vvo/8a5ok5Pi7EwitkzTmHA/Nec/EM3wufTFtPx6JZN1lA/EGQBw+1VWvjPOtwPzSwE86LOg7h9qb8nDElf29SdCcOtidCtO8Rra9Nunch2A6QpBfXpkfwvWIrXf2CgUGFuDR8VwudDux8WXoEr8O3W4IeyVNl2RqK2WgYHfVPF/vC/iE5A0zDmmq/YZW2akkNYEPfwaCd/BeJKW9uvjgDPEtOppJuhiBH3fEQSSCJGjmkV1mSDAqRveDMF7ENLwnuAhm4F1n3iQJaaZrKAQboNUp5wy1AsZjHDw+WB0Md2FhyFnOnYcAPbKaDs/2CGJ4clYALWy2AoVQt39WsBJnqI3gdnP2rMnA5Lhx1pRFdNTBVFVIAd9nu3WAGqft9xyo6HYhYNqvyWUailnSLOAHmRAlliLeoq1XeTSXKJWCLcw3tbgZzn6iRssHP+IgR2/zBaEw6N9vn/i1K2rXNvaNPJG7W5rFyGB3LdSzqyvh4HfErL3VC4MgLg7Hel2Fm7UAWCm9K0a7ff8sdWLeI6t/3Bao++FDcUC+BBOdtxhMUAC0mF90KyJhTaqy7bMcbDoQ2s0J6XjI5F5V2mcMbMuIfnYAXr2iBsb+/Nrf2DYWwn4WKcBoEXz1V334lLAQI92AZumJnW0jibCuRxmH+pnD3EsCRP98BilP2WwS0xPT8a8Hs7caXY4oCo7KhewDP45YUtg039TuXFgGlGH0Sv3xAa3fDXwHZ7Q5IO0DsimE9hc2bXbX7g51FiwLxeCIB1znQ+g0KHuhCms17vorW0Fd//+EZNWgUyWUAp3gftRVtsBqS3lOID/zp4Y90dlM1I4Q3wQCMZnGe9LJhhHlIcr7mSn4wCTjYkGIHNcjnaZjaL2Bhd2tRWT0pXcFqGaHAoCNEGKq9H6x3o0p5SQ85h7w6ieday7to8R+x6BO/Gb/TC60RAsIL6Y+O1Qh1fiXVvDs1Xiy7Jm39Lpe/3knX9VoweGXJ5yRWs1qD1HHPXsa+2jL1rSM+JOG4gb6VCsoJItryqWBgu9y6DHdovx4l6VoiC/2SWiLUhsWiwWskWE/eTT28wRq302GgVc4Qm5iOHqXJCsyUrWqLRGCjuhzMkuMXY/naAw5CVlM0CFCCh4OGx2PmbU/9Nykygon5DsMjU+O1Z8In7c7vnioLI7DNVcViht8bVU9DlgGovWeic1VdOZb/VJB3Gp2p03awQzf4qyDSbQxIIgA4s06f0pmxM0fiSS2BozbofyJzODzsYVMYfVIa7HdP8TDKB4b4a5isQ3KOAuA8eTGBxAj1d1bOocoIqc/xKOTpisV8C3SxlpuagZ0B7301wUglKXfMQ+2SdGGk1EYn23f1olzxYRLufIxNv8MnFH//yCts5V6xq/zv18rdOOv/LUkLmNj1VFmHx0g587xKVJ1CbXMkQL/aGaMQwdayEqvM8Yv0DQ3081MNTLNQZJOIvk1bnafoDEoG5LJCL5Rpugpbj2y3+AahsX0GddKo8PI4RRTW9ooisfNVpSufkrw0Wf86D7Bi9m1MGqiN1gFWr2ABPLUahyUSXNM5scebTZcfx4Bdp1RwS4TSW6t6POQGzU4ELO224iw464OoDTsgbZ2o0wZ3giCL5wyBhjI9hRJoudJx0xVW6MmXAvj5v2QXp0ps70aLjTdwWGYD46xc/33fHnrbot4B4ryjhsQaSvqds02zIoje5Wkl51X4/F3rrkWeul63iNKzD6lXSvPdQ0SggYUIh3PPVeyNIWl0ksM/MfwXVtZyVctgGhRHqZ9rMAsDXimRr3HJsN5mhTgFFz8YZ61yiTnrURFA4EWSrUk6vd9439t6nHoE0dY/n5M3TcN4WiVTHudTA6cJmhSZgxHXUnkKaKQgYHEnesL4rQLFeZ4G7T4W2gYw0GMzLPt9H+G+XkePIeYa97Ebhp2ibM8RuXFGoNEirkAMZL/bNqAhaD651uSpWiG4XiqCV46F2htg5Uj03yHOO/ljgi/GYDTz/07N5iUnlU9mZdweUhgrcKcIhHgBQYmk6K5nd2qQFyV9ocJU936b0hSw+83ilQwGikEWyVaUUMUMSewNU3eSLxJ43oG6v+C1lvprg8t3w2mWBCOfMIxM0oCVFgMeihbIfGh9R4JKz38MeusqPp7bbYW8x1CGnkhFKHk0c42CZwRTM9iao8Mj6MQVKgXhrYNzd4joNAAaMHsYFKyfz9yhaa3o1rZt5NIutUizx3STVB02gyXkxrUmpoOC3oJD01VcbRoj8JoSHYa3Cf6Tgvv9u5mleiOZ3m+ZS3ksr9BcB3AJ5hreqbDOiKvxQwZW94s/5JPZVt8gVHwJhKhstc0OO62Snr6RvHpY9fti+2pP2JOt0dOnLR/vIprAGlyC6F3VZCjFbYwdYYS6DrB4P4qwZo4d2SbMRnoQHjTwzdzFaFwtH+STV8XBkuAhUi5HcOOkK7qdoE7Kw5SS63ORggsO+JwmCdIm0lJsSKD3jTHuBi4IlBD/Yv/E6+ASj0+Yw0iWqfWXqE7jep1K98ZUCWVfKNqnpZ2AU2B8I0mCEcq2c5aDGz25IPMY+j+/UmgMqht/72y4W8Qg+y9tggG32AzPTflqtQz1CRFCY2pSktfrJLWir2HZK60pOHOWQ4hiAYP5dN6VhXw8Vj3jaGflctY59pHQqhsb/eOGnS3Loxyq9a5OoqCcdcc7jmIIvgP3Isq/UsXDjDkXAVYRATaPwboXyUW6QCgguz+tKh6G9elIXWt/RWgo/YwT1YMLTP+TDBX311itMYMx1Si7aQINryoMQQ/Bgxr9ZmxjcwFXM7E4VLnx63F6MkALaiaIedq2yxkQ01KrfCdNGLon/IBZgi7tVZVVz1g9JJDUcuQm5Ypaol10nejl5rIDY5DDZYisqrbs+957reqeZzpkgJc+eNNkoepgP9M/EZvsk+sVO0kZ5Wls+GNjbQPFBTKMU+4wsO4+jnops6k6eSPrelVhqTlyKuRqftUsFrbJ5BtOTHwPXTr3xtEyk4PbPDOIOLLyAGzQHs61cTP3LGn+rRBpuLh76TvjbNxAfr6EhKGwwExmP1cAnWe1gzT7RI9Yk1dOY3+AM28w0Go1gFxDhDShvySx+FVKn4YVIlyE/TmZtfyTrhgnE8oo5zfu26OCZ7Z57DmW
6xZGxSZ1XoiGhSCcHMrlWHfqoSyy7cvh7pK2J9+s+2AsDUMGvOBqJUXHB6WoOVqelVBYC1AdYx0r3XP2zfFwc04zJB6w26VodRpAik8TtgWTOrz3WSJDyQR5cLB6J8L/suMdbszNlnV4uqi6nXYx7hlcMpHyHsqdGFYyok50Cvq9u+rWRIrtJOrbqGT93tBTtyQSeITvv/UHREa4zr9VdJoQ/9/VePGE/+Va0d5p3gFkTUBy7mHs77smd4OEQUU5i+ohIVIf8u7dyaSfKWohR0O2sjpCdb1sExRWRLgJNOrQ4Qt9xEYeaoIOfWrnESf98t0dXgUnwVAJAgx/DnHiGZ6F9Y970rDwarwMp2ZHxaleZo9rL/adx8aKEN5wtY2548NcgG+tEzsiSNPyhD3c4YHdnUfHeM72Z1FxXSCdTPf7VRwVZsyKKyZr1Lo/6BQ2hiF35iFPrti/LC3um6r4EHfAVOUnevLqYoq+gzylxaZjhM3ylrsn/q8BwnsTcuuRnbmTseHbtAATDCSHAkgqm01uUzeajnKDkfB3DaEm3M8IB+7cCXpQFW1ZDGgxIrSKZiIYE0m4T1jczVfU0VEuOisrYHPWtp2h9oKtAoDef29skgw5eNMqzw58pkjUcRutEUvOEqBCjv9WFV+awZrjeODTn0OEO5z603II+XvVm08YebHCA8l+EYjT+A2TwZHvK3O2ReRG5ldAks7/j0iE+mCZWUXBA2yZubstkCD8gLyffv+inO3qsMRPMkqt8yqzrTwaud7AQHUJoC0XsmZjbypwP0E2SPGja7B7eeNWkqhepB3UlP3KivjdD2dYAH8G/RJxXs4qApTzzW1Ofx1a7ztlOwGW62+xf3pZnv7j4MwjhCJvzwFRY5M+dUiQKAoiynnYu7hdt4pKNNZCahWW9ZjvKmK5SuySHiA9zeRJH4685eerBkfARO2KT3x+cH+NbLy6BZB5+q1xYJG+zJvYR85uiUck/cqoE4/cCKuuEzxCKrBG8awJeqT/e6aTLzmP9JQGF4L683pAeTqnc6hINW47kpUIi9c4vGd6un9kxEcfJJ7NfcMr79q9vgaZ62c5zttQx7H7poQpPj/nb4suknnKkhAY8wo+Lpv05qepCcnfgWSbqaMPrXcpbs0MiYngk/n/3bcZm7Hr07fnJ7Ozz0z6Fk8MaSAJHoQR9sk9hXA5FwkWXDMvMQxUebnGo5zYc0Sxqb4M/0qnaktqcWkblbEtMhFcVtHXxkfa9APm896ZJRfcXDD89BI+ttwnoxag/+E3M9JSRlZmcz1V85M2ywe4JJKlOClQ8gjDaBl88P01L+BwGMd+PP+6JW4ooevbytRhUIAAROujBSn8UrJ6q+VuU8VMviBK0PQHMibFEEiwRGpBmoLqbIb9avPutmRbS6UFyfHLyX2QSO9pP0K4fUorvuQTi4IEktWeR1YQw93SVOqRmvHrqovsyBimgZQy0/aibhO1XSJPDVmrPXGWQx6Ulp3t1QNPN2HRYQbLHNybYKETe9xM8lhNjJk83BlNz1k0hVf1YK1hKg3tVMEVxicPVgt8K58jN5QVU0LdTp1tS6RD8qLjqRdGdITRY7NX2A05HsdwNbH/plgLUVIFXYIsd0oJ/nsjQM7Civa4rqJ+x5hVxU6xEeM9FrPrn8CAYpp27Wg2CtfpZrusL6xRssaTKldxMJrsVkHPJ+RFPEoTYAeV3XUGkl8ZfZBqWdRM5HI1uUt9wArE15+RrjAFKXf/tUzSXtwl4FmCMXFL3Ji4dkLamBkkmIPW/ncWb+OyJpQANq/1YuDGiQcC+xbf617dd3oEeVBYcVgO87sByeup5EF9OwhekcENVurBK8HkBcJ+GZVf/PqyJ6b3sTyWtQpbbSmWtzMxAzPaSCm3IZrqwBUDB7rIjGNJWtHThO6Y/yhQWr7xKPKCKDLU+VjrWPrQoB/WJ18aRZN667BdU/ySR0dJq3yeU+EEyM05xHNIXUvGv6XXJixg81BIeDGSj7piOhtmGD9n27KDnJVQjBum02h77Mm0R1q9D2uJvm2t827zZMcCPN/0sA1UJzlPQdz73c/JYExGO7rJd+yOKSmIo/keusRuLm46hx0HXxp1zttAn7N/8S7gmU8mBoAKHfHVvyRfBkz858LbYRaHxY9VoNOdsMg3cJUTSFjMKSAvrLtADz2Qq9JnW4yOFGh40Xa8V9eK8tTUR2JyVbCBQuZHA4Gcq9FPaHcQd6LS4+OvMrf7SqhHrVokOFUuv68ezyRQ+HqWPUgpbcKZxaURbs26ObYPnaiXCGFbY1F25u/VstDcydUXxdS4yBZH6jyy1v0TyqPijQjlEB1OgahYQZh++bgyjyfSeIH8+t/PvQz9mNPSaz3JXdJS5EIOBTq8SeBr9SKCM/zz/x6MeQErY2uIiockH8LqXNYnabFzVBku3uoPlqLTSfHoESff2r1rkCNyI80Bash5cnzPoTJ55bxc7+iIgJRui8BfueQayxTH2KtZTlMLerdPk1K+KmrtI2iW//jeaa3BDVNKe8XC9HlXwFDHruyhZ9TrMtoQ0TfI+1FR3jV2fOEucG/WjFSWFTFxbui8gu1WrBiXXNf3dwXnTqgU2fFwR8dvdEHkZmJ+ewWREa8Sbf45EClJf1D2tR4M3ICX72BSTu6/C6sXBZiZLdCJHo1Z46pZC4ySQj/7YsDfsyzwuv4RLkHKv8sZuVCS2yDei5nTInttw7s7cmbSt0KIcdfrLBbfjqnxokqjPJsrqBD28Ka7SJ9rV0ZAsZYIt3v56P74MHtTmdn6SwTgP1kOL3v/sEY28pko4Q6ZUX1YPbQSe3XcWsMwOiX0jMZhyEuWL+GPv05X9ApnLdP/mzzAyynVWh/vfNWNfMpHD6H1WbftPxXBPdt06F2kf6yBDMDaS7dVz4egP3QakbsE9ED+uxPPezs7yD7iyDQi/Wd5dBVapT52qzITB3IlHbEU84Fdn9U0VCSaWIv1FPykWrYa5EJzGka1XAtOgfCLntqjFTYvCItFTHVwYO7sMCws+kkIY4+dl8IalMrXvhCxPEzgJQgekSQqBsCt8DyrIBZP8Q7tV0wyd+je8XLktuFmSqu7/0+TUBlZ8AKk+5+q2AA/SdOYvFAHNNZynaCw1yLT1mok/2AcTeWnAXGOmRxbWZ4kMaB9aaE9yiA8kSV1nUfTIWn8EaoLxr2D8axnlVuj+EawTtTjE/mX1lcJaVCkqffVOp6Avb+uQH2wTGT2LUV/v+r/+PBvjMwrh6YrxKRWUv1hQyHQOWp4eVwwVH+7KtJbg8zL0rdjT7LSEsqNhm4WJnm3Ae/Nz8DDiyVjlbLB41AxKUJpnrR7cpbLYr79L6MLI6uPvxpfSjRKJvOpfJrB+I3NS7+ovrAs82KE03d5DVuakgv0nJLih+R3k0/zTgTItUt2Jjoyq7AN95d573hJChiiFQKPU/vVWZuBz6UAO++knglXh0N7Nur1thQSMRtt+kClxn6EJYbfysG5bPvByhI+EFkPJRX+7oDrykvXEsoKQbRdtZ9Ru7/3fzAIyRkmyWU9gRO
dJmBjTlctqwjuI7hs3tMNScnMpfGCnh9hzslvdLJhKWZvNEhQf0mFKTSrtBAPvdcNTZ1m2d6l1V9VGDTJFK3qOuv4Bad3A3UQHurIbFDNYMq2noWukp0Y+NVGeuJBHHM8f43iadZSilg+3dAGlLQH9yRszuVhmSbrzk9o+p3+xhho+W84Qx4k5sI21Web0CztxNJSSc9Ix7V9ekQ1BLSpZrMGoqyD3KMrsyiBWYxBngnTrf0Ol0P0Dtta/QDMa+qgMHMFhTHv3MAXf3njpHkkFDpo+y98Y/Ku9ndcBV9tbHYnhqHnDs23XksN90VjL13T8SD7yxYvpJrmbcW9rketzQdqAYORALk6oPGJ7ucIgvIqPzQuiE+PxZboGtQlPPixYT6gIDaKjWbj3v/RcjnhFilJNFWxk2te5QplGqDNyeDWD457GJoegdiufUW3w25so+aRwXxsYdXFjKgtdHYc2l5Y0Ww4rMEuWEVKalUvInlDlgyAoymIxp7Q9AkIEF8JYbXq7k3wg6CYs+Ahg/pcuG6O1r8ZCoVbTxnq2AjSKnsyJzTvw81TXJ6YZE6LrXsHGB3jEklGBCtx69X3vP9xoKn7gx95N0+O9PbYC2+Oc309LRR4ymf3I1bVB8NHxYBgLZQyNiKuEli0WDRf3V8G9jlCikTtljwTsnS3ge/85lh0xJBW4qIdPjlIMfec4F0yv2Yyiwl+ThXtZQlwf1XBDaKvXXXnulUAJSH6BE2OW48ZJV19PNYZsk6i3bpBPkb0vMlU6O01GwKET89qQ5Tipq+nqMA7yWJGEy3aAqiy0hyg2lV7H8UKIpcDdwuXcdXx5N1Lqccd1MulhPdhwUqAYWzKAd2KFP6Qq8/8gG7n+k2wNCLQoCANBs27Zt2+5m27Zt27Zt42fb9bJtzCJmIQca9tpt4UNLv88LLW7Rmi4nHGkvW062Ne+Eeynccr4cQ5vh6z8DNncjvijOtQ6VD79l7IxCWhPY3hTfzRxy1o98jRr5AjV10YNAjZ0VQlycx7Ni29O8leACAaS5waqL+GU0fihFppjkwayA3W/DaQFhGrd4JsSKTFlJXCBrnIYerIXy5kERhJAjhOhU4nm6H2UxVDhdEhD0Mn2IR806vFLi6+pn5tjxbw1404w8AYkUJTC5uO8Tomzm9RGnwMbuCpssWFj2gibP/bbBKkJrADs3JzMGg3nl0gbi875AWhow6EuhYHb4AOcJkKGGxLzsuQSBccTUnVKxUlwrroLtEjj3jrsJU9BQhVop3GZxVgg/JPqikgAFpBkR1aGdf3Rifcgkj+ua5mjAmNS0dd92Q1wf5Guk0FmsalBvehty+9DuhAHK8djAKJt7mm4ubLPmwe/v0oJdWfZdraBaFEZUYa57jdjLgj7XinToC/WZR5rcX4NN3PXAQsWjaxNdwvhX32nKeftwSBTYRC1MuZ42SjYGMiqon+zIANr7HWvId+LIjWFpFBUoi+wXneChuiRm37uJmlaaYs6ymApjM1jpeKvg8qslatkaXhsJTHCa0o4cNli1xxz6bsaf5BCKtkfv/cUySEf2eRX2eKXbyc1l5/D1nIExrzvgZ2CmYxJ6finIvxLBMg8gA7WRspGUU/ViQBjh8DVqW4eT/4fQyFj+kZuzEVwJT4XX5f+sXjwQhvaixEQLqfXzQY3f86SpVYFasEm4h35mhFF1tHqTYJYZYF3Iud6QhrmvgSVCPAT61+KXLU1XIaUhwnstv6GE5VB64M/e650cVMU68ezSojKOPQYYJb2M8oo2K53k2HCvBwE2U1k3JaJwMQwf2wVb2e6hQqCTdDktTeQ5L2ivwhSiIP1HHamLa8TkXzaqbaeMb6PF04J5aDVz7L62yLMKxeDR5z46XHn+cKRR+b3QhMtkoqvvyXgz/5EDmxYcj2olcNDgMvamJhsZ9pHajtryq176loHwpxOLU02MEFJm7P18QBL0Ag7X+FjXx138ECOqG4mXa8HuO0mbkoX4lHgEu6U2ikr5G6T9trcCkTx2BMFHnuMjW73p3lUBpZDsDDW+6CzYY2VAS9y5ofrJqff3SSVfpVaHIDYwnDSBbuhdOz+whZWiUiU1QZimgnaOnrWj9qEqD6ZIYBWuhQqpweRmHzgct/kDozWt65oWQp9aB0QNvprl9+LYwP6bIVPIsdw5zRgU1rIRFfJYJw1Ip5bqI4NUTxtcBUGzUJp4DtHEAHSd4J32MZrh/dkbyeKbsc+PFI8tbalCNuL1CUqZAUiH1ZW4MSYuFUVE4Y0tc6nApiB4ZFre0zVyp871qJ2JY3B4N0Lb/8XWJs93EqPiKXDICCkiZnTa6TCxDzWm56fxV3DIYsVYdcwKVW2e+TwO7ECRIaVUjYcpkx4xipuWwx2sH9K/j75MfXaD2rwvwHMNtHsQRb0Q8bEfr/1jrNr010nykTk3wj7njQahsQvy+csCM4XiRs/hpclVZwR/msklzYUPJs0Vad2+njbvjnFGnpmDMva81KYY0q+d25pnS4GDfGDh/Gw/QPBxd8ZxWVNAXd3+dA/w3wZ4PjUzwOZo+S4G0YPytY0OweIiQKFy7t05DEYtHd6ibdHD9UidunZsr4jkk1zdk/N3P9+Zdzqm1eq5HfICk/fKAhZhjbYc7az4PHg9O5QkY0K1x4p8g0ciFe3g1ZV78e4DgFlp/Rjfwu8KHmdxUXxBaRiB0SzLUwWQuDdDrVlzavLd7otrMFvJwG9ye8lZTEeLhbkau3zS7Dy8EwCSt940HStA2ZW2EnSNITWxKgbf34zV1ENw8zYKeS0xpO3f+8TDXZoqsNdQz8/n3Cjx9BzJnCrRD6lYkwB9JUeDpTSu2reAXQlXzdAFPsCWs8SbRewFtQMlWATwRvnSq3Bupkioe2qErlbjrzbEZ7raAEFemjbIa6inCuW5uTR2YpnxBMgNLpJNm8gCPXWKGpEFGAxs1mfku9zgbdoOcLm9hgOgYhDkY7x3Qt9Mg2CLMbRfpNei2zwDs3El9rsyuPqD3BBmTIdQAXSFEm9h7Q6uihFZ5r+lEjlMagSD+AL+5j5ktkdYN07XJQPvXRc+KeRpX6t+2bzgvrzMczKXTCCF8xjvSjrrokiDJEEwr52nd4mUqGtVekX6z9T/2mSpQ8e4rrugT0/GHcxiRG/DzhGvjXCY5kAjDSNdh9X384tXrgRHy3wJQaDWi+FUwhNOZC9j8cKUeZW4sUsE3OyFVLPOyOn1BVWRJAf+CYp2j3d6mqlTshf4xwnwz16yv4JufKEP9pkMxvIcL/3GtovK1DM42RlQ9707iv+RdPnSWG7AcQeObL0XB6w2aaC98T91Aro5GJJY+gh4c9uv75oSKB4bNYEFhCObq2BZETcGcQHCtur8Ak4AGcf06OcpggC5bM8doyzAsuHXPBpzA7emjO2MmSLK0qJNhwiaGFtZXZWvkr8yJ37oLS3eIjuH+YQkBvBJfCqCiH2gxpywbnBN2xyTBBHd37Z+uQD8vYtDp1aMb4VUsOV81kjhxCiVUOrEdNt9DB9rIq5u+k8/NglMfsmpHV55t/QWPxcD7Neq
AkCpMij2eXqO5totajK/J0RDMM8L08ixoRVGwCs6sivhy1YjdrEG0NbxjMbQvCGjhQtUkJF0VOU3eZ5cwVzIzt3cvO5drPBOUnfJdSD7nqZI968Althudp5lwjE3bTfOFW/TpET+MBSTWWAE3aEmJTG+jslt1v69tjD/bgSqE5nCQyfEyfXsN/k2HkJ0Weuw4dJcnXiGjfJssy1J6dvHfjvpemBos6dXR5LJScbx44f281Zk+5IsU7iMeLwoUl9w+YtYBQjpZLou3lmEfpwkFxh9w2cb7GZWqCzm6He9qiD46cuSUq7QvQQ0/Vi+z82iHMBxti9vMyT8IEl0CcJYoPO1fvEA5h8UqCZvekF6Ob8j4FMz3UaVNKA9DYRduQibGvEpOUryzgN0p5qfJNZX4L0KqYGhXJE9ppAJwJU6RKVqbPP0OhVw2FwT/HU2fFsM+rR7AMJyCT8aS2US1x3odvDxS4aS08gRpCXp1oW5JQgbLbhI/HlrFTyTlT+MJLcQdCTBNMPeBL6J0qzXNzmbXOHYiecCEhJ5ySUFdWR13TYv7w6Pq4YMXckd1kD0q/1iXUWXWMU2wDJ58DX/ysf87yfuECqvKgvvGdmBvOxA8SEE9R7r8Yal6ahLkoSimRo6lUWRykNeq1KYlf3nVqYPWxV1Qtj8oMYvRHCF6F+XcWgxYuivHNakzIwErTlJ9xDjjH3T9nO0Ej1UD9/QlmisRuARCpwSylCVc3R1e69eYow9Tcj2NvgP9oo3FXGEC8KRkdpA9o6YPsRN7V+aO5yOPQuJeelyiCBZYpc5c6hvCKUX1wtQtE0ATluKIgGDUI++XVTK6FeyssA6WmhgMgLO5mJp7gKf7XHEOr8L4jDhGgpkYwB0duxLfC/AL2j/mWFfql+KpeUrJGe2vBlZtAR5Ql6NRlhxmQxGXanZPGYkujs/IZO4adb22aEca/9MDe7Gnm3AJptXe9q6L9DqKBD1aBKLxyR8ThPa7X3+hHz1xu7KNfRc4WNxBU5+j6isrqCFZvcF2S7x2giA8E9zlBopjf6aaiC3XomHtp0c8dX+FH0DDK9ewQ3Z8Lf+7Yl3l/CyrobURjOpll7D8RXK8TW5XWvBInJUz08mWi1pG7K5oGXY21d9JDiFJIwHU/9umeny5b52ZJw2GooKI6+7b0DHedXhbkZvLEHAuZWzK9iOOBhmsftQoTeZUVRxvg6cqLY4d+CxB7I39Ml+kvw/PZukEe1DHK3iDkqyohgVYZLhLiBhJaibxZwczvj1tmMzl/x6DlkZzh6MCmyjhuGyKc/lJZwv55qs6vXNE9IbDdhvOuEei9Bs9jp5CjxiUjFoZHGX/EKCX7C2lPylg8OZTAyO0FmbAAO5QJfSemCTD5KnJ26blgYTud2xC30aCRe7nP5bV1cO2Y/DJWdFOFEVuvlP8Eo/yJiLj9c3sAAzjBwLvRSKzNol+AEbe/VY3ozvBxmPvDDK1Mss+dwNmGoasgzis9tD82U4PIYkzEiRhrCKO+IchIMob2WLdS4a/lO2TiWwkGJ8xtx2x7fwlOHT/zJCbUIl1EXOL7gSyebaBQlR9pUB6+fgIgn5SrnScplEoT9y63006crhUpYRpM5maR9U3t2vTtUPcU5Ic+MesL+/P0cd+p6WZBZB1Coy0mcLO31D2RUX1OeaIr6ksZ+tzPd8MT/hdzJk66Y1PBwHpUqaEKytgsdkYPeP09UGNJ3vLsJaOBI1qPa3sasC0e0iQW1Ty6T3ggt23kXmV/tJYa1xCI2nuAu9Rp3+m8P3PGe4AUZFmoHG8P8dYkkjq20JUJQIDTLFgVDg0Osh/NyiwnRW0KDk/GLYwkt+7hUjPsNilBq6m30mx4QRNqYXbKMoCPnYU0q5FFhlE+jpfPxnH5aWWUSuEbzQk2sBP3VWfVD9pXupgWBUcpLC3yXvunN/piqrEUHbm8Nyilv0QEbssRCMlRWAUhGpDQFoqT97bPd8NTk7So6ujTd+yczpcFxbW7sSQO2VsFYHZx9It661H19Tk2KzUb/TGOsiZqXI9kmyGke7GActbjO7G7VB+sj3rdDzFKBCL86JWa12/k40GX7vQ7zO2GLaWSprsb71TJXgeQOjTMPOnPRCcXgtFloay1xaOLl/jQ1pxoTDihVsjaTTmy4mU8vaSu/j/R4kMGFzqaeYCloVb0stTdhdMimExC3sJgTrkRLXSVUikOfwOYTg+2ENc0fbH0tuRfO8eF3FEvMyLE71XrjwZD8sWUpP1Ez/OyXESenfRZk7N8xU/ixVXsGmgKUGFPz9QEwNapNBhPW5WOgg6DdoOPM3MywFh9B/yZ03NmRE+dyg2OwyLllrnPM2IUu4p37oW8UAbf3m9mP9M937x2yiPVwd5iepCVmnS4UqN9n3/gFaRG5DpqvoSjSjAh6yQbQqe5pxjJo+zm/y4dB30VPMUYMhzYdupMX/jm81CfFr6E58nt47WhVxELb78QTwq9hn5CLRopGOg0pY40uV0f3+XtIGvNhUoorLMg81kmgiF52SWV1VwofAjqWeVUTH7AeESE0pZp7APaTZMtro/meRjik8z0XzR0coyfTWqI7LI/qha8/sLGdNMwID4u/3G8jKcz9uGR5pxba0eS1tCQdoGM2wUdoIf2NNoe/ccbxpQcDMWszhyzBx9nnN3PN4iVZAtfbsPayTY4yWwI1gVTeQD+ZqBsazFFXuD5g1NP6nvwhwpKoOkJNU7OKm1DUZd/cukKDS0SRT4XJ3DJ292EK84ug0C6I57oOMElDSvnNAG5iEwxU50y+DxkCvLkbCDL3cqQ0Nf17NB8uyYtTaBZpDQr+pJiBF84oa98ZLKVFV+v6MCk9N4yTROyn6l5mUedFSfnfIalySSIdUwVikBVRyE7z8iJOxmmjcPZIQ7iGbv+DpybxSP0Nbap70beBAhJCaAN49mymmdx0yFvY1rfvctK6LHp8rjPAnaj7uv1IYHi5XWfClTcMa/KxUbojMLYxHSlWBJ1i8WFwUiJhPOyW7emrFLvkfg68PJnIZGOjiWbCuX9EcbfzYmtFULwDZaZyLYtGoST/gXdlGLfZ5h4HpIox9EYio6F9XrC8lC27G9SL8hc3jcfm758GTPzqggK/LyZsroCuKys5s0Uk2qAV4+aWLI23xPx8sOIUuVYEE+uQbifLWaMAvSW3DOeJUkbwvN+ERBYTRKEZFxqrqRPK+njJ0HAkIOHINnDREQeR1PlldKXYt1ckCFx2K2DQVx9MqTzqlVx4eP0ivAuNhxerpXvnB3JeQa3EgHhfBk41zSXP3/XG9WTPbGQBZHTng14oNm9/xFYFDSCLjz08zWKkVmZlTprr8DyOhXdUeVELIr4SoIcJ6IsMQLMp1bYoRJXcVPXDjLQAGfx2s6zv1MaXTXaVJOhSKTXSTHtHOwleXqeaG8hDBCd9cIB/kJhA4Jz/ysFSWgMi+Rvo3o4GpX1+S/BCYkJlGheTiHAq75dFEI5cIZ/FuLqRpN3S1jPo4fmjJVk3z/qWeUCbv1m6Ax17E/i13v6HdWELO2/2WHtGIr0kKtv0
To7vQmfZS+B2VDjVrdveOs5RttmwBVkdaPQXWttrG3xjKbwWuqbG0Unp0ZoR53mPXQLdgNUz1+KLzw2Jzohr60Fiq3mxGGn72l34+LPTr5OmUyHNhbSCWQhcCyQv0ppR55gGfFsh9VnmFMy2njWLuKG9kQZdvuyY1Zd/NXLwta8wZCsboip7NQST2l2Amk7nwSZZAwkgBiggCQQP43JiYtAu7cFUKNH3PkyOUGbCsdYBv+pmbsNE86suWHLwrnv0btanInmhXDGcHAuWtN9iBqNnjyBOE0z0egc+WuCJc49UTSak1EQ0ZCLWcWJYQIpVvx6RtzVT+ETlb393+h2xyzS2NGE63fSsfsoMOx5LPPQA/dxd2S40OGg9Un108Doq9pGyJH0IS+aNTfUHQg6rD1Q5Beod2VYLbfB5ph4jd173Rjy+9b69Vc7ngqU8+pktbzokIGqt8tGys+AK1qRqOuaRE4N3H8Ax2Pmw90qJ/VybRBrZBn4e/rDe6Xs5fhdoLm9Sap1lXmgZP1aGNywzJ03e3vFnE0z527YYTzQzdeF6XSThrVOqB+FzRFP1VEbzGKQin5qKIKxumYDJAw6tI+uItYMtQH04+njROPl+jvutqWSk1e2SQU5/Xnr1AeXX5G+2zHOXLdDI5EpeTS85nleN4W0dWsbpV8ojOQlddGUQN0gGRsJSL1fTarD+PzDx6cgjJo4P8QCx3hyTkWsTI3jGrYTOmZ5YLjQkH6EFLAM7E9ifC9IBi1f+e+jMANoQ5B2KahnEyY0g/fihMCy0zYoJh7W8U2OhnuixwQV1cApK5v9KMuse+bdRkWIXnl3mh6831ahhh6YHnik7CXtwoGhNcfN63tdQiloDfjaxqezsRfsNH/Yz+GKtFzcL39SgRic+m+LC9Rt9FBheljq7+fETIGyYDAUgLPBQAmLZtXjntZdhtFtgyH/r9Y6Pr+bLHENgaXQ7dpgMb2K+WvwnX/gLXuFKlcmQWaXf+m7oHMkV76k1v5s1+vG9KjuKdkYzCfJGRPnT9Z9EeKAEdiOWmaaFb0BGM2zOsrBuS7kJMt/wN7XT/g0JvHtm+1tYF2hB2fvoDAWuPv+mdCmDnAK+5A2LsTV1kQ68jKHZ/5A4ocoFiu7r1Zl7GkcNj8+OW27S4JR8pJ1J3DbMDe6MD6/0kD+hwrAkJoxisG8InF0ai5P+yV0P2LKQhRBRL3jSoCM7ROsu3QSmvmOGvkAzmY7yVX86QHcTvADe8AHY31DPaBo/6SfIs123LsnIEdmmYCiJ9Nak1+FvoStD3Nn2xTDN+6Ywp7AmjJQmS/i+af10iXJNE+4XChiikpvhGOWHkWO4IYWYJydkk4N+8Kjg12sswak30SkwblXlSqhN60tKTX9BbLkD5hPGwuzQ0eMSCDTq3Ih529mY4wbKoWIJFJw3vA905u8ddYLP/cNtI9YNQFXIimi83rMlGl0zummu5WHbYM+6f3PLcPGzctwA0tIC5yV7kdr2EVF6uLu3P52tYin0N8DAfEkz8AVGJ0GyVxX9PA2cbDCbfWko7cmS89120OIL7CpZ1dPp+C3YRrMNY96wDGDOcmkBDGxFYOqcGsCJqdXkfEEGD22xUM9XI2PJP+SHeNeKmh5/kCg4ZvTjGdzXqUzIBE0NvTIKr6o0VVx8V04eZQQ2WgaWUlqok4yDJgQ2jUDCkVNUBglNRMay25n0fgFcLkvuDZsisjE2FNAPqpgDRp3Zh7NAYCvj3GY1CbmnGJFAN4iQlJ3JnQuATm/1KK8u2/Gbp6kaRhNyEKcmGc8GXERw7oquz63sH0u+rj9URZl7PmgJX1ZUd7z+mdtXdo9Q4WFGCYyxqpyMM5c4OJUTFLlVmxUuFeQlMigCf4vAYz8FQVZFOHdnbQz4el+1uo+tbnMl9ORBlfOp/gxEv4oOlF7TM0/i+098FzvxBWFhgDh3KND2fVxLLKhtbOTz9QQVcRh+T6XyavBNEH5WDK7LrKa5DwNA2h8tNHRTPtv8YtSN0ZJay/eHbCrHQDik0btr+nRThv+azlT6jxRpXv+FQhcHIRIKKa4N0IW7560pUkq4iCPxD3bss1pJoNFAOX06+2FTPE5oXHSUPxTZOTsqehAB8i4vj24nWYz8vxZfLoQ9OX9p2qNERf7foEOJtk0kBn2yVzrihrj79Kwo/Hq7z4b36h21B+zngFsuYU7lNlhHLYYPkPn+//SoxXyGZ02aDSZBm8ZsiGjzcPnmbjgw2DXA/H9YU0pH4XOcb7WvBdf+I4mx1o2B1XdBHe0U8nVIqFkjhPpZ+c8o8F1kJrKA3SNCXgxBV8+3yMhpFTz0lhCf2BXfDmH1Lpk7ydc0N0vouvDfwK79hFbgSXRDOyGsNyjEjkShk78Lj0RcWWFh1+GiJQKUbGrpiKgZQFvlZKO5qSnPRCoy8zTfEsmcTrBi2TSE1xw2VpwRWT6044ohpRvOpWEuXgNU/lnoUxhBjyjcvfEciogkquGPWPLzSuK3vaTfei4i7h5A8Zi7TUlXxt9L5FsOa0Xr94VyUamGizGIFJ12qFG2fJHN2is7lAKDed+ehbfhLRB/Hy79aDLDCGkxGnZuUxhkmsKHtXU/OGniS3dDCYofJaTcfrZayKzzGs6LWIZKE+H8SyVuTukF3H4G2Oicb/P4zPR6XLGHrbAcL7HxLY861axwS67tWKga1BtG0wuJWFA2A5o18w6A7qv5Ysoyd+y2p+Dgz87xRPtTK0ybkMTeGX6qhRWlt0CYA430U/Yv5aJxcPf3PlvlJia1fqK10leSRGxP1Owz2iuMjNvRorINQsdzoltooo0VvQZEoL+/1ebNt0rMCpTqzjCsq6ILwnmzaKoDJHw77YAr14q/FvQbIrYT3ApY/dcQYb0MyXrhFTM3Bu+Dyg+arRjIlvdvk92hQIF4RKPCuSqLG5+6EQ71WDvp6/z9h14baR7r8AFG4qT2SN41zwxuwbSMt2TVJ7XNc/7WlT83AqUUYD8AnRRBsmCi8Oo15U/vhj+kJftWyg/3qPzMtFiL+mSODD7Owt/ztss2JM4a8eIsJQ7fmLSsulfMGCoIlj+Fl84zqlHP9WsrmaVqKuObL0Bw9StXcMDpQfdeSrgxqFNYYaD+aWQm5wK5ousPUolRfWFGBSK9VP07shzvD9cesyQVvQSJDd0dv1BMiUDmo7uqqG1COuLL2E1TDMZ+qZb8kmTq1MGLU5LdNc5LYq/Nzr8LEZa37BfSqmQr20zwetz3Z9PzUL+xxERxBQYknF/ZxcTiLgqowmE7eSY3Y0EHYNLoKdNg5Lm98SlsM91hRHiDYulEfpG4FVLoXKQSz+pDF8uhrtqVBOPdd9pNOkix+qjYSaTvfRT13LeVMq3QUymeSRhC1FWIwiZudfoe09IQgGWpQX0BNRhU5jhF3LuKiJ5TMbRRnFpDtsc+Ke+rWB94UlqHKps1rlENzKZvZo0VkzassUFNp3pYDhbjWhic1uc0NshXQBj0cZwGCPIrqeLzDPL1wve9uTYzXSR82xbIy3Z3POCeokPO6dzhzA/fUw+nkq6w4EP
6xi1aAdKiU5UMskMjagdHqUhUs5ToME39J+vaqiYwIkRex+XMRJGlEIThCj28iTdiEYCPGtqY1IfNCloyC4BHPgYc6cPegFWNrEMDFkfMtmJ5Zhyj4/Fs8CwSAjHoIk6W6ynkbmeaXxu9WpjnzpkIJr3K552IBHu9QACAM1j/rX6Rwbl37VR12W2PixOO3S/QPGXmbTo6BsEE7KryzOsvH8BD9WG82cbjKfBf7yADNkCSqbsl4g7OWA9nKA34g4hg5hgoiDuTBvmKvV/VGBQ6j3j5hYhNCqN2pLlIoDGWQeMLSDbLctJRwXWB7XJVLo9XtrhILFTce0MtqPNr8ECUdUbWSZU2gVRjSxOSFrIYXMF3hxmBl5oBcPB/sZbUypu80N75BmrLX3uz+reqBS8Z1aL9lkwrPugyRDI8yTcDJU4EjJLtqv7mZ5NmriawDwuJBuuy4QxB9ub1VCfHcenyT14/F/1Yg0HtSUw0E4lFn8Cz6k2HOSvLtnnRBMOwQhm7NtmL65K8+oYR5qCiwBEKNymUhoLrW8XHMQLzjlNH0J3lQdfwR+AU9XQt53ohJ2dSrjqQdswyFA+zVyyNDcreWHRcQcdCIQvzB6g6D+ms6Yc4ZDguvD0h4l7JP/oejCVotR6bF/4Ac8q6+8Yh3LNpav6DAa680TB7Lhhws544AXvs4HhchLbrvTU8c7/ALfTU6xGOtHIt0ziyAJuPIlYUP8VqCaFytzsjejJKRikL6OjTTuwumUJYis+Xcsf5KNU+ZHaRqVyO7qLzm4ptORm4vVJamQjU/OmZjkD2ixt4s1ZmC4Ltvj8zhI++bPKVHe7Bs6wuOb7SPQJjqHW2KKHsZUFdcceSPimoVb8YA5wi0YnMF+Eokx9ybA4lVbwaMztHONdOKaCebiLkUnZs/+myOW5F/zLO23DZRrlLRhCd0cKXpfFTDDciXtgv6qQlO52YPcl8ZgVFN1rVj3HFyfRDFnfFs2uHZG7jb6VWIV6V8Qy/f6rTaRbEXAxGlpxCHk6blzo0LCJRliEuVDZKEn1IYWxNN9W430D5yw9IW9FGHjb1wKg64uMx68KETEGp8VGYn1x0FBxsqxG3SGNhhtYpLOcAWObDYnPwmo5q0PreTUM4DVoyzEJwfifKPJCJpDjYACuSZcK5PvcNsAorKweZ2FPJv+kV1ZdFzEoU+HFi0FSk1GevHThZnZJSyyVeeZYOeu6a+cmckStnixukdwsjhPha7t9nzJ/Qx4Hpf/oXPtCm3TXsy40qPqXd37jFabLS6/iizo6guY+QkVW5yQ+cihKZR93vZt+tToqNGz3AE0+9OVIqRkXETnGiCqVv98ihak1av+AmHmy954p2zjf1r6Md323A2oSKoBNznHsIBcSagSnjc1HaNwVCg/m3WyJt3wKjtvOoN829UIebwicpHwWHdTTLsLEAdqsOY1Yv1iteQUZR4WxAmgWdI7BLHdumRrb3bGUPh2jljF2k06BEITlcCxBHhUlXkpaPq3+NLbdoLI+44b+DAy3Bb5Kd1/qLbN3+Jd7cltwBH2J36l0L6wTt6XcRUOZA3fNGs/aZrmCjKoaBSfr/g2ZfLidWa/7r1z15JNtO8XEYst70daTjWqlOqP0ldRYEwzR1vpJi+56N3oP7bt1zxLl6YnMPHXHAAYenLDJwnUIBjjWdBG5IdgXBUECW5rrQJjZeDjysZkzMUMu6Tne52UenrshDDMzfkt+i+ygXbjWabXSPFpJWx8SkPDwnyrB9mvNWaekLtFhYzFt9/hQIiinJWjgbCe0sXkFCKrzYCFiSmq9SGMaXxh1HeIOmEeg9sePAtcrQEp95SmiHkDwcWAxqCbetXpsAq8Ls6WyZW+dcF65FqWa6ACW15MSifd4PkNwwbpgirNVpfOhGwND0i2+ejCZ3AmyBGRzLUip15QweOEMaKSFrK8fe0wS6K+9zwMnDDVpmyBP5f4fW/R8WyNKSzAY6ipGGxrcFZiCrr/mDyLIX4w7Tg5SprZKNidSKHZ+hUmsgQm93gc9qk+XUaZFQVFhMTdthEENFYKt7FziursUB1lz+Iu+z75dxNXq2PiNzOgudcB9Q65HYeuM8+oZ0yUevn8TtbauxnhW3nqfOLhGYQx7+iPkrjI7XPF6JjmYl7btbqpodilTiyA1KLJl1WRHLANA/IUXmQ7hDt8/ry0+7ReUXrJLAzlZjM/T7TJWT17BUNPz19/eDYVP5htKm7YEHdH8wr7kF6zUlXdtIRk3bQ8y2geNiHS7Lsac8EOdQP+Y5gEykKJB1CMX3oATsLPfb9hRhzj3gH/rXaGF+Oq/UB1dsB069BhDyRLc+YoKXQE7O7ldn7NynO9BDfQdY4bckFrZgL8tsbJFKgSu/RqYpyUzY3CaR7+xPeBtfzTzXvFiWEWArETIGilmJrO56V56MQougzxuFljnNBiXbjENwYN5zJq5TG1aJC3qDkXRG409nTnWXvfJowsGaQv7Lz1G24Ux6pcYQ331JEYuAm76oeCvzXo4EZwRYgZF5ZbjJdusRhnAHsW0yPKe8uXUDgfNEmNTlhOhmgI3Cfh8asTB94liF8vn4NKxwEnUNgD3E5yhoAxKgT8X7VJF4ShW3YKlHsUcNaC4j1r3YKoV4W9/8HkxtwI9VeOPHDyavSY1+irJFLcTiWnaZY5JLSKHO0RfUVFU0xpP+QMrAF6pqanUggeLUVUPudWnauWaMN8aKOsBXtSeyLpNbH1kNTR2ZPCGXb4T2N43+dQsNOSlzAPs4btSCR7Qo20nr7V4RJ/6dYduHfiSymZyVzUKOgPMawsx/UB8L0OxW9apkapK0VxQ+7QjA4NwmCTbooYAB0UGvgeaTDHd54yq5IIFt9HIi1sB6FtN9kJx+eheY1s2k8gGVT4hmlNhusBKqZeV81b6YmR+yGVWkpDml9gbb0D6Au1fu+0ihkqAdQf6P0xiVEujy/4LIv2NZXHaiCjVy2Wq69HYkNNlqoWhwL2FkngRxIC3oNpxdB79eDYnaK5U2GZGyRN5DKbE3n+3myeGZwVqMwJK1jgP32miZESPN8LUkos7FvElfwq+tteboux0gPaOCpChCwMGSc9NoLvTJ4iMNkJBy4J+6VPDB1P0kDK7CV9rBX6Lp19vV5OwozkDKa2q1zgY51PPRiJKti+u94qioLx4YlJzl36t73DZcnYTzL3Ex1FIOygfSfQQkJIV7H8CaHKs1TNRpxgVVp56XJrsfsybAPuVIMI19IMVmgnvHriebj1pohvOE+OKA7lSDm9L4JgFez11XWd9Ad9OdkD3mU27D8nqbEnl02Qvd0QgKEGRXhPIBkRrUqVMWywAV/21GkfcPt6ZgSdblLzE73OF6K2X1QUYikLP+Re8grdWstn/ALtlh/x+GfnH5yHYEiSdBXV0MdayhocHSFOi+SzjhfTbCJdcfXkJs0od3GdQ8qdQr6id6z1Odo8Fmu4/vAww5lijr4AjI8zr5/utwEhNnRddYDNSWK0S5vs1ObMdzOfMsYyigqhmn4en+CogQzdpFf4jE5KP6HnP5NefJqD
HonA+mIlSqUC01kfqybjDtj1/wz+bebrSy75OiTsZASbzJNrqaBw2MdWF57cbC9tHM4l1GS5Xdegas9R+xcQlhHImO7B63PfmErRdDvV8JfpuKUmgmdaQzh1iFoytpNNBzgk+jN3W+bnGVoANuxd2f/lxRJd0dzfcaSw3myBhd2mzFLRIBbhCRout6qYhWVH4AX5ETu7EtiSZF1nuTytpZRydaYPsw7RqDA9AtFaHj5GUkRm5+NOuapJonahFK2VTYoL+5OK8UksJFJBQfOzENn6QoDWNIcqSoGA0XeONGk+dr+W8NEn2KDXKcROjJSaJdpdM2zzsIOHBOjkxwPzUDPqSur0d4b8BxnwiEKYJcW3y4q0cYXLL1069aWChBSHKBBcfoHbo1M5FPgbGwVZtSaiL6J61/js6MWIcXnmqsprMfJxF9z3GK3rWm9W3l21vYH9bKi5RskGz/8R8MpUu5BmzTlDV4+j/d7LRlvHvCMeWqGh6buF/5zOvIHNqRRg1YKSX9CWWtAtfp4hzjymty32ZTUhuHfpU8cRHxe5D0OhH+TlKL1biVBJe8i8h9WUjFa2MfSUn3hIq/GkxK9B1By2cUHhUpXVJK/m2KKavuBJb9nHALpmLkBPMv87doaSmBM4XeKMTFbhEg9wKcTGLHqOTVk+MoESFzBs73sKt2hIt1HJZ7WK9yLhvFuEDT2AQ9HbCKiCcMp5+EwAHw4aQz0hi4DwSHNjCHwpJlIdLtszNLfJc9rVXeG2LYhKAqtabWiPJ+ajZmewMr055fI4jBzGQODjcazryvlEbt31Y7JlLeqZKAlhR5xREeMwUBzu6Y/bbaI8nqbLwBbwqOSqQduI6wacT8oV9Hhldh2tty8cMY6l5mju1cEWRZibi9IfSM8zporFzKV81QYY1vrWOsLGKF74BkTEtXPxl+IPLQbXnJWOI2AktiOvwcAczqPlvldx1Sn9Q2dPSBWRR4Ho3hqWCEklkgSfeURHpXkrpvlt6K9voM3oP1jKRJJ3NjUaBTkXNQSE8FNYpBlob+9prznNXPrFh9+unSO6dx9hDC9T8VJGV2yVyi8IjvlkA/37HR3vaZsFaidk1cKgkPlg4ZqLeaEiGRaioNXXlzZhm2V6IC9Y/A4Fvb4eI+BwZDuI6Q5Q8s4/cnZHT5pqm2M7hmVXjoqKpWqSdZKpQdzoyBaAaVpeWzMH5y42FluhLyJiSFQuWMvfErF0Rd8iFAJtiWL48C3EHRgfEwlTxv7xbFakEoi7HFmE+Dv94e4njHlXgMiKtFQZtiEXb050Sd1yNkbpSQOCUvRsyxT0dCKO5HMdnlqzZzllzkImbqZI+L4ew7bGON6yF1sfRK3RhKkqQZJZg+sQ9s9KHyuh1LXbGGamM58gqC3udOVAwNrlxYQnjo3el4E9IPIe4cbSycQTqg8p8lK+cSOmRvFV1sK9LN2VJncQbVcbFHK+kdoKzKPcOCXMgknNrz0MekFUkXpmNdC7/r2EDxV515K0TTXjcmtVWrftesK0tG1mZ7S0Msrf0NYVGWB4tV4auxM79Zv7ayjORAO35sFsgF6aqJm7iFoqTVzc0F2zlwxeIPTnre2fGIMZgtX/UiazAJD39IKu2xbuy7xuCYxMN+oB5aCD09LugS6UYlvTCmjvhFGh4RheDUHcpHHqNq6EU6bRnYeFMbSL2xSjr//lZjfw/DpP7+T38g9xmOcoKmrKa4A5aMEowvQkV6SpK+/6eogs4aKV5CSg6rkBKYXX/PHu/XP/1sXbCoXJ1BXMUfZSYNhSJoj4YDSg+UW5qanUO5Fsen9D8iBDaf7NfeqsyXzHJ3fAranj18qn3CjsLy92WCdaJBjo3QsQCvokZ/Q6iWXRIP+OQDy7bf5C6hb5dILP3rJCDCLHnL1F8L49M2PtPu9FxqMePXx3Bl2WKxlF8j6AygkayczSizpTUOwlBGnnSoH0XKpCRzGwZ2er7fkDcbjlMmoewzHayd5G9v4tHFdEH2vX5quN72YCSrwg4TZsvOKWY3V0NXUH+LJ7EMK7tiynMk/F7a6sP4vVA2588vr24oDe+6bSjhFP0CskCQMeNt4Mic59B0sZPtbt9/fNL68AWXGM1PEcfvxEARrfQJsLX4CMRGFYWDb9Rq9do/bfKMNm/WKCLjdCR6+ZEpDI0Mw2o3t3lcNVz+iJ4VjY/GjxNw3H4l4rAtoCc4fRX/9oLH5ykQDrVTVd9h0K0x16upYf+rsUJ+mL/b9OX7noUek7H7fBWIWeNesrqxVz6RBdnGFdY+OeuYX2m+4tS5UtdUPZGO5Nq/Y+fqvJ+OK+CakzQb9iq7AaGNtnfEBnKQl9BocOL7Cw36ebaVTrXfXOE4vln/g7eMj8KHitht5GWZGVNTZxiM/UKQG+Eshxz+j+W6cIlTiU1JDBg0Wkk6h/yMXOjQ/6y0YRHaOEvob294guTF2B9Z2UnYfWYg1rz4fDzS5hUBbockZsP5sD/Xf9b7lkGpiZgIOpnInsyiagrpnsZhSW8j25fiPHh/J58VnhmgkDQ4IITyBGDP+g8HtHOu4j9nu0G3+9cAozCk9NKFG9OOFPAxHG/toKEz95BGrKV8D024J0D2X71vik/5m01njCyau+FRMjQMc52PCWtAcPPZmxOtPuPnOBZSyqQBHoREEX0p+FKkP1ikxnrcaIOiMo+zuvqCuQZBg+Nt6aSYVjPCC3x50DP1HPO2pxPpyrP/nxUqiRohXMiIlWhyXCjEC8XtLXpj8cfCy7DDI35BcJQ5F1axh/9tYAFcFiEKvqyYGc3DJ04WORCCI/D8MRX9lWJujSGGC77lVTXAmPLuJSaB5Mt/yQmUEV+jmAlQYfLi7bhVsJjOp8gnK/dF8yqoUlc75Ly+yvavGt658lDzHAzXoVNTts3DqKeKdeO8txDUUyUp4aSxCDYVHq9+vhhsT0KA5PfQHILWavYDY8vs9DqWEl244kj1iFTP4EYmVugJ29XKbl3fGpRs3YTntXSTw/7Ab/YEvy887JLTbUt2yhWKRyRYwY48ys/n1F0Kt9YkTp2j+blZMDqhWyWFtnLOvAS40ggPfHUyZONCXxt2W9xUE1tyJM4MR+WuqRjB5n5KF4xP2Qjl0D4tbfFfytzDrSmf11aDp8i+BrWlnJnxc0Y8eA/qjWk2Y53EMObOjTpf7E6OuaiQXGXW8Gp/fq0OQrOk8IqpkHDtW3oP+BtF2+slmVvgqnfS8koHufFi7NUcnKnQSvM7wLQbO/2KbsJsZnExNStsUXIm4zpWnG898VVX6mGxSnw0VvNNHeLj4sACPxdO/BtVlgyHEPSwpvM70rFAnG9BKDGBKvrT/1fNv42AG+Olt3EhoQfXlABOQj+YAl8T4gbI+Wbr8xd/v8wNcfL2inx4jiFP+CUGGQjhD3Vzc6Vr1xsKeMh3FBhdPk5pzun4mKvjitvUHSNOr3GGFuX4zg8L+WxLao7Mf7AWyHMlQu2ycttp3qNpK+8bTjKP1KsHS0yyHVKWI6n7gnSFz1FoY+CnvOIdg6mMbjbaBTCLQm8KO6BcSbg1rRfZm
mY7dsTODUvpjjydKVL6fa/IpmiQXsYxcOf8kFXwnFhQFPNMG5S8OsNVzECmjmclPC86JlkBkIagKG4bYcx1jI6o7EYwF2Q0kdIRTjhQE0ZzUR9NI2oMDYQYrfkNUbl7N/UeM28758k+1Wvqpp+/PxTh7EJDs6R8Tpdpr9qwkFfrbHM+/RTeXhRwv3z23QjSK98jS9QiWNtYbFZdIJM2ve1iBzPzQmYLxjJYY1Js4VgX2Lr1KVU92hMMOYZFxJomQ8ugJishj5KArjCKw7izH7kR4e6iX18Gm9zZEioQbrgW0yHZSHpYX/22iRA2MU5y1hzI1NmDBZ+/oMdmoayf8x7+jqeJkFFbMdnjg2Bnbpezr9my5sYhX/QObAvH7zPASjzL6n9BIJuJx/ujSZMJFyaR3gYLV9qPEoJ/JDoJJHCLRMQZy7EssNbVLjAn7cd70hi+eNeWklNiijWvmJUeP4JcosWFw190gWMZALh13A8eeIIQKAgYL/jBh3Cvuii+FfRi3fs0JBb48DDrZ4R7eSKzfLVz0qDe1/wgy+jUx8NElSHqyyyXOUb7WGvsTUJYvouNVx3J6YRMSKKzRGXcmKNsupq4sRu9B/K6GfJredYDgUTckT4Qb4ZUawiZGD8JrfCy0CgSbxO9b3fClhKy13IzkDwuHyZJRU+EQlTBINceAMlewySsq9raQmUNFU2hBD47xn+BTPChFg5geIckLG4rUk6bc3wJ1kkmq5aXeE3m6MYkTR1aK9hEuLeVM5HPH2Cr+FvFMvhhsKYNZ6snIuzjJS6fGJPjrm6fP72D7yzmq2fZ/wTVW6VkjHXvXKegqxcbdpzyuB65oZrsiaOndqs73OWnNwJ/t6ExpR+kJ1K7rdhHxqX63DHTFtSvXWyJ3SFym31+U+DVbfXT8ufaxfzvx1Zz9vCMBqLjC4yH2evRKMPZz8xI2vSVCNyiXiNlVCApzdGMefA4Q1/szr83HPCseWcp0k4jMa4qT11hEnyYy8EM9Inp/5bOwRGomOC5SnVLQwYpUdbR4kuWsF99x6zpHhGfasrYNB9pmfsdV88rZq0Xxq+MYaRFPsJSCEe3eKqmMmBG/fVukyeD1QbN1RyOWE5WhcCoyLetNeCfowd9lPbL7s1ePPg4RgU5J/ori7Jr4a/4dipJvE98LGIrBFntm9JcaUREsQISU8QoOxBPndantyGFhx1nx7p6scy6falLuWjU1K0aAXaeyU1xvlAmnij/OA0bF5YgRNOoGUSn/lFZBFsj+hapeSQlr0eo+cwaGXU5V0MV/aGQRdGTe5GFgqsaQ+fRwUURcGWxN7XaXdPnYVEB2+dbFzQSYxHWGuHMK+IQdBv2CtpDOPcMFBvYZ6B62CUP/xl6VUCvBStYUHCus7vtvv15ALF3t04pR3VejRRGgNsV1Z7LsBWwtNZawk+nbZASoOJ5i2e6y0HtuJDar3wnWmVd6/auJqwYdWanxMGZlX4ffa14GKiBqC9IjxZ7srTgdA9klDDnCazgx8nB+O4H4faOUh9OWPbNoBtCr1CLxinQ5moZcuIqCOL8IiMQaS5iK9pbgu3uG2B0G96EUPD2LJFLDzssvnsQgKOV2tXmiVDLHrmNFFzdOgkLmzM3hkCArWrGMIgvZSMdTbZ7P/qgGnhQstIrNaRYZO/O2OxGNzFB4oLthmflXrohIWXH/OQEwldwJuvRaiYbCF/fz+egCPAQSf0H/oXMeX41ZcSOBE01JNM+f8Rh6EdBPO5/5FhbArSalaPBvoow2P/KbYO5IAZ2uz1qDJBFpU5o627hQXIUpB6xqJN/aGg2kkIlybVO+ZrFTSZR3+ovjjW2kRgKA6BT1M/yltiCe6zl0vUuyB/OcM+YUqxwTpTNxyDI3Oq7ulj7K7BVT0Rw1/01xsgKQB4xPfVcFBZ2deuXhRc6F4wuDOfGDWyHbnTM0l2CtOtDh3mfFh6onR4f1DDOA4JEG28caQZufGBI4ys+NIYE3xCxfA1MvcBxp3u9z0HmPL9Z7iEhHpUOUItfjtSzdM7kILyynKSO7QJI0zd2wsYM15lwPx5qRhBB4Foaj/APjx48M9xhNL0yKoHcefe8mKpAR6TWyWVweYcKS7shCngKsFbrsM98olHoPhxuESZtnqQfSKGmt8OKoaXJFgHL9tIM24mV8UcAGJdhOwCyTaa+JNOPXxNRqvyzP625NXAo1Me94sr3WK6qVrOgxnSB3Qum0fIm67dHxP5jiWk/dS8edjZ/d+nnIaRqXqxRgUho5y2DpX0tXTiUb/DlKxw6gTeqeWMeRfary2jb/LpdGWtzxjOmGyLpeS0ZJSCifEwHc9oVdWDd7KKcIIwGbELsFe6eHWVyK+ZewU6OJiHS3mqLTJyRcZVvvFPaqr/FSzyCUDOa9//qZ9OJfw11ohtSvDUW01/DUcVGlF5G0bPef+ZIAq5A80haHxJXzaVb7P2a+BPUd8u6K3V3eq/HQOycOCntraCHPEfIbu0qbv8Roc3SMOqwTy0yzMY0gZ9kwHkVQC0/M1RscXBTEXIA+uljWK/bHRqBqiUKCofeZURdAnHn7yQ8L5WMSFKIsz7crb68DRUs2WTZzYZcwBTz2P5qR55ZbF0eMdLlJyYhPOA1fZQ07KF5Gv+UYhkD7WNyKbL3xKjSDhMO+4vQlTh13Kpe8cOCSa52XPMURzpaFN1Lhnl+oJ0mzTQX5dCoIRhdOmv1tYgrLPHiYG7dbHrGwqO23n6FNIB+/e9BsB5Kg4Xq7xn6nEBi6T0Tjc0arL4TtPC9IJfBHaBv38x/c63a2Wi78XcAi8Ds1z0dXfZ1S9QgL/LeACJv3i4PNW3k/L+lD6LkwJGYlElqwZBvYAv+WIc9DkkfCi1mPOq/9w/rq5NXrQ5pyfDfaYMoO7/uwTXM8mIKOvVJ94H6XAsFK4ZcoissybedaxNBhzwox8ElY+9Yuie+g/QcNtJiDTUVuVi4kvbrwQeMUVS3T980UUIjbkWqzcklzCjRy/dpO/9R3O9cmJnSONrTK4CSmCLxU12Wbw5+XcZO5oLEol+1r1hVaDXAmcuyf0MbIQFa5d5zNlUPCjYKogs/GYFf5RGvEpM7IBQcnOHOlG6Qvp+SN3Mnrz4JHiPsfuHhtFHZ2uR9K7T5orBQSElEmQAKE3avsn2h5F59zQ2Orj2srmNrS9FUu/cgjLUNBgxk4JXF0q73sjkAcNzHilcCmLVReZH7G+18BVnReg/5voWO+d13PoOOqVnzC4LasPuALRjpWZnjRt3lFWIzmIypAn1n9PLG4y6KGhcSZ1QlK9xGg+LiPKTfDtgokZ4G1yVnLryTCjW4kZX6Hcnblqhy5IJXD+1HEXdSxmTJOLmL21LNRwDpo56pXHLxEqOL0i2DOPQtzn6sY6YuWCcThI/7E2r3Z76WxvsKi0P+QZN/lmEmTlPg0rCW2R3Um1890kMeUXiPybUffmlMkhm1/CstRJePYVPBUZLTZYy2qLbciglst+w86MdhERc4xMCFqtXniQEQ7L1xM0ZwMDanF9kln/bQbJfT8j2CpIGSk8TRWPcK
tODg+U4lhN1vSH+iW4J2f1565k3zF/kvZEz6r2LysbSVFDmV8fEWI6r8hTA9iJ7/FSi0Yfp4Li3y/wFpBfBO/E84Qnf8kjMY5JupvvH7sD8TPBEXPONZQerE9XENGxJfYH6LqwdTGImhyzJoap3cN7gBI3s528nKcuo95CIdJ8diY/jwS/6//T7DsWnrlNWBN5h6qyyTEYDiPBXg6C5Sw4/qIyeWPDm6Z0xTQd0Nwh+/hJw639xPr9VzS/aYpw/488g14KRH+BctL1/94KgRpfjdcnQVsNmhtZGrWnRn715+G+r3LPTq6z7J8axXBL7KfkT4iseeW39sj0uP4PW9I2CS2G4NWexJwceH3MygVkMRUsZo8ihNC/YTqtihvqvVjHXXeLHFBfx6X0MMgdmHuNKaCl6kgNM1wBH1Ps2w8yvc5umGxEt6ouJmq5oetOoRxrhnlE+pqV8TR8B9E5uErnG9Pa1C3E1EMrYUJc0q3jKsw76vIOFOTW8rUbqp1FLAKO098bs3BPUlTUJIdc9KcvibjBldLZBU9hNeXCaFQMAmUUkhlDJcgoXKeufRW1Hxo030CMtCYeO0wOFAqDN7n4ITbg/Qp9xaszWXYrcsc9S2hym98xappy0qMkwr0YIPeMWyxQc35S8WyUbTUfi5iqN/th9IH4FahsjBQ/G3BJodDUL9Yv0lG/0EAYnHI4BJXj1Rbjj2O3uVncAB/ENtxqKGNXnN99MwItQ7/PFNEDCMTaJEC/ODb/3oDRfo5uSisaIVCktXsxYhhkrw39ym9LCT+mHA2ZyY8X3EMCJ+haFVNt2Y1Wos9LRBw1ZTaTny8ufuD3tdc/DijTnEaHmG/AyhRf+B8SJh585EQyKjmvilxEdxJv9ZYXa/eN6tb8TfHmEbyOZg5Gj+/MtHpMXqc9tFZrxIkdSiK66lKeJYJg9IEhhxst3ujvS+nUgZt1YU9LopS7VqCAEPnIJYqQRN6l5lXqruJKAr7zTnYcKiJ94q1DfcSQ5vKWr24H10wibKCSp5IJCAEzD0wddJONJNVtO5TgS97e2+YU5CWQZD9t+lQ95KMUnqCl1a5qmMhal7xs0SEzhlbbFHa2p/MFrB/ZynJSnzFH27qllvjXm8yLFdtn0vYat6L5s9CNUODmK2ObyCr4DdB+t5h766Ukd2mmvAOB/iQgrRX4G+3R5rEyps9r3JAhvT3XtAwE8e334Mr51fY3r/AqIcnMfKNyKR7MrfLPBxOcZOXNc60SYk6CagbGNOz2dWAXY6GDtgqoJh0VpImvM+zMUOC/EgpyFS+3px6QzhXjhG6nDMhhqMslppr3B0GscIJbW+Saoed+Bkn2djPgs+eJhEFOG0biVMtNS82lG6gE40z36C6iw8fdTzQ7d4T6W7zuHX8s1kP7aViKyu5AMP/Fnnm0Txv31GJx755WQQq0f9Huj0g1KIgAADNtm+2/bNt27Zt27Zt27b9sm0bs4hZyBE2RnN4w6UKW5YbDXwM+kcqefz0BexWH7qm/5H/AYhhgyaQODcFtcDnMwv7UVAmwUfGNzbmWriTx8UwLFu4BGmntBk/k5tUXJwMouVQt79y7tml0bnYmlPEfVwPj+O9CZ/hNeatK1pRFlQgaC5RnXlteAB3e1oe3j/ofsO8iMZmFzHtGNEaM8bqZgnKa0dGgjBzWI3eJtNHLgbuNC7qxkWbt99fpk8oPO837nX5x2/3qGAxOOCHpBFNigRWNQQ33Zum2jWrOnEMRpW2T+mimVwvtOC987eSWPSLX5rf64OCgkllmlO6K2YaTd4oBos6wKPWVCsONcuTvoOYI3BYuAu2arhh/BczdFtuffQKRyOgc5ZVWf6nsttVsAw/YImJJg+L93KWoY0VJaa2TuKIaHUlqsz29m1GZtpkxdmksT4u86YeuxwuwFoRYdUYQ00hnl8dJMDno8uIUc9uei3DaXN+gBhboiMsHaSOdFMt2G0lqwgPbYZz6NNuLqLWtdN8LqCYWW0zIzfmg4N8QV/rLcYNkDzRWwJGkHc2LFcZ3aliqE5wLfKibqDwrhbV4v44e9TlpaB9WcX8RNPnX+MKKUTqOHVbzmib787PWlYVIx0hjbjcZmL6WiN/Ieev0hiZet5IRkzmTRUc4heOaQlzhQiYX550b5xsqUjXMWm2SafTBS6O+Mod9sR2YYOWf6ANr0QLB2gFbAf3yc1PrnRQOen0ISPhkgPzZDyaYCrX458LvEOA4s1k2hzVNerbbkllEXHsRA45JFTsVYBQkieY3y6ncoLe6ptr/JGOYpU0h7nEy7P1rum0+3nUGORTQX97hm8gJAHkCpUE1wQSNVl6ScQlY4mpsSQpKjYb5g/rsGjOuavfBWkyC30vfQc8BfOJFRJv8SkpwDB0eKHXH3vWXn1jSfIQAEkJXD5Owbcp0YFqH/yO1x+DQ8h8E3OJZTW93DgLAdwenNUSxSGuvTMflY8Adc1h4a3+uGSSg/cg7dPSDdrhxoA/qkm/QvpB/ebo+hKw63ubEzw8UsqUSnITDQZYOkkii2RpmZM++zhiIsNd2oDrKXwLlGrOZW50lhPe2hprLo7Jbs6nzIgQQC5kTfj7zeyC/FZBwsTRYjiYhl5cC4HafVpjlEtmoXJpXc17H3hI2g7JZpwNgEiN8mtAzW/h95EmilvpOSEYSIQN1nTWclIw8hk7eWCVYJCnhT33T+PcISfX+LD+BUdhPv7HDAEAa+EY2BTHXpQHXTzzhTcExSlnyaAFeCY5N25cuGngovdJnAuGY/PlVYUw4xvqDATg5Vs2lpS4MB+FvZCBsFQxel6Iz88TMqfLIQ5oG8cRca9nXGOm5EAaNocCDRc1bpKqcKw2L7++EQo+6Nodr8x0SzNOt1uYO+/jjwkiVW+xMUQoYwdpFzqlcv8eknDP1jO4jLGCmHYbZdvn0aq7D49UEuzxBB6zhLpagPKK8ZK3tgNTTqLK5k57oHwLG4KPeS2oEv2nsh0A1VjXWrn9ZZT383t0IrWDTo604aTJBpS5ZjRoMHF8be92rP+MhQV76pNW8wD77SSwjgitPE/eCyJEnZbgVIZzLmDwOkaQUCeWQWEboYmQhSllqJ13pxReoS7nBalB7vPKJRBQKJd25PD8RHQNzKCV2ilLKh2rmK4GqcNIQKkSR6v4LTlwtAGPnRZKd0cE7l4v6S/jf+doKfkY54J1U/ajP2i0SLkL4NmFqgspxWzfV0q4XITB0ZOdEzV5a7us+aefI81ntY+4AunG4Z0P+9KGMJISEXYZstJDFwJwQrKxEdweIAhbsawR1Gu3iFcC+cqNHsiTQIiU8fl2ccPlPvOtb8TVEKJk4/sPdzSty1fTuFWwnH58qjdJgIUFm5OvPvK2N8VoYwhrnJF2uSgyamueIeajfP21IKCGHRzhzI4OQjFQhfa/eThi3Rx87X4r7U4ufrdg6QN5aSdKF6f7C6ESwby1h8MKfb9LSo0vnbxL3M6Kl2YmGUagUnhWSJGFO4Tofgt6Wa6Wt3uaeqqvCJGD+FHFL8pe8tTRCx0Li/0ZPyWr+0R0v9wvYzCskq
UAUuQX98HRhfgeUS+xXqF4WeqmFNfLOyNKN00+yXscr2o75e9eORL/Ft5QvPEIX2SFnVqP1r+mxscM+ZCO/Qfx/EP6mzp+Bn0r3ZDKXTxNJotrbyUO+P1mI52pARYibBM6XYb16CDUVnhs/l4WwECLMZwEKxHTdKg1PbGbGMSgkbOUyaYdpqtclT/wE+qRnBRsH4pd2qLC0qQavTTI9iS1wSQQOTe4l5CuY2c1skZb05a+BLRzglNS3XfhstkzsfPN6Dkg3SV5tGE/f//uE+gV0ZsyQ03L8mW8qR/bU6G5rogjfsNjg+qhcC9DUlmjTSzxW16hlTHB6ijv13sTRbfkTKh92u257fm3qvd5R2tiIDYzjKVMlKZqJKyZIMv9Nm4qCmebIAZ65wONeVxmjUeAnGCXEKOEnbjxOH7Z0CXzXQKOUUHYo8UXqZmGc5NB0lbzuVGMUk3tXutf9p0S2j7wqm4csVBYbPRy+rRWZd4G4zH78SmRouOm16mRSdMst62Sww23E7sTosOh8Jwhw4AjwxZU2eR1yJUSTfZvqX2VEjvc8LpoR/EaXLvTHIeR4tMxnwG8/Hm3tF7k77vDI2gQmf+xK0rAP/m9sUyeMCHoAaDV8Bc31p40ta5MK3uCoW1n3l+laQhyLXm8staHD8atT84j00xnXHPq2mjFFZcEOGbBvmP5CyjmF4Ircfjrohc7UCLqTDHlzHs563vtNRumj/PzxvuXDx0nMHVYUs61DGUNHrkv9F2lWu57eNZokkI25kQyDutjfTkc6pkQL6OTy4/X1WclOzQ9/+u7JulEKnc/COEuYpp3zMJ6R9UHQZ5hB3aWLnvJB33VTAuVAeXqsFCs25iQd7Ih+KoGqjCMBR5hkVebahYwRAVYtujnkqD1tpoZno3pFFv8Qj1FyLrRI2MF8lcJ3SWd6u+6zwkzzUsDjTKT3WL1xMIYnd2VmBdN3hGBzpUoxhpWT7R8WOrtcfAK3WD7ZvG2PBeGsWFloMX1E5aHvwnCsbWmR0BuSmfqC740tJul9JX1gGUcJazj9GM1pitX2RJdTTVrrs0MrcP9eUu6UKvPQ6wbgoPmnDdo0wGbII32X5aM0uSDjNI299hBzXKL35y9TeQPNR9lZku4oBDSf6Be5js4lZzfunXNYsy1SP69wRub6MUjf/jx1sW3ismHDnmRw8IL6mKTDptBupgBKxW3P2IHkeT7Z0tdoQRxY64kRKycQGju3jXCYAdQfRKycY+evdwZHhKUcpXbYwZT9EZNuJmDsioLStLU6cmz6qK9iZFdOSneg5b8PVeu9y6vmOzWSKvqrIp5AZlr0yRf2LlXSTN8UQytZWFaY7zX1wV0aHY4PROwjm/pgWclqF4jXlm5QLkk2ew2C588nS+7YjFdWsiI3zuWj3ei4W6Jb0mfeNXdF4Vh5O8yNF4n3Y7fGxT2yQvr4SvORIEVhz2aykl8NrwbBhftStreqScJlun2Z4lqmEGudmQYY82qSVcwQl0AHti6jOqjEbo0tHi8kJsaI2wnegmSNE3oS5YBIxRwBBeDo90speCaMRKbZyfErNFRa9RckYwlyZ009ttTplbxyfMqABx0+lA/1c9qDIYP3dDLd01QfMd5Cl2SHcfPE4ek1dCd/ZRkeR3vm12jbqw6MPIJiJm0Z8Kqne+lRpysOWwRIdPpeDppmKDlCZ77W9LLTd/n+9XP0veJj8qYmyoz+BjfGS7lcJGm17AOgu+gYProftTmpa6OIAWgKbk6HvvxI9iNCQ0tWLMbXvpPlgxby77vn8QimlzEK59z9Z/T1CC+7/a8J+BM5JQiUrAOa7E9fT/j+ASCZ+tJ92lMFU4D4H+pYsyLHg4+7zt0xS0H/c7DSBTsAxz136kfNhl+LEqjMBjlCXhXbaVSh44F49LzUP0Ja+YG9/OFtkBa8aen42yoPUQbgzTm5MpXnlGke4xLvEGmvU8K29G2RN5sHB1GOefkOB+vwUlPsgW6A6drx12vDldCIJqBMoHCtF3/YpUHxGXI2VtcrFtTc5mtHmhBvwFn+UnOdAtLldeBA/wsehMsppgE5jWyo4LC/ieA4TeHqswZPF8KmxCmZbMXU41OkoSjda9WcEVPHmb1XbR9M02I2AcSLpeHzNBLbRFWbKMBGxNpMflcYB98zyzPDcA25EoYCd9p3TvQIyN10TO0d5sdtJmdQhD5N3teTz0FJuZz5KGEX9xTJJwGEJoLwcLyYSkHtZCt79QKoX3wOc92tQJVYPGW0tgW/P6mrmcuAlllBRvXURy4oye0/loGYuQA2m5rkNCg+ohTeMHxjz6IsdpvUR55XKgprWg4xaOD3wyF4N9ym9h86tmuiwPFdb6KJkIDDzDWKLtmrGVPDNsLOBq/P1mKb5WQ14zb/pP9r6uNO+lXKPhbRqiiUkuDvVz+7uLYxd+dVC0W1n3Fh2E7+RQrRqVHiOhsM4yNkvPk8ts7FZ4I22fNl3FoHK+KDThAjwlvDcwOsyJdKeWfHbZy1L2L7BBtikRnwxnKBGRDJ70lTNG+xcy5X3df1XNvWg4yWnepvDlcbGZoQEpa4roGXLc45CVL/7GacJdb1XL4On/XGFKOk2qntBNiv88/0VOUYYFbZMV/Z9hl3V8FW9Jvr4PdbJRhYY+0NBUxF0PH7U6T7bagBZjgWeWd6eXBLMFqBhQMExiSVpVnSA8JfRikXHKI6A4UMHRJ25VQ85nF0wP2Ox/tIPHkfJrcSJNBqzjveZOOr8XPBifcz9MMVk8kgUTS9Bk4PXhVwre3HQ/ibo1KGZxnlKyaEqn4uvNIn+PzDGutATLvcHqOsRCdMsEnZP9NIjyRyLO1E9/MD7iU3j1BW2aNvqeQpiEpznybAXZMzpE+cxSk397n8mZEdP1MaJjMEG6f2Uyd+H7rgpkUinPODs9KhttP7ziwIgJC2UgTcIRr4aAEdctNBEy/LX/RgofPFa0cyHSQtcx3bhkTEliKnhbbp7bIK3qqJtiufNUNIzPvwm/lhhK3o/9jVSkafLV39hRZQx4CUuyCJDEzrjncpBrcd+M5cUxaOMg8KxvLGdPt7rmehM/3/E9ErhHl66nVwCKsEmkycGQqDvFQmll+DzsQsxqlU/1Ylzfwv+NsXQQ2eUHoHy+/DggzBCkgWjvmS/YJtlmOOFaHL2900ex7nEIvk7RV63nTA0iTsypaqP/E6D6hY+jk4ImLrhDvVVQNT+pYpE8zUeBRRLesdysVEPULvM8yyEz+eI5ZS/xHXM4ty+5CFH6KCtwLDmbKbfeSIPPA12KO01wHlVid26jZO74hx9w3Z8fZOvhqnGmX+Auhgu1rSSywsqtbCzz4SIj9PuWqae2ANulw6Uv9p7z8453QySChexl7g9lSlj8LJVVeydBE3/mEVjEZl6FUC66CZXm8ztW0YkGNqsETqtYoZ1hAh/6bDdrfvfNGDSXz8qGcyUfSURjQvGxEXTlhS7aydhIU/nMpA3+NMltAHdbJJsTiKJm9MKGHIulmcU9N8/aqBBKMIru0fw3+8eYV6ohNWKQiSqject4tD7sGZzgyl
0gyzq3npLQWSzBuiwQlguAcf2oBQlxYX/PVZ274N/DbF15IL1is8isFM1xiZQjbS9v8vr9rrP/4lMtO4r7BoXBEmIT7InsZsvC9H8ezx8BO6LqBkxeS1XYZeNKvCu6KvXOpkVZf7S/3x7CcYUDxgRGsY5Ik9ckxYFUu4VB6xvCY991z3Hy2k4qbTZVmTyO+AFnjRtVts1N8jJQIZSDUqGPPb5N0NZy3250pIDGCKX9qSVtHFGd/n2VJTA8357jYUzZBI7/CZ/hoPqNtqfGfyEpmIW9J7XRDBnjdiflrjmNlkMR5nsnHvuY+eu8+ttEOFnrchVTcmVFdoZXfJMrdjHLQlS0xzPBuRWgFAm8hbaB4UxwgeIlI8A9+1/RaxjpWNasBgmpBSiI8nOVwxrg/KR/3tPdI9TleRzSwyzr/8ZvH4U7bCGJRGR3G0iUg7Y19oEfLElt2kteYWtvYbrVYSqO/J4PJEjtxrfU+XftLK6hlsAcY8vRE3Uskz7UfY4O+rQsxp2UlzctvP+fBQOvOgvaTaZ5TC/f6rS1yCrRDwDkp+GIYiAsYyPSSn3vRDyLJYOCiVoDZufBRlKrDLICVn1J09fwlrDrjOxUTcMg1KwV81kIkriJqHYRz/wav/zh305a7YTXR3YrPxsrFtgrTOFiWLjD3BqmEMoecyT9IGAES5qKzc5kCZOzYslf07GUb3B45tkivKx6mP4maiq6/TVSDM6KAKHC6qelTOKfwo1NapKJit99EBcWUmhcHY1oq3fOh0hQIHkNXKq2CRuOdkgiVjPfQetRPJakokvYLG0rhAquzwSm3/V8z4yRepo1HboeKqFM0iSfMzckm65wXC7+LjiD8UbyMcg91t2B33SCzU+MFigctO29PCnnskKBZcehWHzvSo37l9c1F6u6QhW0CXU0w/tHjHsog6LANkKUEnskObx8otIaOH0O2DkKDKTO8SMdQV6myynBQf1oUhlmQ+YhkpJgkxGs2srw9hAuNASue63UAJXTvtqgZtj5lM8DcZq+1MXdtKW0xKCYCM8sYbkrT6HwI2VfIRQyL2rhlfRgEWVDcxF2J4O7cFHFYgoGqNjklfdBGtluFRgRNoJOxVSHjJHv1HgoV/w7wVg1WMcs6tG/9DazAVj0bjNz9ie7y15KB2JH8Xh/yAnjWsi8WIZcsS154LfIAiO1Lc+gsgFHVLL++qFBPgZ6NR5r6m1s/5HYAqPaOA3RWWTqxjSIvb95uxh40rorv4eFX++yb3srpqWT7zdbJFyCcDrKF5VwjCvRkFx/ScDmpbi1FUJ0MrsWylQxNBOHzA5+UKKg981vq+0g1py5tFAlKLtvVIwPgs7zqknrQAmAL82d5+k5zUjs57SWD/l5WzjYHqq8dlW+1PM26K/OVlXvUDvf5WQmGNjtypxQMMsrfeaJcdzgbRwiCXw5+eZ/oxodZKrfw/nETlxJmSI15ikJO0CiAErXnAedK3AWassZe/GN3is/KZgpqebEjS/TFH9Af+yz4DsWE1afbZiclF+CpHFEwyX2Ptl+mQ8Qk2p7G9fVD1wm+dPEbTd3KExiO6oNmxLFrgkqjE6+aUvmmL8Us0OO5kp8pCNm2fRV/iEQ4Z7r2fo+1K33ALjjKBagfMSIdHyLv8YaIsSMIxbB0Rpaw7Ww7yFlgwtMKYMaoiYXgNx2a0MJIRIZX3DQ+CmqQ4YqJTYFt85mwpbFwsH34OaboyyuSePVy3fZBvPxOhMjGONRUP7p7ta/aV97z4y4lREU5Co1reQbKN0leg+B1AYEkEq87YurToqkh0LbMEZWCcMTToZM/xcFLd9u1YQZ9QKN7C+oZCXtf9VrpAzad/TZNSrXmDyFMBb3y5M9GxbpuHhttkh4sPnKEXutBb26pEGY4wUbu/s7VvZ22yQ6isO5BdajBz+HXmiMY/Zsb3cqsjhVj1AS2XAV2SEfUR9AGU9Z4LfTRqa4cTGziLivxsEHqBpGzbdc4l2yeMulhHsRs5a/13hmhM+aY9qbtU50XZTDEjtlxjYrdPqUXjjYt8YXTmq6AjyqB0oRNFRaATpn+ZR2FYySsUn2ck0l5EmJYDpPPBmnmVP/SGsl8To5LOMsVQ97O6tFtQ3HckJqKa4etDRsVfZ/VHjJWAxoRdnqRFTZhGltgJLRO8ft/t9Ska8eBPB4RmRZRJC1ZFPkyQWNeOoeBoYSDwZ3wkLHjqCXVz4XgHlc3fE39MUC6yOvCVwWiGSpFYu840L04xyj9oV5I3imwO6IF3YEmR//BdMsS8OvXgbUo8wHxyg13AJVnk2VLzjWIDAkBKCwpCNFlRV2dnMjy23jknjXjcJ8eIaJk13OxUMShYnmEJDH4LJjJjVxaXU9EGBcguUl7H6t6AUDlz0Zo5NX4+1V+/6Fw4OofPMwjuA6Wv3kX9Vfk0hNdrDoTbIZjGaZIFmkxlXC9+Ago7cGuSNx29pLxiIERMGoRAoVEjRlOd7LWOPpvqH2Wv5f+jbYx/jci7AecytxJrCeEWawZe3w2KG4VGqhtVaKxwVsH0rwM6cRTyBbgYnYORh5qLICSoRa8A4mfrkxqhObMAVWc9qSLoIXKAbLBLwuKwvwKI3HHpe42XXLO7vRempRERWvih9XZtCZ/5Ummqis6TfFfETsTrsp2jGzyvjDs7SR8sTdiryaLZbBmKFBUOIfeWoP8e6YnfWvSx62NdfIn6bb/GFV2/uapVBQBRR5NlDa421eq5etAkIZbSGtzfJHCkRH16WFIKVjR3lo98fiMfWk7+nod4zT/vzk2GfY1pV/asBe4yOCjBH1O5TmxtEyYB9GVIYfC1F8NqLu6tY1nzlwM4p6seqJ3YgTZGo//rvqqYa/jNsfnR7J/YIOVjRCBGS+ORZReK+j/Vr5zAQ6PjFUWhmi1pd5C7DuovMOtW3GD93Cz0FUgPC4whm03elRsotndOWk4MnGC/zEwtcKPtJE4h3d/Su99dE4blmpjm/ySW94LPNzs59IVua89jCcM4pja1yr0pyJ5PmS7agSKlumqAFpAyE9Y97SlRXYFgTt5HUEJmEBWbfuuFpgFHeEt2dsnm4MSfLuLWwcVDQw1z+YICm9ykfcjC+iQR+bVV+UL8B5Jq4QUjeleS//TnIbaNtcffGC9NcdB6PTrc6GDmCtd7HlhvMmWXmcJKjCP5+T4caSSgwhMOk4bwFmb6yRLKv9XZmcXjWOi25rRSjYlhjfS2xp8kzXdE72TpbkbHlIZfSnRXt0Io+2gqoUrc6KKuyvn7QkzkkWVIvOlg2S/yfz9+3fRWuQ1Mb++Uc5bqF/Ggrifi+XlFTwdoQPLeW//NxN1y2P3J9rcscvJN87HovhK0kmvR1uiasZCVtHUyc0GLWhK1hDlfxZ2Yc3F4xstIsZOHq+s2qutdsujMfOmzmLhv7VAcco3Nqog+sBUt+dYAjOiJxedpk98nTClp73UL21VUY6I3gcmmKy3YBPpfDyEu6ei04kTXBLvAOIhUFoATCwlnTMr0lNtWKyFEj9zHIhrTofPzN8XfEAqTfkfxTDfFzG2ujk6sa5VvxZu
VWyp8MB6NNEJ5k2+PjEQW81xbKSy1vV1UlG+Agr7O88pmHDA1hDVff/gC6/v/GX2Kq+8W0ozbyF5CrO45WfJN29c1HU5nouktEslEIvWhQ1VRtnJv1qI7/hRV1APwKcHpmALyyy5NwuCNwOdq316DSJFhye4tQgd6olZelzi/QzeWg2+yYWbFCuz7mJ78NgZ3OfYOCSxzcGMJQ5HgV+yv5Y+XU6g7MLyX74wL/P1rEW3DFDi0GqyXM+LZUcYzORbrcDovM/WlScNoS0Y9hHg0ZHmL4PSS6V6fEqSjmFs+3HUvWpbctnJ4JJzQ67KaXQN8QpYjq4ryHRqReXRkM6qZ6WJxLp2UkYCWA1vjuAIOQvg9wEwbm4KwuDR5ZpObwpsWxKbvreEFdg0To8/fPYAt81BosVdsPXGd1UnlbHN2RwNF0O6kBUPO4DYRRnwhz5FeuLjFDsNSN1HsrCUIR/0e4WuTvpnWW5zOqVs+FtpS1cA3AgcZplhmvssWNhA/USTUnU+dungm7ew+sRBZgbvGwy+qcSa7/MjSDnsUKWmeemIUYmRYIMQCAWjyJ8fTsvtMp6cn3OLF6+vyBnxIwtFV3Vfesrpfvw7tlhPVstGAvL0gWyebI5Z7zrGzxQKRsOOin2pfNiekkKn+TfGRTtnbywbqDcDCFwCCjc0c9+AqKLIhJNwxWSSHdU+Gvk0VELa6dXMS8Sxz/RCXFBxbeZpcpm8bjtZHvqPpK9LWsGQSgH/p+GVz73Efs6QQ/KBFLpsO+Ujcm3FH/Lbz/ptVlTXowUkW+KUT1cloJKAhNbm3xNDbFzMBpFewNUseStcJg1QiJLucuZGGMhGdKxyk6jG57drFIOYwXn2Wpno/UjwVMYSuYoWVYzoMyyyEATIS+k4gqbepTpdmaxhyeFHLJ7Cf9/L3EWs64eR3NkM6Pfh6hy6S8uJyjdibp1CYJEnod4pskgQVE2earDl7JUGFnSNKiwolVf47IEn4qGf3gzCcl+ijJGG+TNu+Mf8K+2+Yw1jeS16HXEid2522VfO6rzd8LbAA530mqYatiQ2sBfkaV4TwbDZC8AXd0PUSYmXlUwjSPHTwmIWIDtScUPzRvMyqhqBVFQ5zRV+qfZd1Cy8J070PTSaYzMGqG0wSXHBWa5VCi+US3oc1WAmt8HgEjBEnrW0Fv9BwNCLsybfHYHdb71EjmzTREjvjI4ffQ2q3YpyvldnALsVxtl+9rZBL0UULZFA2U4TAgi9Pl9BeQimTse7gzCKHtjtrSR34KbmYwAPLPaN2Dsnlgu+fIaM0gyY0F0eo5h2Q6bi4dnDEJJQtp32roKJQ7FpcywcAzvYnCxQ+MsHB+JVSt0C0MT4ryWij9RmjvFotsYJXnfgRQc97BBqTJw4XoR5niv5+KnZy5W/v660BkZon00J5a3N46wF0Z22TaVhQqx7R74+9tXmIKwZp6z4GKzGmfx0ZMc1sC8qyZcM+hk38VJDc1+PkYvRSd9M9bK+bCHxPmQhhEOsPaQHmkWdnEDCwSo2I2T6D09vh0hKyVucFnGQe6nrd7sH3w4waRmbb9VXOuDmpO/nfnDLEZI3n6R2CxVJJHXo5lhksaU1BmnHcF9NSw2Bstu3Sd2G6dxMpuG1KikBa7cseRN32rTa6iY2huvxp97ekT6hv7pepNSWM3/6NJsEJi+LZzpkBC3OoUoFt/e6cabGVWgHPXXaEj1+ShqdEii+hwaDPQ4i9xrxAoksSxlK0WerYc+wb22/SUsoFfaODr8MkJZjR/aUIjhohz1m1GPafg5GiQbqBRhOxGCzwvk9IS0sT06yzr07n7r3kJkIRJIz5dlWMrKr//g7QXZxCXGub2Hclu1vKBlgkB12rKbdhT8MvZsNWBFVFukKZyCvUHDf54e2rsrSEBEMWe/+8pCm5NRGNKSFWjFs+70ehGBLG5HGqTeZfk1qAns0NhCu4WHYqbY+Y7iHjthULjesOcNygiFOi17OKL4pnLM0nZcRpoJVlFnK6FoFfdVOPaGcIjfyuMPnPZLcTNCnmt0f3GXRZLE8XzE21dw+xBMWc8oi19+fRIk3eYpqmpwbhhobH628cujSWlRcgsit4my7mlvdNVBiec4Rd6cLVqcdQTx39ZOv8GRY/3GXtets1yx1w2pLumANVtCLuaTnwEGYYMesVqzGG0Re239aivWR7JZVIx3ak9kvRAoN8IR25J1aRyugPEOPyxspKoWdNCNdSVjcdzWPbNhfSn7aQo2pkUw2UrAqDfHzyKM84vRFf6k9In0s2xt/4fKrl0SY76XWPrxwKbQLBHiQEoUYI372gW1HQ/HU/2h9j/WEjYSo5zSj11ObaaMjDaCPzInYQ5ZHhbe9RY0LiJUuB2csSpsby0NWjxa/TChdxgjh3odGBgDjlQ0twZUnv9gxZYTKa2KAqoAMSTnWp8KFUMahpS6TOlE1bxuewlwF4fIHwCpPedByyxKlITQsTYKA3Jd+lSliku10ya+tF6Q93glLLbrIKjPJxyGGTwNnn9e7qwVugDvAtf9oA+2nGoulzCSvMlXYBS7luXJS9jdNcAXXmHslqSbTLMQykHsFOI7EGNrWYwHBa/gJRdD85xLAvCY9fLdmUJmmJqrfJqGsne6/Wqxv5kOiL0pRnV1RSTLomNDAd4XeZmurSS3MNoqnPqOWA932e9VTjGWvoiK6ISwwSqoOF/WUKINBlDACBafrXY193OVNbYasYv+V1XJxGU4Lqv6eh3zfuKVHBIYiEpVzEvavGLV3xrY7BECfMbqx7mnJyJgkxErGk1HR6PovYvdt4rRDXKGd8mwhsyISdYnJ827CTbKcG2BDSGsWXv+Eln8ijGxMYY9EHaFYon8bUBmRy361ZzAH9FohQtTSHHwO5R9KuG+AuSVu7rKd9Wds82hlbhPaKNqoIONYqWnucAPkRVdBH39wm+zohN4fsV+B8E8qQIC9uLnNd4eWJygqzjeS/sXD+LRxhmT44vtdGUgTt75N2zLTDGbYGU0K07yhKTg99rLiLMV2gbvQt8TbCxa1ZGZo/waJAGRR6L51eb35Pdm3JD8hNvgkMU91cpBFtVyzMplu7eS0dPq/uGXbN4Vr+jrd9sm/r0lQfYk6UIkvjqBNzjA+U7ztT4lVw+6vf9j45BtlO4bhEzKitJqj60nRDEsL5LtFY58CBJVJXL6Cv8WyJiADhGb2azGKA+YsEM/hvFi8J8SGzxvWw+SFv0V1HMPOElJqFofZLyj/vMBYyog98Dwi803x3ziMgLTGTT4ciAwDhvSInJyk2fWsOZqLvZb+tVR1zOd9DZpRVOcyXZfatI+qGE7UTkNz6rc0KoX0vJslxPfdg088QR/XFwlMinur8e8LndcU2ByCfUpzsNAbzOFOMXT56sAb8eh5BkSUE8RSYez7+PXRYH8CBnMSfeqYBjEwNgXyBeD9+G08TBWVEZtVUwuv2c9ObSI15kQbfHv1tRSVLfMUVVN4Jv6xkX9/ol5qa0/h1Thezxx00CNTxh//dr+W2AqBZP2I7KWOu5iqYzWv/0lsHI8bVVJ+tJj
TqqxtRyeUJlxOp/vYWVnc2vbwAX61ksqyeoSHuzE+FpO0t6BL5/WF7kx1NWHOWA7exMqVZIHvXNepOiupQRUDvnTfsOKFCjob+vQFt/PJ+PbYeqpZJ4SVv3dh+LZcrQXFFufzSFUE+3JPLRgdj+LPIQQPwmwt6qdNIIuTePgxDv/VqEmEPylRECXK/MwFpRRPax043m+k/L1ok7Xj5mXV7D3/ysw4LtYrw44eXJ4N3nVWorI+WlPyE1OzKu0UyVCadHUznOMnFnrdjusA9hDefiDnaI7dB5gw8/gzZQo51A9b/5KztrO3VNUlWUBkwfY6FYeHfIwsUHYDSZI5oiFd6ZgcLusuZdm6BBQ1+xF/o1QZH56CVxXysfxeom3wb6Wm3Qt1t9R4L88EeN99GZmGJKqKYtNS433n2JrfYlUNvAyWa7A750lBMyQ4vOtODvl1hRcylAWx63IAF8jUnY1lPXg97WD1eLnQlwMcMUK1EkfKZyQyUE3jVkOdRJvKeh2GGJFEjIgXsZxpV7nqYw93UOCxMus7JWzqLjdt1P9FadbPOCIvShRgxu+UwH2wJ/Kgcbv9LJ0eki85kR2VpGk0cYwguJ+y7oZnoYDwwwxaSia6J3Yfr0ToTmnlK4fP+nensDKw2l/XcV5xyzQ7F2BjQq+luxZ+So3nnso2d4NP50HsCoqNA9XrEc+i2QUnpBa+uvDJL+Dn4LitjHOVAukLZQ7mb/HvAwc23uNSUFQTjHXblVrCSzEmDa0iL0WrilUBpJkHgPqXSZYAt6GLcdYuXl5OIkgfMLzyOsPu7+wF/PQ3MaGNESUxPI0QXVf5Mbg5RvcPHuLSEuYMYIKuLD5qExvQbwu8UAeYnYIc2IXi9RwioZwpxPGuH9YtpSm47BLzCj0G2sj5PxShPaP0sF8/o4Kbsc++C24XaAKfSPuOzRVpR3i/fuYBqnmg0KlhkVrognaciVjBWEn+gJFV9suZsHKNS44qS/OiwO3OiIE4jAxJMgLuAExIESbCxeNHutQtrzZJG3OkT8xSwg61f59bVu/Y+pc5GXyTMu/Mzamb+kM0y4Zbcj/kbIeBl4KZJk8Dkd7gDWwlUGQ3UmvyXZw8jwGSgphHoDb8KY12Sa1ZSEc81LGNtI0OFYI9jj3Mul1rovx9+pXamXxsmMIYOSnur5VxXGPmdMBiGtZ7B8I+UHAMXJv/Woc4c5+9MKxtOoEWonSz9Ir7L1LeDTda7Pl7izdwum88CeSkGof0coIlZT3lfwmKJwpMqQbakeZ/NH1N7s67Oi0IWDUipWY6tOp5iuU3Mh59JNyRyLJt7BBEIVnhpctUPrx7fit9k4MerRJHMcZZHvyHxt9lROJDCErtOeBzvBJeM6vjnQKVDnc7tueqim+V/xhISAQrlaS68uIUXm0goheGy/2VUbpzxdYRhi2J3tXQNebsjAfiTHCDJn+/L/CI5bcCcSjW4+trOPVvxKao+pCctUgoXccAMdIlAgOBrDINmb2GywLuFJdeeHYmAWKFe1+ELxEx+gnFnqrkapEysgh1fLxe2yyT+HW1VhgvluyqvnM0Fi5S3Vln2LFfpjOB1lAQlQrhTF2ZZaFo1KX0duEymeDQVspKY4ePBMW7WlnThWmgwHKVONVFMYfOYyedRQcFGUQ23AApWLWtTd82RJ0pR2SOCYXFudnc5zVi6FVSQwuDSiL4rLudmBSLzHyx2byda6aQjL2Wd/T/0LF0LbJIMuMgGhdIOd4mm952jbi1B4tXfzxcYtKkojtQOaL3VxctUMQXoJAqxD7VCKh6RBBI4BDctl+kMSo7JzAZEaueCszffo7oWgre1om3wtlT9NJVfvZ932Zhwyiv85/uCmO56WbH7S8XLWUko30EkcGqJhD47aracdWN0ltU6QVM2SV+iUoD6nqOr4JB/pxheYUK4APQnq0CfulWu8YKxASWmSrl8fYV8oSVM2xiPFW0kPvXm+UOGl4lq/cP8MUlJYEtFMvCe5Ly+EjY9664mh/zpBwHxK74gHzpuk61j5QWXvou4YF5pWVvxacffo4RNPvKDNt5c6BR8UWdgTOTvfb50l7pp8fYpdsCSzWoBdGYimDPb6No6QMXjM7iItHBNh6Ns3p1WwcZ+3hpG+tLfIOyAnlse9hDgJM+Vm35DBYq2fWshVNSv8efOvXOxuJ18bjXAzAPeyzQf6gb7MrM3yEST0SMCa/4k28loetrDDogfxNMI5puk2gANcu/4yW7efuRJSUHwxx6VKzmCVMG7Hstugmjz3xXBHrN5IRu/M/kmu3GZ9dO5WN5yZasJ4OUrNrM5C87JLZ8aEgD0EdOhQor8suirZmpPP5AYsPszm9Q1yUBDuKgtZIPCDc7RN8d+GR7HjOHitBAysN/WOGDQVuciInMFB6RHtJ6UvPxDItPpd/ijxPkDIDtVKxUZiJ1YZNUbVLcYAJo+X8rm8ZbAOq61oHac7qJ1TXcmrWE7tqHzjcUuJUEvQshqYq9uq1shqU/SyhNOIm1os36TfLLXnYW0BC8LqZVzwB8Jj4i7Qgn8K2EXT6WXGmt+X2dY3UGlh0Qs5NvQI1vVZ6h1vlbTIY6cxnwKA/V6ckKpHPi1cTQWts6gvMv0CkwNx4tjUSPe+mWtSgJ3ZYZ8iXB6I6nN167XrjWHyGDKf6N6X8bmCCXZbwUgoiDWRwu06nZakinYxaSYGwFXHBVPSV0jUF3tLBCyK9jmhpeTVwjnmy1RB235W80fkwGsct7830Yh3epejZ+o8zxIVv/2fE+5GI6VacZTTzM7JYMS1zapjhnlYtjEqWqwVlfdETdL8MrCoB1b7TMMEoVFrVxScJSddKaw/olYbBTV5YEbZXYjOvrEFQTyoXlqvSOlnVI+9QomMjB4hm/rqNeEVQpvYxkDAR4h47VQ3k9NCI0J+8K0u2GGlIluy2ZsG1AfiMtnxVHDCc+dmWu0TOFmqhHrYd45fxWl578ZZDSGkpq3l5JnSkEKYpkWOinFAhz5kSeibmQghLXaGlqvwhWlcj9C/TARvpNIuv6kEOKyhO/rYFtdJgtsN15MLO/nq8d96LgMs8ivdYQuDwSZ7VxVBeodl5g6Wnh16p/8u9XWFbFQjWZzPyduRvv5LBx3Yx+FZ21R1+XFHmTTgsls20j2Mzw/DXjTPoVB904Oi47eq9rf6DCQvKYrMaF+aRaZqSoMTOv2GfV4oV88lUKymGvIqiZmVBd4SjQ1WzYESNFu2XSmmudXAAkPIRKqcSQIcrGq4B9FPmZHKeRC7ZFHQFvgBmaDsm/fQkIx9fl04b2GsS83ukJjPlRlfHKUAihDxWsenCQMVAC7G2RunPqMU37gr7l4FLFdvhibqDM3ZzE8ftPXpgbfXtLU4YYSAq8AI2smHNcxa4JI+QVXMqFEZ1nB6lQtx8IKy7pEoZJap47pqIVlwAXXp/mgX7R/7A2DgI2lZ+5Hhuu30TqCyzoQv4RKTtdXEPQFh9XEYh/q+KJHC0WOhmUyNNMY8lWJ1K5IlOS2vp+elMrEEO012orgOFCXRfF2FtcussYVbPT6SDPsx
WA+7tCr6JaqXHTswN/bPAfyI+AaOHY7J9jHGzphaW6VLutfJLDk5LdGSmQe3Uo/YH9gugDKySyX1JPXD4T5a++FdtgZxYujKEoBNsRSn9JmrsEP/MEKgLAKxK7XqZcOZgo1vbsExWG60ORsMDkHUyxP3cd1chj/n4/wU7mkZPFe65ONiwfKHZ4k577npBOfxh5x5ftWfb87esZjaannJM4hKhhOowKf0LuOU7ZEueHwMqXPpaMlyTGH+KR6dUR+Kkcts/zbvGxhJGcsEMfZvKvaF90wPx30hymx/jB6rfGXxHtyrmbSEiauHLDJKd6zKXIhzv58KZ+KvU+kEWICCWHw/rzPTwXj7v8/tMbsr1I70FkEY/9bkXsUTcfTp6pSmf7vh2vMLxS/lDOLZd/THeTl5MLFACTLXnjTF6mWsjC0D9kCIBerdVsJf2vhnRVbHw5hQ7NR1YylBk0nvoRMO+FSKSRcvTv3hpXRp/aKDjlnQMQ0pb6nIawZXB+RmHaOoREdzn7VqhWNik7ygdGd24/sR5jVeVEKFTtNubi+Mg2TxR+q6UGWA2c8a3RHm1EiG6n/BCGkj/P7NdznYTL8fBmik3hYRZ3me6Z4FLmZ/+ykLo0+rWQiD0cRY3xBMbyqytDjNLevH7InEMATgPopicrhV5K9PjHkN7QzCu8gToQhQ9aTIsUHiSH0vIoP9CRFPkRScjs2gUTzqatAuvfrSbHbhD9968ERKtqFAWNawZCxcEBi+Ii6vgGR0mEUQ2jgJjtQYd9YE1xXXT7Is3CoD3jLV8AYkjX557XmlElaz9LLAjxOYWkI8d+R8KBifmnBsN2MsEgi1kSNB9GnK2lZZLWtuN91iIR9IL67z+Sd0RKsSCp0EMD/kdnMd26nn7Tqoo09GmJGzUBkJssUp5iaNwX0HYeANVcoMnjkvzgoy424y2NECGjduVEXFjZrb+qKmfeCevZsv9x8z2gsy3R016mCkD6nW4+9iA9uVOcxu5VgyhJessZkfwTky3twfDrJ+HitRqOywXyO4CIRe4q0I1Mp+RJsHRhfIt4hD6NgYlJsXsnWWtBlEzNudgdnwHbyg4fCH4H9HfVjCBsUhsTFP4Nzn/LOtiZztIsS/f3bi2JUJvc+OtL847Q65W8BRZkJS1GJnxhD9yyuM9Yo7pZqYZOZSNOwV8I0E5Qo+gQU6kzwb5WHTBhvulut7YFL3rnb693SqiPIGRdpNhYASPMC2Vcc4HujihVUkIuBHyRkUDwkWdYLzAGyEgOkrAb/1j8dOgCzhWMXLm1mH3E7sL7oliYR416xogFygsp/+EUd7YNrVLE4mO+OM+tveufyI3U0AcBfaqAgZCmKR3IxJmY0GyOWNDhLbLBIlvcPuUo8b9iuOCTf90zVlE7Zj1Vv8/DkVBKahwxDn0ze42swIqkPxNXya3QJwdm/+2kKYQRgK8zMfvwJbgJ03m5ES3uXVz7zBMazjghZPlqw+smNe4/+JPD1qkBnnma3s3alRJ39aQuSxd3eJ8hEjxUEgOvRUBiY/OMD8wHF0HltBY4Q7/cDyljvXMylg00+lS1PYopk+kvtr74Q1y1THFySXQ+aMSGj7vl1Nv/7H3CZS+TtD240syJnu1qZsMl9ap9m4ftmpIGS5xbDP2tsyY0Uh1/XBr+wpRqyb0fRaXNvhSDzxhQ2r7ITDwlhPaWsKN49w2DwjYXmFdeuTiD0lMj7u/1ErcQ/YjHQ2JRRoaaUSIDTYkQddHso7AHbxLLu81jcXzRj/lW9WJfFZihyxVmpX454bkAhzTb0ysarkdp46ihqYJmhTdtXBzSoeGJuuTwsfOpuOgZH2RdqUKARyS7hwNFqS9N9sPXBz0tLNGGoZ0VRFF4p6hBa7iTbBNZkdc2/ve6hXst9kn6Gau7SYD/hyt57rkNAy/L7IPv2tNdYUaXszDBPJDTkzFPqb/vLz90o1GZ/mImxbWI2NHW6m2EwrPxUz7eQ0Hjyw06n4MEeK/OUIYbo3ecWQRB3qwsWRt2X3oVjl3jqzTUpPAckKcPOEN9EAm9zo+8nt9WbYz5W7hR9DYhHeTCIPpC3SCaLni/HttcfHNySp52jhySyBfD2yhheXslsDVHqsgJipCjaIOPwBkWFUNaCV4G8dyq1GUCMaSMDlxQl5wvZ8m+B7KhDP0gAY3j31DMl557njsndNykZY8Hz5QWsftSv/KpuiwuFwYIvJvO6yNe07WesE8E+SOBMGxO+6KQ5R4Lgcu2jf/EjCPVz1X/2IC63bbVZPDJkOSygdumL1mDDqw1hjGFmETFzFG5n0iG22Ix8yjnbxe58lf9ypuexbG5rIeMo30jspH3cqW7xPfZ7/Wf+UVomE86p5WvWOc77ChBUkbZ51OlbOHsschh0bBOxAo2Cj5VfH1HA0KblJy3/xLFe/nheP7BFtvDLZ8tJ+lSZR3vdv1nqGe0c3DhDp0NHNqlWDo/HmtbhYGD6EDqTZpGABGNsfC26nVfMzGfD73bHK3L69Lm9AeUQ/9xzSbtETpsIDVSiztVPeHwz5oFLiGfHffbxc+hQcTj0s8951jEZ7dYfBo5m+70NiX+uV0ucLlkvfGnTEV1FK+oS5wdYJ9umxXF85BNYmvJltTUJx31pSdutihKw6w2u30riYInn2Qg3CYVtaMstptwdB5DKotTjs95c3K/23uL1KjLXk+nsOV8jfmZUNi7VH6sMphuOwmYoCufMQ5shx5GNXmtL8wA8rHV9IZiQlv427TgKTgooLzyYxguFljhuDLyWmdrnJSo8TD/4QkKCy8Qs5GX3V6jndBgQEcjoGS0nEsK+5g6974JiNlrpJx5mvwMU/L0P1OFoKtFEQ1Boml+j/JpWmXn/Zedf0Hq+s2UeptPDAOJfrH/jYoxxia7P727crkT69VxYm8ShjLA8HzRlrMZlC2GA8HKr/Aaqnq3dP6cSv7i+GTgZgTnsYWIPDD2X37jY8OhPFYxJTOZ/xnNhsBypcElnDYO++GTsKUyxRWFOU54Ju1qq33sx2RcdNTXMVKftpL9/vDlGKNV8116UrXuycg5xtmWypqaJ4HYaMiG6ULftXFwMOnI04PDySxuZwvx5UOhaB2wm6vl5/+ITEFri8d2hw7fEPxWbd5MG4kqzXw+WJKe5lEbdVNJkCD20laloER47TVJL5oBdusRB0F+G5tsza/8Z44947cHHeSg+SHROdIymrF0CnYpA96juFcgtUv2Z/ViFAxHrcRKpHKGB+lJpK8oQi/2gruJ2npWyT26aiYj4a6NURf5dvvRgBHalzxpzTYr7BhdF3juN98QuV35nJ2+hkSMUBgVrlBEw9JVVI337RgoOd9d+uo/hl71yGuX1fmecCH6avQesmaGnl9isvzO6q7aCqaAuDQITjvQVsX9LXLC84QVDCnBbWagYcsI+zbkd6+cuKk59cG7XxolWrOVpe3jJs+++uxiFo6gtMXHpmmgQqke0R+YUvUYdklllzgtCNrw0ZS4+etahZvQt8Vur0gOUmGCgcdjR5HG7W/YZj0xeFtGlA3R7l2VMCjGCIw20
IhkTWryzBrWtTMTjUZltANAt/Bv+dm7jioaTXCkMu/SkiYzNAEqgkYnhap9O0pIaoa8NTxahTW9WQhD75wtw2z3dRr7+8Qi3tfgYo6w/jakTsbVYcMZH6BN/wFEWOqjlaGhenf8HW3e2rkq46Y8z6XW5cuZDlxtE9bjGJhx5r1/I/lxOWjBfjQrcu6NsoX07a+Mi1GXg+lB4KG3LdKDUYLSMMGVL3o9zMNuvj6zOr4nd9HPWtCcAKh+duqj5TtrmBfYDRaaOwWCVzxE4XX7MeOYcaXzBWoLe4u44Y6qiTMCM7qNL8ftiOt//SP80UDQy8rbiSFLa6YMQKQt3Fp9tQZmiBkc79pNnquWZWFm3tu/unL8V5w90ltgWfOvICCjr4SB9w9AkcTbbCFRrnin7ZCaMFJGvnoJx9TSiNankl4fEFh0SzvbEhVt3T8vNuBga8iH2K2S4QY66eceXSUmPTPa666s3JbvBMlz1GXDoeGRNFww+jFdZq7rA14ocmHIWlNOrwahQ+qXqGseKLjSN80iauvb1KI9PTaq2VS2WBO1/I/tz+zcBSWQfVMcAn9lKY2biM1I+2pb8H9OgVv/GRHMqOr286Rzu0F7Qvkf5FoFY0XyrrZgq4FyUbfUSafyiI7Q58QzLGqGUtoe/cgPYf6//r8Y2NJ7b9ZBGLVL4gRPzVX4sTAIFP/HEn1nwLtn5MDAwdnhA/fL/PgFCgv6k2wFMRuTdettoBAbFQ4eGwwf06+Q4uAk58/zXvLpfEkwD/bPpYXTFEiGpXUdWQgU41G6tI4JlrxsAuwQ+hZHj+fbcw98T6KwLOvT+C2ktP82gku02S4YtRgK/QoZ3RqVMYgQRjZY08q81inUyByYAgg1APl7UDwwepv0EWywkK4JeHMYjLw+UetilCDLatrykx+92ncc5az0NUzqFkMjbjiSzk9ei7N2N+TrSz7TElMnp+5fnaSJIKSisXGz6Y10EBPexgkHfJyKNNwpe8iiRJUYulHKh1IldHpIKAHN4j5HycZBCy/btbZIor2/crlkxwKCdfl6JZc2VPVcfxiqW9Ghf8GrqjqGt4xRUWds1+0JsOvxYGmNLx7YbDFccIFy1UKG22xfM43ycjQh0YdKvo6mtr3fHmwUTKvAizbbDE+S+MYtTuZRosFLYJSExFY7Y1RjTkgKsK0e0/LvmYIAU2FuXiRXPD2ncN0XgZElkGlRcBVCsrkC862OqGyaAKsuq9yt/b/nEHbuKvUGL47bJYvw2s0h2G9wsJJmEl0IHXgh6zDX76YJXXsNCl1PHOGDX5GyLVCAqpiXDcuTKH/HtX9K2WQSu5BjOouuPLhGTSy2mHSo0B3xostqflMkTqdHiFU+9BAIq0URiFE3c8NzGD2IWaN3j3rIpFeTjKvFx8xwHUshJN8Zhb3jsGA7c9m+sOfmBF9G6MvXQUosoqpHFMceofqt041OIIN2iJr646jQlTBWK6r+2mveQ8ZH2tKIuVvMVZxPUO2NuSVYPcOJkAp6JqZO69vdnMxVJB3n1IwHLN0HxWPsdM/h9UYdCSqwVtJyUIGFsIq24vAjKw+0+tj+9M6y4xe0LVXN+2gXA0x3aUsrMygbIriYDpix1jp6x1rMmfmSsulJVLf2f51cUsdzgYM9fhKtH3DqCsDyrzlCiozmyiSYlFc6gejAbjuUaWo2fTu4O8NLTWdIbgLhKutfK0bCkTdCUGYxoyN+lqQmgrR/pfjiVnBND+9l/Fso1h9CDVEjuBlC1mytlLWn6zmrRKlyUK4sVmrFocjGLF8HPdF1QmlkSiTvFDwH6NBvLGCRyW8QcjYVRXfRoEgXKadn+kXhk88rD43kwHEGuLSBTp3bQ9QzzZqPOhecjwTR0SkRrA0zUIBRIq846mX9Y06WykeGVaySC42opRMAERf7zf3NA7r0v77RLlVYb9ft5nYdSYqE2tDmyLVONjH+XHAU89hDqJn/YJEZ/f+qvcb4+BvkVhAUvNgIi4MM63+mN4Sc/ke6PSDUoiAAAM22bdu2bdt62bZt18+2bZs327Y1i5iFHJIO7rrPPvTgKPksfHmpafsHJUUuZuVhoidbR2WC1GnUuMP9JgBHFR8ObdWz/wj66jmkaNYuJ2hllL3SfdenH2e9mNWqfqp7Jz/j+LufqYfHoB8wtS+sdI3d7q9xAMqGAevcP6VPLVIa7Pdv7yI0hLg9C4FRfWmHCoKYvOunAIvTCPDJNS6YanMMmvJh3XgMXihx+LfnyE4QX1+yv/I6TSdW6uS3s5g3Dmgai+zUn8ZbpYx6H+UjLkWfpiPyYG3rGCzZnWdReod+0UHbkFvMRu13/ONXAw4SW7VNfi2J+GFxYivGS3NKDUOBt9Y4Ut/adRGBz9n6upMImUQ107oZ2EL8bL3r/b2w5PcwmYERDt76oK0Uo9c7/1gW6+tGSjLlmYMGaZC16Lgkc47NLxl3aXwextyhcSMIHQbj95gprlf9iWyNBqCiAl9QlLP25Y61uc87LvyGEdgv+rGm2MEPYkteYJ1twWiEM5jDqN1LTv8fpu4mkdksoSpFystKxnm4k4uanNVh+ty8QYT08+usK+Yzb6kfrTFU/EfwKTpLljDVGzDLvplqcwF21smWhnfwmEyktk3BYY/RXyqaIevVssdqL+f3ktf7YQw7XKQ84hUfF7bOf2VmpOdnw1ynJjocDshJ6hH3VAB5um+dSJIBtB5QsnM+utnmrrC9vhoJ6aJM9+aQORVqCd0KTlmF/p+2oVt0zoZKFlSMu44DWLSNm8aKrFTwNDzq1XbtXJrcxOAH3eXcWt6SunMYFuez/EdTLR3HXGvs8VTXoCRfy2yTkC53bJKXUKDteF/ZjWHetuqR462XDNMyzna2Md2wapU2c33iVkYyQ4SKiCenLYhhrZ5NygMKBda+WvyGSSqbanyATqMuM/pLqzwEeLhUpiFWllt5FWenILH9ArvRGHlw6M/5ZnYBNTJ2gD7IYcUttctnhsaL85an1iRPN0oQr2s97FpuBYnj4IU1SvbfmZFkslyBQA+bC4QCwquZHvSaxtLAHjDj7ql3lt6qfeYHg5/wSG5qc9V7mR5bTtwPDWaKUXeUBXPBDtwe/+n0J0LtmUOSSFEqOWAMvIrOdYKDJmGohyIJDrlFSJAY2W5vfXWXVYGusWX3V+nn86bEg3jkTtHKl+ZpH13IoJyxZO7fPoPgbloixm8CCnKpuBK9c+p/bkRIt6KyN3REdb0pawiR3lHYSvro6Bben07z2Mu13qS1mYdts/+26zqpBTwxMDhL7pohcBkXmukVUjqK9t25+D+q6niaSiZSaBzK6sHsEQ+GhntPPTB1lSQ7cFQ3gUzo3kjGo/zpVhDeJ4U8nmXx0aVEDPj9UXRPJRx9aqmwGm7Kt4GVE3XdoGi2P5uDy3BiWAg+9CNzboXqbqHosnlHuYmcCcYn3kMcL4Vk6U/h7HLx3KIcUeGPjNL1cS95L+emlLTLBkNzoes7C6J/Z/jk3jyNfjxHqWHcwe8Y/Sg5ZkGG2zLQ2DkNaqw8Zveu/Nhymo6dDECZMwnG
+3jCHzWKEyPuQatnpDlrtJO1vrMHyG6HYitLqETVc6dMPnzvsRMnvTrXXds4yrq9NAWRjQGQiPevktZwKreGYhzw7AenqXl6EerO0wbKK1AxSNG2UIRGsuzKK7/kiD2k0jBOlGARC4Ivz21c0blK3hMgAVGUj1vzGBgitiOENbjZC+xygVWmSc62uKTxv794ztbZuAk2cTcHufyaUb3z54kjn/4UB9FKwF8o7T6lWaRNRjTX08cOgol3rpCMThlCca/rYUggnYN2+ie1vdqKxIjxh5ZJEhP0KN2NuLinATOOCbfUZodTGUGIJPwNXg8/M+b9nJdGdEI26mYvOIxcQKU5gvshFhENSkx0+70rWRXiCWMtAX9vKSCqcS93uPGSyejPIMhV1y1S5U6plrHq9+gHk5KtAFPJ+52LLK1i9iXQPBrlQuWmuypBDwT25EDWFm0ssyUQ5IhRPTjLmWqLIopn2GD+k/SL2jILTfMThUZGQvAH82fMPzN+PfyP56gd7K+VI8dqwH1gL3rwypZCbN87qi9o588HmkDua9dSztytToKfv0mUSySZafaou7YBTCJMI7qygAlGHYxZPZ/hdvwO7bAPQAnVA639223aIJMWDavzc49XzGggJbLIK00LOxVAh+UmuVXki5h7T62E9zx8UfLkGub1xuo+UgKsFYYYZYOTdw5PH53wa5hRJ7R+BDPsqfJ+kM0gjQPZNcvHHOeL+zxm4Yqo2eEleXcA2tVVt6n656k/uarG2XXf2MNsH1KRcJvb4MFppLLv1PW9y08VINvSfgjRmzY9ZlPW1QCte24xuQ9GAZb8cgYW2573HTL4ETuwKuRCzp8hJWWcqHGoroqUedzEwkEhsSaJ8uLXMI36awfkaPq0AUKa4Z4tM+PnmTD1S+fGtsagzbDNzRsx9xscJ6NNXX2i+gRoMnz57VnhsC2QsmJ89ciWuk/29p+H3Qe7yJl8FEKOE0cf7OoTSqf7feboE04St0S/81SB5r14loJsyGvWSstPNUyILgMajpkJRyOsM0wORLgnyPcFIpZheG5lrK4OlROXFlNad0QlxvjIBSlReR2pJNWcLxVAoUt95JnpgjiGzifZgBJApFY9c1OsLvHczTDY1Skxv6lc7H4lw88CzcxR+Us9otM/ZOe1+JOiVcOyl3ZhOybFLcz8NyXyz9b9gU8UueeVC8PEbYu1ob3Nc2DF6DgTsje0KmCCfKD91B8Z0plcLI19u7zA/fF9g3BQc81ie9khQd1RXX4rQ3JqjJtMMXyqliU676UJTCDl0A5H0ewrt5xfLHZk2fnyfQRN2wPPPNbjdxCQmGU3JsWCHyi6m3Hh9D3Y5X7sFN3cYgE2MXN26/fr6GicVpD7CqfwGb9kxlSqBBXzF4IvjPBUjammGu8izejMS+IkUyHkcInHPy1krTuqLGXc2XGj+EBkD7bZCxn/AaGamGmGW0y3Zw4XUnUtSQKuafKtYmqZEQDL1v9a9SZApzbNmpRghpClYvCxX+envkN6A6C39vEEsDR4pYFTHp0Wxau8iApH1WmYwKoEc/TeEoPiTTrXN6CV5LJyhmXhYoLKwKH8wcJFFM72nTRtj0ixxSuJba/iGqRaNkIuSujZeRQtpz4vlT7tch7lcLSbCYxZyI4SkH099w5Z+eSjHq20S60fnDZ9LPEKlxHK8cyMU1uB9i/eZZg7hBxjT8mqJjxtoKrKVDl+H53lkLxB4FNQvShZX+47/EvHUtdnDg2hlgg5uwZWZ8rVh6ksI++QxrSMKtZMLd54gktMHZQSt2ZIwVP9k1YjRljNpWhvpe7f/PN4LST8Zt8jO3vlpiVcm5CAzz2oHzE6nQWNFekKMnn7jJAHTP8QvVlNULnjb3bCsB56cUL0cQIFCYGv82Csm3a2GQg9471X9kYd2ctJOrHPlzjDScBCK+/aocIonEZw1XIMQHleIt/WFFI+yYbEj+pjFBLhbId2m8lXMmxd0/NW1tuAyMvY9Ux4f2LQmLl+o+3xYWQ4JQxa/uO9gV8G8r7B5rgjClJaQT8w9hyMykCQ7S3TYaNWKQmeUlscaKtdoGybdCrvRRNrDzfZ43tv2TrMrNbGfF2B1Eho/cmHlQ9ToeL8Vl+Rx9kR+443YGht8eP59NqKYiRGQLaC5fYXfljhLOVl/2wW1EuB+LPXgzBbNbJ71it+9eb9pOEU7dtQy/DTlhOZNULLQtsFXTqOy3f6/OqTNBf2QEX/ESs5gMeEcJvXYTF5XSXbVtbZCyKM3OJTy4bSGxRFdOJMguhhSPijWTsLLjlRj7owU3LbFLWvCc5EmSTSGI5uAj0WGfJCcfY8oQ1LcEDBrDhG5Qayixq+QzyaVRYueSn3ZpyLTKkj8zm2psqn0W7pm8cuVBjSpBRyZdEJWgMooj9I2nYY0pcuxHVhHLRZ+SaIeflPmXY8oNYxCH/5u279Y7YDIReC15dKWOcDY+u1N16XHoR+EXZmLYQzzM/wzgsid3hOA83nGIHUc2Cg0P9M9RJE2RHQ0IoeDsjXnoovlHghTuvbnCOedOtevFMpzzX997EVxveBgMRzjCp+klFamQPToPrbkH2hv7TVjCt0aVnaRHyf2uDuZmnzrOMBMzKLPoDz3ykQ5bsQKHj905vDFuCCDxAKCPRznjnmbVu8cKA6WV4HSLiNjKLKPcGwKBNkuToBV9mnUQjKI2VzKeVwGok5vQX02FQOjoeDBrtqsbbOK491DOJgBqlCaRD+giLwcb7/I0KSYNfajSgl3JngJc1l4v/bdn9GZQtSwT94/sooDoorkptCghIQBuF/wAQe73XwJfahPuoNhxJkYmnF9/7iRnr86aDfanpTuRxJUdcVvqCFZ8rb9qjZpYjIapLTBOulYQqyFUapYlpR91czWe+6sOGo5XtdBnGvfZBYCFSjK1SSVnw9XWkA197Jtds2P6YbnQnvwdd34rxeOVlK7lFaCdZfJuW15N8EG42iihbg3ei1uPiZpBllLSeBUHyKLKW0ieeAidcJOu3WSBIaGQ6r+YtCh+gzvG1fI91qOaV1O/ONK9oHUbCvGR1PxD4NsZHoFvrmtk6sVF/KPK6emletm5e+VNo9Tmt+aWWMI7INUXEjEyPQhvFa5a88VPpRx0RTampnThwWJGomPbRhvrhnbERP331QXydXBFGAP4WVZLA6P/ro+cLIA/BTH9JWCGdB1nuI1lFYYCeH/fQMPEDqHg0RU/kAixSwecIPCZOVxyThG+w1tAfXEiYNBjBa2IJLPVdWdF9IMwJaldX3qV7Ui4RNh1lphTPg0L+lWR5qJXC00B+HU3PH+WcKGqVFJmMaDe3W9NxcyFB3f0EFQZdkWKc4/NStf6X2Z9w3FhDnHVwDsoijtW9EvCvuV19QmjOSfLVuXu0ls9mDPL6uvJbhOPIvQyDC4wrC7MzinKrmw6GJtLQYo80yujADShfgopG4xhw3i6SP3N/Sxj4S6O3emC0bSNu6BRPZPkzQd8Tb2LsUad308hd4GmIOv3pA1Eo5itGclm8X7GMq9ppnne88PIww8AcL1LTYxj0
PrCyRd1eUJyaMrKOdh3i1i4PBqTskZ2H4XWCzV3atfKHHHQefasbnoPtFj3NtxASSzLO7kcWRyk4COrYpEg+ZkDcCXZNlmQ1BPlySmXwQhhGRpxcAf5w0u0+me2M1tEmFLQyj5OGi/Ff4B8AquUure0UIlcp6YMBcsXmBIO6g0A1ykvdMzR0H9gnsE6y+5ibM0NF8I91pQ4e09sZ2g+BUCaoEADR2uRx94HJX2yl5YaSpLBmJ9QBawAOJb/vDhKUoB7Hat1wF5QmCN3x53RM5ktVcpUB8b8StnHtPlz4QUB34IgE0Jqv13OuqUq+8l8y500+dL5we4AyDvGLh+vQYAi8uRNGu3+Rs/x/bcDsZSqa00+OdjvrCPMTJdLMIYYfdt81GImhLe1dKsjYC+JVeoCWqLI/D7FKWwauVAt9s4OTJQdMaiLKnYDMBsqmvJ+0od9KDbHxoYxzebl19xNJ8+7cy1vppjHyqUx/hgpfPYLmBXwgfBdmSgG2vyp5slInNSawx1az2f1Nb5lPKfwrss0QTn9UxgYCQhQIQ7Oe0o9TS0/pGk/xyh1g3itCZWHdOjSkoq6nAhvjhMKRP9qJzC3zu2y4zvz9JK1TLk2KsPK9kqBpGV92mm+op/x5MJCkJPE42F0q2qISdkDcFraeJ7CVS8rDlkCQtQ10pZS80RQf2E+TxJO3zA9TICxX34gK2+5mXX2Vj3AAi3bz78B3mZ7SeUW0uFAB/FbCh8Ade8gbWKwgr/GsIVTWa1CZZduHcCTI0Iyqu6Chyyzo9eP6zUSQcaPRhStKCzXlWVknuUcXRRpMjr2yunfFwXXLasW3wUqBO1ATVVI54UT/TIF4bip/wSKvfjyV8oLrnqgDsR/gD2AolGJjV10dqBc62R+4f1XXg0fQJlAeAP1I+MTimzXGmZfTmgrz3rQ/2pg0zhb0wFijGMzXosNJkORI2Sm1zuhDso6s3AZiWUON7R6gKNnuhAUEF5sREML/nrVSpxzWDOxzN6tQL6IQMiqG87SXjXrqzPtryYhAW1sXE4nrcZL/rfkLpK7yaRxosLP7ks9WweMXPYRlE/QNaDtP2PNn0hlOdLxedvrU6rq5HmMDC1K+L+DevqmRUj8qe1PG2Lb5S/voTisKWKjIH6Q75aN47WF0wliRw56TqKWnQaBYmOxKTBewkFyGJSFrC/3DUzyOdKxr4SNXaulsczb+edhP+Or2T7xZW6QgzJTrIdQvSvhSJ83ndTflBo9wFjKi117L89wPuXiqYf7HWlU5VEOPPHzL4ELFytnuL392x/64oo2BuklKfsLtjkmA2Dmf4MoFNECrvbEMYV4F71w/OdND9eMetjKIfnDmqE1u6L9kvDC8O4XXPbsA24CyoiVJLdKMih19Zk3tdN0Co77ihkZH+YOMnV/fko5pU4kBHQ8LavhB6Vob+E2GvEGei3uq1vumSwAsE+SNSSnVTLeUNI+/lDISE3ISA/SCf10Wx4zLNUeKFbHBzh8FfSKUgCzq5/6e1SfzHGqVZQ9EceE3WDJ4+2cEwxVUxbyv1gfGdG7m+DGwbZAp1MQrm7gG9NrSic/mZmclch52/77lYVC0jcXaq02vv0m2rWEmxTbFlxEsFaiTAFZL6Ivit/j0HObOrjGIkNhKv4KHzgLvD2RW8rbcvGD8gSzOSltZxnlwWJAJvah6MCW35ytOU07r4wuaz/CdLeO5gyMqMLMGRnZ0yYrG68kLg5WaZdSE2qkO2czvBW/jarVfo9Nnuwh/cM1L71NO56ef2NyXOVaBO9SQjcNUJI/l0zlIks3MZskNkK+CH6q+Jk5jpUJ3fKFCB3hkEV5kOyOT1YnhIFQO2LabT5Tb+9KPC4K7wIksq5PzDIwoI3yuRlO3zLqBzBqZohm85zBC6LmtP3bMm8tmZj/4yz1LLRMqGtezeYWD8YnSJq9bk/QGxJlfyKfJGlPuLYFQbAgt4VHShUXPRtKt0OsR6xY8jpad2kSNBZI0h6bNSd65dU5d6yGq6DGzKRVsVDMux9pE2ZM5Df9yltBJZ3imxJFHyUZBEWliNKHYUm6b8A6hyAe+GPcnVGfCOl/qxzFqMh1vXrLO7Nj47C8fkZvKe/B8xsbRHnadmKozM6EZ2+6dPYMR7tAhx//5Q3Z6ClwkG2hkLTbO05hD/OguE1uGiDHpB5z8qxyID/tvzfpQa9WvJb7SJX4/JBLG9oemzAnf98bVXxA9EBHJ9nP88ptIkidDfdY8ybg2BZtwiocVT6z+77qqRWdGvwjNBvCQtrqnlt1i12V7PlOuNyrrc/2wRDqB7xFjASl9disl8fR0G8e/Bo4if5g2SofgtwyxlhPpUcTT9N60VYXENSf9VQXVn8a5Ncq99H44PTLbSbQYAbWbNeolOzFMCBTXvqOKE3taYfuWdqzlmhY0uCa7YxXGvHkJ09UN05CdLMmozz58sOrh+BTEhR/JRwvWyvtTyUn5B1e3GlJ5CnIz9WfV94afE1ropqHyVch4Q7E80QIhgqDWRmFooMcOfR6eHPeAvxJBiiJukpJBmm1nv/Gxaf77/soe+nKpH5VeJNQYmUSUzSkKY3l4beCKZurqv2l3QHTXzcnfzb073q4t68M2P5+6ecvMx+LmP9LKikm3/uBNZp1AihWavXQF3oQtQiRkMaYbXF85moW2S7GanTzfPHULilVehpfqszr/nOBKDW+ZuQ/AwbP83wT0PdsbsHBJ8aLJNGpjPWBz1HLpFbvK27qZXq1Zmh8eyzfTCuPqWi1e3R+aQv6iMJvik3ZbN7thLL/alL+AfwCASXgoN5r6cWdvwbRq+V2uOXQDZP03YbdIcmRbRvE/BZGzvmUkzy9Sfsj40D0iUozQMFdEZE3FH1iakKYraJaZ60zpfkbkzUTtiOUeuXOnsbvpS9eNwFR3Mv1HQmL96thumcMcdAp1m4MfTfHhZWPutaEqrqxuWD8hH5Tcm4kLnJ+ddmKACtvyK4U85XE/F6HDRdxV8jgAitEv7b665XWAhbVzYnXNLCcJrxGJeysODEFd3R8d6s14PJqo1dIB2KASOiqEUBiPeNslA7iD3tayNhyj+BLWdg0XIhceqNvI/zrJqyi8pJ7I+rPp/7CX8bxMVQPSoMXi1nGAvRlgO2Ktu9m63WCZTVw7XQc8kZZ5q2sRsEneTdCphWipiVoXmp+y+C3yGehww62zMhGrGH3c5QS7YvdzpIA6hpmOmOPn0/7LDsl7bHbbSXewx6wmjPZ1Coc+pKHQq1Y0J1yqWdeYLBbHO92OkK/u/eFUyYNmPhBFWB9rHIpG5b3h5yqbfiFjwFepTWATZQgdVx5d3tUvkfNRzWKRwPBeEUv8F4LH9jfNkW9X/R3yhPPCC4GW0PIQ041E4uA+IiQ0MkA+lRUTHKMpr1LGKdrE6Dqox1Hw885pBFp236nMKtpL3vbl0XTE96NpOmNhhewjgXgCC7uKoJ9dMeVBykWI4iqGankjrUNDXlEWKuHv87XWUn3hX5/SrVlG40xlNyDolrIZSbIyIxyc+YIVPtO26g0Dqd9VH2XSsTQfl0l
tRQwD//X2/em4y6P/ZwE9HUBoWm3mDUHXbPRQM8ayu47C/9khB9P0PWLunzjvnj3V7lB7ZW+RVvkQ+Qd4TrPBvKlaWo2aGCn+e+6E40B8yQDLwS+tY8W8f7MdaN+VJNfLoKvuL5eyR0NTAGmoh4nOLRASf72/+47gkNl75po0FDclNsBCvcj1JaS1EDuXel/o2s+DogOedu5eYT7mTT9Ty2Q7FWtmB1DwlfhOz/+PlE761t4zRexlCWA2WD/a6oY+phz9qWNC8vKD56rc3VcYif3vIOvuUBr5j9Va9h3By2pkxIzAgtSXoCGHQENNNeJni5RA24l/l95sPopSdHlVj9N2MRLS1XMgr8V121YtYysRONGH+X8LwyspSIRyBuQ+pgZkhzX8+gbtinRoGiDLnSmeivpjzIhcicrHiT4unXyYnmhpSpXczY8LtdcYWCaTSsqcGT+oUH5WHPfqJec4Cqf4GqfYNIXZvXgavQy1QW2AVXTE9HygObD631fJROjt56kLGtWWnlv24RluNODXomYpQfjPaH3P33tmeWO546g3Ivi5gnscfDUnRMV2go49QxdAjUEP3TzZY41fuIHj1VmEywHN2An5BmjohJzDe9ObWa5O73qQrznkIsafcTeA2OrzE6rWOWph2JagIvGm/CuF+/Ze1qhEv8S7/NTXjqrTA7WFHv8pD7XeyswXQKitXrJNgXoNIX0fXRbSawRCR/MM+7eXoy/lip8okE7+Mjfehes66oZOL26Ct2qvm70Z3aICThcb44zK/rDyjqsHMuVJf1D55GmM/IV5UnAeJq257Kcd6ATPG5RrMdaIBe2I9C578gHGNiSzqYLurLe+h5DS8Kku+w6+1KQY1xpze7v2AYRTQmNhcknMrnMdO6ez5+ths0glC29oVIfidGTmE61b9ljtWE9P6xac/lx9cnJ/2w0I6KY/ud0t5zno4IZtOIbOO2JHRXHtJetMS+XQBmOIqIUX9Q+PCduZXfzrX01TEqSxQ6mL+k1SThPmGCi3rzL3HIW5TnkHzrS2c4Rf9s8c2wmsVd2lRYpHEP4RWPkZIWe2vJLo/TRtkv7cnox2Ip8d3QU1sBNvMTKhEO1L67ZqGEq6ejPjRZNNL1oCfV6RVKZArVhNWENyGazi20inF/mJveZpP4IXOv2aHFeq+++C1V3A0C22yc5aEPE7x6GhXXtpdcsGqbA+zjksluCIoJjjuK5jLHvNuvdaxMkO4cGZcka7fIPTn0/QhwZ8XtLYBRF1VIeaU8uhX97w62nzrhi2ppwV6c2BVoFERy41SftIA/xEmlarCBodagS8vep2wlY0QjgC2DhnHbQ91pm7z4+C45vZaS61XqLPZlVqbeQ11xsUIflR3/+Nf5eXCd0uK1QqDPYfQmreXXn19JA/L2KVY5Db4dhhH1nd7l4AWDorYNuNODx1kakSZVUDm7cvr87Jf8g31BODdzM6i/FZhL4ANPUWAQ5kj6YG/F6doRwM1+CDt/OeTNQ+XMpC09q6EYmzOh09b/IcstkVGViTUPJtu6LGiWjQZEbiZIDRzdHxM2xFywnaNDaqz0b+GTI7TSx58+VKzG0+cwAI7qLEDNAjJ7OZVeN2LXW0hEr40syHQM3/fGR6Zto+2s3aZhchGRcbV/yb79LZEyuv3cf6VcLX2qsfCnUbh66LW8JTx/N+NrKOEdVHiOJHWr2782a2FhR1ATZP4PWkemdp15Mav0khkEi24f7Hk+DIt72eT5BaCX6hSbxgIf7fJIS+ZKkCsjsHk/L8A30SV3ukYSeLT8gKjVWPZOq6XEE0xDmYg7cnWpo5N1l/Fq8fDCt/f7T9bv5MKd+cipeBUgRqywEyv+Yjp3MUswaCswNQrW8GRLW/TGwY0GqRIjADSKfO2FYrInKLXvNVF4/l/zThWgyHCWAhntYLWY6yp0XXt93Zig4yASSI62ypXYlnQfaRUw8XHDyeYu0ED0iHxdRDS6oKDsucfwvD1tRcvo+gc+PdyTXEsYY5y/q/a9zepZSCqcq2G4OXL9S5DkulstVyeEcoeHPtmkGB+ZNuqc15zUnRjkTJxehgHaq4Ra/HPe7UIsOK2lGBYjVIHZqpGF0QU2RfH/zpWIcBnNSaP7w7lwUd46bIsztKD+K9MniqegUCU1ULARtPrcVbp9+XCWs6uJFZYDa14Jk0JBk8C2mQD1v5byKQyQbUd2x3kXjNRRaA28bEZABy2q5PD4YAkCcNZnaVDRCJPDFcoN0QUfVMNhQ+7k8DjIqJDev4FVJTwoRstYHVakZ6Xpckhs9Yqx4hXXd9lNKbGFSy5scxIpQjSeKLIutmzMlDSxIiHUV6HmPXK7M/4ivAvGMySh+/c0Yv78uMcmonFmGpFD8oNa7bSI0K/3HJNDpUDLxAmLUl3DhD6xsinnhBqCsKoOkJ9eqwUBwBHERyTJeBLTXZqVPl4zmFcmEcsjyqdJpXtesph/+kSCWOEKczTIrpFsB1i1v5eK8gO2DyBZ90Y512ZngVlLXULGPd1YyEI7l6jfhwTekP1IuPDlwURg+LgaejSW+87yX1M6jOjFaOJOYM7j+x7wQIhLoozeB3uhm3esaVKOog6w4JecXHk4MCSTvXSNCcXs2lyR2Gx//Ss2KSl5GYgYRzJX/POtlpPrynzuFSScjh912o+GqcmnUmtekXejvkOqqfz1OCs6nHTWz2F/hoZIb4YG8jSef1tquAHN0xWP1yU2Non5xdTLokLVbsBYHdAZeHedtGXLSdPygcVXBhXz7qRGgp83nKMZmnN8oRg7nJ1sCLg5Tw/xo8aMkDA088D3fdetaFsS6XUbi4pb4Klp8ki5f9Kj+fUe2BrEjvMPPqjiOFvzknzsuuFqaq/MPnMPhAqEafa+NCba5eGl7qTdza/taRst1EiQFvxUrlEUdGdwjPYdvigFyjM5MfqUePgqilvHVy18aMTLmAFN3QUTr+m0MFxm7Qlbku0aMk3lJx7BStoQtYgIC4g0wyNIE+5uPGnNcfILd5h0jNQqfP8aEf/LDf8ZY0ebmjV3v7zPdvZMkJI+wL71RvUpi/wh6QHW1QDMn6Oph5lf46bmFzudKuW3G3ECKgKE2Su/9fT/c8o1t88G0wnr4mMLTBf7vcw4duKPxyBiwVu0309aYoJXQtLgh5PdpiyndAwC0mgdZawD1b/1MQ1AZFL+HmGt4wN4WHP6Y/LTvVUIor+psDziQ4c8FP6ht48VO1jNzw4WUvO9B7ORJDYeySsuXvwLoIRBhNUzVREzzl47od2eN2anV3OQQhyVO2Zq4HuTU60l9klCGBLRFo4GH2qn4orNp6pxH/Cv0+eXoUQlm25LpikOjOVwPv6T2MNZrU4Qn9zbafEBU/8wxtmAjsVmp0DHLJ39dMgyrebEnrcMvoMUaJGGizM9gNiSTUHxsfunDroGVXEsz8JopwfLODh7fCLTGjbrHMW6PzLk5Tjc1GtsSaHtTd2PzQgwER4PnDxZ7pjaZ5jxMokDm4hgi3XUiN9vzir7HHsW5M1Ievs0B8164L4AJZ6Gq2OCGUBZlhRZ
WcMoVHgUK9vu4RpLI47Ba9HRePhAlI5qt1Soe0BxZEeksHZPyz8TeKgYql2BnSyoiEmXFqixq4Q+Crs1mdPnYyKrrAyzV7mtrH+IqKEfzsA6DdmotgG+Ak96W8PIX67mLxFnBbJfTgCCC94r6iswx76QLy49npKyMbKA6I2feqJVrJExCdv0aSQLJ8r9L2g/mgaVR/mcWIj7xJ2akHij8QLLSh0S9T7DzJn4j+JNiEgdkJNLmyhxpZscvZuivkSSk1Afp3C5Z4bbHsN7zVeTfxOtgk+lQQJLBZqLWiuewNsy0myDNyvAWCAR31HhzROOFzLfyUqJiDFfbOQHZwRXNuARObQYuEumUl3aIoKkd+WxjNz/L8gEFKjaQjZLTJb3CROkbfTiyDTHd0oOa0FQdJBP3YI9YSNact6AIMHmZ+uZENUaFTbBdU4AK6Abz78fmC8jDUQWbbfjH/SHzsYlCBbd66QhGI7AfbaSJqIrsmJbSDdrZDs3uH3ndlqSUAB2pkpR2PoamXkUN3nHw66uuODOj7KEI7aDfuq2gUOQsyp0VXinyXHtW8zyE/MZuxWWZU0TfkjHPGNPnrYJYoze0GpJhpz6zPFzGOyWyYyJ3iaZWA0bmgBbACtGuCij7DuC12L5NETaYc3KUDeibLtqKB7JUKwwlbz/FKGNMywNh+eGenlYCyY0zuNvlswYB8iG5djULdudjmI7u/A/Kx7ij6x3vAxU7E/iHwWP82y7pu0UB3jg60sNDFa/XHl3bnBcA2x60lKg7u6Mdlfj6IM65F3LAyvj8t9auhb0WMjBuG0kQ1StWkwa7DTqlvc9UeuW4PGLRdLwf65HX1v74EZnpCfuqLGuVrnGAMgIlHTfBOYsHBMseBOYm5ACcbTy6jf/2p33BYJAWzN2SXtdD9HHD5ehs/JgZhA6kpaPXLYVKA/hMMXLxM32xKlTX5oUfze+p9yZWMq1VkePxG9DGb124kq8tbioV11wK7iTsEvnhOvIQZ/cfWIsAAspM6Y/Hkwrr9/zwF3QqvfCcR6wTsWRYbc+qR6xbSWE6lckwDGHiMqu198d4sVBVFphoyJkxjHXXZ1BZIe4XBig+M1v3nHvDNRUMvDbqfcRh8gOY1QBJDpViTMqsdL86SHEgnUVn9EVjA2sxeyPm4nU1Fcv5z68LUc/AVQke1jOaX0fdHJ1rvVhjRAV8KoJVwsHX/d9UZuiz6JMpFXayuLM9il3iEeQYrh7MXsZ1ruMBvdSjiHU3eUgi//3VTaII5HVFjw1QcIrdF1BTCRwFD7a75/8YJCtJRG782M3EXj9Ds+jiYNTToZr7APA66nEMQJr5xNhDX67j/VbdAj5Lx9j72KeobSMlJXQLpe/ZlQzWgRp/UmnAmZeFCbqIRt+WYJrHVRDSwuV5qAUsNdudqNYgUP51TmDiQZwOw9g8pC1e2/e7jkfvlg2fCXTvjg6yQlaQ4dbdqCZ5OrJEcTxkJekzJiw4nfq7Ty1MX5ltw4zm0w8rR2wWXVNQCyOhikehsa2wkNgG4AqE1ZGeGw72+bvX2VUrXwVJui2fHJ17RQMblz9dVRXv1Du9mp+jxx5mR8rM2Icnlhj26Xaer+MhVRvm1ZIwrmFSr1XZMxu2+XF+4IW5/3u9P1Ic1NtiMxEEJ/P3p2ELtkYMDExvA+0KgexatfYZJth4jRWYadO1+nsJaVHyclQXmnTZx02V6AfAEGgOKc3WgnAcEDnNzX+yI36IOkPweuu09PXnzsa1qx+bO6ouXVeNWVtrjsxP/vZd7Je7VIUsPuNMdCUj09qIRtA8F38FvobzrWex0qCf822DYTBejqP/sBM8833qbrejzlSaU/1wp7kwvodDvtOWyqq4/JW2JgvduJrj7uf3Qmcx0xeTySVla40gA5bTCCt70f8QSRUGTGefaabl5w4QUP7zIk4+Ry8gvhwvi/bLS0ZBIUY5TYbfHNUrNGhXV37DNcHveoVe4pXZMUC2xB+u7+10UNYuE7Dc1Uf0W7k1eWQXgZgvYLAuCytRnzgk3vPJjGGHxedtzT9JJ3ckscKOUW6efaDm2fcOiIwmO2CT+QNXa2NE1eIGToKWu4QhYdkuZD3/X09LdQns/J8rIl4mQyolJyk1xV6H+rtf4XWkE8tQp+XiO8qnxDCBmP8FaxWCCywLzavw6FhPYZUjH5gOxmxfmw9bnsH0ZLGszmgkghKg105KmfIWem62HlnfOnM5FfM0XMSw3zLv9kF8Qv1XzliX0nn/h/oqIhSne6gS1Ud1Rj8C+UqjyxoFAowokBtE9YPiTz3s9FHSAKefLrJq8GEMLh/MJLZ2y53O5SDFTib4uDgaI7ikdnVBcq0ykP4yNA2WIFTiQdyo77cfdcNHycoZV92ThtMgpcgaXCQ5ixYrHgxPZ5/TnudUusDbxlaHGMd3ChQ2tO6b3IN94btaqDO0gjatn7s1pMId29NZoKkkU2B99NgmuCohgogDQi3TkX4tZ/c20U6I5YKeMDc4gcTJDntt9kQhuLoqEHXYhdc7/JGlBuVcp5U1AHwWCFn8DKaZiQfbXdldbQAM/eUs31kAQOBnY2fb07Y15+QWZ9KL0G32N9BBsc2Hz0qC1CxqoiE4nU92t8z4mXYcTtqNXELRQXUbxWXmBiVSy/OWG5S6iuN554ypaLjObKKLcXI/W/m1G429XzZVlCku4bJ2CPg51e9Zl5zK9mYm8k0wAz8zMFq0pOAgvLiM8FmTadQ200CAsolpTt8c+A5MOwqPuRiUfUuNJUGjfZ/1hefSlQRB5yDCPxNEfv0xQbO8oE7F09cRiYWb7oZ5sC/Z0e3o8uROl40znEV1WWkYgb2r6wiqfFQIzWTouZlGiBJPsUtt4jFIFiDnf+CY0jMCBkbWWPH5ny7N9Exb0RnucB7Gw9PrlF88JAzNroo+iXtsGqcv7TCcLEjkD+T11Q7C0AVVg2BteTrktbW2ctxh4rWyKWIfZA0chpH3igrC1suwnnaBBqMO9EcAsyGdw8dfCKjnfZpiefhVXi3eDhKcy/fHR848jL2YYarJ3hlhuml4ykzK31wMVbmE9lWWbtcElEsG5qx4fwBs8Wvi5OnmkWtZUj0EFzACzHV77yxo7u6bY75FdSlCyeYpr7/Nj2OMCkIDec9EDZKZpfBOJnTBNrKnPmC9S1h5D89x3E47ofW5JD8blVl7xco/84Q+GHb3n0mEIkX9m7tGibGXOPLyJiluwhxSlo2ql19P6q5/Qny4Ht2xeraq6UUf78aVcf1a9W+1giFw9hFa/xcDRBB+WrKbSJtPe6cAkFNOf5XoCC3j6rxGA7OWYg1jsG5lIh0HifAxfyj5TG86cWK68Blz38vxpN4ObUHI5ZA9RAKgqrHSvCfrazS3HwGrselyW7Om8VIg790GPDYKvtz15koJpLGhw4Sw6oNzGTj/KgfHedulcH0FI4rmzjTIAEgXB7ibfWDr8Oy2pVX42gZYzpYqbi1kwk68G5pRHJsBYRS25xlBHCZzIeF3UIWQss4gyiNMkyQ2a1YwiIoOZ1TIMo7SYzGFsLaKgf
OzbDN16GUOFCldX40i59WcPo9OP38wQdKUQleWIs2S40czZRL3z4WdWqgb3dADDQOddqXoCoQLiUxn5vs7UuCVs9bNI20R5hvRgW8BbqSDWwNesruYl4sHKfW/Jof9x6kI8E/c7t2Tu8Z5d7l6l9W/c94cN/9064jTDf95w0/mya42ykPCyaVP8RR6SLO/TnDNYhvkwto0la/sfHI0Dt+jpZ9aFQT+xDupn0HXVcow4FtoP99RxFgczidMvkSUG3blUzIAnHz60KGF4asdsOo0baAWH9zjRK6BodH+xymtk1SoMHfMK9oEFg7zO/THvLRgQZ+VXLMwHgV+f0OOXFjsCGfWcN57u+Xx+oPL6atHkbhV6c0KvS71XJc+a99b64hZ+R//ihJwbvZOarqT5VrBfpiVhKb9BkChR8vIPd43akOHm+G6DfM9oJPPJ+pqk2kwGlbsouaDXn/kPlRzUgbhy46+c0SGCvPellxhGK7wtzHV8z9kV1+DTeWIv4xJee9KEHAre4RM5qUNfl14rRJXv8JHXyDsVpuhcMiTcML5ZuCTTlu7drKiFPQvzRHsCL/6KhOLXEChxqZiCNONQbxJPSdFtn8OqTJ8De6152Y+SNCKQ6r0ZIrdwQC5WIrz5PRCArFNt6yDp0Jt1PeKRwzN7/kDBIrvEGrW3oTt0xGoMSKdGSoEMTbHjTd1bmqX9B0r4FH0i0TYAWPDHcS8KZ/5UvXX23TmoD+YrZZLUqvCLPz9KpSkblh669qBULt1xUOQ0NHcZHsbwjLS1Oyns3BnY993HwTBrXtrdDdEWyU2Ge2aGBT4oOICrKiXGWbPsnBXDl45hFKQPrcNEtuGdqy2xF9R/BXeA0Zkfz25d8d08zRxuCS710iwzYnaodEit+ULhAXbADj30ApNi/3QSFr0WLq/HHjnn9fVXy/ekeca12X1BwYmXOXaziSrFx6zmyz6WmpHztuBADqeKlGO7XDokDMwr/qzbnkbNi+GAIZXemByW4TSRcu6f3BHqB/QZstB3K1OtXk1SG1HuJCF+p69MBF86GrCSp4mRcoRRu5OFAvFVzHV0GHs0IPsU9oSu5JIRhBXMMgISE1/0SmoHKrxLQXkberKhY7QjaiE/XvfvvWJe+MMdOuSOLgiJJM42xPclMWM3KTc8hJpWmJKiM3HLlgxZUHS5CGkg6GRaaTFxdHuXlrWXmbcAJxfKIQ3JCBVZlbj/MzGhw7nPR9tZW6RRnEsqWcATiKixu0CizGc4eklB+btBocp0ROZD1zXv45HLT5vY0ypPnvaBhcv13X9gS3yTZH6jcGsSEIaWV9pUgur0L53tc6Aut/mrzCCGaAK8KDo+e52UaUNVu7J6N90oLYEKwQs0TfkQQWaSl1k3eTc/tg6d43RuhXpLddWn0SZ1/nGcatRFRfGVNS8Wcdh3ALnWFZzUcG4TuJHrTs1J20CX4JM5xPy/txWUtAfrXGD3WcAz4z1kV8xyYCZmbupyVNX6wrA2NVNHp4BGP17f5BUrkDRI8BQhnolw7g32w0cNMSzv6HQZbaax5rpp7K40to9NXUIPQX6GsXx0B7QX+DLT7m51DeUpasxsZ2FTTF/fWti26OI84zlW5M+6ZsqCtDxi77hqTHtiXEwZfMjO0Ll56knbc4vhqemVqj0svdrOnA6qEVJd0GxGRTCJN6govd5bZEAoFdtOZC9+bkmP74PThaU6VcfTOzTVS+nakgcwKP8PU/cKfF8XYpqDITBKUm2UD/CG+u9jBmhR5wsabKH2M2hZB3K519hWUbz62svNeYnG93imHh4b/y52TMj7NF+HGlewy7qzXLPycpwinItgw5mX/TdulzeI2hoRAMvyuk5bw2gfNLApQ0ojKVfPGuu5lALb4qoM3nAAMnMUgwJlmPXVTW0TWBWLVguVaTn2IJsrtVEehMtPhxM9A4q2f098mm20XUk1G2E1UQc0Un4KZUbfHM6/eY5cxx3Z1JcH2i4SEsAwAcPEhl6tWMpNeTHAR/uNfn1JO9HYrg87Fv6lZvGoETvO3PIo6xfkDpJNK7vIkSdmRdq46KPtfMH2cbKNt2OtjMmHtKXoJ2xjHrhxa6dOxeV9OYyt6TY7LG1iGPQWtYwAXFvtFD+3F2BYnFpPWPfhbmfDdkYY1618tfNbhhHwiP2N+X5sZAmedY+Q38ZM8jcWunCbVf14jyOtTfj5FoSsKFhzTEPot0Fuun/d5dvhNvd1dMbsvglqddlj1ZJhdHSEpV7pIZnBxq8Jxt0WCXNf5gWSKyoVLFZWLkdpCpfV8UDYXLXiesnRgrEj6J6qujTySB5Eob8dPG8RX7zpulrS1983oY1VI6vLWtJ9/qZXzhaf6laejQdfYlLX/yUPxFa/4zwyehptrlE0uhdLeEgs0PDF0V+uw8d+56GVJMUyRvvxiMPip7bJXftVuDEL0Z2OnBGFEVkqB5vqchoNvcz3AzC0pItcLoN8OBKremL1+VqUSHdhUpvGcXptOe0ffeas2RRlTXcp++qTcnhjI9OPLAsGpk3gwlTdeUMI/fdD6sqo3OYIVPFNDEhFRoIKGc8y3Ezy9vI6YiIv8E7sm7JwJaSK7nf79VM2wYyOtCcfJRrjteGYyYL3PwjqfYRR1pmdNdkoVyG2Lud54c5Nt0rwWvdgKK3IgP0x+hWXTA5qgZzP9i6PJt3HLWVlfCZ9eDFKY3Tst6odYZCnHOQXf9V0UdwfjIzvv+wXJKwVDmbDXLzIlkQO/h/iFcQFE4vui2uwEjVsKAanPBXjElptE5kG83g2G98pcXbwkhWEQyt1UjWi597a0v5ttl6Qbwjg2b0X5C9wE3gIFe646xERvL8/lf7bVMDD+U7s9k737wrFiJ1//OGfRD2mZe4UPkYjhk104vq5PZHp5C7cx2TWE2a6TrAqmz3dS+g83s2kxyByF7rZ7Q4iuFzdOC9gl+5PkZKfele55/bBVlOG9yt4HG3NsZ9hFW6l6sxmVpd+ISD8Qr3tjDyH6WWavUJhSB406denG6RmqZm0FAyQB0KiJTCUGd7cRr004YlY3JqZIpV2d9o8ymgCnp2tv2qYj/ynlFpY+xL8KOVAbFTjRjU2RuhqjM76jzyXMyvg/X5OcME7SbgbOTocjb3tV8cLdt8v9+R0kXBWewP3XhW9ovsgjMbqgGexxhRn8riC9xVu78LLTBCqpGD0jMDnkAU9ljk3IBeZgxl1PLM64wGnyZOFwQPoNUKJ35xMQG0hrBwskQXOtrY9P70CBCmvhRPGjEcrk0g1GShlrG1tDfjykUOQpjgOHwIFlGiTAsVNcxal4UfS1ihYqJWafppzkbsPffoTZUffHuh7aHJ486u6ieS0kA8kfo8hod5W+3Wr+QCZNG/lApgQV1uUjsfyUP+hFSPmTOoBXhUbkjD6mfSzlbwnoWIRGn8ppXWczcg6PmvuotjSEn+NtfrTa+tt+Vcw3QILIkcRR6FxYG3HqHkWbAHPwjSO8ct6SdkEfNxonla35ZVjbcmUuu2ERoRgA6uI/W0VhFfFOq7ta9j446jwTYZDLRobyQvD
nv+nDfuHvTQbm9HbKne5VfPAaHmVZRHbkOwM9UsLOjX9hgZiAwDM2kEuBbvr9i9DA9r6gHDHT4PQTSY0hEWHSRZY01XLj5ges68t7KzYphJYDusoyUm1MnVOISO8f0rDW73pK4sZTsGuZzL3JOFTuHs5vNgfUixMKf+8b14wi8ukMM43kWq+LElYjV1XEtT+wO7bM5i5IBy00D7tw7ejF8f4qpChvNCw4thfR1RyllB1VDNwKS4LxD463G9ibOLMSeW50D0EPpG1/ynT8AM6wnzSTIktTXvvVytgsUOUbMFJvu6TkaZ988xkVaU3Spr3mRcVoQz+aglUNts0It29Gh4Fs3gba4rlIQo6rGt6cyc1xrNbqpL+f1ul9mfOxxn8rqsJPrtP+amr3vV4vG+eqrCvzqfx7IczL7/aHiPUmD7B3L2zCD54kS+KThllRtR1Ztw0CF2nhV9fay1vt0Bhhb3vzsYa44x7jUXkEkwhY7j2z2Th1E5geGhZ7co4mb+UV0P+ByDEiMS/HE3r1+vxQOho696TMZtCxxwxksGi6IjB05UUSY8cdO78KjtBFgN+EgO9TTn5bsIkg2rcQA8rB17fxN2cukqByOkOuRz8ZD3bIblpIMlym/qsITm36ZB5TgHaJ2uTmL8TJOmvlXnvZOfudX3qjc8raRsyI0Iy9but2mVzw9AjWNTutJVfo7Mr6ICKF/vqDPKzODeqeptDVTlf9Fj5doBMDmYYLFfVXRKXl7QxmrNjOEslPXK9rs4J/1pFe00EkK3DOPPAM2MRzln0Z+LrZONyiAhVyLmTNHArCMWUNbN7aPtqHrKu6jzC0x/eJaYIXz3i4z15GC2NBA90wREzElzkyy7RJW2R34G6M9z+MedDS/7ejM7+OVdaXDpMJzjDjooeJ44HcbSnV912wOxqkwNwAfHyLqUiwSGxRtXYF6SPpox//yArpesLYBU7DxedcG+zMAf8CvNecavSnfpQbXW+4p2prNFsgxZYRukhqLiPRbsbO9BZtXEkrry+E3q3HTPdRGVYIwsQh+MxNx26ZxjZF9kUr+5mRzZ2FZoFqYz56LE5Ve2OQP1pSduKxtrzq8yT6XzHWM/4vS6YymkBjnddqlPE2ib3XQwVgqVzqiqq313687P/1/x2bqaA0aW5YV4hmrJoEEcZzf+McKecKO9LoMBqKlvagVGB7JVLghcdrk7mT7lRQDckzWvJpf7zKaJHHZwo/r2d2HIvgN7Gjc6qP09bRbUQfEtkCek26r6crqM9oo/XONsddemxHHhb5zutjtmmjQNTbo1kcLUjR7KOO0jioH99+EIlWf9DBi7Ancu6EaIsqAzp9PkhcOg+jl9iheWMHbP86TI/2E/t3KTbDtmgXwsVoSl6juFPHQOx9WqPV1khsUXnaeg/glmvgFqxCPi3uG46uUKEHVGWDSMIPi+FKhbayuqZMIHPOnda+mTErbVAvcrkWORuOrAdETyMEj3A3PYujYtlSpskzjFXCt/Uaz20PIGJ70dhi/3CYTpO+twtfC93etMOrNUrV8kLVyFRq7FOayucz/qNdyy2pHULEUDTzheCTIGO/y5Dl5gVv9sOLWoOFj9K6WhVe01Kg7XFmH087DMjpRAUNBRMltIBPwQtry5eZrvmUruwVZNmG1kUG0WpCPfxdSoq5x1HJWgOhoVQw0JjoxCwQpaqy3ufjJ+DOJPkvfgiRng9lGEDQH1LCO7zKhqF7tbvodk3XO8Zi2EHgZcF29FCrcf/TBZGXB9lz2MW48uSys9zl4dFP+R+S1KZ8/GHbfBNslMauq4uOF+WjKrn7+vyalMn5Rx9ZIcQ8sCwvzzrG+IeAPj5sWZKRPQRCyNEZ/B47JFAf4XWR7iGbwTUIb9hmT66n24XdT+yAnPDZT/I8MW4oBK8/aDJCiMlQ1liDva1nXRjtqt5usWHWktm0zjlHu181UfbkgArwgU45dzIHcqAuV6Qi2Y+H3Fqv6/ZLSA3NRe0yM4/SC27xfz6RbTsNiukgIMoNlFHmfdTuDfeH45aQnRZs5YdRLwYsH2+h42BIb33MEi3UCeOEAycc+1dIRY3Ns7zmvwSyuwvMdZlf7EZO2tFtg9GOpno4Ux1eoPwacj+c5Q+2eF3VwsrpLRdqWgrx9nIQtdPy83ZfMopbAMNtOW6wgDDkfxSr0aIX065SR2e2U3vLXy4lIykv0CdWnicYarmx+L/4tN+Y4f8KtELmkcqpUamoKP+pbXQR6/U69H8/eunGrkKTYJzrrNuksZtxd3pp+twsXNgz3Di7d1Fkmzj0aol5pQ+4QjXUtZBJC3wR++vtI6zqELeJ1OZq5KlJRfaYdaDjSxEvOXE7IYcgzdql5/JRXirciEFUZc0FGqAIK/eR5XoZpI5+tDS41VxiuuiGqdC2DAOcUmVk9tqNG4AZhrOI4ufOES/RxAibefyiNzsl9QOqibDvvpQ1ypMfCjE9jwb29G8XdwNM44E8CKLcY4unUiXp0Q0C61kYKr9bqHFtJq89CVDCnKiq+DiXxRdNzOva0L9naGaRx3ukzT4qCr1c2acYc0ct440OK7WQoTrz/0e6PSDUoiAAAM22bdu2bdu2bftl2zZvdj/btm3MImYhxyaB6jMoA0ojMlwL5+g/mHu5UVN4ydaHqJezm6rL5pOYxUGhMcp4U2Xfz6m4L7PqOcD4dV1jdqDJIOF/SgrYrSf37gQHXe2XEzt43ULk1vF/x5YxG/iAUmRdVgM/jFy/0yACAkP5sgj3gBErlzyX98sVOPX/xrg2xJcIZ1+L9oUKqR69Zsi+NnxwPI4qhUIKSArTG1Ekx2qT3XPplWde4LsUu5rcUM4aOOZz3kHqBBqC2Bs1QPZ96AmjNyjjfpqtW0fUYh90QvLsGkpRg+/tGryci9hUeQgclxsTYvBzVxkBokyN55wy91Op4YVGjg19m+4QSoYN/fmc8pVerCMOhE8oSoQt82nRgukqjaJzBlLxmeeOwSvOAjCutPKZ8+FV7lOTbYLlys4nc9LPVjJeGn5kv1UzHU9QhqJPDbrE/n92AJmttvsVTeZMhf22Gn+WJMN7fohIK7sQjrjxKPMPa5CIs+ddzi1hIHivj9b/YKyl1P820i3xND27TpiDEeHbinRZlZMiD0yweSNXxTGgaoAFqLiXB5DveCQj8AV/tOVxkhc+smw9odzlTn4IFzgwN/+mnJKaZTNDy9EtqDxFsIFoqSGaGSUbNTSuGnMDGaj9cyU9Jx3njvpWz+pUE9BXdL4N7Zy5uFpldFBpdvJ5bA/DOq9/b6BBxQl9UABoEtzNkFuNkwVYpea+iqRq6yTOYrkPhE3gz89CZ5cWR5T0VavNHQ7zCOllEB8qZGxT8A+qQqPhxk7oQVOixqiQKwRnotKk06rKFWH3Ewrt+343eI2g7pprb/zmL9Pt/wDZTIxQ4bmij8BdbcltKPV0qbiZe+TOjecrC2gmomGfn/irdqcakPm+ARn5YOgrcmMdEoUCE5b8FnwLJNbG85m85l040tG7E0WOT7TTF8j1kOos3IJsxsfA5o6NYUlQT/
xiGdlugO9DUXsHt2+SbNHlVRm7/H+0EOqWOBR3HE7Y+IVCy+pgCxlMUpsBO3zB93r1GYS+xPpTeVCKF6qbwEFDLfEekDNmcEFTw0WA6Pi0hpYEIHm0R3/crrD7Ulmxnm5fekx/f6MQ5sqKLi5KB3k9hXSwF8mSiBF/5dxZ8iSkcZB4NxeyAOk5bAYAqJoqqb57keNYNUJx7CgBA2/PioWCII+wiLlKZS+OleYQLk5rLWi3Gbp5+qYCfEwfOKcphiVzpgYmw6gL9BC37SiCLwz9su0V1Xky9k2xrp3YhZ69mz+iQG+khntXZPJWv4KbY6XpjrCSwvIsj5kFN5/WuWi+1OVioVzAo/Co5xqB2j2X2Y4eKG+FrlQXfJcbPJhcY4tF/It723wFaGTkPpf9tlPmhOwUDfqTpjA9fN3IuZLYMUaAejaqfJGBlJsQH7OLW0GNzw4fo8dw0QGGo1hzLUciwfccInkBy45I/jwyTbjh/JHnDUdGYHmxViij0pNCYexXuPgPYz7/q4w1EOQsNOMD3cRu3d+dtkRfqQC/SUjpfMIrSJji0gtg5brqh6RQGCv70ZuN354MlrLBVLRQFtQcalaPL8VojaJ4/VurvxLbXzxD48yT4IYqpvuBirrRU7DHiQSsjDC4afr2nX9o6Tqf7CmZMUJbKiKWuuIe4b3UPAsfmSDvTzskaEsi57PvWRHQ0d8lvL1VC3qxVsaa3nQ+T4j3R7MAzEAAAAGNu2beNj27Zt27Zt27Zt27atDtFBrr/8j4cXFDMwRwpy/chS54vtWtwuQiLf5Zmam4CYlRA2QRDlr5qenx1r4fuiyWPsp0CxOL/waefM6Gcz8jqAaxonElVUgIZMFuyPrZJrCzrosGTe+p+mQshcCfwpA02LGmuywZPLznBQqdUoassHQ1WPJOhYLYw3c1dANdvBe3wXbeaYMcJHlbFjmHnE33PRJON4PgNQoQGy5Hi/NflHHVUY42Q1DEeizvw6Oj1fG+MH0p/ByLo002+ecMRXQL3bD6SKHeQcw37ALKcU06vgS5AOvjf3/bZigEML8eAPYDbC4dvdbrkrMCbMR6GKv37uZHjpzqtJkBcNp44XDHmpRtRsjGhuXHMKgQDP2C6gAP883qyfOAzZZo7+8DTMSLuwnM6+J0mFPxHaF20wiSm36nUOlB8CqI5zwq8cDHKm9O97NbCawFoulyERoTO1JVWUw+zGxL7BNvWxDuh5tnpDIFIEdOvlB6sd8JHnT4pjgmQbVprnOysst1z+FxuELolEmj5+AkOI3wCrbrvjqy6ySWAsoXrVi4rYOfrJAoW86gnj3DXWP7SuU4/0LAFcOdakGMGgYgo7yCTytVsO2HJ7Ct73zL9StAJfLG90eadPlCjlWxLigGzp8GDyGWZKdImKHb6e9wz9/ITC0Li25kckJi0PFFNx5ZvfQW8cDnMX/DQzPWFsrt/lXZvg4ez5gVX6LThhrSityI3DARwXDMfYXOfLod33JWob2VHYH9VcIL3ZBTHTyG9bF7/ewZ3oQXrlHpHnPXWXjwb7dKYcrnFdEScI2ZehLF3GjvYdH/am2Ai65LGSc/Txv0ocnBcLjz8/0gAtlygbyCeStBeF0AI4sIYA9zpFu/CQSkROTvjQEgTLg0HlTqoKy6UHANOtuz6WfaJ+SSqpMbjhLVC75gCkRrxvA/Xp6PO2J2o8CsXDZkGvhfrOz23RAv8trRZmgL9sgetXjR2ipoh49Hqy1PA4skiyPFkOnewsxEdN7Luw+MNAPFtvW2wZoVqazZYmlvsuEY8gNtaoz+MT7P4PpkYkgMyrzXWUKmqKsnPyJjOUewIOX8hi7OdpG28/zP430Av2ZSZ7QT9Va5V84VYbCiHG4TLBck6mYC+YAXKQ1HeDK67dE/54o9rmYKShiO7wi+4GtXXWkDrYH3HzFKYjf+EORK/ImVkuw6lzVK0vAF7IlTEIRtOP3eT9PuEctA86CM03Ie1YPpr7K85KRlGP8uGQoVGxn8UU6S/Xf8ZXywuIZ34SglC126Qba4zd145+m3Wssg8Gt8w/aT37WQcILl4AZ09ITMso9Bl65kbiy+f0gQE3ZgBftTFAvKYzcMcTFXdL3m7zKsQS48AzgU5CsvDF5blGL7Tv/t1ENNXjzdMQ7WuVWKLeIiHMk+pBtgEDiaPtR6cjlyldkgzYHYEfltl96Wi2MuQ+OQVq7KJNJPBg2GLZhIUVj4bugkgfw3TaqO6HTVBKaH/1RCAXwSEP2vI21os53nyZGpdSn/Um7utWRo+6zkdwnpepy/L9sjzxy88n75uyhUeIKu/MyGgvh+Sw/n7kG+QMu72wB3DvJMW8uVbT4eZqlzOVXYaep5e2q5ZF+OY24Sz9sWJahCx2ej93HcFwpmTFWD1sNIkV0H+afdaRH+Y7IhZfJITurms64fRvodikRV2cirFA76GES2CjWsD1S0UrB2I5iQxf8LI6QOt54NnXPyJfuf26ySb/1uTX8m/3wuj34EeCbqF9+UZVeYXk2hBUOk6/pOBpc9OmGfk2zD/WKR08IrtnPYESxrsy+SfP76bV5/JZYTNfFVbd/xRKcDx5zYuz3fVNW43pEJEzi4D9gVHqLX9k0uCCUAp2c+yBtLy6njtGFpFwz4ocrepX4a/ukzNMKHFUXCRwbXTkK/fTCopM4PZ7EF9IoCGz9s90a7wQ3br+5ufbS9PbN9sr7P1J3djPXPvwx2N0zWLiOEbggDtJPVztAt0x49SkZjTBR4io0d7JRMkqs8Pd0B2mBpqmDBvBD2WvUBW1aqQM1nwDq7uGQm/OIqjyxrdfkj1SfV5UNOUdZc35pxbMkv6xuP8aWpYrg4i2aXglgmXSJRv1sBCuwaNYYVJJcQFIgIeog0Tan94iMlxEGZ016UfQkHbWGtvylNsI+S+I6sbGwT6E7tV1g8xt6EPYha70+KuaqaRlM60I5HQn2xgVgcBO29nYzIjDz4hLGnrSqwLx2Am+7HN8V0r2Nd3HURfR5aviTiNaoDkHUI9MMMuD7Sflqvc2p4CjyP3r8fPzWN+y+g6jalhL+COe5pvXOxxbp5zMu9moTnr+FRq2l8tMLf5EzvmmR0UHEpdlQJgQ0XY5lqG+qCUpIeYGR7+odIG5pQw2PiFs+kvFnM+vJD6CBb+m5qamhbuqsQfbTfw33jWNddwBZLHpGwk/YRNuOCGSiv4wF5voOwxiHJV5W+laOgUHTDbXbrusg3RL3CDbH0K/hct7zj7wblaZ0NcFsJc0jYvuPaAABE5ZZb1Ss5IyP2V8uTG06FI6y8GHmsT/+YIYO7iY1VFKW1XWcU28ztNKRbf7YHBzohWYF6941LGLlw4ZbEsO0360n6Di01YeDcesyaeYzQ8zuLsF4s8J4j+KMIlB4fXkbG0JB678HKXBO5fPVDncBhnoZzo9BTS3CVE/Nuw02HM9aT10JhO7cAyh8LL+jf+X4tP7Tu15Qg/y28TF+ll08eQQLZdB4Wlra+Xe9k2O9EtL1K9cU+Az6wP820N7ukbcinidFl94GsRX/E5iYe5CgSwMMbG
646Z1U9S0Mtj+yS7pI+d1Bgy6glBxw5uwbqF10yghjF6FwY01v9yninnZ7XtLuiKC3AEhs130CIeRzEEUj6i58rjtOGjWGazFbbnL+fFg5/nm/w9BWQNyqRfVHr1iJ8lh3pJ4Fmi5VZ0rRbP/tVBq/p9t6WQ38DfloRcaGqWkayS9a7NRHdIICLQ6mTHH6R1Sq5mSHOSwAKbcjSmxMcB9B8cuxzHEgGiA1/7rHzHTpSTfyZyyNwxyhuuiXHpnBw6IpAci2vW6mM/83+U9mlD6xgmVUTzqZl4HIUzOqwq/1WdkZALP5hvtJytAxAhI0xgeMqpj6wQh/+Ktwfm/8/5cxjVLsRmU878o9JLG1dnsqt5caEGPtEEZ7Z5cmbb49KiUzPWJLIY+Qa33Q5rVm550Kp3ieUocDtW42qsxm6fH+pMOnPml1sUnAjewIW2htoq8NKqr+RQ72dlPDK/DmTHyVOp75ujTgSE2lIhoveUDN/swwE2o9eVwqwE+zkwqdRsAmKjCK27D1lwGhLYttKIzpwSrEVcpTPYsqsKRbjj7PTO4tX8/XWPE0WGIhNzc23tdvUCqfZXLDlJ88rfQKhnV6rGxwt8GphDCzkDgawv24f74u1jjqxYC/psmFNR6puQeibnMPAor3T9nXh9KXddlyFAC0xotvGIwMLNuGJYc1hSGckBrQlzBhQ87zBywcP68cMvXkGGQfiSsvjactXtt7zg37kd7iqEPMFlAi4S2A/ljv/zDUE8Keq/69y1cca29PBtI741jxU6LvBKLgLL3f0+nB1bbeYDJxcvJwZ7RfDyIY41IlDjEXyphU0DPaEEo9uPOz887STo+OvgrUL8nsVBmIWc8u2hQdTXyYpF6Ml6qzSMBsz7DCUFI7rnVDGcGuiCwlQsmBFJUeg3t3mavwm+28XrL5c+OsUZEfnAUjC99CS1B5XVkrAW+eg707cyrcabCOiD3mrttLaFz7rpwKOFvSwbpq5FTuvYO+idgYiG2PNhuh8QIwpMeVxOpdMVwHQARo9k18pF4drLUk/V+g/2tbY+a9EpzMY/Huy0svPWeJmZFvxSzHXr3HVJ2lRPEDD6WKusqNKdP2Uq4unU7SsjvgCQ0hHvgTaWqoxjWZAJJpa+/qlfXzNtWLD0JmBqX1n/WtUeYAfsAaWIQVoDftdZLwJbiVkuVlrO9iuE9vbZnvUoo3FlXbm5hRHkqxTkEbjbUtAS3lUAxfg+rJN054NIFaXjpnj6lK/bKSC05QX1Bu4lig9aTpTKW0+WZ2BZbWK8ft+/wRz6FhGwZwo57gBqT1UpD8I+/0CxcAh3UWboji1RjwIZuLDV1PVShoIhtw8KhGiXfFAei2kHQXWQ0bMW0uwR3kjstyxKzBtqwFms6FjkGxRJ3yff67Ci7VMngRLPOxCzjxGyADaxYEETE4lK5B9RkVIQFMWSBVYCDCbc4v1xSuXRJX2aVEEo6TbRZZ/yPOUAyGepp+SHygBRClngCvZxCIU924SeMg10vvZ3+IrBvCeMaMij8XP54fIEIm1tiQ0t/gJGeQaY2DV+ZKuM708AnFkId6eh/2ofBPZm13Jvv0je/do3SG5CqJQoQo/jru0Hp0gz2H2dli6+Lw+81OW4GuPjKqwV/2gvQX33jIl6xBTXmq4LCMljm3QS8DpFeiSXUkzSVwOzJoAOiOwLKHFF3xLjH4KiDK3hmS9Vog7D0zjLgb+CqV/s7MKBf+ZW2dbcXse/grqoOILS8IOUgAvBMVLgHcTmJ4a8+xseiJv2c6GG/0ChoVQji//RJ6xrhojG5uoqFKYxHwpYedX0z38ElmgwqvLSoBs0oxXwuwxcR9EFLQo5gZegFMiXvlakx1fdOqQGd2wFSBvo/rbfFJKLCIO7f73ny0r+4TcLg7aYUVd7UxMxGMn5fnjqrUVhY0OVirCdXJO1QnpFguLzHCE8yuTp8KdtUUdkpJkgIN/b0piTkcGt6ZKfYkD41HSWEfD4FYFX331qPPfDNnZ+YLYUkl1gJ+c8gUnI0oTmuiZpLfDPFnOV2SUfjvDTDhp11Uz3tOHCxp7O1O/i2hM4dIXoEj7AS0/b16w8EMoBN8K8G7w5wslgWys2LJDD8gFdeXik6CKHDI4cfWuhd+FGV8fI40OhScywznt/XU5TkaRyfhXuTmZ4dNO3Iwie1YR0xVlTek5EeMrp46zZY+NwNaP0A6/UN9wbJ/bSfbVma4RMUu87IsVjpiRmAmlawM+IluYHolN6G1bwJ0CSUhT9//NMURlAwxcB9ulLQBkbI5lpn1e482mDNaFJkCVaL7Rmc0RU+rDw+/aXKOrZQ/bxZgFE4wcjIRbIjk3evXbQBeyYqpsW0jkpwtNYE9iUxjC1uArvSO8VeDu20oQTVoBiyNACPZvLe9qkyTAw0soCc8KV0XnYv2hsVSfxXo9l47Ev2+FVVmY8zMBj/FCQguWXIMAXdDbnpP6xj6RHhc/e4kIcYMkC91B5ymhVGC6sBKurEFogNEfW7kRJFhyF1cqYXhShNr19UGWrnhvZ9abo3gsdd2hBr8AzF0MalYMIFXiAPfAFlaJIhy/VHI5uWEyMoZQs/KIH1yjpowRay6nPJzj4dQ1zgepCaTOkPHDAZ9qWQ1sW5cURalQm5r+yuuzLA5MEbXgCYWq6I5Kopz96gNXS+rNS+Cpp8y3Pm1v7RrtEppzsV58BV7vU284BxMxJL2sTBsEx035yap9IcbntEF64zMtXp3k3Er+uLE9gXEfD1GGjqDVCbQmpq2rBkpkMBc/3aDD+X6uQcPdWmplmGtO4NtyLvHKkN4rBilLxoexUuUrpq0LoycWmGiFoQZ9w7wMhJKAqES6b+1EsSxU5m5AYB1z75XcMU99SkPGx0uzTlD99pxIOnsSGtxhcloRAcpp59JW/jDfq5bBLoJ6D4vEjmsAtcywISxCK/9v6le1/U+ucg6e+Fb+EGsmjTS9Y66mgfIat7SRqBAekdBu2R2WCJ0jKMijKrSrkAyfB/V+w+vbD2xYfasC3t7rfqQEaLuJp/ILsf03T1PI2cgKLVsu2spM+8WfHM6jKMdp0jQ6Ns8LWmJR5LfUDOB/WBCCngG1lGvD11OVRbNAKWacpTsm+IMTdk0IqOBDAKui2l7xNF7jBcHBBcNhiz9TIMKteaAJo6Rl0ipu/iN/hFmeyxK8Z339xLYIJy22mp7Zl/CUxXTszPkhh3wLgnTpO/OEMdz5EQdX7sJgZigEvpo2ilzkDHf1v9s+ZEsYAz7kibi8rxa/S0O+EnV4kMkJ1JYaVAw6MX15Mjb5oxXAJS60vUICnrZzCey77RRIhlsO5GPLol2yxQTGSuatP4bdHE/avvNYy+R0mUU2wAYAjDGSefRsk1YKfJs1ylwqRutzuIHR1T0d6wccvgCbFvL9rlZemJEdbfpILj6IAN8iDq5Ds1Y8xyef1zl+OPlUMZXT1jyNjn+jW3W5vJTqTLgYlLeCnxLIY1hC4GX7ctMYsyGZctfxQ4rNyxeRRUNNIMRZseDKbkPhXoh4aSgPVQQ1mn0hVczUoiCO866o
W4UOFzx2VuSrbnaOlXMmERvI4xamEyWiPCDU5zfDLNjlm18hgxMQzk2pF4LfnnOYQHkgm+WMsGmMaPVaYs14aXjJYOKqs03pwB+OcPB1VzJ4WPCbf9zG09tQH7S00GacBvQb+wY8srJyOQj4dhvQZZRphTUO4pv6n2naaUJMFwxhRKuo9+6umEvqyIlJ/TwdhPwzYD9aPJKJQpWMOIePnGdI45G7lRuNj9pTrYderSRIjyzZPprUH1eUBCJjWqnO5+TEd7wfevUid3W0mfcokps7/vt458eYf/Rf6E592jsGRgI2lTqocCFhd/4Mn/0u1CV75bktb9Hp8JelVE+BTN8uix5hTFREup/gTXOeIjSfisNZZfR7QPNRZab/3xBKzBLY9HCqgcCs4YN40D9ncQBE+WVuvK30zwEd/eUomAfyqaBk8faX6+BTPcXcIUhnRIcRfhHtTRtvNLxKGqnMBhNbC+YBhWANypAYBP3WJiG3Gsr98khLcZJYbByQMTvIJisn0cJQ58phU7sp8iyPhGjG5eH/inOMwm/PtnuUmWaQXbbhrxVa9gPP/mBd/YLPsh38K2gkE+wx2HDV0gh6xJMIhdRWdMDY98tUPF2j1L0n85bwldwK54X9K3kXurPTqb9J+w+MbBwuhMYJpmZ4qQYccfB0hiQq36DAckZ5PgQwVhI0Z2wvbdfufEnINPyEIRMlF80fqTmULH3BFPHG3bspJKjtaRqJcCLK5lf0XizVhSNlek2lbeIMnfjn/RturmdYGKLcPrP8Eb0w4f6wlPy/noaNwjpHfKRh7uAYPLCkehyYeAlM8sahs6jIla4xR7cLxCqNggJnMLcYfh8jvG1kvlrlRS/JpqaMuE4TGuDyFnI1Ssdks97CHjxRfKLE77PwuMqIdqdg+aCfikBmBeTPWKP5R6vIliF25AkfRlpJgRbV6gQDmpN/pESqp8fnVPGKUU6bCznW6dhQyo14RYm1Z6vAvlq+hh8LGlmhNIoHVwGWmpXRIiIfiknGEmD/FmmIkunJUc3IQHuNHATKiVrDNchGBZ1NgW/rbHGSNtz9QkbTyZcOMfZg0pQJ5w3EV1xF6sll/mbuR35uFvDorK4VHW8ZNMkcK0GSRYSNQmulQ86Tc7tofTwL+J6An4IFK8NlA3+wEVLJ/VYMPSQhJQHC2z+vIjIepyJhF/Eksu9uJqvnJUQ/ZNhmW+UMHEPTWcK5Rg3ATsicIW7GsRX7X3O4SP5DRV0NtUUyJ79xtsQMOICAcXeswfQqYSdTGWc6zGSP+RuT8g9T9r2LSMQBYxNy2XDoyAfduuNepiineSaO1HFNlczMnUohZ7qfs0sKhdY/4GfJ5wouYosW43MLsesLBHMUQP+xLpBEoHqZB0UEFSTfDSZzUvZi4W/685s3L9cNqV6ABbD2oORK2wk1I+BsQ/MfXjDaUwrursIWVyEMilTCXgNO41pQ/GurDy7DNnRS/TlEBsJf9rCNJzNnHPItOgT445TZohmKIiY16kVqaWQus19+CDuIuA3T29xjY/STU/G5Nq/0r9TCQ78SJpioUhiyRqOepL/C1SWvTH7UqSYsVTOUAm0+yl7mizdeVPf8jLK4LPAxks2wGeGQ4gceMrUDzUjovBOLc9cHX2/OkUkKqi4sw7svKYDGW+ssVyF0Polbc76xQQvs2xN5FZqNIYd5w1M6VwoL9dg3ge0w25EWKjr6oZJUORGL3k7zuQu0OW7gPCwQqA17a1FsVt5NlFWrk5En0tBvprw7lD5yDauy2SEaWgipK/Ug2qoA+41BrDsUEQ6iDOkyLJ5tYqMl9Vc7mxPm93OkbGSpSwSdQvzfHI4OSrfOeaUHZUzMUMrPGxgKbrTj08UaD5mrelGWyHPSt1vZrU0iI7UiA2NxxqNAu6zLQtp0FpmJmy9zt0/Cn4hqksxpPWMPtksnGgZP1iANBpH5bSEKBX4z+uTtOFa1RhBvBHoaHXD9cEfsZhPFgJA2Yn/qzl0oBHOGmPJLnBXYJ0RWQ2xvyUhyTvm1SD7+o0RHecA4vQe3jBqVcr+TCZtFgRz0+o6E0sJqv8o5TGjATIA9Fka6mhAuAutoca6ROTDoLK5o9SkTzdFpCuOEr+CbRp1lRAcQ3Xnuj+S2F7R3p5npUNytkApLYllPCtVYRjGHtAubXiwvt9W1Ql+r7QKoEbFbXf1ri86M6BWXHBmjcERFdlVylar9Dd575kf+8B9LLEzWhFmvF1/a9tbH2/ZNAzcaBMvENknz60Sc3lFSbd3XNVP7BGEtgDz+ECV+z2PaCiHTtD7Jg3B2PONVASPvkToGG9DaLk72HrJsTIUKE6gAUjqdXkvf7b9l7a5ZUgURt8WH2h/DX3i4PEg41EuH3x7eGkn/sNRBwrQAsxSFPhnqco8IstsHK0QxlS4YpPzzWGVWVrSP5FocSwDOUx3w5DMMLyZKXU9oeiyK7Spwc9hKa4EmOwDLZOKIT8Kqkwr33IvrPlfn1PDUi0ayTb3zBFDv9XfQ7iWDo/OYsg2S1M7Qwl98K/TTPhv0H5fUcYbdoNJWiI2qrcexj7u/29qdmh1No7555eqs7qZRC1RHDZgOPxFDu7ivhLC6DSe86q8LlYZO92Y6v83cQxWDdj71VEryD6dhdNI5XQeHel6GsxfhWhETpz7VxrlzQDu96bNtjnX252nbVFMUpqJAiFHV5e9bmP7MgCcB5gj86NdLupmG24kxkK/aPfFK30CuKyKO6qKyC+2HowPg/5mVN1zrRRoWbNJFeN+xSptCQApxCbQ3tABNpZlJWKqU/uMSoDFnvbWR8ib7mcHMqPED8EJr8w6yQTUpVP/3zl1CgxpCTYCQakW6uYASCjG66IMsPGZZDLJkV+svoMC1JdzLQOFyV5+98en9/svA2NDgJmQPplbEOAzTEpJCH+SDT7Y6T8YDbXsYK5EVDgIP3fzhl/fX5bDQTeIS5xFjaUomY6WWhnpahqDeE7Nqltl7BuRRdOBHDBD/bvQQOgmc7lecdiu0QlvMVyJLKQQLGy0MIT6ku1sfTx/TgkxpVdn2CcdQGVsbgc3BWbEF1Q+d4euPx0V1mNh6nXMI2uhwPbZBiAKpu70bEawUXREO6K8noECGuKhn4HP3SWAgKIeIK7xgqCK8EATr7twlIN2eV2LbHfDk/TJMR9ovX6LYdFE7QW9HdiwWk8BMNk499S+SWoQfOLdqKH3w1tk2Od6mq1Db55u1wzMoJCSlcAku0x0R8nesIsy8FrzaFfvzkzvQkSNoyHqomt6kK7eldq2qmetbHV1KXPGTqsYvaOx+g1ae/6zWafdE5FFjVltGeJbOmKPJQ5yfX13Ob6QuoSpKuaG7nKovHN/XXWi/OHlck29tFvIn/wRGKbMPWPNuFXaGbG2fiMjd3G6SUMxvMpAbrVp62b9M1BB1w9q+WuiYZM1qF4lV9guTFFOtwXfHrZqjSWRBZcROfrhMjCtr+UWkcUkOhAqu9QE8d7rXcoph8WA0SDGJDeAt0s9YAb5QA0i268mamTbHDV6AvsVQuDX5YnHW2P4bR1UmfIx
7TDD8jYvsn0tkDZXHXlc0uwaQ4Pe73R0hRLTtOXABugrj05bgoIzivZGjtaSHzAQ/MWqpXUYJWD49ZmFil8Zh0D9yaHgCvjYpXf1GIkeyLNJNjpQAbVgGGlIOyqvHimkKZ/RyduoM9Orrpjb8Hdwu220iOSpGte8xdJArUBHGfvK1dNO+rM1uB9CeG3pT8je5N/4NJezNeWXDxJEFtbR4PrHF7y+IkfW67XZSnu+xymUaXORHbU7dqG6LPjbnOAxobxwqd0OQOKM33SNNcwFK+nGm0DgHsj01o71dOLRmpnTFpNsXzZ1WpP5OK5ECJuvGe/+8ERqzjOf3JzQ9gfhogtHcOEqLzI9kRq9Nz5sexTZ1yHq5XpbpiihtwdDh76hhY+nUV0+Lyp9cAUtZ/JS6ASnEcPcKFAaRYO90vXSYeIa6xfhZ6r/RbYlI7DflGj6dAeCEWVF3qhBtzEku34NWHjxoqgdg+cDLjhYgi1g4A2GlYi4SpA25PcBDY9mTbph9ZytvjryOR6vZ8aO+LQElDSa/Py2pdm7LP6sQU5mazgK7HsijLeQ8bw8cAg9cmfcmIsLUcl6DQ9izdR+u0Ys2mvOQB+rVgd7kNJKCbn4jB9Wj58cBH3NH+KYp2WWZ2O1w8KODQgz/UEaiYO3MhMpCBcahjQnBn9fXZgyG54HE2SiPVMQNCPJ+o9sbk6KG/VFp51R9R39u64LQo2l04eaIS2NneSTCAV8bFzkEGvXi9nH8JsOPjoIm/EdjJ135Xf6VlHOhYU7+MYnrXXOwLDS8CN31k55LiU1vj9J2RKHm8kNwqYNXALYaAGYxVpvD6f7Zjc/+QgFHIu1m5i7IiIG3CANjTAzphA9Ot7hVR9isSIqxhyylEDoGQOg4ghpKBsfPQudjw5GZHQxDfwI852Znbl8R6r+UYysGx4h+YSJXIUcbXVKVuCnQhMATz4Hwr/EMjYY79Z6ptP7K4kTZtj3E7570kyaVJa04wnysPA/Zd5Jv1SYSiCbhgxlh2K1hQBUWiyITUbbmtXQ0n7TjOBJXPTsStH2MzMkP6iFqVz3oHnqHmwPfcnvF9nzyKEkTW6Bl6xGsOvCU1jWK3Xgko8ZMztrk2sbesn4Mk52QanHx2pYQf0Q0GVDtTNEEfL+Ab57zL2seudZtJLPttnJO4jbbappFTlHX+6b4WEx5tKRZ3kiB31PoRKhodKPCMr+RH4iGB4xLwbSFu2Dcqg448B3R8qeVQ5G3ndZxhRNv6mYyj30eAFv4YfvmjIvAXrjt3MgMS4GlvUHryljrOH6IwaU/JQuxE/at68yFv2w5DJT+iLLQ6DPiwh78vjdj3hLAppRCVM5ShpHzLbyI9VYMR2Yw7mmI7HjTuBKSzvpmBq9XJuKHvYNfLaYcohOih6aeNBItoj2Mkk49UcdMFEQB2mRCD3ROe0YZ5IdPQ+3ZkF95UAtWnWnfbm0j+rjB5ieIi/SAzkw/7wFY4R/eMRt0KzNSMn+dbbblMLy85y1SEBvsxpiPSeSH4hnK76SaIOiETbF350A7X/CDYBcczTYoA6qaIknHESNn4PQxHyoLES4b5roUuvZsUKi1gCOpogpj7xUDaQwWUfyNTmVwRTy7cyGdqJzVCLQBWrOAHYBbi9Oc65QyCCeFY/lKKpCgQHdNtQGYyPf5C3gmDlLYgOS5jBdETnBPHE+up3UY1aE0hKHRY2AX+TkLAXr0rJedA0UzrqF+jCCBuSShAOqCgpCuq3440XZU3qRGe0CGWDkMH0bjEhWzDaW7JGvxEbGBAHoezIHywiEF018yHH5HHKra++xefpj59nc7ChFwTT9RkKyA8xC4LLzFftGnXvqhU7QCLNQFbZ7bYHqbHEM4w3esk5nkZ8LvJ6379I9iNSOFj5lArxvjPLyy2QXpUfE5L2OFHloaT6A6tZUxhxqGhBZVZM3D/EzPc+TGHkexyF4fMsM+wcRLIO8oGBoV7356oEyqDesIIGu/vAeTtmqKFhnkY2rTKqtYdWOiisgQEtrRU70nXH9Ao98hAzVIy8OIHmWug3nrKMkUUdagb8SUrYq5dAiQe1uRfFitYKQ6j4R/6erubJ+MipxSjYTaBlJGVnmPQpwSi+2mPi3mLtlbV4rKBYyjfbK3M1h01ltoUEnMLOKQ6nrx2qHc1mheUBRpxmefyl8zUW8AOhsoXcY9rCLTq4NnNScTbRqa2mxpTx6moEJYis8+fd2Fnr5Mz5Z3u+waG/UbzrRg97CapMaPwBx7U+CZjOe4A+u6Vk6owJqHQkHi6BIvUbB6tZzZGIhNyWKZmTRnPeCgcP/Q5lbqzffNkHN9sEHdT4EueL2MNHhU4A3o6C5iKsbpYR2yjoyHXKy6TtpcuTJDiCEPh9v0UzG6bxC8jbgo+A+p8FnRDIa3c2svB0AgGyny67MnaS/wQTg7NwQ18t4RQRNchd9jgcL6Lbe2w7clSxFe3evL/fXuigIXP0HEK9ld6yGVcImOSqeRCxW2FzRfYsmEOmPQOSwqL+tbkglWcv/8NiascNfl88oaso+9QvAu2vc271XwSPeM+gTulpPGeQFRSfTyq94Qj5uxQlmsdYG8igkCJJ1IrPzBOJUU1PycSa64EcpqFNR11mljkoWAqu3INUwaIWCnzdBjh6M5G61N0dT9HwiKca1nzVt7MGaGOr/HmmnZcN8KJ9n7KoWy+/pxKKVXscbfIWbVPMyjNzUapJ3jwLeLYTRcPvQwNA6zKdUFSpN87f1wHc4Vqp/5vgwjiMPZibWzGfZO/CstTUupTsHWEpnQ/dhlCQk4DCMMus6oYlIjtQKuujB7kixyZpI0t0/kZpcEKCv1ujSQ9pvtHZvDFUAI4KGyLZB//dTLdCf2NcRw9rs5TjXP+itp3rN/Ed4JIDtr2NMYbDgZQkoNHWYRNogsGmWHAtdLIbWlwJqi7ei7UqstQeFjGsgzHFykNCSfbgiH4Goe4WzbMbE4M1vmJOq8zGbsIOAdlf4dXPUCZpRyuganf06R6l1dZXyVd1RVQzIx6UOfg40narwTf7FnTbZA66fvpA+UgsB/Y3ZXswRBeTUUuMqmGlxZ37QJlCb0VIK0X57UUSpkcXmU4tQPfEFJ++Epn9NZpnXYD7egTCyMugPvd9gX/VUh56rqIwP0RR9igmvx8P/roajGg60QZhEkHYSVOpQiY7P0EbBbXPEnRMwjJZldKqlVhkzBDS7fPjnJQBeAeyxfhX3nFrfxj6ojo9lBuWqkdxBiDPMRT/NWrq2ptZidirj6Krgkv+pFjmitZOK6Gg0VI1CjzNnu3CknNXsyohUuhGD87QqFziEqcdwJ8WpRwlHUYHW2BOAdvonAHr7OIMU6kiLX3XYAQxxjrSPU0+LbO4Xrshb7wrTC+8grJqUiPU8YP3ce9AjCUEx+biyJUmOuskq07Q+qmeI/1WPA3GfV/r00YngkYw/urC5Lc0fWw5YgdJE5dxHtQ22WHGe98dCWA3DYFt8aXG0UKeE8cbDQhgib/9DywUVo74T0NTN46h43zvUvrfR2NpLQpibegpyLQIxABwpk7e
FOWmCCCHy9VWKkWBis7lLpV8ZcgHF8dszJm3snYSPfBrT8FGPeHQt6Jqmu49YTAQaP5ywOOiDg+oKsCRDDIJ3FJ9XlFRGoNDKhXyFsBcu3qR2Ch/EYm9OY4nbE4z5PeBrvh1UG1t6oWoryKMgzH2ZT6sD+xeDg0bI/IGoZIuB7cJhbgChTt0G0k0KqbjppkX+gADMeeLvKtVSnoqfJ1HcU1FuAuwOcEcr/anGP6yFwTcs7yuRC++Moy5sEXfNg+l57Pi9y4FGUmONY/xjEQ6gYz02DX05O5ddhRlKAldAY30Or24LjPUfJkj37zboRJS4AS22b5EIu357ZsmjJJZxYZLAYg6C2tTzLVv6CmIHO32xJecnmBsRuWRmkB0fzzX7QPAsBmqF4EbfY7aNbUFrJpg3UhEbn/lQ+5X6BiFOMKCrLzLubenjYICktjSmqW8sw3VFXmtcL8ciSlfc+e2Eq/+VciyLN+hkhFOpSc1HyK8n0Oov8IxhBtNeZRO7/wDq5gPNhjLWuNpRWhiwCiGpgHHInm7pQ6jRS5dMg8RohSAoMNINglf7hjT9OAeVAowD5dxYq3EIfri2fSX8N1u0OxMzYPwObkpn6LdptgJhs4kF141qOIXIC4cVwize16hPKOzrKxVhbI1/nDPL3AilXLmpsEnjv8NsnN7Scsq7S2XqM946nOhtXd36wRcFZkpXVJbVwsSUfT92V9Cr192pFfi7zxaKm+3PbLCcCj1b7IBRTN0THJqWP+4uEJGeFhDJhJOu4UeaZm+Lhw62P33w3gFocfa9kvphc2OWDIDLFJY5rmSeGcrBL+Ygn1Ntin8d22NetBlQAt5PF2NuZgXo86qj31pP2RbSv4nNZYvxZiK5LErzDWftiO/liOEypvsiMXGkj7F2+GreWsSA1QGAxXXEO9hNPfLasVkYHc7RKFWRjkxKroQHnbo5cBwymz/Ywl4duDiT5PReKGwBani+GOvqwLpXDcKKBA/fzP4XNCaOE+wDJxeoEANJE2+hkzfbMXjM6SNcwGj401TY3L9aUS0UyHfQNEDK1E6jE/IQy4/LSCZmHCqykjsX1JUBVtqFWFPOkIncDTI8EPsP9ILrHL8GsBJfnoYRoq6Co8SI7EQ/1Vbq6KLrZuwZphi/rB5/zY6UZQpbvaX3gLEIJZGQ8BZsfmX/SWXp3JCOrBdmiPE7+G3pwPCHgPxWVafFolAL9TR9K8MI1iJlQjkvJOOGnjKUw36ezWLPaHpTL272vk0FUmY3NO8VYBrE+C4enq8UY/Ry65hv9y2NfEGU5Iscw+xJQDPFvTUo6y/6KfZq/pVq09QeTm2unHZjPp7vAw1ZLiGzrZZvzPURmwTyI3WWQvp3SkA7bkC1sHoy3OsBGfSHiogKa5lM7+t4h4CHOanVRPF1lCATasiwvBLsB47G52CAzXeZXV3k14vzYNiMSRZ1Rbx5ErrEVVcM+BM81XP6qCh+qGpbltiOaL6NpMUYK8ItplLBS8d4ilZpWtAiyY2hil+EzM5eTAjsalrM1bz61I/IUZl2ZreCtacaBG2aSnVFzpv4hlpJnO8o/OoutOAGlX0wjxecpvh4eZNLKhUckhY+MuwN/B6i6hg/3yrK2k8+HODTwb39fgl3z3JK7f4JGC9MEf6mcwwGt1iTFlHiuFPEkxdCdCsavWfvR6Jb8QkAq9L9KVJRpHtOJMeDTTt94hIkvZPbLdFyBbFI/uAtxvUsS4hEx+efbKaeaDxpSWLYjlw21KTeJTU2APg9Lv+jChTh55uYaGuJJ/5pdv+aVcryft+wtdXuN4nF2M1MWwaaw4RU35H/wK+7wmoOnyGYZpv+G70aieeD9eIcVUQbdVpmIpKvj9Tv4zvzaRLHeN9Uo7PkXQz74c1bM8FRAHsC4Wo+2h4WBRBY//9ArLkD884CCoY1FJf9ysIypqkhlh1B1l45xHa8LKUuvzzP0KujQwT0gyR6SQYF4WOOdzJ5iaPu3gCNIbo8FMaxJLKtZdjllmg55jp0Tu8EqllVyelj1ujdlA0GwXtj2CNuwjdUzvP1UoUEZuwx4ddSSz5KWWzXh3is95ykNidKfu9QZg8b03s/vD9CLgAwcmeWlKabeL+X7oSxXvy5DRamlbUYKGeyCvbFQavG3u7CUsgNLcdN0s3mQXxYwCIC4jPrU0265Xw0N3bDnlOQxuDkanwW0E4v5CDJJOadYyqRMX8uERqkIgF24xMXX/FVliZ7/ioHvfIUc2jmvgWIEGSkfyOPKwbcpHf1YgqaR98IvR5tH0hfknVJ38pK9DXrQI71ivQPJqACyJYugQrTBLfTlqM1upqQrCKlK9PbhNvPSvrOT3J1ANX7GU9lZi1GalXxEZFpH/3T+AMmRLQB2wQkCD8x/J4LDTwzpse7vG68sO+XrZIaIzjwvoySs54E6M8HQEiQCBorSfRPTmE+X/ZGMpePl171ljauhp4EKKeciLQX9w/rSPorYGIDSIkKdKTrh1ykKVlm31hOOhgdgZYWXju/jZSwBM/EKRSwG/oiDOoncbLP2LZJNoqod0FRA/0I+GslmMC8emwATOuffzdqj/LYJAMFnMtZMl1d4+Ta8Xc6QI/Cci1a2Q4xU45hw9slsaTSi51TcrsZYHkkxYbtYmwljW5rmLbOJpb9ojWZ+5HpOvRyQxSDIbq8CvgrE2s4A2REs6U6lax+jFB9IGEwPs0yHsOlkbMwvWFfJVp17QLjdWsIGH7we4hdxCAhOO/G/+CbB3LaFjeMRcmKfyCz6GCyLjqt3T5FLl57WLOc3RZ2Lcf/zpGd3wt5tXcZ+xPRSo2mrvoJ9Tcu0AAEkpGwXqTvPsXWM2z+345m/TcYweuQbh6AcLBGqn+kiw/VjysAXPGUrWoZuiBOWtRpP/loqsfKbgkdIpO3SJvB1KADSyjp2W60fAM9pJrxAlCsrSgOwdGFC/dqAg40S2LG5ZVPKDyIdGmL79i6CPiRaz5IgBcq69HBE6rSf7xH99BHRNDKUh7vgg2hqmpOwZ2Woo7pG3WNyH8Gsjw+cbEpAxhaaqA2mE7IG70KPjOxgfKojgug1HnCYzkLXIn2X0x+MVamYNMF15mYnA4ZZTIZiaoVwD0jLy3PXZVAPQLOHYIfaygC8Vi3h5/tzn3BnyZ2PK4ZUT21FUOd2aCJfb+aq/OhZqf39H/lm7AG4VZ+N40CdqTnhOuj62pSmCfaHUqmucXlzXV7rDyQHweR2+kcNMrMPoQ/MTLQCXVmKNArJw7GimYtNSFJ9SsHIMHmA02h+JTjhGHwIQHvmMoQ0nY6tyuAhTruTCSz8vZlyQdl7yA+eKnp4zgHRiluNPk5dd8NWHNN9FgyJp+hAyJzY9L8TXUUC8v8HFE+EvU0hID72TKcG7p5B3dx2ikwiHsb8b3XEV2KNLaiPq1gjKkrIvf5KSO+KgZMaHnSBr2j/OjR3UwgIO1wJgH0eseWrxy2e0l3NbsUEEMPzJNExLCMXBO6g6MTgGr/tQ3hoGh7mlmWbmHc19XSmLMhCv54ZmoOBfEu7zYIdOmEApHsvyjvV3om4c
Lrdp5Q93z6J2Ooqih62bbl9AVIqYlwqqpmau6csYnhi95neOg8yGAj5XRrqsSO5ectwM/gqC+zUZtFTpeSNnSNIdLwirLe+2LHJt8K/hJm/T8Wtf+9uRiwBy4Tp3kxuQK2seRx96U+NvCpHbmkhsCq2vYgXkgU/uDZ6bdtb5re4lV/whO2Ac0X7itWCw67J/EL0I5264lRB4P+lwHwkSMyoiizu5o/VWDp9645JpTKFUo3Qh5mBixAVYCXYR04V8eRREpg2zCrnNfi30qaHsJ7ppyH2STtoxz62uL++Jj2GRU6MrexmYrHbawbDZ34AlIa/GSc5fiEQ0mBlRjTN42H86ljkZNLU7Cfu+RjBV8kq0/JPBLERp7BQu8REv/RW6KO0P1Ee8FeEDluhfPJghaaAo/ReS/urUUurVsYt55QQ2tQMYXflZcLZSMexAN3tjVSc4R4bOCY7WZy04r46Z3GlFDh8KYY6J9SpL/hdBwNxYbYL1W8MVvfLV2a3WNruQLvaiCKQ+zGG50J1vTDN2Ka2NZ8xTZC3qeDLo/Fz+zLbn2huCptWdAoRrDYG+NvIxreAyjpZxLhcHm+0RW3N86RuBETSMkcPenH8QW7MMrJR3tZt5+ympMoRNpl81YKEh7IG5Est/JOHxsSjWwW1zngYFZb2+vx25AQ+Pg1A4NDCOiQb++P5zWrzFsC/TWOmXrxTckmywIcw3nVPR+R5ohzOLWukhSjeGtO1k2REOKB5QxwNE2/Hv3AMmB3pbm0su0n6iY8DP/jDWZbhcQOuK+1ls2/XUiSOp7B+qoVySMhomWtFoq70aF9l/x5uXkCXKHFrZQvGlsW9CjWm9logKSa9dt9SHgn030V1v+J/MGGsKzMy+oz0DIeVjreX/JldY3Q9hJTShjA/7lBCFNtzHC03aJL88rIKtf2Z9JFPAQKFpi2aKmJqbxBHRTDIvZd6zwikJRupVVgACowvBAPcotHN3awJru+LvFPLriavW6bgsycT3lrd1rVYqKrWptRk0HxIyVmsxlu+mlvIBMMI5JPUImRNlQzZdYLEuQ7Mx5NUMPpDmuqU6BkWm9PL4YnF9JbMpshNwDg9Oqp87LGh6SD7cuvfO3WHFj3EHjkcZ2Rk2KFgbQ42OudLUJqcrBlsHXFa8EYJuDq8bNywNenIK494nTY0aKg96o1/OZIePF1ik2Hzn/7cMvJitBQJaaqt8JbGMhX/RBNqoTr4NAhij5iCa6TSBsDqfjyMaOxVlCQCD9alkmGlW59Cxhj/ldpcld/RJhUwlw+ALVBbxfSIMj07KLREwinm2SMg9b1IJKV96znRriL/ajFxn9AsRg3MHllGCRVBmNy+lh3ey9IezY155HxWlccD4ZKCeAmOkl3P0WZLpJlm76FR1lCydKj4x2gheXg9CJfICgK/87IAOD4G6wgldehQrwNBc/+FoamCay84DrtygoTwKQbxVkVY6IsQ1/LFcwvVWhXz+/tCtJNMgrl4mBUnHjzGzkb7bWzDd6WQ0CCNA88dsoMXAfOfC0pZ69rmBdOiK2x/QIra4ptGWt84QGUhP/WbfiM7iqA0RxGVzzxvGp/7FXtRVlL1JP8MZWnsZ6KCudQF6WUP46ACuG1a7WZQiBF0EsHHlaj5pcah9U4TcmQ3BLqIr0wb7mnKqueYn/rPsdf4dvfAKzpmQmWouFlHVAG3SxzbiOCAQyG9FhqchszQ9A9W963bjnJrajjsZHWQ6jdUap/VxQpg3ohdoj0y9lcBuVoZW11wI6yBIOd7un37NHOhAk6DVKe5xmaRyP6QloeQ1EfXZE0K1g00kl+SrU96hP9y4/zr10SzB9G7athe3cljqG8OlJy4FMntGIZEVUO4nGbWhHUDeuZlpkYWVC0h9NrTuhzAUFCZf4OsLiJYkNtZvmUe4ixJUhJwLY9meXC7UZI+9dNsdGV/O5jbVIokfsq+bt3VhH1OcSAzL0tYmrADCAtnduUolR46E9fD7W6vmv50yntE3KreXcOvaHTS9xHVkS+EKiqn98NxHmRpjEAjlm/qVPgrmHzCcKXS1cHcPHJPu8UCLeTpTrLHvdLm7Ngc5GRI0OQ18t4xfaYWW1NbhRfgd713ptccZYZsK5JNZ3dzBm+U0TzSy6RdTNvyHxivasOlCjemE8Ipcx1BQWIAITIAk7BFSkuwZ9MifrGZ3Sq7okStz/or/eK7lN8DpbrH8PylQRBKXT7Pz0OxtnjmW7RPZip5ndhUrNiCCtbnrXt8jtTgFckMvUhmj8kcYuWei0vKCEqO+I66MkB+3G3HUUyr+lZ/yNHIpIHzkvN53N/J92jx6iEK5Hd4HPhe3js4jgMaB3hZaejlsiOfmvHV885RNZtrr/v/60+DzFqTmhhHysFyEZ6TkvnyyeDSRDQp2NPCtB+M34Bb6u8kfcAd0MF20CIGUD1MhSXaZSlkbKEWFwcr9y7EvNtxJjhaqjCaTFaRuQYA1sDROGbMGmSMdbwV/o2exJNhVmRpgPldrV1czM+vHd0i/8aY+wDP2TlkL1cgQJg5TQJpt0/jb4LHFvcqkDJcY6JEs5ZEwUbYE1e7IQN52H6GzLK7Z4Dk0wOuXX+tc3BNdhol9fY856wEMe248RU9e1mUuDMoIiEjbrfKv7xTdUjzeKmBUNXtZ9kq2XPgYDFg7CtKsgyraFNpcCUh1vvEaFeOasCFKOc9iu7kCztf0p8anvhyd4CYO/r54pBPtL/ucC3BpVRiniQmuvWMroljCkN9oIxpeEgjf6poudhlIS59jHxu/fJ4RtVcUrdtmYyfRc3Q7SQJsEHRyqK+gilRR60kXKljF8twWUHJUXRP4soptlGH1/PHosrWIkScrjUF5qnlvI/F1sjofZtpNHko0lq4gAhURzyPCKJFn8p+KXsn9hdQbPiJWpu1sOGG5CvGGf40bS1hdvkbqPpCJwekkSkaTdx2NOQ0e4kZnDZ9B21yW/qDtBwhcrnzb/6lWg2mFUUyTWPxfCoze876MOGM3HkcvMHNdMM1+TOKNSUbhtsEsimA/eVcAGYRVDCey8H5ktJfWnCn4/WYzDOikm8B485rHTiP6v/z8B9tmvG1hf5gkSF5DqEwAG4R1/cenbFqJUmo4hYP+kFXFQkqpMXmr5+IiB/MqA3unR0wK+FsdH2D/ZDsZ10dos2YctNvtRTn6Gtn5bXMXmHaoi9Pagot2KNhofy+VhuYOMO3k50Q9REUQn+83oE1rU0t/+9YAEqG5XGZml99ySzomw/PeXyjx7EiLUOaeYUNtyoJIUfpPyyhEZaYvu6+ayk1BxzCfRB518YXzjKJ/oc54x43uYQcp1N2fzrxPUNKEiGfXl7h81PyvmcxdO1tewjeQPnbCbGtWpk4P+WgeSYuD8qXRtwLtUx+EDPMnra2tEJlqNBrzMjlORW7XUzaBxEHn7rQMae+L8FZQaU+DzjGp4u+0FsJi13i7ILjcVUxJIhi+Z7X5FLEYXb6mhh31wat1m8y2TpYDVuPJBLOxHD8BcNC1tO8beh1+sMSM5cfAelbN6vLpaMGqKH150vO
xSZQ0AXFnAsgDmjjb2CvY6IlEGrscSvTl5PWQql4ed24DmMVUllU2DedqqKW5cZuwqJC0xsVTMFxYJYaO1iGG/YMbGP2/QP1C8Km3cbX2OHMnk/a0UBO/1rQ/Q6b04D7LsFjvZvFy5hW+f2J399A/Wd2kXWHs9GGTq9WGTVktleHSm8xMWyuly1aVtxJp7dVpcwmVKo6npcmXRwu9aIAAE2sTBf8dkBeNXGrWtca+D2PCysWtEO5Jnpk3iARS6IXUGAtLvcYAq2M2Qj+grYxwrHrTENYGtFHY3so5sFkOW1JD/OhUUJl/FuRbOjkfvv7cYxCfaIn6sK3kpoIoDNU4WmqLxHIn7HhXm442v/cXWaCYOgo00FndtT8u1zGIEnwDCfk/0o13r9hDC55/RLd/3BGmrbsY+MC7ZZqkeGsdarso5wDUSWH7V6xpQgD4SwwZkj/z/BPvilT34FHTYYqCztAH8s/zwz6KxlzW/wt+ydiKE6KxQ24o1T7GpNcKj3+d+3tH3ifoEyo2AwsfZdhV/JpCK+D83qPOSNnBFCmJanugFT4SvEk/NTgkKkJtHqSy/C+OeqiheeBQDjjqluI0mn/Pelfc/cEYd3vCJYNAFdN2dSuC8rO5GHjA8ryFTsynHmXZOd5YUl8sE9twlxL4xqWqoS/Lh7kd5rA3idd9ucj8EFry2WQjMug/Zij9k1AeeNa6x2bPTnxuxuoLNgfHbHTxvglpNlGiOgjJsF4vt4X7vCyAfOviZgvwLe9J14SELXtqV9TIZ77x9Nn+gGoBpY9ej6Bng+SSVQXSid0NCfJz2jRzrnwvBa14u43Xj0B2az863FkZLR9pIQy2hjr/PSD3jI5y7D6J/kQq9Gms0xaPXxCXdwhDE7RvtWT5kbxl3SERhbOqWxVEx9Qu1hmSJ5r4qLO1C0T5pdYWlm2/rJtjrkD5F1OEvWCRDJzqWWLuTyr5UEsJxlitGHyphOP9xa8gy2UwcMkJOOIiuqpEHKNNY2FUukJOdzuhJFRhz63gj7KNPadleXteoubCezEIC062qTJQhvVjYpJBeHxp37nTZ8lXF5uZOIDXI0l4OCaPyuG3sGBHpKSFDADhkxcZp5pbJGIyIZrPov3m/PE3Aypcii6XkpslpoPJNPR7R2zTK/xAMobtxr9A4Wb2yOthhaIAZ9RSWLVX0TCQk0O4jA7dvhqD0w1VMNUi7Yt0ewRJLYqhXhzQimMfrtB26/ya+lX7L/KIJ9acZlKkA2KhW4Ci1RmPpHzGOZohFEB9ZxrrNTD2gLDZ6OwNe/Ee6PRgGYgAAAIxt287Htm3btm3btm3btm3bdofoIHfl5AKsrWpi92ZUHihF1eh8d0DNyMWZZDgPuKhfx9P7tNTDQXkIxk/X0MuYeqGFeXi4+EwVFjMnFSSQG8A/TGt1iDYUxHsLPrHJSQPIRDbNcHEBBcsvoVVUBAqSzTA5vQ/22Vngjv/VWQIYCvo1fpSQL+mcfQ3LF+3zD66lxjBr0s8IhCzyyZlxSMP/qpiwg4u1nfhbECOWTsS6cPjd32pWug6JUdgKnBl0u8qLe7ufxGfeo2aslOx9zqGs2FheWYYQv9WYP08JSDqmZ8C2tuJyth6ghDyFkQpCow9hDjQhQh9CVz66uqYyCjZ7OvP3Tt8HYVsE0JfSKS1p6iBa7s3dfaeHr7bJKdYYnHKmTkQr6tA43ydY92TccdEhIWsYbO0Hpla1n4vkYr76zBWZ4auSjd1OqVF+WaiskwwBnt0JBuZvb5/GnGIrct1aPuRtK1aVhoh0HFIcNfq7SficyeHcN7lPa5rbU1jrBr0Q5AcutvZYgLYvj6nJYzZ9B+8q8zdhSfdYDuK9gLL9+LOHG1E6G+3N/TYQOEOK60OwXvhsvHaoif+x4aufuaywncYGFSqF043X2tMxfR6EqBKikfZMQ7yMI8wuryNDmKGMyk9rzqSqgQ/tXUX1wRNh7JgCA/k7GuIzwnl2jcPSaRwMO+vpOR66bqI4QzxWYvdA+ZlfuL3EerZ2RXMn+rDG4DpVPIO9NC8M4V2FVx+zWBbuqF98By+YA0SrXq5i2kKQYokY9v1mTxNd3PLM+WEqXqnQGLyo+YkCRa3XjlPtu5ClKvTTXiNi7gfbC4xYCSpFktqwpPX75Bcv88uWZHuhuFek7W/dssl7Qbwj2QJyqBhrygu3rNwXGEetvxbCd5r9Vnh0zTtA5O4HWQeuGlm6EMQqLjumh16uho8bRLdvS3b0hvsXsvaYd/GPV8IItteDJUtQpA+B67EBPaMa8zW17RHZ9cARjMCHEDJ8FxI83JnZxsXdWAcox5xRrXyRMWOsl9U15xYXO3NUjQaqKAW0E5+5ldN9jXV+iCdXXp11kEPeXWCtST2n0vUpbDSdXptkwIrx4cu22aDGMjydL/0b7aJMVFt4XMKsLHPGP7rQjKTmR2Y6I8KD1W15GC0Qa9QRhmWb98w8z9APkmuzRqZQPFOMDL+iSsRBBUH2425rJynuCa3GkoWEPgQc5wXwabX0hrsyxW9E3heqy0E0qPOLvE0c0PBPd/qKNNFz1+01KSvMUoQju9BeuIZInlNWYLPaUbiWqoom59KhG35Mn73m+HthKxeZ1gZMNpk+OtoaDC+N5I+AoOUlrXHc69wkmKzd5Wgv6aiygwD9ZgDlTfJqqu31eoAaZxqMbt/vOntwSnVj3mQiJ4ZnNnW0e02xV4wbdpiaKJpdclUnuCfAQAFJkpt1yIwPr4QkDoVe/UN2r2YbaSOowuRoSi/XoHATPhB1k4q8tO/2yeJKwe1VrOtS1TaSkGob5WTgCBHWCm3jGY29YrleiKy5Md1gVmjxNIxC2bdn9f7W41d2uVHatMt8jA59kL6veKjwkUdlD0gmopjzCIbFw1W1ii5tvLw7mW4U/vEABT4zjccXOsrNIlBZDdIf9APDNi1XCDRpxef9frPtJ50LWD16Nlq6CpiK6VihNQS8TJy6xv7Lrpvt4YRDmM1ybKjvZ/FblaHvcQtXbC+LONmfIp4be0bKRku/EUHRZt4VB9TMkPHwHDfU5opu9Pd5kgS2TqBBbIYF2xFCSXYPiCisKTKWrFPaDbwmq6RaP3plK4AiGJT2S8Oo0qxmk/7nSzUDJuxrvKhOaCFxeA9avOhO30D7mVmcLdV3CW3Sarxiri/K3lbR1deg7bmhpQluysmgboUkcUoOMhMB8CrWvfJgAQW30/tgq9PvfHQOlDZo36bF3jtwyJSbuKvJkoiN2HRnPT4ItXIOkjoMlpZtGA7wuev9vczlsAzUsFzcW/WyZzUYn3zM5COvAQHT7nZVRTou69qGEo/D4xS9IriHP4PgMkEnGy4EO3mlX6V/xwwSYCW55+0PYKlYuyTyjY5lV1i99df3U96WcwiSBi3qRc9CMOyW2Pa81qkNWzgTGpplE7sc052lFu8Paf+YC1d7RkRTcWQ16U8LKNGFT00Q+SKie5LAVRs1+K7DxfqLTPBy4vGneXqNB5d8VB2adeiFtKulwHqDORgW4CONaHHLnGLbNckw8220UhrPEs4y45g0D
uymFQivD1m7vUiUVYfi+PAZOLQsFrVw1Khfq5RYFqBINCIhpD6urFHGjTOlZiBqqzIgfJMxxpKJ4SJGzxGHfSnzZ45cDRl9J0ThUoH6Y5q4vsuyd6K9X7ag0GaW1YJzlAodNCenGBWerAbECJQbHQWcwj5tEahPsyRMu7jbKN+P/PKVV4lmWO2DaB1A5hCiX+t86O7fAoh2aBpkhp4Lv9CC0mXmn0o6zLfjtZglQMfdDEQ/tj2wIyz9b0beNQzZ3w+uZmlkIXDVxCi46smQPivS10+FJCl7KrNjWdx5eJdLGRPpfi0GLVfyFRQWRj/zMK4bGlMxj9U4p2bYy/hg2Sg5qJXRZ3bQ7omAKbhmuJ2M+0PKe+E817TYyRgXwf6/XEVAbFXZu8CiQ4+Pf0rbmGjdOV0VHL6D9HBUZ/6jIdBqfNzZLpr8vncX7TD1EldiIm21mJ3rRgPROaAxJ42l4V5GQqDPzXfz7tEA/5pA8NLW3zPp6lX8ilfRuwc1Q+4Cq8KHZ2fkSS7Dk+aGCu4GDZJIerHCujeCzlNSdHz53chiOaPSEL1RCaqw/8hcihMfmsby2iBqhs4iTNq7j5eQlcx7hMNghRwQJ7r8n/FQj4niClc3Hs0d7SGfhb8NTMj36n3zUtcfTF1BeLjRz80j7Gs/ltDgOMfZgn4vTemrU9m6wynsXoxnWn0RNQravoqVZ8nEDHEy6QlKNNixnrGT5Q47OzYMr72V7hq1j/EfilWVK/JEN7wPYVA4IOMjFaN3mq5POXrSWM3//np7blkypco4IYwFnrUM9yV6xsUvEpRNd2uLQBkvb+EYOs7nZ2VlejKntMx6/7Huh0gQy7DHrKVom2DmYx6XviyP9ZW8Lm5UTN7Qu55EBhjeQwVJrH1c1Zrd9DtSWqFwLpOQ+GEs3wv3YiidwjeY+qgA8uODGMNGWSZqH+v9HSOFx4nWmVeiTaf4i+JEqmMo3X2Cc40y66Zpxvf0Yk362Xmu2rtCZQfl1p9ilGaYL2WeCINJazjDsZ7DbpOAi9SDU2+3imgPENWpqMIYVllhsSXiMYaiEH0dJEbeL2i90rK0QpF0pX0dQI3z9YcAH1ZhsX8qg/jIxltQiPr0yRffhT3i8Cjpec33A8W3L6WN0Sqzd+LcmXoGzSW/Hs4nRJ7ruTYTzZQlcQNQHg6cICh6L5HkCW8BY73WCCgHcYjCp9hkxYPTYhnqMiY5ZwFbRwvFkkXvOLbtzcl+gR/XEy8/glNEB4fXYK+hWLsOrOXsJaXDw101hYUbZotMMXYEnM4jk0ZojxS15YqPLcqnaN9yRyx1tV+5Z+Ly8/BWUxkGVfS6rxCMP6lZZy/n13SDBWrM8IMzkZ8tjN6Yt4rfryoMZAPdvlFHR4Zrit5A29EbRK66h8Q74V9iBiUi5nhuNeO/JEe6g4xroYuZgR3v9RVvU1yWj+fIGw0nw72MLuIgVxqCPQzvTOR7T3rNWPVwKYO5SaD7QOtqwZZXUcKy3R/EJ1kZp2Z+JjPxByEtTSdfkMDfSltL2w/JLJPBavS1g3OjREXpmtsZPFuwK+E+8reXJheePg9JW7FQ+EwHLOhVTPUULpcQT17xDQD3EgXQGa6NC+PGoqiv86YucHbv+G2pr8TQFKKxL4SWm9uYJGMfVs0jQnDrIDywjqEBX5fyDQQOcnch/GPogo9NPuKoxn+su9GRqiwBEJ74nNAcvR09BOm+q7+vNKDCmTCqwHlGN+t8BagmBx2SMH/OkSlZA5ws5ISPXGPk7HtG5qGZvLLsJIT+6FoIBNIL9wYyn0xYd0HRoxPgxFIEfbXqCzFuniyN3OZ9TRAp+niIcME/DMZQkkM8us5Tkj5bG3nk8xa60IQEFgy7GfH63W9wy3dg/ZmnQ6LCxrWRcKaliaJjodbXwXn1fzHUyGRSQIqOvVAwKAq7h0KPyUIPPBedC9AFhm5q0lkLZeVQxA6Zs7Z/HYM2lkUMWUijFxiS/rioqIFiQuH+3KdXYR/gnKOJ0CjKdbX8ThnROv0qXUZ33j2cFwMoZO6/1Yvi6fLA26bXSQ/jWZC29U/rIdoZVWEUdQh/uTIt4+1IjZyox58Ppx3VjMvT1Kz/uqY53O4Ln860bwv3Lsk8acSba+kAZk8/5akipUJ3MEx5GzUxWVPyPz/RsNiCA5rj+J42RCmTCquWdx+YPkrdgo73/bP2U+u9OGnTiRSQos9DtihY9z1yLoHzdAr9M4E4cL9rCXQTUCrmDECJJ5kuvitFsT1kSHQEUgxiU3J1NI0DrWkZnpuVMKBD9pHpKApW/b3m9YmBNB0AZS6dLI5MsU49ZY1zcobO7UROU9F+ExKcPBPxTGEHr1EuLpKRzzh05aCMIu+rm7MrXdutM3E9vV+aqeYtbkJesL8zEwzjvDtt6pvXk2tox1uwfkuSzwq6+aLGgwKnesmGQe4FpcUV/Z2Su0pPlgczaEhOu+OSwifbascEj2v/zA5YwcHi5qhbOCikPG5zDvIgruRzhOvP48xHXzmd8KUAPkwyGK6lDZ2bbuZa3cFzaN7vP+td8CE8dRbGuydzL9/wG/w6w+mEpPPLi7MUu74RK7pFoMJvgr5O2HPjry4+JjBj95udb+5P//xHgcIExneq61pDhV/rx9Rw7f/8eo1cpPmpHrxnf7w1VEo2KoxECVfM/esRyubAJbQanSfUn2YBdxC9Uu0W5oMHT9yj4XlbZeUwM+cu/sVT0eMee0sydwfwFKOYZ3C0ZhVH5hFeoVawifbzwdy69E0WuNduKZJ9Rb97CX984Ta7R7vfqIDSnTAuzpG6Szd8v5dRrAeF8Rolprz8FrApq9GcvufZhxYtQSSwrGgXfR/dC884hnRsQhoAgAh9l/YNEte2XjMERv43KzGeZyTBlqxYgEqO1sxGpYL6OI9ysGaXd4YE4JlIHewIVtWSEqM5G4CHUm77XTattJOYX3sszT7pls1s1Q5A9B1ELsr7zdJG3Xmh465Wi60Tupg+4pEpqqGDtxCOGxAZ5hdKn8va/1I8PTooe7PugfLQqhY5uz4jCT6d5khe0mOFxAFLR8ENurAwgLOPkbgsRUbcBHsBjcFsqLZmBCucVUBRZ4ZpLgNI2RiBZDr9Q01pgBZFQjdHCTSvQRnkYfigcvRz2X4X6/Fg42Vxmb5+5D+rAspQ4pEUKEFPFcedA0QNeqipmJPr7YsuUymz9pP26KQuAgXjvjL6NSoy4CNx1q0VUDTpb421tx00supBxNZ08HpF9aGGH3Wu4jpPjuO8C4yW7GNHIfAYmOu71xlhEnDdWiQgzu8qECpkCtl/FeL+zlgzf3lR4JpyIlOg+wjWzM1335LWlEqIiGC7aCiUPZqnKaHrsv85ofciHRvuKggXvRMSTykgrBc+89iJDpTuVFg8W7vvfs8QFq0y/Ox+AdDID6y9H6+9EXiqsYQC256jlUSoZ81HyeWSh0ol3UHLkHMyTx0IrzPXTXtWwvPzdmd9qKHNKBGsLPZ4eJoNFLW0U0IljL2OFZggihcsDHa4tK9Vyan39acEw9Dpl98iqSs8B2vT0//s
u3jtIBpMadrRPXrWPOUhdtU8qA6wPyjt9pn0CZkjY7zvDdpiZoaSwMP9ZFJimq9d1TzFQgeVVYAUkDaZhVAzk367kfTYB7NecwbCvo4H0aNotmbwFe+zmc1WH3sP0UW+zEdy+u9TVebvyTlQcwhpQ6QZsHpcdJCtLBUIl4MpeFoBkslYTUL3bnLJkik4wTPOToCkzH92tvrisg5AATG9KJlOceRoQGIl6fzWiUS9uXtEsUX3a58QpEcUoHYhXQHixpjCIDFWn4jLAb+DdB8ZWoMn7qVfB12k3sVrZEK5E7nWD9RW2nKE+Aedb+ig410HoLxqO1Hm9z1d1ubmrULgZvBvWwhoN3SWTVOlYwLlghjsJCX6queZIRuaqdMyo/BOsbLj0k2vlgp/PzxtWf0a6ptdyE2xJ8MEdk4OiAmLEHMMs6vdTME+1oZPzB41zJGq58fT8HN1QMGYQQZn39mpAOHBmlIkfAnb4pBkhHLDeQVAi7Hl3t6avaTF2m+rtl5i9aO8Ib8Rs81NtYRSkcM9kMFaqBMyIt2UHPE7SW9TbNaZryqxBt/ae3BRBv9PmpJqG4epLsgD5vOPKdkjJa3iKzEIehB0x2ZI08QQha8+39EUe3a/VezpITUSZvj6dZ3XlMZJN0cmMqeV+8hyHoe3opOYQ+3p7YfzRitsk3scdeZaOKDPepIrAYowG11xsipKFa4vJIDYF3nOQn6fSt20EWkjs5Itze2+VPYVi986wEnoWE9PozF/DIpikakPOyvsCd+muOG5le7fAk40Nwd9KGdyCBtQp6o+w7tW6RNa47NLvud5Xy2HlD+DnL0QX6kKtVmmkYyOEcGgQl0CDXlnq0hF9fEaCGdeRX9p/FzxAlAIjfFE1ZdDpitz4TwEotMcmebWv+f1C2v89Ga3R3R3CB2bGDM2yX8xzBBRbMxECGKAT4VLR8GxLH8zlXykP4rxu0fQbejTtCru4dsdW3KqG/iMrZeaUoY6M3rAH6J3cR2MlUawl/8C7HHuM4zqAqLbJxxlvyFEGfJog3dY39LItkz8UQ0hD6tWYTFNv6Q/DCkDSJ5DoLl90+KQhgBKwV1qtXqzFYC1cOEiypcfSJ3umjV7AJIzo4BjA4bTZmQUZbZpJMceBWfAiVqGxj+hYGWpFI7aGRAfOLYxZzr+aECjVNVTo6PtAhtjcOWnO4Ma7RxUPLWhn+Yw+61OAxk6XkW/IG4oSzZnuVSqMgWlQI3bws6PZ66nYK5zqZBb8kTJsb3ibotuJB/mssB/m1nuXHEhsHEfAFzT3uJONiLALa+lVB5dEIRiyf6NoZmdxlXB73HqEIRYfsmyBEl8ZfQCLzKIqIG/uk6XnUBlnhlndmWD3PdN7V5p93Ru4nmTP8w6OqX3Iw56w/pQmA2siZG91N+9N78QTkMVuRKqkjBISq4re59EYYR88VdrM5cxS+fwvOoEOciNPTstNjeKRVibE1nU9MAhQnyDNQlq+kOpiKfveRocHqS4Kso4XjK4CeIm+nMYZnGno/MDlBxDPZs8mr75UVYXfk2JzU0Ynx5gYUzuvAsihQGm//3b8fWUfybPPFAJAIuzKQYTefPJw2EosavZFYqID0vksC2FLSY1Q8TpBGf8B5/8e9MutoS1O3swxBEayC+5GGN8rkPq0IYY8yWZr6On3QHm1FNHDVWus2tutynuknJG7b9yG58Lr5qtJzFt6bdz4U2wobqTwt4FOGB6HwZByNhGpLT2NrUSTuPIiL8sgUusHvzGahZ+vUTzeoeDkaUyllL7l9UXmZGZ+FGltbv/Irhv3xI60Sb2YYk+cLdrS2msJHacngg5pmCDKeYGO7POnfH4Z0Z08TX1CNd6aQEMtKRgXZ2j8UX+GHD1Q376Sd/pJcG+QikxYRMHZFfVMU8Lo4Zmd0j1MMLZK3YgY2N+DJRGrJlaEsFNHWQ84Ckl9NvYT+wziX+dyVe1kH3q/pWuF3SubS2+AHyadsnBok2jUqWhPHng/oapj1+belDQ4pcHVAV31O6zu20d9uipCBJIgrjCWSYAFLQ/Xl0JxrvS+BRffdD72tArufW8tYm9SXi7HRrbHOxY5FrmKn2XNjBur3C+Fnf7TEamBhPBneQ31QNYVAmwIig8NNfLuXjnf1eNxlCea5bJFsuEa+waxp7GYEdNp8/0JuDV8YkoSvNKnU4IscIk5pJFxztPUot9eWsCTVE/aWjls8kdjk/swxVtF9BxRcqpzMnSccT49Ghu0Cf/419D+9Te94FpfFU28xVcJNSuFk6KIU0hNxv/AiL3XpbQ6M6+k1643g49M+Yy0Fv08kdK4t4u6wBYrj2tA14KhAwE7yzLxQqpOW2m/QYS6yHE6Mv4eBxfn90ht4bBz1jN8uBDE8kq7OuupucTLNPQJRJd1IZWf/xmwCQKr2+kUJbas8t/DXOTPrd0any5V0ZOKJWGIhNrt8onSZG7DZ1JkhAPgLgu4WDGUcBh2ot+37hzmcTZBpDXTXGApf1YZpnKB6TsKk/3Yp3evQDM8g6g3lHlIuzM3CpITpRvvZ+egWUB/eoWfVyZ9s998eMfXUMbISFBgCOweuun1Vhd9NTJkxqVNTjTB4WEKBIAAImTmH0jYjtHpVHDaxZVmWyPb5QVTQrtX92Jsm7HRVaqe6GCanqaolj1ON99wAHvkkBauAowTDVk70kBgT62EJ/+99UOAV2E9YL9I4z99CTo+V3Rx3OlPg1QHdkfoac543XuZaaZQ382JHzgRLgLjqn1MYKac438XvR2kAjnjO85TuhTo0fYBMeNJEX611FVS/bBMAK+FmQxPtDYu7Guiu5oANCTxNkHnJVmBtIh+9RA5WbD4/MmGN25LNYPIjHQZR+vqjN28VGy2SMxJ/rGrNJGi8SB/yVHTp6xoihPuimzK63WDd37dT97CRVPAvaRegdB2iVqNAOzFUjS1GS8du6oZIvY+RAveSL3jkqRboLyv0ZQjdTGxSVxsgYwxyjz9hj9qXmSKupj0uKeMDoHNBfd0i/NTGavugFPn1l+D6D1NFL1m9PvXKUlU8U5wfRwl7l9k8MJJHx1d7VzKjbhOqlUq9VVIDqszfHgYUyNlSyjKSuvLJv+Q6dgElRBuGB77G2MadfyCE4iQujiX3DMimX8iBFdxCwzz8WpcZK9ODmzrVrJsoqnP6odnxWLIyASBAgyISP+9ol9F9OMujqlCCcMqGF7HR5lCBtcDpl+st8/hgSgGUH53n8AbST1RIYboFLUjriWkMCJTt9032mH0BKu4ICj7SMQccUv0iNzLeOIS2s3IkdX1QDZgLlwYhb031Mb+fxpkn2E8EhtuFJjDaUEKhn1WI4uX/zHcR9NfkC1yR1igJeb9N9bYXo27pDwWcjH1SNYcztVerEXlrLyMaGoX6c8wUtpQG8ITcpLyrnxnumuGpHf2ru0xtUMI9ZGAmP0sGL+Xc3/zgIRF87q2PssXg8ucKVueATsr5T6FKff886Vezq6iLFfr0LF2gbCFYoud+fzeepGeGpSJwGRSo7x6oiHaq2dacL2nPfy3y2cJn4DzROyskm
Fazr3oNpUXxyKr3uhwIoJ66sao7ggJIZKIxPDrdrM4SfO38OAn4J66btaSR6ZUwjiqbhW0PiWwtIXFLfexq5sDXzp1hjOnz+XDGTOaXcSZtv6/CYZ7Laf/MLy6UeMM1cUJd1ylhbslp0rE+RAlUzYDDtWTnByT5npxvtheGnMZUCXUo0W4K/Bk7TC3UgobLYBR6n9yk/F3hNK/0B6Z6Vqg3qSu3VKogpLiKpx7yePULdTd4PBHpdlZxbbulzZTaxO2aeoMs63EB6vCZ8aMM9Re9RsijhPWP6sqaahz2zt4HBbiRbHSIn3gJZG8S2im5tdtjuqIWfaFhadQJwfB4TV4uauMC+tf4T7Kji3oQ10cnDLZv1jwudLTgATZaxdB73VetQ3kcpUvKMQxUV48t+1EMWOr0aA7A3gGwt9lNfdXqC/4W6brnI1z2depShcmTOrht60s3NSilgYUeZnYIWQuIFM6mM8PW4RIUNmRINd7OwyRtHM9AqN+Z2Yg44NCsRvei6E2k38AnkFeMosj+DW9fGZJVQAg0xZunyrwQx/f8lRBm1sCsq0iNXRgrte0tTfZwv10qDqeBvTmC3O9tcwql1dhBh/TfeXKTauT67BHvQLxN6D/8QWzD2q5GLUwsj0VRmamu+lapt3UaWEhoeu1TPvMLFQR7jNsKwes300gWpEqkOZGmSZ3vN2gKo4iEZL7VkFmQluunDTIq0N8v41HE1FB5oEagnp5J6iaU9l4F38KJqoJU5N8hsN5+mEUqNSTZS62HfPBZESum3ToPuN/r3S8W1jDAgmmdL0ReRdD9elNeBW/wtthDy0AathavM0mJdegviA7mpODawr0OxXuC0cb5ZAZI8pLljAZnydQGWGkEu01RKhGmKL5FdeKgXaTSoquo1Pn6j/nX5uHyoEtEgYdzrG23HQ64CfgEtbZ+qjahgimtor27/8yNppXOyLBojGFhHKW9mbzkGz+bRHbfEadYUIwT2fsgmBBAsXzQ2ACLt9SjxBc1iQ88WYcvRMGKnzu1xFmpmU8ERW6cPKc9ZGhQKRBehFlJp0xF87ZwtTlvOXIdCpEkulA30g9VP2gVefX7b6Oxe6jRDPxh9DFwRH1fgXZxDlXWgW32w+wZzjN4C/mwm3ewqfVIQYuDgExGC0ie1OzEd6H3Vl2/K16bxWrSloLD9qySjRejb2hIQI3LNmq1ClG2lQj8OeydLFZ0Fxxh9uZ4ZMiJ8Hrzx6BSipk39xa+AHZzw31jbE6ATWnHblySa+YP2VywWlvO4C8xlQWbCI3DHgpy7TyFrewx1NiDs/0Xwmg6JZzudcP6uqRYfL78tUF4abOw3AAa0txacTM2TeUcwDDQewNIg0/OxcB7ToE2QHJfeE0o3s8yeh9dFRkz/GmY0hD/cMlkOeqDxeBfjKN7naR0O2aSH+4LAgEh5Ft43bKBa4BAMxnyn68G4O9VX5+2KyXg7GH4Op+8QSemIkk6wOjkCsx49MMo2z4GRlmmRDR2EAQl6UJ80xbVfzT+Gnp1GQ6wxOck/1/MPoJs6IQN7mSt3EmozvYvYtV7aWQqetN94c+99bvNWBz+ao61ClEJB0yz/vffKas+keF1u3bBCGMHxh+IVq2yTW4DpQUqiEB2zEXOCbyvgboJfPBg6MF4cEzwa8CoUN+zbsNOEAb4cDjMGSsrgFKxusOOU/FHVFTqiu5Xc7zugA+fT5zfZ9KEnBbxLn1GsywPVS+B/B5Wf17ofJzfIGDwcATuNVFXiEyQNgjtiqh6CQdhSZfme6yO85L1zBtRVv1T9hbXHfixS9jEOMs829m+TNNVBZXui0ziRi9i2LG1Aru8g9lZouPLOalruCH0nCW18KfrQXqhS9G5dBdychTt6f4k6Tp38rbfhPu1qeomHmVu20T+eAM2HyWUS6h3xcos4xjcMJ/WNmeBuR0MvlVaAnyMcc9IeRVVhAJtu3UEM8VDlsZr8qZysSyADxZBmT0QxJnfxDNhJRPNXYtt1McBrwbo5IwiJU2L9Vp/xkM82mbiUg/I6XqUE4IhTYTMz1wW67sNT0wksVsIpjdrwWWZjFXxhXlnp2I6J3603csxs0AxDhr8Udv2OdCQJZ0tQZeVG9S9QYcInH1SU9jNZGNEqvBsna2aYkyozg62u6EmkpdAK52Ud+bS6caWInxxrfa08CcxyLHR68v8U7GVa43qG2EsKuBtprhehe3EdKFJId7Kn2W9Lzy+Gg3KvPq7eiNFfqeucuaEFbSbDtQ03ADcHuUpK73dIOE1sa+ufH6N3SstXw+AjqDIv5C87AyzbDg7nrfkgnASr+1z24Z/VOGdvg79yzLkF2H/BspaUUXN5jl2xOAF0X7LwE/DSFXKrRBK6OqJ8Y8TXm+NhgoEdv45VgkBTmVF2fjA87uTXmjZPxN6mlS5w7Jn4aDOjb0GWuQbwpDSlaMJH5KQXKodeiKFD46LZdGG9ZyUCFnAASbBoJmrxuUjIpVNlFjHgWtVVwBo1Ls46dpPfflbur9cFfkC0yMNcIDe4YkqY75LkigrVjYS1Tsp2ShXvjcsqbsUdWyF3zFox2U1miynoo4I8kllEXn1P4eUDT8KznvGdeTXgn65EjqK4y1hL4Pbx8leveV6PlzAQgtyQPe8n3+lbImK/XVYwKrt47ybD6dO8wDfhebx440SHhJM4vnlYZYIpfDGYvhnpqVtbvM2IhcUzHaK9oTWTtCn+Y4g8iXFQJeGDjr5MsroE9AaoJttSvF9Z9L+9hwHjbI8UR84AJxBrizVWso8lEtEjsKZaIqBtL6GPwoP1mzyfB+dDKVqZ0RGwKFb1Psa+8NYrjSNx5mcJaRXwVr97O9P3mSminEUlO7i8Ta/imGO0VS6AICSxa7LYEI/0J/Ac2YWRBshJoGmcWSietf+M+VSjXnJZEtVv6FqOAVSbbNHJtqyfwsXl37oaTXs5kqL6SvJCXMlUOtIaDO1TxrLMg6I8O9Y5PEFj2ZFRMh0phnNOnzw78C3LNB1F/k74a4bia/CbYLq0CYIhrbi1Kw8EJXo1vEmihew0QS36P7r3tyrrK6cXkqynfy+0K56FJzWOSEMKVAGweldbrJ3qmXhELAHdJoqolECtuTpMz2XXoQiL96eq9RS1hYZvetfx2dlmtzS34U00X+rYMa8YayjJJR9EBNTF765OygE/+7pe/U/CxW6WxTCcm9JmcG/dV+1ev8Ep48PkvnrptHl1uTOgnS1AF0qr7vC+YYvNteSRo6IDp5Zba8CTwHcT8k5TmSBnydCjjRRS9SQLPQ0/J8L0pXwBXKvsh+MAlPmUZteXXL0DRHzYmR/W+uLtxmp1bXs0mAqPC84eMzexQ3BY+vWY6pPb5IuqAefvV+wravBM7rq36FnIwp6kQIRXqR4JFb0hmojEklJj+tjStpf1RuV+sBpmkK6vO0R1CKDb1XcsgiOd9UsfF2ugnx5dnwJZy2L1qZwlFYwBkpr3uWT8AorKesJQX0FHfOY5TvhXBGw5lHB2rKPPfH4uAOVUcxxutiKRNGzfY+C1E/7Kijo7vUxhxkFjwz+
Rqx2m0S6p38ekGdxZfc1HiZhIg1i2Kjw4CMM/sLhH+OGZHK5gis3ilIkaXUi+R3028oDXRM1uGND2RJGRpvEFIksRSK5wmStHxP9GJMtVR5o4udk6us8TBHW1fLLPxyu1gvG44V9fEtBEY8Bhzk6a8p6TDMbG6I701S+hWKJiZwUQ0IGYjdfY5remFqikNel/GZ66RJURtnDNkt8Icpf/RvQEw3rw0IvRhX810TV1DxqOxs+rHPQvxHQjq2UnL8BI4fETgOeUyL7r7wNIm/KOwfbx+UHSOjdKnmjFIS+9/0PLEs2230YqpDnpon2u0SjzvGtm9DYn/hRuvje/kP6x0KaZd2qGdkJDLduo9PE7VmmcIP/TMHLw8oRBeIis+hol2GrHkQ2MSyD3kaOD75PPt6dm7foQ1Uiif0Mura9zqtPxgdJQzpdw3FvRqMqPufItzOJmfujtKhqLmy+DiAC3Y4+3kJW4AigA4f5wmKNs+Hwm5DumKhr+8XkBxrjQexSACNjNQP+oIKk4QIsOibN1HxN+s8p68S7h57qBZQw1vFOTNJKEz/XIesvdnizD/NM1nJpNWPze3CuPhVYk7BBjX2GGJfuV4UN+UC5ysj/R/MvS/Ir3jwo3i9c42z3wsjMudZ6IK/wy3Y6TjPwmUujri1qfiY/ctOcKARA+CEZA/MZnQG5bNplAFdcl6yjKJfeSiWKZXFNGCaM4u6/gKfBncTDNo8XRVHbCrla87S3ZMB8zajlO0oLXFS1Jywoi3kOlOSdYuEUilV9ASscCWLjPaS4x3DcI1Zf0lIfD9iJR8RxU9D5/BNyFnY+xZKuMfWYVxJ7De/eZ+rSgyvB2mvfFBJtYVk0uTVt2azgZqXMyohOk64z++AhoxBx91cpt9ffemGZDz6PnhkRqE0EgeVDJ4hDy8w5/0Z22un7dsM1swjEG+cZf2fNW0zt9TXjQFX3xXDpqDNRHlZurk1EsIaxG1X+wobUMcj2HhutV3LHjZmHWhiJG1TkD03xEg88OYXKW5Rd2iFqKHFSUcmOWYC8AdItsIkPkXAx8GTml3sFyHADeY1YeqRgdWuG5AxdzgAysGiEqARJc+3uOlJQd7ivqWJXFFuWtV3Zr6dsynV2ZJjE/683ECutimS/gL0+bu0vTbU8ADpOPNsExBF9bUmoDCdZfU1Vpo2qfXLUcGtETBt/+zqkCP6ucje7U6A6s8A8sZg08W0rsgjGgOnhSIfFXob113iblVnd2d4q6kH6ZZgCLlxTIQY+kIwzAZqGaduCf7AvektN8zzhqntJ17hrKsfxIY+ctO748K7sfR1gN9A9WJd6CKlAsXB8Fu46ISFL7HRNSS404NUQ94/HrGtMYKgqMtHqRM0rqrGxLAhgLVzojlsLPc8sfnEwFrtsGe2ge+FK0RoMthrQGIqRw5qa8c+20RCaGJmEqDj2DojaattgKEV/SSzpEEww55ee+1TVWO083OCDW1O2MrHLA9dK5WzED+5U7NPbGjTNKxLLxM5VXlKloM515BR9BoqO0kgV8a4UT57UpvIAfsYyBuUmR7ZCg02V9rBrO2ThIx73qkNwaUeWo2CdPVgOm9+ogNnTGdlonY7Cj0sr6Lk2zVxTsZ3To5E+SUR3nRuUDy6kXJ1XSN4GJtnQpk2RusVswC/lGjbLb0u+0vqTHq0rT6h8vxBSkpEnGNZ03fAQftHUczIc23hoVCAz7dDH3ev01FwKqkkaXJBwCdwYm/TRCpnSX5eRZqeyMagiOeemYc2GekxM+W4m/yySMPG84dvOfCLijotsrS8/ekTUQPGUqgTt6UY4PvyY/NC2VfL126vmhK9wn4dESn8UA4hoylOI6hsxUby0dEDL2+rU8KwyI1Q80PMAwB5wZr8Qc1s0oToMrUqwsaWRDjEVY3nMrRhGUCpPdT0CvJYk7UNWuXMW4NG1YikewbAJUJHXk2TCA6wGVtrgvre/QaburPwnCv7gXSNroebBJzXr+jZHcn4XDKT+KWXMaxZZDNSfTnFNRr2GPOyOETxImV7Es7ZKBlO4uOrKFeR+lj//ZSqXfIggm7MoCuH79xloocYferzLJkxlv/RDw/2AT2mjyPk84IkLLwnNZD4+naVZjmzD9oEU3Djp1ifmRf+wQaqGNDbHUR2iuYkwkh5BMUwzH5vgcWEYCAFGrdh2u3zZgKiwd20kv8C2YYU4ffOZ3qTs2QAiH6mJ3jLhgaMvLKfL1azTZYn2I66QA4+7zH2iLmWwu1H53n2yVmeb8eYoX+mw+7FZOJJ+lIeKMi+czCYRU/ZahIP/yKuyJaqCG7Ux3yQKzZlV0aIbm7cc45Vh3oXprkd4KSvvKuWcGdE3Yky9Wk4ZPkf192MlLPO4jJTzqeK4hkrESCveKdETmCnAsBf4hFQHggJepKbyk4IaSRSHwPgKiZUR/Usn9X4OOV9uXae1iSyLoGGZSSrsoiS9ZCHDqq+HE+CqG4R6Cz02lz5XGl0aEwZOHm7bv18QfahwpMZv/9BSMzMP686a3pEDvY351/z0BmCmApKhivxR5lr/aYBJK7ekScKyh4eZnMwj3c9OL+ULtSpTsI0NJC5/jM2AndJFM+KyRcckYi4vh6s0niYGe0sXHaElc1+o4wEdxKXG7lfNjMr102j0c9fsM/oPTPdtfHsZLsTF/HX+aL2eM2M3GjHR/IvexvcCLxX7WzHj0NkDt0n+MMfHpfAka9O4oB7fiATVBLVorLBuXHoz1O0usxw55G6FNKC03r+wPpDCAuTK+0bDe9AEsqHZFwAWCvb+JEQDKFGZ7IInOjrPPL4D8zVUxiAPT3zcz61xCQg8GqEPo3rO+Px2DYhikrWVTLbGiAwOravwmfoFxobgP4Ds403Iw8Qzxn/pjdjxNaGZnm+d8tf3Z6ms+JCOggLVgzkCXRHSFv21iLwQvlQZCbiIIqC8DIakBFILbbF15t5SXUBd2ASZMNXwXEdeCW8mcQ7qovJ9Hkvy0r0Q917xYXmWFTQqK80MFRpeTuJwZzAcygvuyfT9ujL00aM7E57Pj2xGAXz9JEeYWw7QDQgrQ4xFUrP5nsJzwiifStpP6BiaJNjOF10Y8d5LWYi83ydzD3E/Sonr8z4g8dFSXdN1YD89mAIUoTyaM/71VGChVDg60tCm8N2IHwsNrGqajh2X5nmHOQTuy6MkpUqTLGwxdiTha4kKyB8RrrAsizRdxeMLTzOLejZBeuXCRAUyK394qwDaeUpqNZ8dkVYDr38pFftU+mdW8kXH1H8I/9Tu0QBCqqcFmY4Z9MAMQpwIeNV1xUD74Uz0cnBV2izHVMeBU7cwJuAi1i0M5JRzOHLdiK05W8/WxdOQiP/gH5klGaa7TWPOjcL2ND3l49pP3suIemE2cyCMTmc7P+YmcyYU3XHLUizx//uwQ7AG0R2mxl5k6MZ1vEzX+YyBVfwCSoSy3gBVyrd9h2oqCcTAPBYXoy86udjEdlGIGS1HIJY9Do/It6U2WNybNfZQJ6Pa3nW0Dpcpbmsb/wEl23LTm2dQga7VmZse3ZC6KhGTKFCpW/jJfMz
PxZA4zgvOqR82fe6X7ql1SSgImCMTsQwo01BrM88eZy3kAWRWK7k8J2cC6EYZU04F7UfIJouKj/AGfBxuCz4ss+K5vZyqS325TdT50+Xf0/MVCNm/eMDgGm0BJVjlq9+5BBnDROwBEWiUcie/VhcwpsXTXI3N4tlD22G1HraLomwAjPnqbKTUCNfrGpwkyshRG8xnux8aSNH1bKxmczPFsOZ5E+Vja5cPAAhXzQehBVwImum+yknEs6SC09WsgCH+Nart3BgidLuRLrmjFsMHLZMgbdAFQV4H40sYrMQJwHohCi6BT9UG+v8yGjEphaAmFGs2PLMlYjMEiSKG7nFemF/9jG1pc8H47YJU33qnhxf4Xi9cNJRC07B0XPMwcJ4CySXVRQnmS4ynDJeNVqZbdUTG1OhurlYeQA6SCQ//IaWwuK9B7yPGs1Fe4QbrKzRZJ9yKi/zWNo6/xM2tDLA+5Ie3cKvgFifcH6CH6FdgfK+79/610WxBy1hm+HhDeCzNZ/a1usLcxXNWxeeQgqYb0e+rM/UfxT6DnlQUjg26ioN6SkwLjLjpR6iMZJ+hd7D/4w3xwcg8VNTNAh2gr21tkJdCsaAO0hYNdldS1o2Gd7YowflZwIhg3c555He9rI0DC2D0V2yz2CtIbJ3qfM/Y5SATr1WKnnzU/zNNylaEoPS9vxVNtXZZVBzJTwcPbwe5AgGp2RhD23rHHU38BeGPnkPduBIC0LMAQNLGesyL8NFUMOLAmTq+o1RDhcilaJ3NgB8BiE153kcSWSGazRk8jPqe7lDa1bO6tmQ3pZdaK3ZoyY41t1yNHDJcoPGUFiB7RDHOtlZ6S8vjrvDnAKyiROHXC3jLc8qSU421lbQjX612rMTZou4YyL1Gd3RGhskmQAU8l0zHFHn79of+2ezYhn0hRx2q8Qvz9A9DSJq/UohjySBXhewQkdmAKzyj9cZvZcTRZkTKBv0ksz+FfI3l/rNT3zni4aF7zOGvEmijKZpcO47NRhPuLTD0cg1jUikCyiUMzZQGribFNG+Gzr/G4QXXGYgCdJO1MdmzKKVBtf2+jQ7n/pyWzUpO2kH414XPo2p/Kd/Mu0lzHs42r/kDN4iS046PCwhxRy01uYH6xMbLbyXTk/ew4VP5MGfcS5oSIoJPm0grzeGvniDYqgHRzj3J250wyzFK7lvQ0Nk9jLCdvVKcrMnDO1LM46Z1DlvsXagJmCZwzt13IXHwzDosK8LEXDp7AaFhM5KYBbL0X/OcOHdSXuq5CdYDG62SpeO/cLVF46YZTdVuALkUamJRaWJksD9rW5m6FdaGs7aVJq9WNCH72QaqyzrbaJyH4FYctpEMHNzI8JFVYxb2R3GI4FTKqwvHmBRZV/qMBVW/3A9nZemdtL5OPA8wzrc4KobeU7zrKP5f5eZFPYVkBSWt7AVEqfSHlpOQvFavLuqfDdDC4aEDXTBwZTtCz9FcaFGheVO+/8jAYdpAP6JAzuIS8ce06kWZDHUYASaXRpr6dTu20CulI2zAHqqsNNb3vugiSifpeDnfnrwrGk4D7eaLf7sMtTZZh1QSqA1DgyaK12OzzRPjXvbsNvdb1Yvk2aa+xgwAanK6gwVHfEBMhyS9oShaEVNeyT3e+F/ANfF5ffBpeVOWnVzKIJ5HS64akL6rWTdVhvj1B39J+X0R+X6/WWLUngsmAuJvrgO2G5fbUyiluf0NXScEQIUgrHbvv3re7iYVvrOM2KRXUCZCWswqjcci2E6tVJTRQraYokv89qDesZyDMouax7Ipi2z5vOUMe2UCoeVaFhx0JU/EnxzPX4JJA+gDK3BsVZzmQhPmfpbLk10W1+A7rUlzu65n+cBOGxVQBGslFW3Z68m+GmHj0tMV055tRWs+zQtj0KCt8L/2W2CmQ7PGH+2e8or2zE+inuKkrsHZfNWtuGOKZi5lTdc/lhh8zxjWAiR7GK5i3DROs2/EnhUqiJxtrYFzMSnBIbOP5xrDo5SZlJYLWH3kga12JPG0H/FCy/TsgxueLv4ZX094jsxtzJQU6cNbCvsuoRoGqQ8zD4B1JCOtoTCQ0lt9bvbrMbZgmE9k4btz2QfOwCEHBV4BZzTyFxKkItRPWJI6ZKukRecwO23uLsTEMHf9D1478AVSqC4TDNJDGaUrv0ZIKDMgZTMM+JCMHF+RPiD2/nNEbJEHDm4DKI20BWr/mjPsu+S5QrVLNbUWwwRwRnMUvtLoDjQ98OrhacaNJ9/7dfcsJmRKi1wk7VURxy/BVGhv1L8TCD+8TrmX2VYZjYNu/ZMkoD3pGd+pwVPlISI8MIkUsPX52iWjHCWHlcvQexSMtxdAfH1OH5zZZ/ahEQWRuTFHuGdrPUiRKz2ilBtMMeyXS0iRYHtsb6QZPObkfFB3hv9no4VVqX4/HYybU3SgRyB9v4UWU4OHKp/VETzeizBgSLbRlnXIJDluSKOmDsrlchoMIGXflUxd4Z0CwWe6ltxZlFaW+CNmh/CGsIXc268Vh8x6uUspCPwllczufHL8XGgp/GQ1qWGR5m7YV3NrmSpwqjmWT/LoMxqK5CnW7aUabodjHDRstsMXOfgH9oavVAnYl+vJFXnWZuC54tTRdLLYaX7Q6OZtAy7iJEAIF/ZOBOIlW9/KYWme5EkGMnYpxcvvdpDk07pEpqDgP2kj7n3xX5OERlvZskovmsNLlohYUI309RYEmv27GvOWvtvYcHSra1qo7YpLrwhaVN3+E6W4Sgm5EPQhFZfZNABideWJ5BvcYxKgNAHvF9xf/+hsQh/PrgDZ6bdMoZGiD+1Sj+wem/iobD4Fm66QPEF/Xy9MrP7sNpWE7n5vtxXE4UOjAiWN5SEBZ/Rk2Vehev6xPuL1NLi4jhjmmUtafH7N8R1w4n/iy7ZfETBRh7nUIC8qUhMl6k3ceKdj4mWjtD3XenvpezeJUCNWtwmY8NUHostgr1lhS8z4uUPMMkELkKnd9pn7AngqC20KIkdUHdT5irc9WQ52hLd3lDbt9CS0tyGGG+Jof/cwmqCvzqxOF8NhxubJVtGBZGtEsD8G+kj3IqByryCe7ZvmFAypfYB4nk/Gan+/xjtneMlt/alNmlv54hbcUntPwcGdC1UIhZ0Bpl/pL0S+L8+dxqhxt8+7phZpLT7PX2NNYZ54UgH7xk5ox/SRgYGvsjv5y6Ti8zZXw2O8d3N6FRejmWYUaYMavlBqa1g5nDL0ccLbIqq1mlLGcN/vC8klp3I48eWbz99m+6S2xPnpQiNUglMOGK7MrZOIXswff37YjWeHQ7zv3HsHPLWO7AIn9faD8HWH5yQlBVLoDmZiU2SpCCwP79/V7p+yJgWR8w3apoN+WfQTOZyz7mxd9GlGRiJHuNrgOAT8yS6GByAOyz6C2BuFRsvJdR5gD4iPI7vq4LrlKvnJtaMw538JwZGvHNN0D9l3BCJWjlN6/0+DDgxUJhGU/g3xiasNFXbJ678xwk0QblscukDryOh8PUNkufssiJyI2AQ5EZJezyTLmS2sn13ASBtmjSmNdZb6z8ccgoLtBccuCPkWGfxi7x
lT19QGF/KAjM//X/kOJ9/lmY4cVWYBMPCcnIDfCJZKPwpuGMfxsadfrjVFBKJYYhNlOmottWziMLQwLRNjJ/EBPei73LU37h6+MnEWQp//zYaDVYYbWPRw0T7lvv0AaY8AJ1FThs60V8owCgXpAvpdUBSsMNuyN4aMyDiXZ7OeJMeMwYonn48UHv7DUzRBq/jgKpu1uGOGwIVN8ljJEEb00OKxUdA3/12ser/XjvbgKl4Clo9H2a5b4o7l4o6DyJbzdnPlM3hAosaovqR2nVT7+JR3p6H3ujkPEfhV4IMU0cV+svHoDUI1yR5VKF0gJ9RBoKzKJ5zMyUUPBHur9CWIyccHgDyX2XF6Q/fKkYpK0I1O1PifUCiRzAV34R8oUhFLsOXm9wPY5Sc1k4UYuroZ80yauxRHnFrCqx85EjlSu3yNT7BjXIDk6c9c1gfBC14ZqNfOrN3f65Tej0cS2675IGMMRyVUqkJ37PGUuWO3thktYLexaTtItFPoiIu+sEpWSexii9LjVRuP/ua2iUALQBf9NIRNnVcVpZJ2DLCYDt+PF9RBw+5160S5gC258642uXp30IIQXCjlPZJwBs90f/kpDxlxoRholxWiI46yvCXWs7owUxZjqYEtW34wATQIDvprGGvvtDY0R3VZ0s92cSakSzMFplZb9IIpLQwjrHzPKUAq61suKTE5LkXjFwsBHRnR+gwYjSa+nl+Kwbcvve8F1xwWQlUo82OB0QPuwjwq3EpFBV/XR1Yh3BlWH1zp/BSKVwGMKmEZfhKsJmZj1tVkxhLJxDF3px03kVZ8l0J3omi/hmTVAtzpGekD1nIZDj1Yrf1zo0d6aBUVdBmyD22Y8ZQFsHFYZg4HNMyx2PvWG8oF01svvbpoX7HMO7QG6kJKg5CvP5joM+XTeI/dMUfX9CxTNNmhPc9enqUGPjm0A6XkjIH+DkFl/SGx3AMzBPYSlKCS3HfmLHPtgCv/NU6KGN0ryqVo5RJUI4h1POLNjYysbspKln9ZyCA62BOB3ro4eFDQYErEYL+pD2dza93rYwT/zbNd5aXhxDpkhdXH0PS1qla5klUofTzfbbNgsh0kxI7nut0gx4TQ+7FmFkw5UNOCF+dSDfCtQsZjpZd1y9YGYgl2S6gawhW/MxF3G6y0DibAXvL1ov2Zkt+DanOsvELEquHaMoMUAT036hybF9/tLzfbPhpi6Lnm63tlxZMPzuJzDfwsxYlCep0JAB3vS0OoauQRh+xFd7T1U3dMERssi6U84OUy+RStyuDjTupXh7ILt0zPnNN9Sf9RPFABySA/Pev6dwKn+oP5b5tnChmr5ex3yKWz/vpimntitpcSi7j5dws8R7u15MJPZXGEgfKJdNaWyuRT96HE7581JDxJ/JVsxz0oV7XvXAaCUVFJs2gpIoiRjVnoQ9bk61H8csaUOY+n6wNiqZgu7wsbKJGxe4ScUSaF++Q/0e0g1ctZnYa3mbmBt8aWRmiA09K8j9KgjC72mKa8G4gsdD8N0F/jH/k/yW5cNpPyI+RIei9F9W8h1fr1ipUxfeDR/2x4Ixgq7tbd45BahkeXKi1oVx3z91gh4GID3KFog0J5HO/TfErPQENmqKP8LsSNJAGgjp6q31snEAfhrp0SG9/Ch9Ajy2gX4sO9T6Sp7T4GWf9+ie244aJTAlDmqPmS06yqIFrpH2mkTcF10c0FiNJbTzI75yW2U11XLsiMkPK/NEAUKA8uROnJLzISl9bMxqEFpTcSzJE7uvJjSvrpg+QRqeEJuFkbtK6+bTJ9Kj7NP9g6RB81wEms/n6Tc6+2P0npLGGQIakPdl2k0iTYW/lYOw+sdUmt4Ftzr9V8b5tZnkRPwf6faAUAmDAAA427ZtTrZt27bNyfbLrpdtu/5s28Zk7SH2IB9OCmZzxxj5zb0bY6vjYuO7Zc4JPussRUKmcPDFY8ZxNJuwhwQW5GGDXf7T5DycgHWbEMJNyix+RVtREZK9N7S9UGAhAAJaZI1AtcP3dzEO7sO5U/aTcvKMfCd2BUgmSBIuzKwbmHgfHIAfimSMuTnjmSHR3DxhOAfJmNMDprbo5AJ6w3yGstz+lccaLH+d9fIKO1BdIQFR+6o2W+byhztkjn748k/Dxd/Jp7CB+UHQ7zaLlG+xkli9pR6pH05UISV+iwsH2IVFcxdqMx73mlP2kYlJYxkEeVgh6xk/wncVXgmiH8ElxlavuG8CRiYVgfyPZSnghx/oMMwd53FqvIVF2pNlGINZPR82IADRxu1tYavub8sT/UOSJdlrn1Nrxpog5OEF+XkNWPp/zJnFVNNYFX9X/50Ve7g0JtI0bQl9pa6te8sg5DMHJY/n4SY1XHDVk0V2fuH4RcrOGCeYoxlV8MCH4+s7pZznNc7DnAt4wb14rt0H15XmgaJbUyI2FaZtaYm+q+tr/MZspowPdC1s+gCU9TgZ/9OPVeLmCyTRGRaQvm//hx+btivIml4QlMlCFK9mLP2dKKqStkR6Dtu2xuALHLJhNIQqjUN8MxE8L4pjk/yabcW0ZeXw1T5U4E+Pin2tpru8xd2y8RPDK/JTJ12i9XpOXuT1yRhe/jfSK90T112tKDGdPUUp2wx8XhSU04AD5PKK8DpHs4jq6VTddrRdC509ymXSgOlZGBymjrcQKZDFbnAE6RX4ViKVvozzPMQCLxlF8nJQtw8Ey1pZLyOXxx4SWjf/rm9KSdfiue1OCOecK+GhY53qq2RULQZCW7o7CUj5M2qnxapZ6lsmadupWbDejfekNRSSsI+slJP4hDiNL+w4U8hXyu3Fh/rsaM3nABzErWISkvb5iAap3a+BPDFWRmusuMUMVOsB/4VW3BB+XGlTjKaP5y6CdJSbDeRjam9j47d/4XIKvv0u1IanyRl6wIP2e4npWTV3mmQxfQz7HM3V1EfpZNVemSFU/R3F0LdK8yOPuk9fGD2EQnD0ICu3fn7tQtHoiVpSm/eDNKWvINMIvb+v6W1t7A7kburv2yQRClkxi/q055XTK8xPJGHUFQZBdv2pApcgaZEr1UgK9anclJju90qU1pHsVYyaPXg62sTKumtMqPgm8W3/8RY4I8w9qrz7MazFWMuxrjMPJCBUDEo/p/jFzpv30EOUzIS1TZllqAkCilQ9CbaY5yepaHhwgwOaZtUbl30L/9K0bK2jotnK4ZD5QFQI3Jrx/YZBrgkOYrDV13dny+yAh/0k5WT03tIKK5kZyEsjHruByrfCmyxAU2FTU+6IsPbbXnQrnjCNRz1e298Jke8qEcot6+m2q5hL7zzrynOliIWlnWgWj86HBqSiPQ4DLOkYEnriDDEc9NvsjL6zNTWEvbYBOhdLEjAvGy7CKNJ2FULPz5ZiE4x2+SyXpNlQfu1vk461sKgvkliU2zgvcu1j9JYa1HhO10uz4yGzjopEoeftAvUVOGdJzybuJ4aqnSzXWqjtXuJO/7Xm5MsLrl3m2SRYPsjQ5In+sak2uB6qKYFOGE1TNNQWsPCg4Z7lJwWQzOxG0e/GGMCMvPGW7uxl7cXGHnbSssKB5eXsirx
NaUy71ZVaKemM2BNv8jdgimYdLy4G1z/DDvl7aEECErYg71xo101tgZJLwnZNFMtzgIYGGi/P8CkMjq039JD8VZ54r5xSEN8Yq/wBK112BZt/2gTit5evFMuWSRpTp7+CWrjChLg7Ndkns4tj+ZITlIpMf71yEl9UzK//GRL7DInc9jJv4KrHmzc175GIwIHaTW6MTkJO2L3es7OdNfu4a6eEHKWwppxuK/yj++d8uUJYOFt0Z00cydafRYoTaGr42U0L5FEoruWQGR+V0H6NaCwJ1hvBYoHnpOfW3bygTiw6mtICUWoNluRG13jiAB09fCwK7sS3uWRhOVRUvgZEvs10tYGOXC3ILNgA/f/jLMQj25eSToUIEfOuajO1E6+YvNfqAJ04DtC3Yu5lykZLdxZAEPBaD2DDHzrzAOlyFsYAPaYZeW7iOJ77Kvp0ouvNgEThNt+n7v5z9mi8SkiP6hrgASXJ9kSnUXRekwrkbLgKGrH5qffrG5ykXBFoQv+QNXj8rmEljVygpoEI3RsWgIzmemTnE+AecZJGzOfV64YvtRBBtwRzmaWIhNMF302xpbb/tXJXi6wMd3da6vdXsuYXWa+3rXEKr3Vf8zIax9lqBiyqCHHEbZpMnto5F+ijESXfMcmU63jvvVjVRsxCZ5Mrv47KlRennE8sVyzw3A3u94wjcQb6zhvaLPlCBkKgbxh8504J9kkur6bA+qKnNRGfiamTHrHYVmRSEbW07m8/W7R8ZgbLLXoHSciM13i9l5PShntVyXLvZ6I87vjgsgHnS/erqw0zvXz6gu0FF3j12sghPoYo3OJsmcZ8NBtUTRmPMugOxtMpv7HsOlgoVMQNSyhM3sd0VzPFpPNVyAfXEjsELnXdyn5zfPNRYZfblJCjIniIWYWxFQzTP7KsiY2HrvhDZN6IHFIDETnkeZEpUqu8pNbR+6VaycYwefLuLkD0SO/8MzpJZpe0lNDTKXBpeDEVvFNNlzqsY7P3+7uGrZYUQV6TPxZEsWvAMZN4NvDqdn5jYt4VKSMMI2hWlh+CHVSZvNyZp5g4y4WjbSfyh3kRQiR3sxN6sgbpNnMKEnnatW3H8mPkSMw7IGxX+OD2fLXw0tQJUfaRZzhAaOT2HPr1WjXhxL/nUSzzoDrI7nn5JLBXHqtfyVzdNZ16LsPRPtF6H1Rae+kDU0xGKp8aFa788mZ5Ga0NrKIYjlb27b+46k9VYqg2Rt+lEmZL4nD87f4Q6JQc0BbHWhgQjnZr5fb/lT8M/ZVGmvB1W09L27q0cBzo31KWQcWivK33kv4YA1e5UsOjCqBORwtqvYaYk/bLwcXGelzvZ1FmqKBQBUQkgdscsYrKlsewxdRslUTeb7bzIE/lqpwzwQ5eUPMcq5yHAK8WvtuHXOXY6TOL43ddMGRhrHITyGvn9/qadA13Wtv9LMKKof1EvTDULhAOYlQxkdBUNoWpIoAM+FdqcirSGDl3TYuBYvunWmdKFk494XFJ26+3Xn2zTbdtOX/YxanJeARfTGCEprmimxbMy6OwTFyssu1vDJ0T50RDxn65PqCn/TYTM+Lbni7lafq+tCcn3CAHJ1IiRiQMmTlBFhd5Avk512cK9++FHy4hM2vi5FIh4diWLw83vkkMnBcw2mMi9piLJ3HmRw8sAvKm+ImpborkBrlsGGSZAo0kw2c/sTqN6Knj9s6YZS2CyA2jUt6Q5mkHCgOEp1ktcagKmF/JXI+TFu9MWJOBBNSbcSEOVP8JbPubNb4FXQuGeofPgaaufO/0iMg0U8+TrYy6nJ4y6K25j/bXrMEXrIeeE4VdGqx1Kz20d/wn8GHDZ2Rq2zAg8byvVKAvSuH0iB58M6RbVtcgLfHlwSYoMNHPuzb68Fg665BA52J5di2XEQeTcjEteHSrQBSIgrwtdS+tzMpoDx+8cbkFD6xj6NNhuFnMcbHNk1bNbItag+LIlhkjplttKlCuEDbz8yl5fulFAXEaQHGdyYMndjeatbVxwMxlynvX1Bh5axLk8b/ywga/VHsvL85f8H2N2TGPSDK6kAHPtTsEcJVT97XMyZoP5L0KwdPdgt1PsOJIQ4o5zUTnBYiH3vyB77EQovZD+DsmkE78Aa/pt1xAuMZ/930OMZDK3qG3pKtJq/UMOyy6z3odqsdVmYnzvOBOf6v77sbMAA/8ovmcaULrNm2uLn4zGmAgVZYHwQm4VclAMYLInU5az8A3emicTjXVPlYfh3Ia1voJCnIHkm/Wnn/hdlDne1KXVPyOeYZqvlsMKizXbJlO2xSNy86U3gQA6tnBi+z8GNpd+jZAE1CjeXfHPSN2aHYIKHPQnHKdVFVOJJhQEvALlwmNGlkWFn3yb3FUr8BTZ7CQ7G6hasFrGG5UDT156IjY9jjbryvHXIADX0Yt1o22SalR4awf+gcZprzrBSxGXsj8Nbk1WKhq0DDXQaoBNeurnUrmU/TAtUFQ5VryRo89PnrKnEsIbS+wGdM2R7bMQc7B0r1aV476DkqJjhQc7pUqIY4MUhs4XGO+Cixr/mUd9GUeSYKsZliFaVxMWV9X2rC1BzSk9T+viUgpAfpAotm9lf/qtSQFJjbj6d67Di6FUst2dHnFyPgUywpxuNFW3Ax1rwxDyGlKtTmteOe5R9n4wdE8GkstDVgqT6wVNWmoJf6lmkwHvZerh0b55TrsTs+/2TSfOGZ31LB32pDUGAjazTlgNimFRq7nYF+jCIyQ9CB0KrxsNvKug6kR+1zAOiqTdPytZoF2EKZ5spX2ss26d132UgVyHi/indMCmJZz7RcOmgNm+vnlGbKmpuzoA8EYwGSsr0zAIkfTfR31aJWAHcZElCzqqGEu4a5tLRFQQHgwbt98t6ro8kTAQ45I30kC/gCkikrOCcwQp/K08XMnt8ofVQldLQ1SbfsLC7TEaGLMHW8UZ3RLSZsOAG5XhzeOSa4TXEk5JebWeOXLROa71Zwuvurr1TNxsq2V6r1rfS+YMrfmFdxUojN3x9yV++Uu6E/mHdbip1cfTrFPBgX40QjSZs53Z0hz5wEq667PDnLgpGQO3tfoOuf541ndkTPWhGTyApT32WN7DK7YxJweuAHkiR1hq/TdxPjMf0n9lE/Vnm6NhkC6H1H2QYy+M54qSxXDC/oQ1eO6o3jN2KASM+/LlNwqf1NlSKr+MpQxg70NaWWsf07waYN4pqfC0hQWRZyfmgESKja+LH6RFq31g+g4y/wGl9mpPzU+SB9P12+GcgHfBgFChyYX7nZw02abrbJFQvriiGqaKI96JAZu0qNALMR1wfnbVj3oxRb+cp/xVuaTNW6M3jmnqkY5rgbfrdhNgc1vkpQ7AkQFGJfFczQxMm5r9hobC4KdnXE5zT6YCUUeUo76k+7Ww/fkprPkwrF87cDSq+6pRSFYUdLc29TfVNN0Houd9Zoh2Rk7hK2A+RjR9yqFBwHTDAIObhxR750NNQIP00IuY7GyRUG13yL5wOVWtbtNbNRIEaSSghVwvODWOd3xDUcwV+pjDlZ7ovd/bXZ0KtWw87aOt3eDxjgOybiXCj
4koZoQojoMXU4CpKSj45P5nkskdDwIOCiH48LTKrJDGIWeNjz2PhrJdKFZBp6RNZd8bofihwfBjFbb8sLk+lkXaxcc//D5RXhzr+P2OHBkKIY9g24YlxO+JLbzKx0lP5KXPpZzCQhzniJi+8Qb/aeCXyHr/9FaI9NCMcaCbKJkjQMHgtNXskbQN9P40+CbsdyGVijm5XpsfNSZj3lduIEL+lFCXz8r/Tp/1ZuMWMkToC1Iil1sBzlCmvu8AkSKS6M3/3RzgUJJ1mZ8IHFOjWVPJg9bgMkRe/mT3jKuuJFc9CQf9Amv1LKwlol4u7aHdRzm9xFkQdlqXKVppImI1cqLhRqpd0VXtGQ3jRgh+Bu/v2QZ084Acc721SQ7sd1ry7RKSum5PhPEDZMH1fn6vdzMrk6uTYExTt7l1S+3d68twSRv7HjZEjHC/imgnQ8wetHqknTtlXMyBM7Ejg725XLj+tfoilyazGD45M4BUohsKvrRr4+Vy5IOCjSw580NlTnmF4yeAzW2KlZj7sjsVW4YtsBZQPo+/xVJM96J3T/tpc6C74EWn2iGtUpwaPoFKaIapKm0MBkQ3EuJsDiFvIOEbXyWc76fsnICQ9WCh4CiPtlBv7u6q1KA3Ot5jY2o7povV5tyFUxPVjXITvKY6/6jhaAW28WrpPQMn/VoYp8rgCENlCyqk9RKZ5viwfJrwCssU3gFGcI5s8DJ1sCTttEjUZzYTv3onhDzj0pkp0AZVKPOelNgjuqoWgtYptNgs4X0Gfm6t1lstfE78dD9ldRNai/QAwwpkFGZNT9aUPQi/Puw+iqb4s01MQdJs4lFhmdE4HXQhd1NvQKL30vHwhE+2H3hTLfccmhm+d7OYpDIbTscDrKqpHcpu1L7jUauRgrLJUSp0soUc0Vq/j+93fOO/X1ZwtvqMo0S0dg3H8xMV/ART2ZziSksK++afy+RTCd/wQL2OmMmz7xKGXHmKN0Uk4Te1IvQEnGbxNIggxTcn4lA5+TFBwk73rKe9gK9BxePSNgAfA1wzlhdJq1P48XGlpeSCPdEuCDHu9m+NHvehIoDmsE6yJLaLvwxyT+PXGaRO207VrKcEw1n4joPMvnltujI2P8hNnonPVKnlfjolYHWOcHF7/sP3BlaOA3iY7+Sr06IGaCuTPhzPq0a95iUeg1cWGx9pjm30/pjMTU4lN8iamwaifBfZqbJ8Xo1Y9U17f/xsRzTLgv2r3Ixngvg7UCD9F6zKOfp9DrArviFA7HUJtIZ+9JfvGeMQ26qsGXvdpIw5ZAIfSilypYcikW6XMoQsBcn3yYPkltS6sKqY7APFBFUv9Nki9MJTa4nLYMoGH+jpxKkG3/nmY5I09dB1W8rzJLnwh7DWa+kjvkGaf0T0UpLRfhT0Oz3GINDfMsqMrqJA3Zy6KyfrRUkp1CuDPaeFTdrTe4BH6i3THKk2YIvtvvdKwnD3zrlfVWOXcJ9uHyjtnpY0mTTuQF4A3YkhHt7ctO6IDhLQefupnyvt9DitWgsu1ElErTmGPjrnQNPRHiv2IsAFyaSSg1aBTfD7XnJ3V9RlISTZF4eWxym2M/PrK6iXgiVLK0BiNwBBZXK4kfuloTJ/6YdtU7/kpMfqzWKlmLnEUI0nZ9CJ9Kqvp8E7uMssCmHXv9yAhhaBnU58QAIbodzKIaWqMbgBVUdeQbI+NA3DURUeHkT/lADDJzn3f5t1VoA+bj2AijPgQQHt7AR63nRdbpegd3ViaJE9DvmEReai0vmOf4Gxd1Fw20F71MF97zJV9x/6WtBRcgedSvoXCCXELWayW4ww33QsxFThNGFS0FIVO2XT1yuVA34gbHeIVWAZJP5OmNdthxVMMSIUGysLXQt40Fu0Xh9mFaMP3mkpNvnpDBeavWEMKHRZW0PqDZChYZzZ80tK71PrXKDivpqdJyPo1KsQcHwa/MNEavH8a9hf3ZgRN9/HYs8kbIynZ7oe+XeisSZD12nDmZOY3h7hXUxUOQ6inJGc2hBu/HxSkDF2SJWTB2y3O4OKl51Wgt7WVEKmSK+A8ObKqjlZUGmoaVGkSn3riwQ75YwSzLQo24jpZjz3DYOgGjFwRNWFwx7mpd5vxoK4PLQnmppUJ1tjfM9kWmSbFpFjL/3wjFhMxm6XDguuDACYABsACpJRXZsdHeMsM3nULP03jPKRUWcXvMEDocSP+zZcThjOEE3zrgKuBCX3aoqpxpa8UKOq+vOKntBtV0F6fIc3OIFOxsWKDrjlALwTA4nDLvOD0GJDUxnJAEx+lq0kzgE9i1nS/nKoFBhLZG32ohTQg0NTRypkohRgmUrPsx1eY54vcOZCGMPKvCFiuYE/uRUe2/lCuCTWRotajSEFXkLmzX5mCKXulQEcwGuWKksaateXA6LQS2HlRwqiOg8eTd5f9GSd00NI6GBtIu4jJJFQIVGX6o2zMghFsLtqZfV2quyI7GRlMH5qpq7BueYh1KMYZRDChvK6MvpjB1NMRaV4yVsNoQAdjwT9dNzrF/KAT+0qQ7TdP0O6RKQduqLOQitKzLyhwZxLZAY8Fo9HUzpH2jK6WxDzjXTnItHpSDKZ6wPNooQsCVi47Aq/hilzlg+BWU2gMTIYYdvSiQ0j8N1HLSHLU7Yc29fYXv0X5rs1lToazTyroAbb9wH67vP2bVQ8i7sRiWeMIx3kw//1UuNq5qW/xn0x9VMad2FAvRsfqM5gD77bBYNHQMCk4+p1+RQSmqZgTGAz+fBRGyLNNAz1JBZzoDGQ2xdEsxKb2yGLtZVdvANFDajyI7n9R1C5dtxg3sl643gL5nReIH/1JwydxYBROfef7HDURgNvbAaFosscvF4VRM05wIlL9WU7LTIXw/G398q3RsYN4HpVe9ACOdZWtK5f2AwwG1ssnaJ9VQ8f0nFKBDfISoiHTWbxrODJR4FolpYqpH0xeZqGuVc1hhsb9piDV5iUD7VaWV9O+VBmEnKZqxo/qnJ2q2JDf5Hbw60yC/OQurqagvujoxCxlYLF2SZfS6Hy3ULxgTKHKVKA4vtJIhQcLUpNLTGYKpWmZcwrSrKmCGmzgVFw4raj+eI02OhFzMUaR5fgE/LJAs0KF0WhGbC1rT7qGSM87zS/U6Ky7JO75tS+8q8vX4iSZZKlfhv2A+wmRSC/dDM2yHGP4GaE7HMVtIj+JzU/oBU/KfAgZ/c7R0lM5XkKdvHv+iiV+EXoLhcKSQOfyjmUS2eslkMbXqrf+zPL1QZjZBbcFM+1JrQbwIxB9e/6DfZBYLOCZtk/Gcf0YnFPY9Jt/8POrV2Ef4jCs8Eu/ayWc3mFLE5t3OKudB3sgZ5pFRCDAd1L6KSJLVgnb4nRhTipTmla2C8YU9/VM6erOjgbKW8pRJ+1Z/vt8RIw5btbnNs0MPa1lHYbvyEpaWq4XNpK/03ys/qrA85UC1ZiTx0L/S22vRgvQePNQsP3WGVXzW9DUrwtdWS/hqJTPaZd+Q1AnYZ9+fRjQPr2rGhRu2TW2eYNmzvmiTwu2fZqjO+U9m578W/hKjegU0qB/kJoQsVUevsofqeFZ5+4
mniDMsV+sBZI4MkcSPmSkeSIVmme5JBM5XCf/q4UpIEv6X8kkV1+9+KhS1HbWGj+iBg6AO0sprvIAhsJX1GInpkOKPgWQHOrNBVcoY/irYYo1xJVBdDuxusmXbsGTboW15UYl8Rg9AHr6H/Zd85VsIQ5x3hWopmVzUY4jwDdXemTyyrpN9Y3+4hHQ7Kjd4A7jYqE0z1Tfo2pzJwUI+grIKMPBWKRn9DmVl2zm9R2gH81uWeOePaov3PCdthltdridHKt7JN5+g6G8gA0g8BBgmTPrk37D/MZ2FdE9vHs69CBlIiRV6eft5utSsp8L8NG9ZGCdGOqr8SLbFIejVN8geqy9/AKmTnq9whAK7LZTb6BqG9To3bfN3M6SPi5rbqa+JeQ2tkfOvmm9Xc5yLj2/mJ+aJua3l66fe0aoPcLlNAD1YPy1XoWWIMC0m7PflL+0y6uP4PYbogdwF8b5m1ocjbHfFFPjp3DwtBaZ13P/8q76tSeArTUJbnF0nCbiC3PvOVTBs2s2HV1+a5eSSkTad5kIEL0L4W2+ZPXmaDzwBRwXqJeCTDXVwbOdFjVSLqI5rEX7ztg9zR9y7yyaSLBhk1pB/nqmP8rgRMyENh65njna+x2KStr7Nhzh3I76GZWGPqEU2sMxoT2Xj8IVPFI8l8fuZFCGhL1cEAi70boybaksS/wDjIO/5hoHH/hrdPaq1T5N5OiDf/UeQ8WmzsQkJCpai+TNCO7IKKY8ufk6T+qf/K6nnnObz6JTHf/mNsMheSqva/DxGPZpYoh+cek58WDwWAXazG99Y4Sjoh41eFZILyTir0Y7NyLtSIAPniTvtnekYqx67t/qLF6xvvro34feHm+kd0RKxsYsmI+f3gl8BcnrCNJwd72wekKrj6/Q7Xf1BV5Qns3QT/du4d0IKIzY2/o/l/WXxjsB4OwdPrW+aALjVZwJsiFWHZKWGT7CpcfBoTfUG2Jbqc/tH+3OesuwuUkmj4lTL57wK9lkir55OFT3Z9+YaSbkddIEUAtjFoHtsBSik2PpfbUmttjyRE+L6iqjPjHBYSQ0FOrxpA2OHmPV8k4E0KHliRFHqk+UFRSAdG1QHxxMi5khm5yIBxhOkfBZJo8AJQfDOH+VMX9W76OXpaTPzcL1cCJ70nyCss3GCnSRhSp1MgMaEdxYyYAeougcK5V7SpZSQOc9r/tA0fk2rswd7FXzh5pj3IMNyz9UxoOzFqRBq8J/bcQe5DXZL2OC4A6wzxs/iie2OUkDh0EM/bt4Aot7a1HeQgQqD8RZSzwShSKnbUWe1znRbtPNLuUCgOp5yCOGzTJiHQFqgEmhTuyAf1kuEchZyOvdwrOBiXOnFl/WJ0+2skdvgNp66RCR6n1Go06J+wDpeql64idsVOqrW+mGErV03wqguENIyoIoNIsW4hYlSBeDsSYLkbBP4j6bJsbfbFhJxBeFYv34SlLWq9+SsdgfxNEUMgmxluGN7et+ZjeA8/XZX1PGU4bb45O/+GYGKXqn0GaDiAWP1VTuHCizTiUHTh6Q6xiVFyh4I5+wVNDXQ9aio+PoC3YY0GHN5BSYN40Vs7GDBEvrf8808CbO9xBQktQAx5NNmTxGzK/1O0Ge374hlKKcZEBUHu/4upxR/6L5771QGVmOAtFFZmif9+5DwgacacHRKExSIv1FRumwCk8aid0w4yXi4JCTXT/pzqxCtjkSAFvx4hfdMcvbioXTaBUMzMCZfGTcGRJvQX+rtzQhIrbJ9W9eRBGFawKws4WGBLJpWFv4JSjC71XMXrQe353dtoG3/o5cwtwdC9raOE19MmYiKWRONJkZ6hMKe56frNVSAEHJnvaSsf9gsf74t/EDVP4KnYZyXBCPSEPKpqFD4d/K9GBByKA2GEGBupnnFhw40zCuGBw0y5i8jADIilZwXfZ4YXZ78X8I6nvha3GHhAoVGDjbPZsPjxwL0bkQ+Cds1g9jeJRO3zNc3mSQcwSoXvvsksO41cJ6u17OWhAZlsSptl0QQWe0wTLwtlcdwssu+I3pHQexRz/mUmvhLA32yn5/c7nUVD8TfYwYhmc5OtJcBXE17BlWZMVqVxX6UOJlrZuPL+CI/YPrLvE4Y8nm1d3KxAA029NJksk3TcM8ilRiAkZk7o3S3bU7dN88JzUJD8SCMoZu2ytPKfE64FkGZwbpv4dL5r+fyvHZ7FdYkfDPkHqHKPaZK+R+Gu2d8S5uJvxP8GLFrpXF1JEF6NHhFi2xb+gS7xkN41D2JcoK6/1VEJGsBYFD2Vjt9Qqr163h8Cg532sMfLSBaRWIs0JBysDr6FKtwzqDIPM9nmef9DY8NELJCclT13OQ7RPCiAYonELZ11Guwofn79l3VbonrP/h8zpj7RZ+gLMuwPBRXUhkrzJYecx7/xM2YnphzEfNkokHwDYfPM7/4McS1p+ebVuZy4snhypcqHMjmmh8JgwT0PELr39NkHVCacX1FBFxVltJw2whw0E5ZcsEFo5lSfZO8lPPhInf8KE4Pm2zy+AtFbCZmuxQcd4iR0mokn01DRr+4Ui5mImICUHV6brHnrtK/PXKxhY/9Bmrgf42a5JkT+iA1qw/70P551VDW96/bwCToKxImJ4zb4nL1MbduFh+SERdQhUElRhXj+fCJecYDRMLjRDSo5JFnw7WkeEPU9iICUR9prG1GavVLhh+LKdbTHpPuTboV5gTJsdfYoMUDykEsLShbXW7WEFRbOaJl3DTRg3w2D768w1WnkcuhQBH0pa8ptLFC9/VRnTuMom7ii1fe0jtOcQmfkMWirSHIwdILLgMuK67yhtoG6FJdYHu1rait30nJfBZT52TeVbmseVdCbC22jdrVq+UuEBVrbtv2pXnQUWXcQeZCe/NpcN6yrkgG2MnJl9wfdkPOw0GsXfQOMMhkQ3Y//sKYAsoPgM65O4TBf1ZoNP0TNaSSCQGzanXP9T0SfSzxk2RulrdlMLI++7tqb5F/rkeMPE9SG9ugX9poGaHJQWMnzCtMS0PJJ+mOV2p0HXnRi9Z7k94L/M1rbyF8o41Nx7R42ZjrrvnSoeVOHfCi5mhw1gCl3sY6lAeJN63T+WHb0UWqnLEPQWlk2h6a6/S6D5+V/Il5bghXT736DvGy9jjVIYUFmrLSAmgeqxtudASh/kAoizifSRewum+yMvhhMP5AdSrhV1xsRQLg8G7Rm2AvrwL0yUKem/fqLws7EMcBMDxicGKzy0/4Saz6l05lXUxk5uqtZcasnqeTFTTPknoxriTxSZHOkxy+yHNoa1yCK5Sq9DZ/kD/uunXxVdYEDBgJCRk6TFB/MXRZ932snIJ2hW+gcWNHYVUstK/jQYmQTV6v/695dMID9xKr2+wPyIr5e/0b0GO6jGk8wgKIeVZLivJA148JgW8FXCHJ9Xz3tuuXrvESPHT3rBy5qqYzxxMjRcWU/M3mDJPGhyBVOQRClUEgvsQuniQwP912/KXdrUVEI8mc7bqL27TjWOSgEP8eewhueW1m5G8sNXty/HwvvLrLmYV59qvMNSgS9gqXoyDewtQ8iPoQ2W//K
zfo6UN4GnWKjBIl87kmJj69S+xTXfs9VKTvGQ5rFgx3F3yuDTqd8SOjWNP926hNdUq6WBdRCdhVHo6vR4Ct7lUw1vau+zi7LBCJFRsdJLLKIkwlxPEt4X6LsP7sqxSTts54Ehi+ki8WG4jim2oBuJNP3U0zxsyGTY1CKjpKkX9lPhSS+LZXR1NZN2Io7qxRe5sHMsZH3nR5GTHMLtHk3YXNjkbjW71xLJQXqTPKwq6SQ58OEMG2xUhF7shUE0FPC3oFtuwtEH/NlwuE6kdLcOO37aH7JyNDTIxGr23dSjG/xL6KSwM2y1vsUVvlTu7/CeTSPBIKaht+B2M9B6r7QTZjd3u9AvunfsoKIsdhfelnLdL/mp1H+pEDEEeJcN1TtWtU1370ai4YHx0ZWnULTYlSoFSWstP/claBuqSJUUe/z87QjZA/1ir6W7SyhB/apGif/ueFRjEWtpNy3XOcRxjndTndNC6dHVyEWlaR7YgHLOSIVboJAEMGYKtN4glSSTa/hhDWv9Xto0F9Y7wON7JetNIZWjVjlN1NnDwu/S63iykZVz2CGzxw8e+Jh/TRLaVXvS7BIOdn+uowWvMWot+IlgfhpwpVKz76/BKE4msCXjh2mDXIwxRzXz0UEJsZP9Cr141qaj51AKmxOvZfyyA+snPdWky9wtcKMjQbfIQ2Louhal4EazJr55RtApmcz1yY43EKIePY/7PwA5tyHcbJPMc0YZhWG7wLx8XzOq1tQOFnW/fx5zbEeaUBLUkjSkoHLgJan/8IjfxW/rjA6voKeySuOemlB42gVawBCzwW50ezvinxQlvlkW+ZVdrzCqx0LX+tlFapGsBS1Gkg+vIp98v1ajE36ZMZF9rQESJAkULGUweV4BqWVwtseff/G462Tpxgncf9oiJQZSySw/Pd8/53ap5ZzB6JiGEvzrUuBia4JrGE7UftMU41sOjzea+D47KBnHyuW+fWRg76qU/PRPb4i6+h1fPaxEA+Tp3Muu331XSulAF3YGeryatv0mDPsEVaD39sPLzTvMpTGq/AjTEA0sNnx7/J8GTyJj61Wta4PkgPdck5IrSe8AUV94/k3o/3fYC0FezuCSUrrv2Hr/A0cBKfqJ2A0BDTNeiEFr2+h7r3o8ZicdyGAy+9GTLCsOvMLbrHZap4FLUj/y5sAohEw9n1IQbLwRjDxkyrA4osahxO0eppMJCLwtKrimKnfTv70lpJ9HmjePgReo87GNLMy1XZAUkBc656h+zOsnfsw9GBuGl5KtkyezrquYUX+SM9vusxcYB6P8TiWX86KS3uhhX+oVDdr2FA+kXZ3qOl5df/wo/tEZtZiSlKVjR4a85THon/K9E+BsFROGsjhKOv4FP1yAXjfRLKXcsrStgjIMQxFpKiLMLdF43p7wVzVFPCiosPDZtpZ7dSm1+YZsiHkqh7jxVAI+HZlpeyC4ZznTjntGVlrPNHwP9lP927+8RwSi+p5ys1n+f3S5FcTxvV+7z/gxdb4WQldij7/nHAsT0hcbmPIqVCxjPyhDC5DgaUgiQotLifUtxDP3n9NHoXk+2m7O2feTpGvK3JJnMIvI0LeM6IIqRijQuNrebhO2i1zvwrfw79qAePL/rPACgCLTOf49Eq0iYC4yOAm1pb+Hgt+brdO6KJ9x486zhJqbopFlC5qIAeyQ5BKyTBL2KM1WwjmbfRWTGRKEdQtJRzTrMMJu2LiSeMt4vk9tuw6ZF8WWY1n1m3usPYe/Serq/1Cv0ac70gxTiOHh4QOXnWsdO5F1KEWNIISQjwAMfMupKKkq9O1YyNveptDDBSlLQ5jsNnFhH8MlbJkLh32z1wN3TQkCHGaYuZbBFEV+rlsK7RI3fy0jiHZrp0jlupEI7f5twzlN3mARY76eCgHmvQxe0z0jpp7SfDt7VFLhGLYzvmi7kQhfWHDeU3n2l3IOwrQo4shNsmPg0K1crJ61Utqa5xOVu6bzZPQYVzUeGXIDLVxer6IR4EIb6KB0TZbASEyvBcz7Zyxt3NP8Q+4hd4z4rKpUqVJJ9KAuvg03TalsHrCzvRJs4SwwOckA25I4uTcE8uzoBJDjCgsvCRy2PAablpdFbBd4jX6u/DJnMhLxJidVn2wC16koJgd68Vg2sq23gpHGMEXIbUYZZB1X6LwV0y/g5B/cDIF1taI32tdcjjkjdKOo4qpjjBra3P3Imv9AX1OIqLRWtWxubSlx/83UTlTDO/c+KcBWbIV9Jec4suDyAf6oT5NoYw/vRypYKI6dPh9RYmSps3Quq1ROkfA50XjgYO6ZXXRggGm9KTvYAT0W+d28hcEnsu98ytWn2XvhFAY5yhFUMyWpqJ+DxJmL86KKIqGy4LtIhyC5SMwjQSNSJg1pYgMNl+zScphB0+YyoxFgEeQqxKGHtKDa6ivnOrsnhLCSNqt5K+khabKvxScF34kyhcOIHuhS11KbFAIYPFuQZSeloFGRZSJt/IkCdLm2jCGZzoHqDefHvgnkSQWwt7uZyGRKl45AHoXZ7CxRoUqhmUIoQeCzG+/qAt3EFW0LHNKsPRZ73epO/0VIh/t+mcrYyoLqoS1/yhJSTs5MQrZXeQO7rVLF3v9jp/sBaZJEO0S7u4Rq/yJIh2pM1EHFOXKwWCZgzrj4kgN8QnV/ENYDLFiuPEBnylH92cS5dD2GzavuOrQvbaBOYpuN0jxS8kMDisl7XHKKussmIS5JtqcYhvTL2CTxjmsc0lQKjMD7Wms2oyItp4o4+nZ92A3w9mVzOMgX8+1Vhz/LbdCepDZngDSo905iEuHf1ySoip04eOJ3Pl1hJFF2K1IRTYS1rF+3H41PKG+64fIQ10ltUkMf9dIiZ2qnZ0/6ZX3yO9Kbt9zq+MG6fUx8yhOxEM58EL8NLrY4INNYPEsYJR4jkd3O2ugOAMUX4WxkvO/kahIWGkWoF5YNzfpY2DabSb8lGAMWbkDrLq9aNJ+ipCxH4sidIw7Vac5//f1ls64cYEtdFw4j5QAVDj/eFkw3rtr0AspJmYS6KlZ76mv5qC4ct3rPJLhlRsZkakiVZXwOOz8vwCILrNraULlMCHTIzG0QwJFCYlYsxVmY3dkUO6S2p82g0RSI0KvFIswFnUFdDWh0wJcWKZHyYlWw9noKvcRYIQT08ss+z6zA95AVmjwlPwPRana6Gdl56kFu1P/obvH5+jz37qeytXPvxllnB9YfYOScEu1mgPL2pnGqmFw3ze01BgxlnhsU8J9Tg/HANXBO7QMIgKovdJUIv+cyQ2OKD7oQSMRnA2aR1h6wtCPnbHFPJ8saAfpLWlWH5VlseHJMk8rseFSQ/o8Se9wW5LJaI6IJKHy81GLtVoQ0jr2LnV7izY8Ppl+zXdY81by31Fuzrnnxp8kiFo1mF5ZexGILt8XavVBdne+NX5seWHOYNXWalH9/c8jhZCH+B/qRyjTzlaTMl7hOLMCmcbt3RK6Cuv7SN+H1KVqnMhrrZnVkkFNmyEKH1Hy0rVGnigrDbLpG7Rg5VHFpGxT/hn9Go1BDy855aAqNVc8FwuZUWUnGBGfkhh
qjTrNzci4Wc662UfJHWUPna22GbW0RdpghHpfmRfy6ZJdR50aFINp4X1NqixoAxR48vWciH00AgX4U1imkkNp9H66e10iESU+4Dvz/QjFYcOGjAr+230lKtSSNces7m/lBxSuHVcgkf2Vck6FMv8nGyxdUktqtcjXFtd8t6+m6YXHDJuS1wAjiywb+qWPPUZrWBTPTPxuaQDyt58WuXx6/l6xIlFO3QOaP2SVr00p4JJvNeod5EmV6++l3rVIj/pmCUJgZgpv9e9uvYHhDn/yVwc0ot3HCFhc5TR47S+i3Xxd3Y1i1iOmfz68og8DYFpl4l/d7jQQUEdZzh6U+UXla584DWVH6vy4yc2EoQumsdsGAnJ11VdKvPMH7Q/+0ABpjsFoO0hNEbI/GFuBlY2gn7bgo+92cLwnO5Qh+8Dj463oDGZe7G4heCNETSwbYjBWo0iPbbsN8GajvZbigllLrMpGHfVZQbEriIQO+ibAMhU1VNQLW3R3DocJGYqEsjWOjsuroEpJrYLx7iU0OGptk4+bU4m9/9KZF6qwWVEhHt1XSh4tgH68Wr7lnVBGz5q2pi8XJrtXgBEq+Mg+VYZEc6f4+3p+lEaxe7s1H1iUMkwS2IBpcrIEKpCfymU2D2WZnAooIO4qjSHBiYOh3+VsZJcvZTJrAW1WYcs3VJHK2CedNIlMV3jrt/HvaY68ACiahQER109woRCYdzSDpfYluIDpxs0YmRdz2rPfN5oLcG1FLczYUTU/WPcFX2emRGTcq5GIX5GbCsBYk9RpagIOYw8a7o6dV1/uH0978l9170RkMj0Hqk1UM3czc7cp13IwBuhJ+xckwiKm1vk4m7ypnZo47pi/DK+uEnfTgnQX9rXmQ2d+hQM9UOfPMk7FbCtio89Wgjh685qGIkE3x5U3Wwv/2fnRE3g2Fs5FBKgwKuVOwZKWRjYRhoT3nHBzXHB+kTj0fD1heA9aYVfgISGsfqN/ed/sBzOz9dxEUwRxs0FRL1tmuUCYXP22P2BlDTnAGkWk9QhOMBvXa+KU2tvMVUf0006iXkj7b6PuAEU+EyY092Vx0/J+bMjVbYE8ECQPxWZyP9I+RQag/5HyAUOgM9aelUCeUdd0UoUYz8WLT9cU6MAAZH9XxOjTWN2Whv/2DjSuXk2MvDJR2CKuoXEON7RpgZ2XIru0HmrmoKvgt2WAySuCh9Fpc9aHbqj81X473p1W7aPMprZdKcRWSssEUkE/FDGVaYAtvJ+II6UWTOXjevF47VJrBpxKfWw9U1Vskzo3FpOTfCaJ+I+Yuge4eCdPlR+TteWEsChoMaDtZxzyv5PENLG0C1hLQaD2C0gD9h8yPN5oj5hBZjDi+aHXJnbBx+Qx6yMMVK5PpywGrUjyJMMnVjpGcxFkkzc35W9hGG8Esyq0K4b0vHTAnFCYGbDQAiKoP3nNHvzT/foZzGDQ7zjOCUY7tRxb5T2nDryeOGqeOrFF8sDxL/ZaTjnuQKLLLp0rnyiefqJ95DheH/a3yP/Gpwc7phADrFx9PoZNXxbv1fpXD2cCKsxCTSbkj53Xim5d9Jk1O49dKMwbYVIooQpn9rpLnyU5HOq5CFEkNgXSmX4ziizuX346MaGSU/elBMmlIW0roaZptNNMZk+NFEbkiRpvvFfPAsPq7X35qACO6Hf6/Zedd3bzsNKM7czhBTxDN+NWMfIvE6nyFZxLJA+SMgsFmNrhVshvssKcf7I5DJDUAhFjka5BYLlft+CmfaA+SheuAjmPO04v6SepfZWk2qtIUWgbOdGwl9XvW+uaT763ObHd3bIOFS3DP6siJ40gzM0XS7M5cgqvT0Gj/K8ZrN4+cyuSern5wC5DsCK1uX+Q50f3fW9XTpoQieEilo8u2dLanPI0zZrOSsI4b0SPc3HWl5CUGtlp/BkT9LWuHzArVQqyAxfxIdDe6RzGNLvKeIffjCbypBJltCKwzo4kN+v2PBicxRk+u/5ZlPRcqAHNOAI1zzfcpk9KkBt//Oy5i/oBUsekXJg0SJfoDiWBzjDIlocEfE5s+UMNh9ieRw6nFJesFoakV6geF08DTFyCRbriZ828OQ05zQ/Jn2ZX6mfKvYds25NWcNaLWTeuuujwTZzN25jHFUIgubJsUQFhEeIEYiQyoPSjRM0KWYXERDd7/vST4vqvZ5pfp0N94WEB7u4E0mh/Ovb2pk5/lOSerc4lboXXA6ompIQgYnG7DMO2XmRDnWukXrsqUNTyT+a7I6Q5W4/MizJmQKs04D+FFsGXtQksUZ8AnORj+RaAO5O+3VP+Mip6bd7WDxVkOxgwVfSpHJW0KIIjd3FSIw7jpRYe2ksk0YJDFI2eAAtMhC8igPuNI4krTuE1rro0HfthTYtD8c1rfYxUXdM3PEZDvIgovXySq54RGdfojEVxyKwL4NGshwWjFh44Fa12FG3r9hBEl+xtiUrc6AEtv2MuSHTpXTCKuz5Q9leT1PHAZWPkYiLnuky177OZNSAvyqIy5XZXKAPcvILsyS4D1FoqqZGMxRwoR8HlsPztfUT7kCvEYKfWZNKXgdD+js1MFcr9hXWUVtd3ZIIHzn2ZsyaEAykHK3QtCHsD2XYL+d/XzAy47BhqeNYRuk0EVI/JOLHkmAUsgg+bgckXe6ACZteOEo2QnLsO/WUYrzSOtVxEKwy4yWsTNKRNhOXfYSsJfbJ9efD+hiKJqwThw95TZK+o0fSSSsponSy+cgVeVbh5m/+M2WGQkBzDN8zHRU1ta4q8EjFM5CfebUlDvmMVg5s77SG6mG5Ub5x/OZKdwHgYHzbH167YeKcl+EkLBihXX9Hlap17OupGM9LWfwDi/E867M4akmR9TXMB21RegLoR/y3S+5twG7V5JfBKWpOLSEfkua2Xycg7KRbM7h/rBXgOcmrCtKLE1pVJT6XJzdYk0Tw3/+0Wt1k4O7M2bsanUQWdYQq5GIrrWaEI+lYwmW7rFaYV0bhcM2/L+XmXoxOUg9Th5SwUGZYSn1Yc7gNNBHsHXgjtW+Fm0KVtqu6ildp0JQAnMnvYNFg4WSj3+Ct6otos3vRksZY7VZabu5SL5LH93GvbwmhIHgbymEIF6/6xSIbbEO2AoqVdOKhnif4ZhfKb/QGLwsnlb/6BKpR6pkLn2R9NAx82W87cJmWBye9sems5+F1zo/2lwV+hT3CZWgV6Pmgtr+qHPccpdsO55mmaUgd10shCqEi8ruvrIf6T1ZE6OZdCVA4K5age5CiGEyZun2MfmPXchXO9gNkkdbPzVkkQPBYp+PcQHcJ/22FFsrogLtJHV5qo88IYyMvbkhymmslmKaj8DPCyr6exkJ8b4OxR+NNtwsqPgAnvpvyAZjCYHjFOO4pEsc/wXINeD4X4G4IdKye0aH0zyU4ycyXo2EMvn3SuExY/kgq3+9qBXJUqJFiWaHGV162Te51N26+wSikIKJuNwxuRQ2DOH2AHqkGwHTjEsHcFKn4Lltt9c4Ecr7ky+43l+eY30wW0bfAFtc65upm6xA9MUwJ8rnOiqB4Wlc/p1EBilU2XdLtQ8RCn3X
ssVsniMr58JEmwdQ8WWf/+KWq1rXJvecvjv+7WjTlo5wd9LiEbZzr9XnsLpfGaKDuOImPei/qN3bOoh0vf9BgE+Ul3hQGDGcTPqMwngXyDbCEnEZUuzcZurh5sMFPuq6liPbop7sYTvVeRlxjXJZm/uy0JKZGFQUabJFuA22MDmt54eb5aq2jpDaHNT8zyE8RUx2FW5ihP7DGQUpod5fub/NVunivD5uf0XCxvK/Xpu7hNv1Gqo96Q7lMzgNPUMFV8nc+W5SYtbzgMYdyVi/m6K8uGaakyaHYQKfW2zwkHWHcQ38QuDGp6c+N0jPwkivMYZdxX0N17FEPaOYsHPTQK+idZjSUTnv0aZ8eNKTvPcUBBWwwvKbEb3CfHhnb17frLAcwOB+sWpsCAM60vKFUnp4pxDlSnNelKkbIRl7sqUyG7p5t106YdkaCvmoysc8UknOe2SQLZHRSRXRBZotDyuTqu+QKEGN8ENSMcfu9deQMB+eY+1v/r/9nJlkEp+BUpQ4sx/FEUHP97dV8w20I2VENv3z8TnT/t1TyDzrywVHzub8DtUdh8b6eBfBEsjuR9oXgqvrCnaSqMpyIBU57trUNDesKHmEBYQZTfmbjcmqP9snFgG6g9Nr264Wtx0fvHtUTsxDh8+6eLUPZT0dUqlnd9v7ITmUoKwbur/glUH7LLgDSjeSwItAcenTbX0W2YpXpj6REyuwVQK7e9tQU1UF8/Fb5/EwrDlGzGdBuPpgKwJiTH+dnP/EbjHvB0rOsDzgpbFz2nzfMlLKciQjPmxiabmwtiYp2yx+Q1MHZTo+usTvocLgxvY8NKUjJOu4PRQM6004z7eUM6OT+iFskeLQ/Bh2fJ0FbjkTvn/HnjJZkSTz2w/4aqOfUhimFJA/Nzc0pff5LqLFD2YEu37NMdvYWIveFoA1vZxR5wXbHbQQ+tCJXGjyldJGdsT+PWdu6pHYZiZymbUfPZAO39RyNQaH7mjyZVTwiuFK1T40mlV4v4OrWv0ggzQ3mgk9eUZ1uK+QYr85AMPr0cdyOt53JhKrWdcHRrfIsv7MNuSk4Xslx4Vx8tW8U8Plf+h9e9hw2Qd7BKZbiWmFpOTagH7fxfLuZZ06VHjC1q58z2Gg090hAWbgOmqneCr6UBb6k2x9m/5GOteYBMTWTQYaHFkXJMqKJvvKv2Pn8mdTc8RCaEsI2mmIXpkoAEjs49UiL4JBrM4RvhB43MAIKgsNWxtlp9gZmFAXORb2OD6o8VhGKSwSScT3YWI2q6LmcN3X85pIBsZ3VomGpr9lDLuhfl23UCPZZNosj1SiB5u2e46F/UQVcFguwPxAeB/Gbjhvl+tEVfpeJtNNm1GZLybV0RDZF07/wc87w+aF2j/jGzVpVWQcOz/TO9XMgdtlPZp8pyoQKrCofZskPQfh1x5sfbJM/jnLngsy1fD1lAZxQ1AZgpasp7lx3lXW4TpWMNW9k78aIhUCt1AGeKMb5uDXdHTdnFs0qYIJb3pPq5Zhgk0mEcex7wKWqPLEjktYHpiOvME8mX18euXuYCqggoghKlbKU15sxf1quRED/7z24gviCRNSkAyKNFZxYmd56WsRJv2MIz46CWMlBLCceF2cGDaMHoxn2QsJ+cuX/g6f420vXB5Pc3Ua92MjeaAtnb/hEh4Scehrd3bXq8rADQDG5VrY/HzsnEnP92RHUvK2rTM/uftVio3ZRnMc0ZFrr0haldAW6/+tbnUO5oltrUDT0BXugAVjv5hDxwhLUU9ptHuOz9pLJkMz3hmbNjyeqvFUMA4377Jv2CF2K0pba1O75Q9qs5e6YroftiCEd1mL80fo6eWNOVmTtEzofp0MlZEPbXKG2i+MQRrJIPOHp5olYVT59XQC77rK+TrUIp5OTV4AlwiWYk3lMr69Dvz5aitADi3fs35HeSb14qRVQstAAnQRYtHneWGwNcMw39xI0psVdW7RpZw7ahaq7Ttd3eCwx1QyPDX5y1rG1PYhQPLevBCE1OJHCCi7oukldpirQEb901xfWb1DzxooanVm2o/k4VDRVQIcllwEIQ/shQcr0LganP1bmYm0e1rHeT8QWzEqTBgpvFJriDvdHE9WZTglDHylSUVC1TIOeNljDU5fIHMxwm1v4Gncd/vk+pSLaUbvITftMyRrRtjh1pUoUtKu773Ky/pLPU3fD0Tgydz4uVEhtMm1C6xn96fLDYQsKuKaoCwPzfXO2Rn579zHm1/DvrLPPcsHLI4x2ilogUwoK+p089rpYwj3v3M/87be/4bXKeIHFdrN8B+cdoHvBtlI5M0oYyYOeZllet+mUik7mo3geHzlIoI0RUTGgcZU714hcYMDIAxSQkzSyG9Qo8S+tCT9qj3DC/DPDzaLZQ5n8uAsz23iLUFge9G959BVqmzMstuCKCnwwkGl/8aGeyCXJuh0dO/I90e0CoRUEAAJpt27ZtGz/btm3btm3b7mXXzbaNWcQs5IyGJvMYWVgjBJaS+NpTAAg5oGl22Y+HuzHn2v2DGARkqKutQ5Az7cdqRAnCyB7IDr6qlD5/CE5Ay/Wtw8RruSFQO0n3PJFyN2xko+zSbq0Onwh/hDYVMkcXobHk02J8J6W6ur4iZDS+UW5lzstkTg8VZy7eOpvCgGx8/uURJEL7yF0oi3DZ6ESOK7hkX9w5uVcvjvRUobEQZ1dVv9goYWxr6gRhAGbqkK1kMXbj8toutSlz5+WhcfG9dx1R+ifM2kR7wFZ9Ey7QGsx5H7zE+u2yAyvjG0/bShgH0keHJmkVXAiPk4gYYY9reb5vhVOSzwTtG9OAgdb6w7iYAp3P5bFfcKiTGgNY/tC6yTqy0SvrtzMRNSRQcenqId1i7c7RLOIvfxncBOkgUkLr8IIF06iiprdxT7xsLb1tDLkvc3dCI6tVYQCQziCNqQUpzNVgoaCbi0D6C1GA4gBqxiXs/c3zD8q0wLgUzH0a/zX1ruKXS+1g2ZPAaGTjr7NQ0oUkKJ8JG2L6fS5KoQUNZ8XZDuaX8SuQgKX0EdkCCclsK2F8jZGJS+u9StYPwmak7B+miUS6I9J6WbR+uBRqKkIfGzZro0z+imPcTegm5O4rXwx1/Q3X96FoH1dbdZ6wOhKm4pcZuYEHbi9Cq55K0pvUCHqoveAeYWkK7ZYYCHMwtRv8Sxx4cH0/5blIOdSOCnoWfxY1rkaIsFsXYhW5eRtXxPxBjqFwFPTJckLeqEuJPQa79KyYxF5CzG/yVSfdxtwY+m0JmiMAy6qoGszRkscAgAqwjhdgP39u7z53UlgJAWD33+t8rWYCQ/lZYFhADJ33aiOgmcDXpPTLOsYjUrPHGIUF7xvRYUcUHFN+fsI9qXPDwe6CSaH2/u/DmSgmFnEzFN59r3z/yfbFX4mRSkeEEbYoBzkNyIYegAz+epr04s6c5uFziaqBZgd7ZSpn1nDe5u22JpSTLYvRmYYc15RxUT/8Q67mAdxXEiR+KopMaMLCGPavR7dOBfsqF+ReJ70hokWQmupvdliofGW4BdLYflZKSuP4wkwPat2Y7So1RfFyf+ZjmM6Z3
6bsGots0mR0KhEXrBPT/AJBo3FAtzg5vuYmC3x+tYQa3YyI1jSFxRlfgCh+QEHjIX1YSHslnKE8EDm72QKEG7Xfi/4aryGlinrlpkRDRWbmfb0Gwa7wYUBayBCohE1Hh1LfmP5hNFXiuz+dsfKUbgobi+Gas8gAgNfQOhhmcG6rxgZcEbvTkiqZyYyL9+z3Zll7dMZU8qvuiJHAgD2tej2V0fRo1dqMqJAcUo/+8/dkNMvnPO+tX7eR7LnHu23KnvkzKqNNpaFjvM8PmVyuvoWZfwzuhAwXfj4/8BO1UGKdPdabrFHxDvhFLq4WsRF36iK/cxUUxZPhQO8ZvUaCHvIHirNm/44VIi7MuMHfCsszOX01CWf4jdGFWyraS4H7XZ6au7U1YSswF1rEYdlhHFEhadSioP6npzEJ43oiMbchC7ldOa+CbcyTR/fWHpuPX/iLsYs5PPwdwUJNp0cl+YWQfibX25Z1UmRGvUHP+EKhbev/ZLFO+rs1Hy6iafTqq1MMstsAZnWEHenNw4PCyK1h3eqP1fvwJyR6aMHGX2LBc/FXjRMDGqgP44VQbHJcGmQ/VBVZklwNRFQsZG/J5+qvWn59lV4Iru9jzzMu7m7wOUAMPvBjzjMIPsSeJrHGGnGBjatyihzcr17lYyW4r7niusRvF3LuNscdDvlOjs1SVxclJRenZFT+F48MoQVkixLcdFBL+0r/wuIqQHim71NJ1KEBI866gnY9SKvLPyycUBDHX0th7bPQABlreV4Uq1k7sm2ekUM1fEXucDt4dauITyeUjq+oN4IvV67Df6gQn3Wd+wM1b5norayW4yW/iy0ZtcAo/B5D6JvYiOS5rpXtWHSVS4qkda4uwtlYQocFDBPk5P+smEtV+gRo7Nfos1xKJMzmV0u9wAmF3wbCbd3slhPGxKK8F359Yrl8astn8HB4N8KRTWl/vrwZisQbFMP7EiptjvsqhOs6QR0d9XigEWEpPELlHzDfjlyJqi2Dcq3lcv34e9gYspRyh9QUQuMfjWDMtSQ/b/+5d+YJ6U4Eqja7IIUqGrTqNAacpyIHpEDps8MHmF98ypdMbDogzcPAxeUsxA4+CnekKjpeUdb3GOova5JBWreYrZeJBZv33Vi7oujo7f+KDamBC/jmk8DxYDFz1SMoKXGUykzrkgqZlDi3icO+2Gh2jBrH1Vb1op33msiv56HmxXcUr6BK6AooEe8QgdMeZRDZqBK9vuSszFEv+HBvtbS2/Lc2PZkbxU99fS3s+28YZ7nSm0H1aaYhxae4vpEhypw+IrDvTn/DbcUOwn8EPJy7hOeBzlRZOnJeAluXuOVCC/PecNcj6sLTLZtmlf3khNXQMy8H4pQc0+VDyu+WE6gIdv8e/dvOCwmuM2JUER3gMHGKr1PS5xhLpgcQVETdq0TgIsla0VqD0rflTD+yhVx7pNTR/UtNyyt2/sFDcb2jNIAAFavtG3eI4dwPUELVFtF++I6BP0QswkABYSTUI1JuN3lKGSicofovCjZrckdqtL3puF4nWHC6LjWtKxjzU6b1/cIQ0nfrI0VPPDauIMnPZNYajGSxNcYl8mbuwQucuGgpzgEHawTZ72eRYiBX9MzaZUdB/Y9p5Yg5fg3d5Rcox/K8fCdU2HQU8EXy31li47Rxaix9lPn4ia1mu3BcqpzwLIRdxBcaISSWVV2nv2EDpQmImDfC8/irzuYzqJVl/tKmjSGcR47g+ujtceBt8AUrHYIAGz7X3uJVDYXTAAkuUVQKGMHaIJ6EwORMf/e3TvjiGUpulcux7hQydLz1Ax3edGyCG6ZR+8KbeykqjAmvC/I3wBumHWEXOjpzCQt0r4ahHbWcW07wNgv4JwWlGC8iTwGXbLU4q5MIBt3e7EBShkhtspiDjfRY3nbE9Q4SU9UWFfkCMh9II9MfvVzSXirmnnTxPmV7+NWVcavJ7+vjb7ndbGsmy6LPFpNTKi+SOoPqX0nCXrOzaPxbA505pcxvst6y7yrr5/7MEu2NBDSwcmtePN7QQCjqIqrjqOB+kVd85GfA1OX2euf927016h8NKCsFJE/CuGjQLVf2oi2HVKSY7zgGOKXgbERAm4FVfsQwz2X5Ttr+BCwTV8TrliNdXc4tEmWm1+IvXW2mTfho+x418I1URH+j2nOVNnDQEBnuO2Rhao1RYBvQrQzUXsM7YTGb4qSzxB6MMMQa4ch4c5HC/NUl/7w/F0ROQvWYOwxsNK5vvI8+iaVBqs9jDtm5BTOu7Yzi7IPzYFs4eVXxU/48GaKw6kpNF0gZdf+MCd1koDGO038VqlpYnaTZGNzKTck9Wco2WsTLWi7wCcr/WdwSmR6FtXjDIkeViG6TfGGVtQf5lo9RVeET6t5GdDxBEaARYb1X33iBwEtef00uTWBCk573n7e7RrMoHO7FH2g7RU0Ctpab34HUC4xjm8mB+DGqhSHEhkKLZZYi+alSeKW+vfcNsmM8KKQH0cWKsxunsDLwILqlJ6RhATvyFupXUOJqflrNDM6uxF365z7Cba0nPxhWu3QjRVt89jYJdDTxnThYbo9dBLlL4cJSxmsaxjaSpajrAIvhN7tyX9pFEVF8SqLbVH4YJhdPZclZchwsDDaIdNOMnTgXr45/Q2fg6V1uciaHtTMrl0OdctWNQkB0LTZpRzwkKSxcaNk6h+Ru35f1C0Tw7VFp/7VAO81d+ve9vtLXhKo5Nru5YN1BFrkKiebFQHjqECF8MN+BJbNounilpKpVCoMZRj+37v0ofSm/UwOYh4fRPs+ln4YwkRGtr2ZrPLjhEokFsbEM0CimMVL0NJgiJYzUVoCcwA8S+pg1ys01Qm03YEqcud7vIwq95NZR7wlZOVkOpKEy5jvJ4CCbZgNV3kDmy+/nl9yKJxqU2vjhUnvSlUxGWZ8f5xf5swLhS+OGdXQIRAuXkxeclXnLzSy/Qo5tX+SCU5chtb2YH6AIFU7ZUmFNKFYkRhVzPFRAa5sg1tsJmysLkU6ZYehwa7J2yVIMkQnhe/rpqmMAY5UsFO6sDXkkfNQFeEy8I6KHI7n+Xz7WcJG8wwNpnOLKapFpxP5NP2Db1MqBt4lnA6zfwWtF4WSaHLSF4ldvbpFh16CHn2s5c13ilrxb/KZmyWtOn03bB7agIDld4SbeXAaFRHyD+B0TaRA4RbRuCmTljnlSJUez1oneRQYT3QXSw/uKhIY0pFoiUMMa8CJ5uNCM5Z4mtYxAmGA/Suj3N0w3LyusY5d0B0ohvEW6+J1lx0vPMRngl8iqVbUcEH6y/xm1mC/0vA1qqg+ZbQElYEvGLBzya4Am4IUMWMVBsPB2+E7dBNOsDRCX8xDdeZH0+hIFEGuJ8TqQembSpAH2ov/aR6Aam40N/1mlT92uUHxaIorshSj3RxVNs2G6lQoC0806YAZaAtE53GL3u3/uou93TG0CgT2SRNJHnwaH4lAcjKkK1UnvuIWwI2lrMO1zJHjQ/gmBESAS45yUlvN84Hfj9/A/fhhJNTWHRxQvJFvWvYDL6Lp8hM8un5ZRlVZps7kVLGpRqaBGv/e7
jCHaoS5yZ3/AkczNNLzFGr0eyeWlWIw+48zmslNfbKR6RVW64jv2zk/oBioWUnwnRS0uPAFDgooIttlpt0gt4ou/7Q63VsAo8Pt8VfSmpz+/8IXjjp1iONZ97waYDpDbTKMzCATZOgbXX/WgMKpFUkSHq8uhuAfUboHCW7AXsvXxOYxbapqso2vA655VWzqiZi8KuV2UnFFfIwiGeRbDQ+EYOpjfQGyvlNerSIsggLyCt9tbIC/g9rSHlNiNS8DeaJv1z/URrKQpgLMxi0NsWX0dRRXKx6phOEhBgT5eHTrXO3TH5zkWjrD7/YSC8UqpiiH7dM7qpSrHwz6oUAJe53pv4Pe1t/RcUvE57zCcVl8sdC4WVmcIYWrs/gC0yyJjY+20F5GrhdUdMzTIGhDKTs/arK4LMoCCWbzKTJrSNC36L0J8XdcuEajsyPOsc4+dlW2ngbDUb0bxbvG6dXvxSlkkKs8tQPOPLqmhwYxpWaXsL2CAfS7AtFQ7yu/d3gU/GXiYdEgV3H9yG/omXkB3+AFf25/wMUPbknaRP/LQr3beCpGSSovT9AXX1DuOviq2RNgpGziNXwGooQAnXFoXyPSAdaogaZU+ACiscpx0Q/UDQ8UigKsSZm38guGX7vO7fiEjNDaZAng82xbOXPKfPGVXCYpaz0PVAnlEQB0Ycf2AlMkjm9UQMLuT34zi5VZiI3e1F9I8hMzkVGbbdw+oL8lAfvJVmeGGNJBtPIUrzD/IpUCybSDw9FVgRexy8XnygpVZsx8xjOgiMWrKXVWa1JzSvyyjhoSyDKpxGjy4ut6IHRLDPnfYu0gsgM8IPl1bfoQtwfzahjiz+8GnNNMQe3zQ128OnP0locbD0ZVkdUJh/Qd+9ena5MNo2TcLkHQ/3Gt47JiC4L+lF9YGcxHBnL7WQNoOOl3paIoz4TclO2QZpkZ9LH6btLTkOt9I9iSwV3UQIDUvhR36n/meYx7/ISbhynPxKgdUhTbNy77Esixsv9rjH/ra0SLf88DqR17pvxAreab1JIsqfvD6T+bBIdVBRsuk0KE/NjkL2AOBFjD6E8xHQiOym+fodp6uevCCfNi7oQ26C22y2oMhqvG0LryCmX2YIeoeIXifGCc8vN1C2CwCK/rISTjbWdVQgJl80NSj5A2PkHx7OjGfBwMKrPR9nLqRicCjhem7ZQO/k5pHs2WKSdToYZ4TZJeuu+6XN4KOneA4YfiUlxFG2lad8wRiuXtzzmGGeJXlNZtRABgt9F7MgggIlm0FVa1QJfVDPY0HPnQbtYphmp9LZuvfub28rJPi6kjp8kQBLcfkoFwBSYH5PzNwSTx0lHdw+YA0XQO0pASgSUwymZBTNkHBi5YymnlBblIqUigq/DOjVxDzr2Aq8dvJmCPGBIAMRGcX/+fmiNuaTLzt4EA8Lry6sjWsnjnsZoKFjXQ+MUdOL8/9NzQrnLCrSC7DcXidVkgI3FYmOy67PVRtUtL0NCAziT/O3H3uFctTwaRxhgcC/n0s+QWxVRFFPSRoCl6gVY3AcCBPwUA/cS8+Cld3MaGQRrO/Dk5K6mhkpBNHlx6MOqpezwQWs1uI4WQZAcE6f9wSlmvhGOyxBKLBDI6MWKPiNh7X2Gv0c3N/PyHMOmyNo/9ECDbAQiEBXB6xiKlnqXU0q4eoll0RVfUC1m3YglibPonpIWuz5GdCmgl7/sNUumHabhqLYCLfXiU3a9059tUYdZ9ssACF4mVaD3reonuIXjJIlrbpbA3ZE4mh6aECDKjGXshbDaRD+SPjfpKQYh3xGiwFJbHft2uyUH1rtz8fDOP7SScwzu2GsMUhZeDLROfxERaj3mAX+79PHnxQY+OgXduUKf8NiisL4N8JtzIUUSq6tafjsFeCKcrbNex8HMdZKETN3LMVdou3Uu5t7vsEszo2kKXkAvUhLsCm1zNQTCPfu/5CpCMS8UuFxtPtZAGoY9K1ZGAa+0sQmvNCD1RP11onPdFA9Jrvm6uYWOXjcNDAZw5vKzy0JFOWn2L4Br5bsfYDttwSd12ON8tkmI40OmVAYMFRCeNbMkS7J+o/04JUE1TeWplpTuMM/TQFhJdK9454WW09soJZWmTbJOs0pdtg9gIvtP2cLNUzmu+G4dbf64upui6N2MtIGF2OQ73c/CRdtpAHpPEyYczV+O9U1+jWmhr3gxBfK0vTMK6xh/eFPVdSWi70UlR4lcanZ/gzvMp/GoQBbMgjujDUY8VRpKzoQMs1fIuScbn5T3Swc1ZPseOGdiLH9Lpi1H2nuaLrTsOZgtpzYyI/OVEurYPIrfGZBK5HtQfhSRSqYJbNZi2NBD4HsD8XshUWRccAR/FwG5gnQZ/WkVlOkFEV0mxBFgW+VSvC6q1/PCObO1rxOhQk1dvEtKqNV2CWcQZycQxy/5Sy+YF4mWLVuJsBGDDiM2cEvxw/ieCTlB8Oe3kihBKJnOJhgdH7NvlQMfrUcJju5tyRL6tooxtCYs5qeHTfh4faD0EMWOA96I0uxDNY6kQn7DX/bLJDHNVk+WoZn78BzZiRcY8HYtRl9kHDBQmCGPhS4S6HZ5UiKcNxtZZlRXxSYVpdqLuYKxB82PWF99db48JmWxb4OhD4ys+P1IWNIVMc+3Kf4d4F8X99XQ1bB5QtoXECBoYE+pouYd/dvl5Jj91pRovVURt+gWCULK/RJ/gc8YcfGmIV5FaW5T4p4LW52WkJuBjpE/HXzDkIO3w2NiIKAUTj3mawQUE388e2t4UCU++32o/DkadYfibSRrZpagrkaS5szFePpE+LK+/aQVUbeeRDMuwnbNp/XUOWQFDouNUSFQ7d/stSa6FTdXbWvOggvaCC9hlPVgu26UNAl9vQ93lc9ENcwg72hoPoVjoSBavDh4AfJluqWQb8puA31FJQF6g7t7Z3RyqKmPZ3X5L8fQ77l61blsKlFG4qUTnCZGh6UoT/fOeIjyrCSE3WI8RVAZ/xIBaljeDI71+gvn1fq+iOndVaE+lzL2p4X+Ig3luHrUuvxjzFcvtWwOEhG7c9/Uw8bkJVnsfUJ1TcIxVEYwYJ4M63HcJyXnAk/JrrI4hz9LClIF6TUjtgtCW3ybnUSEcUNzeGynn5TaWZP+sH35HlffAceerw1o/LKkJMa9PidEYbr4JYU5PuGeBfFBWH/3EK/2i8KuOSht+87oWtvEQpqjZbHV5texjzoMZJT0v2w9G7wYgLf2iiHYJopPe/JJdVNwYg978JQutdtRoLsQTKXuGzRt6D+suVbd6GmNaDnbqiFPwdD7EMAJUMAPctxAwx6C1tWi6Urv4n0LPt6XaBWdxNmIVRcdXnC3J+wkMyaychqUbPwMBGjf1cgeqyzT2LRltQzFZBJx5D7JfZqOCa2+oqRXebFFu9Wq2NeNtKUlYXyTiym+Xm/lUtWsp/VqjpmZa/5ThTMwP+mVCf7kzZ3h9h1yVSzzVwWTWyrJXhj/ef1+Ssle3E56ji417KJ3WYCETxNLb5KrWxs9vszvvktQphU7S2/7+9uMTgyrC9SPTrBMHg45a5T4cIz+5scsnvyIg
gxRHoO40x6nPeBbgRe9kzvl0mIKkhSeekk5gMlYeea/fAGe9JB2CFBCWFCNrXwVWLIJUi+6+CwkiGUJkNaK66kBiwGFCRHWuvrzjAyqWspSCaxC5oD6b/Vd3ys6vqDyCmW47vJ//t+gS6r4Zy/1lY836Ab/jrETn2iidKG+qL0h6l8rQUO0u7uQn3lmE7/KUn9MXyhfjS/J/zN1JQ6ZZyJtvgom2gt6dF5+9U3hQfb/4SVkTVNGaKqHMeb9VhqmQmTRDwEhXXW0Xu9+XCDwh0aFVHhGZc2OnjfGMORDQd+CX92pgRUw/cvt1kLON0gYKorjUOqVDQ23iQ6977byJL4HDymM+DVwEOR2E/BpPr1GkA9hPhQxExR1oZnTJLntMYqjFgfx/kyrRFUa+V//wEJhwhuKhFNpO+ud+UC6Ix1f+gAJbaF/NwrYs4eWaQcuNcCE12bQXhrx//kDKbVpft5f1mLylFEeITXQAE7cZ3tzvPnNFCsmXzQFRitumDh/VdpMhk9i+JAJgCCsBKPbVGX9jcKLf+3yErRbYKVkGQFtqrPdVQYYnks9EGfgCdWqg++xX/SJa4pqz+Z5xTMTrLKqf3YMZBs0fTvBPKiL63bNBS7iARUBKSNGlGczDuZiH1amgRnVcKFc/HTm7/IiFE6Bl/yFc/hc9bu+0BF0DAfB4+ueZR4mO0U/Fh3uMtbSRmeas9Txx82+QJLcZNvm1IN5Z0Ey6efagYDXC4wsUHyV4v3v9KiFSNg0euFTrvSlulukH1Hwn75I6D+KHBerfkgAe9UiN68Whp7Ab3bCMQpfPbGqaCp9bxvWcgnBJPxrTsNbMAd2WTZmjXFn1veIyd0ATRiFSKEH9OI30Jclm64Umv8u9C8jBwE24qx2Z0EbpzQdwhLjaBdEG0RDxP4rJqJqISFzO6zLu1Sj5mljRw9Dm9UFXVzVm3RAoC7ehRw6KYXaBUsgfNHO24QuyoZOrUIPdgcxBU1L/Oggpn2a0EGAcqsmL7jY+kqqJwuR3wthhkC5lBS1HqWdw1TAM5taU1tqR4oFbHAmdhxh49Y8Nad6vupfPXBIiGcFD2sGT+PO1TgyGb9/3FXyv+5oqAaL4b+QKM/4TheRi3idGX6UOVpk6gdauUHss+0GkQbXUjE0ncpbkt9xC1+HlwcNyPvkpxJ/5zq+FpVbeXgr2dscdvp/BH/ZuMTR5AnUHnrDdYwl3FiohmdHl/D7knfswfSwIJ/iVu91AwaGQXQcUUKqZxVTNXkktZ69kT5et3797pZTqwzf7IrEM5Zh9aF9KLOgq9nTHyRzF+cuRxQ5Sfdh0EJUNnJtDaEU9hyt+tvYKh8fl3p/4ZH65ZjrujSpw/IFcyLYdYW2mRgIOjza/qeT98r7llA4UkSY8tKb9Ilw5UYGyaA7ogxK/TTUQARBzUFFQopkYq2EiP5vUVgBzkdKy9rtVThIgwCkY6sJXwbiKuH8gT0/DcnUtLmJH2lYO7UD+ecOcB18jrgWEngJrgczpwr7rHxCfQoervnF/z3N3NZX+nuFX1uatb70FY0h+AGiI6tAz3KkzFJfESekiUPvSfoHPHUF7Yuujcf4Li/9VHdTDXu8rJmy86wRfKW46lxisrJyH888XcsjbdTGf3hiFNgKVsFuB4dgRejFJ73CGQSEwe3oCp1juvj7UKW2H8YUNbXl9feXN1I42b0uQl60QZU6e4MhTb6DYdPyK+mUiyuygUMezxd7yx5Oe2RCtSdm7krCusU+N2n2QEhLt9NBFVydrD50/u3k9934w07MTJmZNkeuJwN1YTuj9aIANvC+/YkHiQu1w/eGLTFSThl9BTJ+VM7dlilzc4vKmDAT6OtwW8qhOzHuJ3QPJ4kDbKBUI7hXDHTDq/RaXpna8Q8HiRsbXY5gju8mcit05Bb6g7/dc1Vnvu2lpEcrFQ8WyOrXhn8W2Zc3z3Qw85rn+iTd3YmYpq09yceKjComVgnA/OMPndsKQgFDjAWRgMB4P0kaT6XB/ZjGdYu/7qs9kMtzlAXi6gFstzpEXerU//Aotw16faQ+ppqsW784SEgOQSOZP+tEJuqX60bXFO814V+OhPPcW+4z8PbnaHG8s6ESiXtCaTA5BKaY8i5STbB6xHFGcRliZ2U7tzUC2Oky3OIlmX150RUkt7F6Tycv9ACZxyRWAgKFRAUgQSywg7+d9UBUASJWlx1QAIIIUTGnMEPLWvvEbRXYM75DmQ2eMu8mygwui7SCEzkdnW675yL+ezDnX+YD5v300VqNBxPzUMDGEq82nMpLPJl7w6teVpgiBZU5bpLy1fVEE35yrpAzwMGtAMZZYh4RrL00sT3eDiWHmz9u4trQXxZwIH5xMCIHYWUXtntJO1MfSJjbl4b8xRmJrNMt7ZU3m6CnntDvtj0ck8h/CkQKn/xQ3jR42bC3Ww8m4zKLVsnm6BBfJMMJIJNndtha/uEJ4Po0BthlV1kf0IQ2cmPy1AXmmOQshmiY6NXdTFaB8lZBZYTk4oSqYbCCIdd4gEAbwzHOmoLAKsmafCOhDs4URarPd+D8QLHR+0xlbdGbzhnLv+JHYfiztGUYw7zue8l1P216AEtA0wM3203U8aSIjEK7/Twe3SA7+ViLcozrrfsjS6PczRhGI9UKUnnMlVXUbbth74QUuNIxG1zGtHW7JPjgZbi1tlvyFuX6SIpt0JSeFdM/oZveEX+1gY8djiiOhhftZb9Nk2bSTPj7rx/Vgt8nuQR1H5o6k+iah9fkb0Fl9nE1ygl/r1tWw54mLUd9GKMJV9VPAjmh3G2VyqqMKKbcp+UwdFb06caKcotqAgW2G1PI1Ix0PfxpVaHkQTYFi9Q6YmE/cpLrmf8YVC9Yt+k5yRelje4fmQUN4uN4dTvg/NPrH0fF19zveai32di3Nd9wVjaws/yjXJW1dhoeWx34jwbrU/yBFte0lxZ/9rfBuphFmequ+VmFh7UMsrwBn/YFzSAdkCsmlchGF4+5djFZxUPMM7+RhLLkpngkJkPQSwkaM5H3rPCRhWRHamSEOfgy01Z+e17I9c8JR/CEhxH/KYuiBZEewdtL/h5DGr7N4A4qJHsMzdFB1aLyWPWAcauU4iP9aRLcBIMwqT1sPMSyaRUUHZpoEfV2y7Syt7rqLdt1cpHUCNB4y71ERY4HxBfZ/GwXd6LANRl8fF9/73H7k685haUzxZHI0qUNHwuZpAvMfcsmD/gyyMSeW90HP9XRnr9SycDn+64cf2bq+cP5k1TASa4nwEmYShIGOtlHePIo7EXJ0MBNPH/L6Iu/sRbYWakyoITEOxDzXAzn8EdER6ESHTs/Eg1Wy/hGBRrH9hypLufNgci8R5hBlqhho/BsU/MpAqdxvRWFDc/xAfewCBfsUNQYKCo6eTUq8ygczpOJbQwxgvqFj+NcimnRoNDsvDVn1oOyxfO26sKJSn6+c+MoMFfv4uA8kJ3TwclY1Kr91Loi6mHCbit4a9Kraz44FJU1gQIUpkt0qRPaMaIfheGew89e6L6IdkV4rKMob0ZuaDXOKew+do72oCdl52hvXJ6z
GkBa6GadhNfJ1wiakO19mP0t5aW408pvM/f/sT5CSaWoaBLlDt5T3lPBCIJu7BA/Qaux2dB1pr34Gx66leMwJfUKlk2tkBGlJAAmncQzo9xgjcqIjIKDLQ3POeEYg+HGxQjciHWqeJrOm1R9LFkAToYkRh1TWmrh1HktM1KF+iJhBY/VvnPUxc/e6Lxf0VcZ7ADootXpbYe540hMVtm99kD3q3FI+wPR2mVM/8e3kxJrpW/ECZ57ETR/jj0J1B5vmJz8PhvG/zcuYnmn4wAuou0kuN0++cNTk6FwQeiGoxlJ9Qt4zseXzW2O+ha3QCG6EmvC+5Zini+Vf2oqwjiTuZIeHWVl4LtGdUwBQ19p2XrDISpfwLENw0pmLvF8C7ASojWXS7UHzziHCjdKBUqBrAOn9OfQYDJUNvte8yPDi7j/h1ziAgXlfhaincz+vcGXkcoCZwqX4rpMGyllg2TgpjEjaNMeUfjLqX6Mqz2T14FH02hI3VchCHkCW7eoeo3Oz0MYr412syw9qJYzi2UJM9KkLja5LJhd1S3uhJuN8pXrILkTXC1rqYIi+8trQZYWq8uQ+SEb1P6aBH1memNHoZrQVULyyExWChdcP/knYGCwsKJAzP/OaC4y2mMJvhqHW91go9qjI0ZwxqLzrUtJ1r1z7Yj9/BN3o1fL1gyuqQkHiyI0wuEf1fiC7QGwkEu3qRRmykVU0Usw1Qy/GZPmOJCpx/F/2by7s9rVqyLvyorX7H5xtvt3d3fs8vByeH3tPl1K+Fb0nMo+StGpdC5IcKnU99dSkSzK4adr0tp5FWXLw6l8YMpsMNUGfP6cEKWLvaSMjBtOXsXWmCuecg94jeoug6A/wnX3t+pXqIoqnyxxINjjPJ5aX3fmnWL1/d6xSk5qobPTo3iEM6padj6cbr/a3IoIhLeApjUy2xzX/dnJd1XP7DMGeJBLfxbJLnEMi6jLLQWtBChB/hArDMASkrsFoaTyyDFS/J4gD88gQIkqug1SzPV11iWe5/lsodmGjq0yy41O6Nfr2NqbBg4ZBkjvanjXuWdoZc+NlclS1Bgq3FOxJtMoJm+ionYdMp8znhMjn9UVRHvdO0IpIcblwTWMl6os9jMfy7/viBL8PtoiAfgjvsbIsIMTJ5h0e+Eys/sg1yWCIUgP2a2/rp7LCezTU6YaxjTGg5LQlDDyhqoIS/qGNFrTvdkYZHhmSnx9JBhE4OMug1VjbDL/kjPOGE1OFda9EUJL5Dw1Eqw3xnJiPe8LD70N3ibsnuOZ/xcGsMYwSQF/a/L8HiESYnt9XSoO+Y9qykeqqBtU04quNa3YHr0LeuM5K8fy3GBJu9jhYiVQ8Oh3v/DZz0BYfaLLdfSkUW/QsUlSRy4X/xdBo6PkaeKtNPity+W+4TAXyNtI9oifa8jnTJxQVdEmmfwYppKjEntQtrQHZHYCisYvnJZEgv60JfAG0TuX4hQ7jLAcxf+KPUYdb03XY518CPjat/Aigj64YNo8E7hy/h99x2Ai1YaJJ7RTGuZ/nIkMnUFEmrMd+UqKzukymPlmz+2SbNZMx37GGxrtdis135EuHNmo7yzpyYbLX7sR0MS+MQB/izrfgIHhiIo7HvTjpCEUoPyyYdRHkTeZ6xFPKCVzOo36974VBC7wjHXz0ZjzrJEC+zdzBsjfuHHeQ0Wly/ty6UKL3HdqPG953TfsPndsJhouwxUnSaC4h5LCRlcHUVDu85fOswkpkcg6xcRRvr/wmRwVh9M0xyrGHmbgboz6Ldc+GlMLH3Vm5Z5kBsV7PPlgmKsJsOl3xx95DuDZhfl+eAWw+f4G2v5Ilt+D2xvENm1SiGcc8QNVj0dAhsFnjn3CG608idHLpOSm6zLBKL56hvt3pYpEj1nbTPelZiqygnWNBJbUEZ4VUrpotmty0BwWxYdFj7oFL22stU4GuvJqcKdFLRpXN5xgNRA2PalovJOSghyTj6zXfD4WSH71BXQjc4BQyonkkuokwV1KFeuJ8/HzDtCLjyaND1toF3oETk3vm93Qy1Qzqzj7YlRRzJ7WTYOS/Brs/9XFheHykkXt2LQTrzsMJ/U66gR6G6Hmi9XflMCzb0YMGN+WblFL6uHvzygWKtWIQwsixfx+YzCLBHRDlxfJe8m5Pe4t/ghOeDa7k0Hnqf6wFNDG7eBhzDCR20DjNQBULgAu960jOVi5BcjOF7KUINw6RIm3LOde8nOo12i+Im5ub+QR3c6DRux461qUXuJ97gGGBzhdM82ehu4jmcw8QU88AFKSiYqOcUpLZ7d7hJq4qcMCq5mejbdtqMibuSqlBMKrtWtJ4GXf9CxHevM+KnZh8vsrmZNRzgdSLY+gHw4r6urvSN1bARF/6Vnb2aEvjHtdf83YXuSAaCkPMVHf+AmTaOdBiRgd5Z28NtIv2bl+OJPq3niGQAX9iRme81DHD9FnBVfpgWPyMB/wlQIDGbb5wkZTyvQuPcwypvy+Fvao8UVL1Tg5/0K8luFjGU1DNMn7QsN3JH/AowWkzlXVlNFBpxluzr9nN/NKrEfGvps503eBQCOcQ3tHsYJl39Jz31H/zIO2FJqDeHKtYJU6lk1gJ2JfOKhboQ/Et4QXdyHgc0SPB/c5nOz7zRPsKL+L2GqCzvvXjjWTmSdJWPTDbM2QhwmPlrOT4tpwRrBCo28+cjeRO31IYM66ivvuLsOxBB3SmtZKuVw2jKjQC6rbNQkLmv//TJ+czJtgq9Jo+HEbNpG2ajUgLc+fJWNO0g/dop0R4EOMO1OF0Ib1ltno1QgzAnrILJ+vQ6Kw/lj1rzB3inlUK0/aQrXI2P4OooNvhKyHUS1bWAzhPcLpl3Xcy3OQlftDEb3ni9bUFib7iBZqBCzN1cjGwjY0OjkliCxVr9JHGZxEFc0DHhenC/nKRIdaSQdpKP9EFleGqkt+fGdjk8G8RcmpFY5rZHakeGC2DE+1xgkk4VBwLohQy4xVeHXY4ptAgWKopwp+IZmVT3OGZEfOIqCZLe5Q/jWCdSC4prdDQyTkm4CLhnhpACGkrZv7Ojebl88gEE6/QLdLbJWogLsmhYECznS+VA3xQkj/B782bzrAHwzyOa+TWNsVMJKVvv4lnfnpgvSDBy1ju5sn1cpUxz9IVrpz0fSNltfJDnYuz3JhPcUMEBEOtNXUy6m3uKcfoHTzxVehBfj1iSna/nmECC6k8c/+APoAur+bjIUcWJ7jmlUosU0T3HKllhz+OgPeZ0kwRlo6Wl1hnS5ZGTXqbjh5sKeZfEVCKSLpupwuAq0erJmuJRbvkPxztGuD5UDoPfTbBWqQbaSrSvTqnETuBlv1Frylb+rvfPNjXvRPuwzSTHWWzlfbuqc6Qq6wqTD/iG1uuNr2PU11xdYoLtiOERN0h28R4kOifWywstnws197t69AyiZL+24y6kGIuyWY817oYvy/NvOSoRu+9gVhRfukdCf4Z4xgc7BwqZy1d5+iSp3eQuy/SXGtqDzf6ZYWXzk7Pq/rN7Phaliayk+cqYC/5WgTQa3Kp6sNuN1tbnXI9D4MNDJdFklMdS1LoVf
EvDirZ7FUJTnCmNru9ip1qR44kvsIuoMWbqalz0v5OECq9eJnEpwRyEpPh5xtO+KTsFwvRYJKaWQZEXx7bg0KjWQf3Mja+X2k8GoUl1LM5qfEx2/Y2nRAwFaRogywOAZpqxfT0xGrD2ApKmRWztSZynLen3MkHrJH0+y8eZMzPiArZR85tGj9UTvlhln8ndqYTjfXgC6XvMxHPKG8GvaPmVvH29p1TgxsZwZtZZAmF0cKs9SxYR4qXpewq+KNxtjzuk0MNaDXru+HgkQSZ5VYPYGRaW/zSwEmzJfOPtMLai+6KMKErsSinqw7PPNjIm60+APG7Zq1gfR9gzwKdAq6rV82SADEZDJrfT9w/h3SUTevo71g7Pt/A9DhzIySzuvG7ppzblz0ux/3VO3nxOCUCjVepWm7o97xUmOzrcuFYQDLi5VDvD5QbarhUBmXJbfmeWH1iu3Q3N2452VnJQLMD0UpcamcWSwYMoK07Neub2LGzLalGhcGbS49SxTOGgmbn/RT6xAaekRhnUcQK2IXCaOGg6gAO8/IIkQ131E2dvPw0OD0SiFh5aGxOGJ9Du4jXdmNOg23r5tm35wHLgz9oiiT2253qnCVds6gmxf6HcxJsrH+OFcBKm0QOupE2n5GuBP03kF66UXcUFOOC8/wjGBrnyOM43nB9E4p4+LGpQqzgo7CzU1f1P5OILpjdz6Whlr48eL64EjX28ohK7tCM64xkZ2mYBOkCxwLv8PpYlsqnjqCqv6e8p/J9tn9CA2MqVbrarfaQsQPG3aM0+H7jcQifcL4qGWrKFbaJdHhHWI7qp2YhxlZaI16mmEiM1HRdzdP3WULCxPUJty4XhfIAziTkBqsb6JyGcIYLN9UipVI1QRIfSRW6rIO5fUmx6BCPf3dCXKZqSwNkmIewUr/mPxG8KF8YHbcMnm+iNAQPXq8efSeW/ydylte3Dq/jauRt9bZSWAQk2eeXVRm9Oor9vxUsbfLMW9wDK54Ugn3BBW6UO2RjknhNImEqL4udSI6xCtDiOFeH6hI/z5rVmUM4lwVnFlCiAexJqOqq6sVBHmjtJFPbSJIGvxQ9ogJCZ8g2PmLyenumN3ylaGS1kd0KlLb9I2u6MDjXn78T1kgSHzZePotT9U1EJibduMUg2tuJpn4ikIM82mRYu7pJs8GK8PVoOv7YTJqdScuMFrMl/Txdb4PQdC8aFvWTT6I3zaG9Aw4vDBHZV45cEAq/khx/9KRfNDW8vwJ3EM4lXnb5P1+mZmIbvQYTvhVbPgwF7VPL81/DeSMmzQ3U8TAGMGqCYk9k2aaasaptvXASl9BhCr0KrguBIzrDTdZhL8vH7AxwvyEhB9gj2NMyb8B/YvP6kMZ4iDMNspmsaoVqkjzmLWMzFY51b0KLlxDcizcmORjaJx98b4LS8xjE1j1COticKdfdMYdb7m0G37XA5N7gj+0PYBlfSOvj+sX4wj5P6Yn8q3snMZ3sFuMlGPvZaleVbYCMf03+Iijhvf5ab2SNPVQGS0MU5oSK5v/eYY3mF6XWJja62FltrGi/UcotbKKkaMXLH09raKigVRebfA56lKB5oZjSHTb0Ojil0y4VXDK3SvjfBdX8hukXOR3DHtZmJN1xKpCfRwNtBCa5R+wJq6W2LFfEfG7P3nbXpSMdij/5qVUa1jSYb17ThMwk7xtlD7eZnq64S8CrganYEd0Pb1kZ23P9oxphTIIGq5VZdxLLO9PwaDxd6dOO0Pj3PQQsQbbkysibm8LP+8u6FfmjV8w+pD6gPHlTaukzgcfFqw1aXO8qHMJvguA1fPALbAFYcCqpQGCEInOi5NNIqgQP4drfdhJSDpA0CaLOhA7Ic15oqEzM2NosRrGNxwuLszEi7ABucxIkwYt8jVze4mYd+MiJycvK1PHRuGKW6rfkPBDlI5/In6CLaCqI/tTkTDvIjG04p6vUlgtcIDDXDFdXPiySGs0PpCi2FMzVPCLx3qoTmwSNm6FsLMHOI7MMCXaAdJvmxXy69bBZjbd7tkJvSgqT/9GsJhQ1wwdqH129jiByq3ibBaLe3Pc3QNGLE52BVmZg09v7KSR+P1OaIYZ7twqdETXaxrDQGUBvGXCtfvKhHwjTLdCISHANWt3bg2dunbHxDmWRSrRBpSyEV6mWOn1WeCcKFXmbMy3EJKvBEyitHsjCrguv9SLcqniX4hLYpB1TC5y2gFHg6Dk6uD0iuEvom6QaIVhs2zn+Z5DN7Nvt0tWgnnIwlFFnUZOgx1KthAwoPUm+ByBw0upNyipA222r1Jzx7MrOEtU8aE95Xe4LMOyKupwEUzjbLDSP/yWPJJMq5xIeQQXJKok0lh9Sy42gQpcmnV/peOC/z0BOuBb3+gm7SisO+vGwqFY+MREuJcxKPkQETO2lvEwOXNbGeOeVWyw8kTn+orlsaTAwUtwOXWLFX/NYtC5wUcyxAKaVvPiYZa9dDK9IetGEG0j/VJlj5PvE19JIsBXpMPhSUy/dLv1ts46ntUvz7iLY7gRgHwVxsdRlFiTBrVXCqujR6xeQsa+mk3i8WM50f3+7wTzGFoM6/s2XnGeohrMYvFmSSPd6868Rsw75rkcRjt6PCyfEyHld/EXhM1H3X6nWTxAWKpos93sS3fUvG6UZJNuIbeioQ+iEv5Nyonl+FuP3sGYM+ML9OB1fdQHheTvhgHr1l+GQyS3RejAZCdo41KGWmpeSdmH/jpgZy4oGlQ60pK26Mixg6PlGMVFy/weiOXWxcfhyu9WqjPEcd0hKb4OhCunvK8egiRIz0Aynr2BQ7B57PJ02vuMAfCZFVdM9zEDX4Z1159a8IAUEeOoK+FBfgudIspzP+e4pNSMB+BuygnEkEclPBmk3CWrQjG502c8TobBzb/VpgLzpt6VVraCPPfPNJoUR9nBVasUkgWa2HegzQRtQCTbSWXMPLZxuXFWTtEOE+3mVR7JG4YEYy+y+AAy3GHcFuxamLnaYybfO+CFufPEO7yMlioNUDgu7u+kL5nd1bs1X3oQ3eEy4scxHe40nhsyVbbBWwQxCLsvh7s0FU86xgE5SPt2Q3MX1OQYbsG5aBhOD2C01AuUgW6RCYUAgFyWM5VJ1SHADijs8bADpwhummePexUDI+jnAYj0l3OkOjCVvc4CejGtPU+dLiQGTR6qZjYrHnMVaEYJmeBJG5b01N0ax3n+YlvlpexqBiyMNDDnNNnFQPF/ZQ/yPY2hKbkaz+BD60Ib5OTyg0lFe4CppM5xrMSj+0S+wZzjmX+zBHVVHl7E8mU8keES8e+VUZGNEPfvNbIftHFg29g4rm6QRjnE3XRIKq5IdLoiDYt4XSGKxnlnaFFClCr7alH5YodIk+12uSrYqHK1G4IK/oP/Od9zonF4jYuYXhDp/dISRqsxyIytxiLwvbLF6+siyOrAwREpn/UHbKks9dkgZTqj1Zl5dkDfkrUw08u0o+G//r+AljjsRPr5jg9lyHohcX5DWQYgO72IuCQFIRqppJSY17TBt9IiztqJ85siAk8LJNz5QwSWGNyYWUpt3Ly0wDuuZXSjDC
mndM9oa/DPp+80YZjODZEWUG7m6TArczYXwOJp9deUpP1YeD8RnGQa+BUNnCIG9PPYy5FNjGxVpnF/FiNhwrvR0sb1dYu5eLSramtLAVaWbe5ZufNUnacPnvnWBrRWEA3OHm7L88y7bgLBXu7WXtsswhgzNdG/C6OHwc/7SaDIFVbyv3mZdnDLPG55bBOcQ127bul/DndLJ6EXMZYfR6KdlArtWch+6sMO4q64UOaTflhPlVkfCs9S2lkr9rmUV0tR82RfZftOPeksE1/79VQDTpOXqyGkMYDLNPg35H8kQ4NHRnwB2vqIoxJ6Qwsky+rp4PMQ1fks0mAWQAHrOwPdx9OTtk/QHzT2xCxP5IrRAoXrBJtmh+z/gLhd7Hy0ANGCwLa7Yk1fyniU84K97DbfPpe7WYEf1U3AH8LUwoSonLOp7r78uCJWQd4ueDRyoctgJ3yWGd05VPklvf1zqCOPriQt+K1JcTd1+9MH/sMciEh1SKpns1iE1Vx6PrFYZhrS7RIjlna6mhljYFd/vhGp8cFZr7y6F19qkAh0pWLIiS0GjsczIpDuI5gG6oDdbk7EidJQjVZJxaqw/hG0C94gB1e18fJ0k+YfZPBAfJJG9UQjnHHS2iBUoA/2lHp+QYPjHFsXAG//Hy3RtmXyJsKJGLkzwrK5mGSeSUD+TzcXedysDegZk6Jj6kjAFVJMP0tKIN9yimy6D0hnqYddjLlVQrTfK4xg4Oe30ydY5hhdVXmxjGIHPD/fXC8dGy6ZPqNhu3GeUKBLmN9l3Uljc6ThMKYUZUBinA8EJ+ZT9Yjt3yEM0UU5VtO22NtuNEvocwXLOetC1UFnOhb3QUIO2wndEfGMDXrkpSJda1zWWlZt9HIcLKzvgHgLJ9y2YapYiDqRLMsQTSNXWuBDceLASvbrrHmNL103C0T2qR1xvR+dCLoqHCdOnIjVH33JuykFhQ3JapJoez0k8mwTeEjkvho3q2vP1//T+X/g3b0L8/4tOFoPCoTtxdp4rnh5CMUyuYUAL0rFEuYyreq5HzS9IXC+Bd7IurZw0bJoVrovCd2EHxhBNVBlWV11cSKP4G1UAw9X5UeQOLRclJCpH+b7zDnZBiR8/HYF/cfwpBfmc8iA10GugtFnNnpxrTrlIsuK+LOkRtRNVFINglaE8ZE+Bn+iiXRtzzoerC28khuFxr7Gh9TNbth5sttrUbL1sTVABSabP3iBeqljIQfHwysH711f9uyWeqG1i/KE6wEtL4D/scZ/j4CYfV/iWNzPnFOnDkRbFAO2nvtD8dUAUhoZTIa55LVGAvYFphLM0wd3bUqEpvT/Nqdls+aQrdyKZVbpWo7WSamL46YxOxR4IVAe3c+oVFXKHcgwVawG3ZkMA+CEOmx10W6W1sdbuRqP5Xz+WMnrx62E8ynx/2twN/jhVwUQlxWFV9gve3UPcNii4Ey/nfGeemSRmJqeAtE5b3YWoc7InlC7iqo61FUPAULWYeWObEUnbwFlFwgQZwGZsV1hf+WCyhwpwKufgODh9FNv/kcejuOemLnMicT0JnIH6+xkCXjo0G9QAg6tZTphn7hDIcAU+a8cqaOCqJRAu5C5We6mW8153vsmMxlhMCIWZx/UZs283QM5ZgK0Y+03eqNoey051dq4njkyM9N/TnLh/ZRFeEJD4B4YHyFdmKVDv2G7gnAhjC/6OwtwWGQmcCi8U/FZUljUQC5wTloSoRxnrpUmrMiFIk/vw4E5doK5xbzLp0+mOCCYc+/Vbl1IWGAVrzv01428PVEp/3V9biewhlu77MtFPAci131/TlcTv148bolBPBAAHCHTxkhq35DsTXow6Hc47DrimkV5jt7YcwBe7jM/tzVRB55nSPSddT4uzhVPOiIkfXllFygtiJ/edd4rrBhraPbewWlKN07m1zlCzBxXFrG4ynWWLQiWQSdwzxZm0GXTVNiY+lObP0HkQu4Utpe2VsrlYxk0dyCTyt1fmnM/k5thHKfFIOaI01uj54irhbxWJOMafSEV6YeYE4o2PMYN5vfcBJ1VfrJPr23Cv/R+pah8Mbaw5a+dmuXS3Lp4C8MDLqmaMv8k+n1KLwJt1s3Gzllv5/bbD2hAg9gQWX7jcNTBgOAtrAqgCMyJXvbdSFISYv7PzdMx52Wbgcq4gwtkKBWhku6kV+kzm8IdDReMcDaoeeh1rIkkfOOP3lQeKn6T/hGutJKqEbT1qU21zT79tjt9aBzt3LFcNTpEi+SY4mgyOyq7wo0RjhtVBjGqCIXyULiI/NTtDrhyQTX5dSYf5PLETAu4yW5NfUOMRN+MjnSIVv4Hi8IZNWt1XnYQmC+OiKfymAUZcKCWTDMtGQJ5TYkE/RIALaw3ajALZf4J1O33fjle3Ay6pLT4uGbHUxgaIudqRPpMgTgb66UeGuLJ1wUcT5IwjsmWACcgunO4ddVXyySCrM/Ql6TRlD6NlAQzT2kXR3/5fZNg6eZNmWqiW6sNyjh7cVsFY/rl15zarBvCM0Bsg7lPuhDMn63b3cVsugA60seJvct0OoXhIAFvRSzast2XJfjdS2kKZU0WfZDCjyq+mQtdmZXEWiaZmyllL7b2T7m6xvb7Sh6NQXSe5lfizHIWyBzZXIg+w9SU2x3ni1w0wuKyIVbyC2hqMmDZwRV5T0Eb781fUcH4JuAjj9O//qWNlGrcd3gZS8cwpuR1XlNuSu9VAvwDioRCAu4Y6qfloiYcL4aL5Sg1yTGkMEZYCnd8wP1bc28F9h9Y4PcUXf2ktuqLJY9qicbI/bdc7qsAY9T76cpvUb8oeQDRRCHZhGhCjuLXfdCmm0B1jD8LqBovFE+u1MAL/PgXDVGhRdFdPOq6xKb9RK53l5NTSaEDhpA1+OZqh5BdaKQTKRjlKq13qi46pOI1hDH8n3hHrt2itrnamSfeVmPiVJN3YPaFvYgC6yPTKdxC9g3btor5h4TgxY7kJr98GGG+U84TlmReGtwOCGnr1vacdzN8EdnIXHAoSH7n+k2wNCLQoCANBs29bNL9vmz7Zt27Zt27Zt27Y9i5iFnM83ndQfU0L8YtvnI5ZUczF9WlRdUeQIFXS9mRvXD9XTwNLWcZqJBgzfYfakaORGh843XD4MNhPV81kXlZBTjOMKK5CelBMmc7K5XjVwb0uRSj1LB7QzNwbiE+vEfyE2ESr9w13V+NXeOOfgC4jYHhzyKnqeO5dN1R3pey6DifjIJFFtjCxuTTwUne8YVmBjX0e0T/wPq/kRILLsmRGBP0/FQZ7B3b8prwy2QaXMjsJGAeMNpVSd/s5S8Xl0pCXqsbxxxOiLahL3nTP10LOQds3n4WqI9K9vSTdBLq+eDE99S+1C9yyhrsQ6w/AJKpEYQlCnfIN4Kk02ryQh7FVw3nQQUzxkpB7uSXwkuNZzATbtg1XooeJTMqO3KGlsL11tnUDkyqIqgRU/kkOazETDoceFNDgN9aKIJGrNECw+UHafxRudF8qnq0tYU9qzCDV96uMHMFA3p/uycW6V93fpO5llvsAozXaq8WFBjLdlKM7nRV8foIHTiCSORfv
Kjb//GXs76TvLDnWolqD5B16dDLK9GZBfHqUWE3Nfepf8jX0gEykk5noIRuRsjBTTP3li8FViLHGCmKmXEiGmJppTBs2i3IundNPJB6WuPtFTUmGXOyNJqzRr88DVoteG3l6S0wji4W2zDQZLADg3MUVPyo43MbVzSJyl/dbosC+SJuydQD4Mq2Bm1fPuL2ISkVXQHN+oSFmmPOVuOboX8y4/Mc+c8+/gBgdbcwVaqQsSkc+FNICXWypaJFxeyTn/SC0jcgGhRchCw6yugCoMH+7+CSHUw9sgAEaDZWPwQ70w3Ee3UvXGmtQQot5flfnI1sGe1FzUmLocsOkeZIsg/djPsJxfq/YVPW92J3+f8UFwCdvEEHfr4h9kSmHl4QHFHEtJfqnWP89nLBhLKJVRZvmyHHglRMk1XbLKvicbz/Kfhq2TTprwDjEV4G5xNRnZXZFWXe5BtQjkxN0UWJpH90+itaHLK9DKjpLPSTiRm/TzjgekzoQ/LWkGoVSjPruUg6xQ9hCQwqaV+Wfeb4HvgJ2wuUpqTWNnh2nHHQXdsNU1Njr3itUQEp7w7Sp+Kd9ZyKR8xRVRKf7BTX3o56e+nYT5DE/wyRPOiodzITp13faAHKGwj8PumWwg8HmwEpc0YjTUhldWHgY33ojxG0A+jaPTD77CpoEYjrY5O8HxzBukHgK7qu6Sagt7+ZPR+8DHB946O6wEkDz80sdOV5E35VSCLSxrCmpII3/a4dRhhZSbugNPxPwsBcOOP8pYaNZTOcci8lte40hDh1ftenTJdwsJEB2apb3sq2ckPMF6GdU4IxJGMifX05t/w2wtF8Qk1KX0tosy9bmo8YvQYFRwGgiGkznZDNoQGdWlb0KtiaZX2aYhR/0n1Q4JrP3hdztfjLPPpxGC/TFFdBUxn+y3cOd2aS2TF7qcUUkXcO69IUfXpWG6Vz/kiRxb8blDR/JvqHJaxD7ZGdKuFsp/i/remgqfMQNZXiTgzUBjYM6g0u8gMtcEZt48hs49jm7SNnx1DblvY+MD1zDxPbPOKP4TXDyIuQi+mEdBk0MeZRPBhdG3l1l65p9dgjCfQ8HWjpFgq4utulX2MpxyqFEuEwMDPrqu1fE0MiqN/YsxQT9uefvrZjQp9U8KeKDjo3DCoU/Uo1FdkSlnSJj/fn5V6SX/BTb87P31PcL7s5/lGdCfa+RRMOS7wLHSwJmxZ8MtBLe6Yy/UVNTcmg4fzuz/6nLBsRkgKgwt0P++UVLp1vC5ET6N9fCZkA+efXhY+n2T3+hkkQRiMXWgoLCymxy60m3uiZb0/yAFKdePa2sHWTWMHRXFjXQtaN696NBen5bvF+q50AS8QYkS8xv+TN1fh2/TbzPpXhPE4sDE9sQnRnX1XBfiz6BKwt+nQlGjJTiMe2f+9WsVHS5Ijr0NpNIssFw9gTkJ0C5RWsPzTp1mUtsbEx/4mgTPHCXmYRcn07W3oBbGSeZKpMDs59ayQW3vGM4JTwouGDaNyrezy4Zkl74xNJaRxwqZGa2Q3bicL+Idgm1fagYe2ZN+4pkgJmJtFRFmto418RUXm5+Qg8klScVZfbUuVo20bwMY3DsFU9yZSqoiq1yODiT434REwM19VnFb+V8w8SrJgTJFTDK+rrOSJEh4e5hexKdQlHMlWkCE4Ycuc5Jh7in6eq5f/516vdLkBV3Dknga+UZxfGhniwpn7tGSt5Zg58Y7riudZeAbsEaDca27NmvHW7ENCP9HFYRrzghy9SsUPqc71hgGcwr+n9bwDwraeKsTpC8qqNtmTbitIEF8AQZtQ15pfJB/39alNq/8L7Dj+hEW2EV+GKXs+SCVXBiu7UD3/QfBeV7EjkBZo64oIAH/a7x2KjogBSQrJQzYfGsbqWtpVYhE6L0cBh/Yr+1E5rWAkpr2YhwghybJUSG+a3y5pLwjhOehjjyZDr5IaO1Kg8z+0BlOtYat+cslB91L4HKcH4q0Kpa+6nMHdM3a5hkR6+DODCljz6UPPfefAOG2G1V3vEYj8t2FBKcELWFgd/BAhFK/SgbN03BCs2ll1EusZw8XyM8MEYgRVlJ8ooszY8ui7ciWjbADIKpaAoa5Snh7K5DogNpi5K1EaXO5DdZ37tRX9avpMbMXjYMsC2/opViN+pGqKR75Z0s9KtdPIy22jLuJxH/CbNFK3jC2t3UVG9/ANoyJuZZ9UmbosTqcQCObNpYXoD3Tf9sn5pYHqxScaSB/E08kYcEvqlGz88NMKFAlo+mRf4IWucB2aUOox+kfOEbCimlvPtJEP0XOr1DjDt1mB2OZ3+gP6kmMGIUPL3pmPAPnk/hcJvI+W7Kv1Mn7UEh1G1UiZ8pngOTveJ/NAA/PMwj4+5gLUlYkL85ldmQQCXcSKV+deqrEoWMPAT47M1nGAcPYDh9rJMhUvQ0eFYcGJT0MbPzlO45DpIiFuFB6j/dIAzzBKTdPpTQFdh8GYmkrhA1CBoOquZO6md/Uh4ROGkG8yEBbFmcG9/qWr6WVpcVhVUoyqSRECt2kbDVN8qD5GtwvitHDF1Fp3WobxUnMeQ/1MWJOAA0du8x9jMySGJ9OFkyqR5jjSGj7HtXwNye42bdFiM5BCXOBR15VCdnhn0IvVEK6lJIAcXwx2j5XXOdIQVjjPonLOUZsX+5l4QIsMqWNaBO9KqMxhlu6bowZsTCjEkdslwb/3f3+F5D6HtNt8/qibz2V26HHviLX+uxS1CZBuo0IqqoZd7ho8M8E27USDslWHWVWMB/4n8BTnrh3XrioLJnuUpbjiMRnTlm2s8jLn/qlOvPeBPO/SQC9G/rWn042M+DBCSu9+s3O6oCV4ABBnCMecq9ptQfGX7b5cUVFiERi4gO270/lFJ8hWnWbViIMq9dlju3x1hPNmcIbIzH+EzzZgo74dq0EokulmbUT68DIBYR1k+aQLScSJZyvUG26FDfTU0C4tQmuedu3lBzeSIajrzctCOPppaQImks+h+BxM0k3+OVblIoeIq7hyU83+TJG8K1sbE6NQy6yt/QhS4ZGgXKEe+q/k3H8LDrVsClrYg3Vt0mFWOUT+Zvs/PcWEzyls2gXr6397VeLQS1lrZUDUltGBgWBu4bxdb5lHtptt2TPn8S9n2q69E+1cw5+s3EJZfeL+3CPcmWQgaErPrkoLgzRnhpgDmISAQ/As6vi5hpetx+Q6IriVlXFwsm51uzKZ9XT/hCp8DAISn43s/EdhBYUqN/fff/y3Hl2Zm1azlG+s242HfNGU+qCbjI6kWnhQ8zmouRPSTjXC4AwwSM19QvlukFiY6S6wWQe8Rv/Z5sqUfIksmbwTsQpSR5dY4n5MXMM9GL4OMk5PVtk6zXQdzeMLjXQVW4uq7rd8csEXtTRmWdSGlvlI+gbuMPPdULuFtq1e4l+8sqGrYlUSLH30hGmcj8bEkjOi6WHZxLiyKAizQMrh6Ap76a6syLsVKh4kt6ASNrbiRXsCmy/RsN/0PCZYjfnDA77548AG3BQ53iXH7mRU3nnAfZnzI1j47WAOH1j/GSSPEhfYvRbn72dtQDyntc9ib
ACFa6Q5a0N0mA83ZcPOv8r1NCAegz76pGfIj1+CLNuh/bmCB341jwchRBAxrcq/sNMjWuwquOC/ayWHek6FJCiLKlt4ARokooxATIyE3aloHYtyO56j8Mywe/qhWDvbCsIQppHv57Sxrc+WYVP8EwcqXmtuFjql4PLfm+P+eebmNMgOI6uENXdJbC8K1BS6RcSReysJXtkKMFmRChuc4fAPwscLp+Mp2jE1JpX36MlXzNYssfoYySFAqSbakB5rt7cIKxlytUlG9Ol/vlzeI9jHhgi9F2PzWTqhj39bDC+G+twov/D7bnhBN+NWfRSZdFi3mNV/wcNy1IipjYmsnbjdHvO7LLJl5/eYchYStGNKnKwubHNdh51uni2Bhf4E4nhXd6BH63ZWt+xg4FYWA+VeM4YeOxm+gxy1ELyhamuzhraJ5NnfGIN7OB27nmwfqit8U9VtMDhtpjXyBkWUljlHYeyEVxOed95eN5XrTrMc8D5BG2GbUhjOfZI8rYLywy7X4CADhLsuCHRW1IYLWNKfN5D73F8jk33LS5hn772XxRqkg8DyHbJmEzXK9v4KOovH32Ci/mNYjtQTinalgUqPMpKuIZCXo39wTFMG+nkbxfeDt7n49gavQ9/RfjRF5bWacBgcvil2ob/9f1wBt9kb9E12n02H8QdSQTNS6vpG6m75n82iXTyT++O/hrG3xe0V6d3m2Vt0Cdnhom8kfeNyzKHdivL3OPmXmw/b3toiExltd8+sJkBa4bO/IsQxulh7HVeYQY79gmBI6Vmn2RFsRSVVfLJIEq7fY/63lTk0WnM0lh2Dnw6lSTHnjrJ9oHoakblzvghM4/3z6KdWxkvxGGtgJABAtdacISb1D4p1UKCm6O3uUTtt5cVTsCTTFDJX0LOs7INFyTEWtOiUWFCioLvW3KUkzRmLg5C7AOd8lWpxt5fFFmkU5h070CmcovjABelFs9ibUSAEhwbj6Ibr/EROxitk2RQe47yok+z0q0UKT59o4cnIOgc1ATM8GBt05VrNZ55F240I7+X5lbJElXc7R2WQ+zTaLmJFTri6S3wJh5e8mNIrFiuCBIc8j367lEyzufDtsxK38DVI+FjZyYdCmOXedGY5C/NBj5zNgPgaTyo4tCYbvHT/FKhHbWzbAzZ7gk1GLXJ1MvAPsVJ82e1q5scPjdcJ2H5QBrT+iUPwJnKa4SMXPoKOOEdMFmkzzCusm4VbCwX2khonAOamUTdDHMTULwCMLzfWAvuu0KB5prJmQGjiv6twyCvzv8am9lhXfwYe5vb8CGHWLXCe7H8VyImXlSdvZqb2Mcg8CpcFUlZx0kFXd04eKs5X2ZUlgrCn40iNyNnfWD2Cj/HQZ5mx2w8zfgHQeizm2oXTSqDWrwRQjVMRvgfHi+7oe0kTadoU2heBtqvnXWrQQJfM1R9yhF4gtKQG6q6tbt4CWIxOIrlXDVr21oW5gxgcA75xubee9Ad9jQUw+bnHlntr402v8rh3wHAN8ag5Os6I9ea39LUxAer9TKPJq0X8FsUMXQ+/10JQOoE56vznzeAqKqu8aFSBTMrb8AzjhmxEX6/2nSBNUdoIIIJsqtCdNzDa/72P51TqIZpbtOz6zFEwVuHGC2AXP7A1e098yUfgRbAAsrCUrMbh5T/qqbmOJjeJEdUO/Bgvw9wqtg0lP0piANt6uLW/i8nRbqvHMbK9Cm/kTmfXs8t61uVsJJcAKSfPMMJSAjOtk2W+z5V/Owzae0MoHgUyDZ5SsxhWp/KxpjMsadC3E+ROk8vnRywNRV51f2KRA5sqv0lnEcYUQi1Y+csTHxqxTwVXGlT2mRFFbH3vBo/D49fFPyItdIJgFIERdPHHDDeuigeDDFfycjBuRf3KYSor+ehPAO5crzaWrdChp37G3CrsSlk6vXocsogrqO2GsdFa2ItwRuzuARlBYDX/dd90nCEIkpLEmcyM/IW8DwGuCzbPQZdQiCT8JGya66tO2y38lAcgJTSKo+SCA78u1m1F+0cLaHK5gycM10AMmZkrHrPyn78xy7pLpgR/0GCNrN6UXtvYMCvxSVMhJYWEiQNqoAu7uecPvPW/Dh/UOVvhIJQyN0SXC1i4M+KP6bmtlbJYLt5hSgf+374mDHWgNyhF6QwWLr2REUDDCOcCasJ/x2GzcfPihno/hIx1o7f+8IZr8/FLVXVcYNNbyQbc8JLcbof6HQ5646h8TZZfVTTA53HluLSBb40Ul85ZU7sayIQZNejRIM6P+/u/uWfngdn3pC7AbANDyEpAZLzzhg263sIw3hDs3xrUxdotB7GGTDWEjsdgUq1NHeXnIvbNqkbb6fnxmZ8j5OqbrRt9DB9/YUVfOkR6D0YX0fQpUy/ysX8XU3OEXHGuwgLDGGKrG542TzSULO6mT6lKhIQcPEd13EXxU8yn0XVg4AcVEds0fZwQ61ubAzEmlCqH1vxSID2jAUI2Y/gLcx0Or2i0KMyoW6JRPw+UaM7kTA/qCAXBbJLOtsjFVL30YxAJNfroy40RsglZEawVrhIA5vbNqyoJj9Gu9ZPSqbebEHGyXmJ90ol/N7ve/7+xpiL1Y1hUo00O75Gu28Leuj0xAblDn3oHGpI1waNc+VC8QUc4HONlwBccBtqgxq0aOp9ONgX/QE3/VYobJPAkEfGOqkjmrlafrw5CqEO4jJXGZ1FIvRjfkuOT7HLuVS17j13gsaMMhn0yEHLZ3W7GNaiB+xjeOHLoGnQYBTERrBSR7FKgUkPvJZTYJW4IW+oyLNH9s9a/KOvCUSyVCjZPydrtzOYRQzCJB2EKM+4BETBYz9Z2BUWWCzPIZgk60JMhxtlI4ag1wUqU41n8VyNJGdcs3Rjjys0wJUDuUyzovceWNUILENKohNCZ4qeoZWwzTvJx28SpEtfhrafhznZ9UQ64Y2nJw9KQeEsU8yOMfG5NHl4qSo306cUzQCgEJC3wJtW6KtjztQH4v3yzWeMG5RZMT5FBYIOHVZCgIXH8/lxuap9KOTYiGb3J1G3jddnWi5CpyAoLtnhv05phOxPhd9wZA3h69GLvnxSivjmoBb0mXyqscUlCr9aL/eJ7+Nxix1yiWeUiiCw7ItPoQ8wn3nZjeSHQ1xEpDkEK3svVX2qt2pX6LgnDVtKMJYQrH7z+jA449/B3Gp5jFfWxpXo9Smulcfd6Ucc/b7Gz5L3n8f2A1xSmLpkvCPKKzclhAQdzyVFr8yAc7lI64b3VhUM0BkP0DLyn7ArH6JxXlHlYBOOlB5ZTYshwd6seAi1imTyz3tcqqyCsbt+VV9EbE3rpuSoepgiRDqDMlY8GXtJtajgCtw6vhdpi/Iavlpcr7FMmifkh5m0kgBiv8IzhKzz8AhIGWZFoWkQpcCiSiz6qaJVkTf9Xw56HbvIItRDRFDuNkqNdUY3rUtad6J525NDDxTPltHxqMOa4YaYGNXl8zGcOuYqqG6imyq72NK+xMEIehV+eM81HUnjJCyrhfsKWaZQB2RTWoA/rXRZCjcfeWQEIkFfZfxzV2DeOis6pEnxdlpGUu8Egq2/9ma0v
ewVtKhkBQU3iMSGuYWOpQdY+1L7LeoHmEPMpeZx/f2jC28ugnw/65L4iDgOSbakDfE89RDgUjVTnBu8r4FQhr3vHyPkylKbIx8KBVHtBEmHPFqSAhH/l+Li4sRB6iWL82IyXrlyt4/NhqJZCb/lgdZNWGtjlWOxluOGyjR6krYyrd1qPVad+I2dL/UAmf2OxtszIu+SwvirKBrah6hAyQpVw2N98thFldWo0SRy/ARY/8BTvIRoT5gAu7Phy+3s+JW9Rx8y/ad0c79Ox/GtEqbGGrfcNzfvETEaG/gQHPEDs/UAommRc2q3Tq68QUD4uYobFt5mUV6e547tjBxQY3Pm5NJ+wCYonyrKlSyvMiIzXIUqS0JV3FyiX+mYmZrHZ/WpfVJAGeiHvT8bm+jDk+8BTC4zy4HkUSmutk+LQ8plz8afWlytJOwT1k35fDYPh6DFZeZFEW/hjgkI1XGa4R1P77hyNI8ZofbRp9NdjfWj5jrgNhe25DvYwh6KyOmKnGw3guTuH5J7Y/j8jywS1pADGp1eZKiBNzkCUOvQCjFcsYntuFzYfK1QU8MwbPfq15fqCJxDjGa9XtLwYorvxT47cWUDiRHcqGm5YsVBBisb7SDD6foFmdN/z12HIaoP4v/Kuo1CXbFjFiFGyqd+VtvdEyZaEP85xWACo09OI6hXCEaoiY66X3aVHC2GPA2VHSeOulIijsbREKCGHZQW9xlmiCd0NO/0OhnM1HfMkIajf9z4gDYJAJgXB0gkUaZb1eTbi//7ZzWvpVQ+akNQyjgcdDwZ3LhksOlk7VXFu2TZhqWDH+tmioloIgt4WaHERxqaUkeo/L40UIBLg8kA0ONSD6CThUXEarHDIhSVgUCMV8UNVX+oTIAyRPNbu5xm2V64X7T3Qhlqb9mfD49f5dP2tWsnEkhctgoFnb3fBYCPeyuxRh1UsGSxyZMSoEuYn4ndMbFXl6Z+DpbA1fI3uKtxGPwz+V1kH/18IEUeaux1mUSM49hTYHW6NYGqSfh83o06HGLWGAIB4uML/k9C+G2sadOg5hxZ2iZOPcw3yBba6+TdzCO2kDSS54vB4b9/c/UWx5HMwqzV1fnGLwc7VqdyVtC42Jb8GYeT+YT2HQ/SmMSOTgzO2YNKqzixFWtSPZHaQCWeXotCMDD84o0Bc1VXKS9WUV9mHcThuMeSo6dma1YVe/8BLdgt2hYd4Nhi7GuLQEoN4EpsCITMV+RBue3g2bn+wFdLt0AbH4WtQmiFFS4zHtTokqfe15Fh1owecPyLtnP164uCCOwbH3L7KEly1QTiTvzaiyV0JtfKGCEcXSrrMFcF1mdSOdCYlarXNrpIFDaKJ6jB5smtkmOODcKS+2p0NDwaHyCMKJ6S/qyTpEzv1GWx7lRxO45wprEzR16BJUqjJn2deikgjidl1MWPjkXWWHCczrroiHQ/K6YM6LSQRNQsPtHO5gJB9b9tUDGPEyhTHaUXTYBi14qf1RFfxvwmdkjXeV2o65IJT+Oy9+UKUi1NaKC5rIiXKR1GKCCj8zdEmQyjGlEv2fK2mbJJx8z7jVJuSQ8WVChsoj6FExdJ3mzjugSR5zvji5+2wwx5T3t52Nc+IB4eNR7W7r7T9g321TevVaiAluQDFL4MWpWSkLcrtJ/rwVObBV8LrMCdDxlDzI1ZhmxFNxE6fTWRTH9el0gOhxTy+Yr9CuE6qhIOC+LJHFXz0AuG2tJygovZG0izPLKhLquIfB7iubPgd45jOK1VHFiaC7wGNwzXmfJRVWp/LAzEv7HMJkKPeFJVFL0q1qIpIhHrxeNqlhKYp/3Xt06e3mKShjzBFh5yYEVBWk0n1vEw2LmV57WMQ7TCMz71XxYdWe1nEvc8SP2lz2kNaU6yIwcxLnfKJywTGCxZJ6A4eO39nuSq6QlUEnq9aiDTojWeL1zLnAhb3KGCSmybFFKUc6cXkD3Cq1DJo7fPom+RmG0/8k/RSCe/CyKpbxl+/89bETQ/aeyuzAgLcQI7HtpgEaHBMRk9zIMlpMPjENd/pBuo7oFJOnq++71AdyR+RDkH+UljyMx3izf3r5ufOCggH3oCaVv4fhPerwC4RWQIVSSxFQD6IBndWmYeImrdnZvS2kJSt3Phx6/o4Cs2LrQtqXek1uzpsPfshxR4D39mTMwdYHVoxFY/7uZ4y0R+JdXWin/PbeoDviJ8K/HFQotMxIRdU8g6HEzkAsz8du32Eu8dMagn1yiWJC7v9nnGDsG8Q+jSlG1p/J9p1IbAZ7R8d/dGiE2UhDom5B2KjuabGCdeiychJTo7cGI3zRUUQ+NjV3FV3Myszuo3fz5risNFq5Zv1Fs6osmfXD1ma0M4wOvXNWWlq7dzG3QsDzTeYaYHEJhZAyuztOQJHU4D8mEibiejDNXhCLVP4HWCPRLfR4wKLsLH7QU/tfgiC600WBJjy0lYWgWZXC8c2PhdBa05pJvTLp+SpXhv6hes0mPIDg4mL7dNQEVMfnAJloeoaj/fQ7Q/+31HWxRpua3jV/6D0ZKwTutuMkDKJTGii+7uDjXZ1e0RPotfrzsu3t5U1j3triNr8eLDOgRWCq83boU+NMARNTKaDRo44HQc/BjBipu40C5i78ebWxH7vOEBtvRScWwkYw+RRsSYoG0pJl3VY0Bhzs6s4v+AnGU/L4SC2sCCF+5xgywRdUi/jSM2VnzOiuZk1zZmoV4SoBNvuGhfd2+S5iCwjk0ioI+X+J9G/YoYJ3tD1Hfby06AvYzQ1bm9LCDc5/50vqPdTk04d5rSTVG2whAOtbzXRayAJcjdrVJiNYObiTKUkwPcp/CFralVI3vO5u/i0oKU+NL58/YRf5NH2ZABySV6prBCl7VAfPcMxPr8+IS7WhVPEF5urUE24NlGrIxi2U+k5C8jtD4hgGoSELo9qu1xhzNt+bAAXPCxoTH2rd7nm6a37isVdg9uPum/5UrNixcLQ0MFRT3ynwIAXmSWJgobD7BzxNRFU5L2n5Yepnh+nZrHSqwUdO8JlaslR0hMVfn+q/2IBzFDSS7UVI9Q5QYPKlqCiMtbWlL03vxBhZ2jpVItObJpMGcBJxQjnSBhymFjyVlmc7rL5NzoGJfgb+beV6e00osSpTvxd0F9SJb1lx28/JapZsIZ008DCId7ose7W09W7Aij5/RmTxYmCkyWx0SBzJQgHUzTN4Ptir5ycx/lGVyGYwsUH29rnpAyV8vY2Vj2bJ/M2xuL5dL+qSKwTuH13zSt95l+v2Wc9OrNUmWBP3X9sy24YOQm+5ka1bltcWOSKfYEg5qZ7BAbRg4tcSw6qD+b9uowSMz+XIF7go5WZAlERSDLrko/mayaATkjUFfvFC1qhLEugkQeDSgvL0JrJa8sahRiUhzLdNtu/ZjXX/wIh56vpoKjE/F6nY2Dsk89YYV30oIUBYqQaIxprbTplkRFP5BW8NFf0QcKNySD8mSn06pU+8nuiM2nRUZSKoljYSD/DNDvSJrB3F0x5NW71vxr9MYAr7csbhv4tK6G73pKV/XQC//4QQMoh+l/6ssEa/ttdRHR
wr54IPnfzsvMkLDtkmcJk2xrwCrSVAnKZY8xKJmIBpu+C/y3dDg1s0nEuKbDwgWScVwfXsSHbaVopIQZLaOiaVR7MmT4CDK4BG1C6DZ+imFpKUg34sXNPkbSoL0Bq0erVnI3La5pxol2aL1IQndHwLq6IY+BwAsez8SR68hTA1BeZpO//KUpUiYts95EbGoKAmEntDN1M9BxNSOxhPZScv4UEmS5U2OBO6JqMPeQGjFx6FDhgIAyAEFrT+OzQiTrIm1mO5ExjjwmEYO22rueLTmRsiQMXpCp5rzSW0YoLWoUYX6RsS1FYNGV4EJPMjXVxZsuMQ0Lo7gDss6r8wWv66bRmSLoE/E7RbXtG84UiRiQIEEiDy+E5HeuAUSWOWWZ16el0xexGUWFymyMYFZOAWw58ju8GC3nz7tTGoZ4I6HCkjzP7XfBzdOg36UPQNIRmYmuOVAsSr/NGThQdqOERBrzGzhzl4aD0o0bSzryVCjOcNl8C2JaWYTD6jXubmMy7SdxbLeHV16IN2vKOiP0+2Y7dLUeqQmPyT2aRlOFUp4Ld/iVUYlM7/AeM5HEAXpyLSODUte+TS4RVad35Jfg/aafTOy67D8H8Ad79vtTmP/4tEQOCY9yg0l+YEX8Vyl7aLoREiGWHrkFoLPnMjsHL3kJwrHFhf4RjVKcs5UFISC8JaUQxte+me0S9+8sqjEtNFHZa/NDaFayJAtHy26AMVwCyLKkJc3JMgeGqPdj11lyxljN8WE8ox34CzImH6qViUcyOwjDiOrcM9CXjcdgnLIk7L5hq9A/khH1+asWy81n0/ej0ntuNWJvNjYhOng+7fWUMpgGQIOyp5DTlV0HUY4b/o7pynAGX+C75FMzRhLoUPoKmEUEfF4Kb4V3cJAVcDaMeS0ckxSXMh54/vDMWDFFAFtAIqyPhyg4/1llbYWpu6kFRiQJhNokouKzfwImjmYGcjqoXuUA09v452fztChsfqPT/7ipEUTG0ThD9L03jgnNNEXg3wEGPDHucOFJL2ueMpOzXS5Bup0SJQKBU4KNC4foIXl4kxYkkRrILQ5mkl4Fja68j5BPcwsrMQrxVwkZc4HPOXTQlC+P+m/0+Q/uT2KgSs0YCTCzUzDXla98Yab/bXE2KAPq/uMypRhdN0m84qe6syDH2vnjrz3K07+DdPw+IGumZHvX0qkz8si2ywRa5yHSS+BphiiASt7YLASZ7q34gYJ2w9ZOGSNje8uExOXqal8T9ww45ca9lRWv8Z75Xh96CtzNChrM9ZBv1K6P2EjOJDhlKc9xZQMPoICCtAD4Q6Ha8Ezb6JQJgN4x2UqiGlNUuamHQhcaWAhcOjX2AduRmdCLB/g7KyTp+I2pAcMsCIyaaky0lDyj9m9jFPitrZEl5bod4yrvzHs9mtusnT6w/jo2pxkYmlidj44dipKp1qUevqYxSgmHg8/vBS1yO9dqlWcB7RamH2XieRsAG1eJCJJhKXZH4xS907DtkfuDO5mHvkBJkGF3VHzDi79sCLMmGmcBjYs8WNB8f4KKIAyMQB6C7ymAKlD20kpXnmHCoI7SclI4eqh5gL/9mt4BiAiM3yppsBNLCVXFm97EUTm00FRf07UN/TSzEWCUMdd5RVcVrjDOgwdcX5O2ikt9k4B5KZk9rXV6w06YrKJwPQ8GYFmemeU2QLJtc4OEUqlyYeoaFxsdq4abUXi/1LuwLf3rDEyAf8ZppOECOCqRmWtk3NL2m3yz5SVkaIZdUc/0PblMkkteUg9+j9YdxLjHrswLFppZSWWB/HjGpqCpZMdjzr/aG47If9a1WAA5JpeHXaYtroK4Z5qRUJAJ+t/wOMla6v2+hCEz1+jf4snAy7zYOviTV8AKKWwfLQGGLKkBmpRrpWe1xSz+Exv0AT0G51pVUqW+IyaGROo+ypjwmTXsEIJHRGfVAjMiAz+ULv2cIy9OFBxZiYulw/l5RHlXwgNs2WLBSY3r39+5egF1yWX28s7fDsAMvgCfGVFfGAHabZPIfwDWjMZTnneT4f/KwRmoF6AifoaCS/PUnT26HIxf/rNNUQ6BjPkqhZIu+mjgKdW+8OWEnjdQ6s6kBLxZukkLNZNsZ7+Ib5g8NYWeAxn0xugJyZfJ89ugVPreLVb8RVUPTIM+g/BJIuX3ax1MT7/y0axEI5hbkPV1t/TuX8NFmtt/IfO1bkCRaK4XZxW9xklsmI5aQZ1VT5CPHgWqbjmzJg9pBR8x+jdrrkUU0DDCZnE7ee5OiKlmiqwUl0i93IL9IAiKtnLIOXwd9dR2iZ8VCAzGpHgR7WejIQsTxLPjYBLnmwcVNqItojgKohwNga03u4kgvHR5SgXfRLv/fANCkX54q3EoY4zxpo9VbMx2FU8f9td1jeRB0xwH4HyBAJ83P4PBIQINQGqWrTgVbU146DErVLj1zU6+It1IGITFBucuKY8Wm561PHPAAhdInxEoroN7uBs4KcfzYjIJUrDiqye/Z6n7B3Xbhc7QCVfqiFmHgDPFQGL0y8+MzbsIDagkbHYyrQ7mA8Mgq9WMQIMdpdxBwRHdqJE0E5JZVqqPlPWle4UOOrCUaDKhVz87NOSMyrtD3HodgtYOc6avnyWHIx/+44UiMYxDFlU/jr2zW16h/sESwQ2yYARwk1JjVn8dQOSjzqPx9zhJCpprJYQpmdFqmEKL77lAUFPvAeAJMqPvmdmSTOY2aTWHNBmoCjbvA0Tkaz0k5DkC0n2WqM3qGiKHPfyFzwC7E3vqYdvwkzL3fnrJZ+yJmj6KKHUcWQY45ADdSmLqh46coQlreEYvsUBmNRneIXl5O0Fs+mbW1nUu89Do60vVbx2FlYZOevi/YeA3RvVJThi0IgaKgG5nPdK3k3NwfPXNj3Hj8c1+cGAcn3TD8XUjFWCeJxzrd5Xo5ZDFv1GK0Jovs/izys6iqp0bTKZt7I8Z4QfBdgX0vnl2AjAw5a0K6/JfYNAhbJHjCV/VBnl2ac2g/qgoV7i4v7fon5pqBN8sM6Owagm09Xpx9Y81IobOb9Q/Zni8+2Z4gefb9Ku61fwFyOEbYNsJR2ris1LaQ+bHQz9SNJz57YN/GvgL1SiTVZFDQnusUMPtkNjoGv3GscYw5ZXAcpUbbLt25oTVBvlalqViTu9Kt9sZ05xFvkbXVP7ovBeo/WJsvZlDj2XH/F6IRnfSWZDCs5MbewFypmIDQHZIuCLhUw04JJRkoXDs0/CshJj/CDLKJDK47X4jGVnJVEmzhbK1jiRcaiSeWmipqycfT9GiTYWTTOUEj3zR2YZmfz57BRRdYq+CdJBqZlyVJwCzncb/ur/ZCAVCRxwiv9USqxoldXgZrp34ZBz9gu1eujCtWRlZU3Zh+3nS6YJ7x0AfgTuBKq+FxXOtX2dtXvYWHiZPXy33zdnDkRxPB8/4YJj/0WYPCX9+DNxFUxpQfHHryYnt/lO56TMgZhndj9tAqLPxKLGHqNUAi2cmGHX0fW2Td74ghrvv/yEW5+qa6U4If5WEgU4wVQ9TFgqERuRhTX3rp8jZkdgOrwxLuu74RFZlmbSu4je
vmYV1zgaeytW1+rJt5yLubSqNmfeAaWoGndNqp7UprVQlnTdFqkrkJCF6fcFxJcbS9qJdEocUC2gFwnw7PQYa4PdpTUNppx+8ZPvrfRbFIDpkSCJpRiY41ZZjEK4S7ocijCmjfTZNf5a6Qpk3EEDi5p6ubB7D4my4eC4bLJ8892sCXCXIumDdThO+qYCZ0dP+6H4SLi4bYf1gG52X9KrUt/1otjFGIJtgNYDwk30kfFnVvxwZhgyR6fBKAZzk22qoxGpRZPyZjS951frO13xyLbn5o7P/KFppcczUTzqst7XLid5mDZlOqKZDZthcm/1S5rgi5MxL/MSR8yeAC5x3+8EabIWoBMQbdNvG3fLMwnrqfh9CarURT7ziqhjBqq2ZU7rEkUBiG1cd2m1L9mUtgJNFmXh1L4wcGomEipIiWdO25e6AWJmUnFTC81K+Mop7HDzAvMNwHz3wc0IcIqnqhEfBQqjRFfLsODPlWHIfid5TiVrxwdchjGYOopy2VbYxnOzEuhUTSRVaBRKxRQRdrVFgVYSq+JHsu+fMWavckEGRXhtwdZfsGsnThQYOFw9ENibCxkPNyH8F1N8cCS/v9YWYJAz8KAhFv2SUn8v5xTp54Su0MKsZrZlFsHDnmg4D1jY+wg6W5s/GFYCNUjFIsV50weGpIUAq9E3jqoniMELeGOpXnXAWKEfcYjcjsXhrpS21S0DSHN8cUan0oY2jLGKZgOHNaPVEnUoVR8diiqkVtNIxtA6+oZZ1jFf2Eiv0Uw4F7bS8B0yCiym7WjgqNQ5aGvHmPiXZ3G/+JsSYoCy9rLI4AGHW3pORob/Do5zCDP+Kps3684MwwvpVL2tH33TK/etPI+xvrDpLSKVGXI4b4lIv5cnUp/sokRZeDBIDyftzyiPlgmTPTuitstJnSGLzvtr4v0MAvUUkqgGBp2RA5LC4EPH45a6EjZj0Qx1A+rMo6jO34N4CG4UWCJkIFUBZ/O32umMf3/9ax0fTj3KlQDVKKRTGyVxWmZherW2a3b1acVarWnRM2dWr4qdLCizxyp7FR+6kLjnna2uPMlJuNARL5orrbpdaZkM9osoqD3ertQsZjnf4eWaYNFi4fOSiUPVgzsSv84eqmIIX1nkshg4RCz481lg6raAxSl69t3r0haKHPSoFs5gTHal4g4CR14cWuITf+Dswh37udBVeA1tAp+jUipxG3BKqjzYgYjxoPsoRhbiQJ10KLkWg4vzi0II5fP2vYZAOagtNs0SO+nvOODmEw/9lmrIOBiBipQg2GkUpXiI4OqHLdmcuWvdPRGh60m7/ynx8AgYzXfgJ3TpopBmzPOOnMisvLTdSWlXxqy5S0GAlcmy8j+OIWaZLhvoJgKK2NsGwx6ZjQihb4mt36T4hex1r9KGp9w8OyI4buUzzWsPHVCJ+VWkHiYZ/cVYwNlLW0klQzg3wuE+eg3kk/wsFjqqWEPZeKQIkFhEM93vbyypSVdvxrDgwgZQ9R9LIQuDLsEpbFPn1AL0nZ1TaeBdFYktLAZk6Q3Ktqc+SsUX3S16alyGiHNnU+0p7Ul4AD0FjchhZn5i8xyELT5+McPKXAolU0BEsHzKFKJWd4jQsftyUWHxkTW8Tq8NXyCeuJ6HMvvug3sdErjyoyaEwlsqcNhzqKj3SReFBUBFDRcV/1HyE4bQqVkXQ976Q8asM7dwwhj/eW3wyW41fzI4AKdvxuYZ7GOgYO5qL/SzATkwgQ0ZZyO1PPGtcvKNGyOaO2uzsTkSTbdFJZLRsNnKPqLTWdJVJ6JGsb9KKL20MCXllBTe2Qh6xMCQvl/5FP7M+2OWqDZgvNyLVNxIfjmZnvnrwyLcXzECgF6MQQxoR5ZNsWsCGM1ZtIxdzt28LikGL5X6kx2qp2YQvBSGwV5maFFeshxf58rM+MIbAL2xM2JsK3W/odmzTaQkdeGx1BD6hMiaj7DnNgDFvlGOe+3Bk9gDDQZMD81aMDbPNBPStlfQuIHJiOZg5//4cbkw2U+Ul+jVZsASTpeK24fqPl5zlwdujxiZcI2aNspqKENiIHDox/dBQRrQnZONlCp5M0E4o5KUoNIjEnRomB+jkq8WZz6LOBtYxFJY+oHetUnuYAkyZRMLEiY8erSB0k8ffKz9/tVuNXI4++C8c7AwzezAiI757z4f4cLmTWQfss5R0i+5w3TVT5NIOSxuVrQXoNvz3ng4TEFM+F5tD9VT/Gfhpb/4H0fsft7Ws4HmYRvHkvWTihwGN06wVOHAIFwf25Tsb2hbH0m+PdmuDm2JCdGiU9JpZEVvt0ZISuwKNXOEdrduWh22nDN6utYcpmsy7ivsDvujHZM0m64yFfrbHVIo45EScMpA063y1OBtEDncVv/dv5/LUqgn5oPVTgXailJcBIWj4K6KOj5SlE6hbAzXktZyJNXXfstKqzMdmPdnrMF3ZzHorX+AYW8ML0SNHMLsLEfPCP8CSLocquf1ogqSYve8FehPBIHFl2zaX5nL6D0aSLuz3Xbl+0jPPIqmfaqrGrRLS/dU9236e/MRbXO39zFNpZc1nbu8+iZ0Uu5plaebxS3Su2iQuumVp6+2U9RFvj7ybWDuALnFKg4Lg3/b7VZrsuInaTxf7qj/V+TXJeY+ACqJbtRjXx4tGTesRRA0Jbz1VHMXVnDmFe9LPv+++Epiis9O15qPxHf+50qTUbEnmbqnK9k+Vw2Ie60rxo7VCRyhh7Yed/QdSXUOjTQKAjBfgVsSL0+WTDdtwNMPfcpxTIWZNf5qxt4IjPwzk+uu/QqCU3npuUMd3OV6MmtWl4q09pP2pFjwYF7dpp6rsSzfMGbpZRqFmMBIQRZrn+izkTAygTEzLMyWBynFT2f2WwarUTzWUP3JeoGZV3uKVYeq/WUbRGDpOMSBJGve+BpCz67w5h7TGdOnJ6AJb4ypEzItD8Npik02XePNiTG3dCQw6Zhm1pT73TIvzfK6YBgzlxB4q+BlKF+/VhHOqQ+yw1Ssr5fPdR1lK0uiVvSZL3vl77bo+gXEsofCUHOWen1eOQyii98azIkSA/BixKjsBHzn1NAu0YVAPL3yOt4pKILLlksZcbUlBuFsRlxZMdPFByMX+1mxJUmp/l3wkePZ9VYvD+OyxnpUCEM+UGaau/EBdsZ7EhOeIBkSh8usGA5UQcl7AObH0iT9WVo64y5yKjiB4708Cl+zguDA4jqZE3xbWWqryP84t9nIMXur9Oq/ejiQZ76g2B4+JSHBCSNVxUp9HTYq0iN79uY7EGoy6DDoUd/g9rtwoxm/tJQlbJxgitl/tNTQ7JMfbEFo3A8JNUV7xDZRAOyVF9lYceFBfSQxddx/YOuZUUWIhZd1sHLsOjp/7Ol6n98FdkvzYhUaZLygtTDjpEerzT+U7xI0Ux6CLOqE7m8OZreC8FiDxhGtMgv5PDLKbYvaksJO8iABSA5ZCBmd9ouoIJt/5T+/D+H3LgMXem7LGj1IYYBuljhqZzArTq/1a0Xui/6aMoqdMM6hV79ZKLkPw48S8blwNnYtsC8PildrLgYUhYQs24lzkSvFXXd
nZ9J1vOOPTwDS3TLFplHoc+T+P667bPKCMpEmN5bvZME9Nonoy9dUMEAlVOI/fCOlfXb2TSagU7JIZ0C6r53ih8EzdQXD9IF7LR+SCMUsJU+vDDlbKVM3l1m929495xzlAl+nedlwNfBw40rq/zN+WsM04zIcfAOqRGlp3TNj4ISwzUchQmEhkTM0f2iPsz3+G9Ggtoe3RJVidMU16LdKR7uCE9cZfQMYCpRP/6BQWfJPuJ8aAD3o2RaawpjkM/W74IO8xacRWg500LMu8jJofiZuNSMF+RggMhTeUz14a33Gxa+AZXRFNTwAx1mhNuQXnvA3Ze9SJKgaQHrAzzO5wLzJdvkO/pRj2Cn1x/JsbC+orK+Z4Vvmz4LI/B7HKTY/y1ze5GvOn5pGLDdPva3corFThC4DKdhDBKj9W88pksjByxh24SaphzQHDVS3cBPxWo3lYHcccAlQZt8Ox2EqPuKF9J+0/Ky7LvyPNdIa+ws/ssReuK6iue5wkcDIvxQc/E9/JHr+HQPIRKlqNiSlP0jXz0XL/OtdWTG+YgAyz8N3a2lwXBbDN+S0YZ8CjwlQVO+AL7j/nToTiidKkuTl16EtF/duk1ggSrfwQOSdclgPdMmowJdosWFKjUGyWt1OdSpWTexf6mXA7WOmKzj5sN/QqenEeHVo6mC0YrAlL5VS+NRCAOR3/QaYJsYHF4UITQZWe8r2woCOWEO0zIgsheioF0Ijqg+L/rzOlMZ7ZrLn1Rh41FmzoIlniTef/aq1hbVlhcd078wOIMYzvPtthDatMTENmzYysNJN0NhiZQofcO9yTjYkHqoKGqI5NnP/Msf3PjvRZttUKoSfJUrzMoSIvu8AnseSYBspYeJEpzwczbkb4gMMyE4Si8hWkr5MaGc0cNaacVKhyPq6pR0LXre1woo/ib4siDeL+nNoL9E0Gqqwf4Kl2OeBGNPxMWGPC2icNyoZA4yoydH2XaEQX+Ai13j+vGl9VldnWSsqs1IbWqsB8optbek3MXIipqOgUgtV5vyPLbaXRsZ2nz6bjutXCrDaQ8cfB5aiiiOmvkz9PFJPVzg4tBrMLKQ5M1WWtmdTrphKVyaR1bpywEcNEU+tASZUNzVEVMAar82VLugoYYO3HoCCBPttyMV/3ZJ/vukaT4WTq0nfoe8j86JTHF0sBHyZG1uiezNOQQFB0TJ5eZGHV/5wpI0kEC6p0u1Yo9fldkcs50k9bhQ6lDBxARF8Kur3fFbB74be7bmDcwLWKOqEzSTHxXFn0pGFfupLIANVVnqqbRV0zzLDToE42UWj29DvsDBcbFw5oHEKurUduP6WtOzgyvrpMXPphRNfR2wFO6AtKOG6vwhGyY4kZjPjpfEe6DalwJQUNqaDliuWySZgDWo1YiwNtkrDAjjGH0+QbCTDbCvsLhr4wEHKZ9LQ42+WQenSDj/Fzfdch89ENajus0yKZkpBUKZfFX+FL4P3MVNHg3vlITbMzFK7XGBWpbYVZ8Ze6jfoAzVRRig9yI9Lw3/cWPjIh4t/RHPuotrz8WSG+PjzNzipBBaEU3X0HZdb1BeRzGKr73oiqthRtDBWZr3I+8BMhR87k6BnFgR7dL8GQlfyBdyGfyjTkewLczb/m1oP8A7C/yT2m/MTBr8NRkejBBe2bYWu/CRjxIRfo3EDE1VEKy3B9Tjz5DwROeMzgg2QQPH329yJcN/p7BOSwfe0uAuPGtWa6xPNNs9aZZHyYTN8KU7ITzj8ZHo1fzeSGtw2P4kswGd7aqlROBPXioDJeOOh3a60OsH9dPSJ3psLYowFOfaMm1VtQjuYgTHBZzPhzLfxWRaE79v9mKo0/TE0nGiSrGHm73u6O59deY65IeH05jtdhxCSHwkvPw3jEZruJp0SKNQ6q4fE9PRN57ty9BIsGDnMTNzUYmVWnpIM7v3iTZhsCijYMdO8OH1BKa4HdOIwO2O+VbJdJKxuk+vGUP5Lk5bfDCz6pWrKkX3q7oUWEHBOAzHvNQlJ7A6lahjFrR+2DY7QbFpkoj/PfCWwy527QtMIlgjbAwTUHmN/dY4eyxImb+ldnSz0pndX1dsnxLmpoZ5iB6vaEZ89eB1PPkgr313K6+X9ftAJnbycwQ9lWLvGkrjc6b3HY4pc5hDBOAUy6b60ehVJFLgbsQ44hoYjjDG7GAY/dIC17vxwiaYzaJv+CDGi41w9vTYQA+jVz0rL/LYtKNCbCd2vocYkagYZnRxfa6Cy79EAzRBXgUjDRvBGHRuKG0b6ZWLmzZKFE93oBWGjjc3ESI3YT0rXQtOsmPdEC3BoxMmQv3X9vvgXIttYQPpOAINJMTmEMVWkoulEKEjMJmR9vlmmT2BBwl7Ax3ONUql0uDgOSMF7bqorAOre/CW7rcvIH7VR3ibvPE4S51wBUBawlFSG9W1u3UzeRPpghKRgSlN49jz5/on4pPUry2frgAxWVmQsqTfMso5XBxJovNCTSM+/PnHxCWVlahjmrRv9Qbf1rv9EoVkxd35M673hCyIodC2hracYGxKDKQaf0K+tOIxODzzHrrkNU/pm/3ExXzmWo7yK+vAZxmyqQQE4yXUdcSLiWH8J74vAqL3ZoiO5E0B9Q7Nu1RdzXp0VLuzQu4hh2JADWi15vfMK+ayMvhpb3xFZIZ/wez2Ejsm2KZuwcvRdbuTrxaCXXzFBUr2DPRIFEUBsEwSuh4IRvS4cmTTP/P71qeNdsUencK5dwDQ3962qpM1oUVnjEBNR7Yv3c524w1AAaQR25WZykCkE/L9NXxnuEjyRdsUIJEmH0hMq2W4gGcBJpCCuMZ/rwJR7I0YpCNY/jg4MCXzfGnVFEZFOo1TG2dJ5jHQPg7MLm6FmrYuE5IhgOIhxrNqHy5eskmrHrLkIAC8YPAhegQH4zH0q8ECZynjRnbkAibk46BDq+b/+n8nns7Sk60MW+s3x37oFYmxNoCtv9L+M68pQ9REJabvzdRz8xZNneH0CkJkl/BAWTsyg3xTdg9JCcBtsJzcpjx99buyUGEvF1kWXp9hdeeiUVoSKGHwdElw/KWgVZh8hpU1pC3oheYQjAg28gmKXeBriyxbqpOLNKl6z2Yhl3r6XNvd7Cf0ysllgFFyJiQM/Cl1LSJpy3lCZFLz/Vx/Y56GhPCvszLwY1ydjLvQgk6zcv9Gv825kNwn9R3JSvNZaxYy6R5hK4puocI93+wVIpsOBH91XyYvQCq/ujE3GA3ZlXVv+wq5/aTm5TedQxnmXiIrxEBhR6+YmFMk8iQ3kCpECfsympWWmXPk4S6/WPF2BQgteEfUB1IiwUzFGErmpxGjA2LMF+sd4eNoic7r8Yc8o422oDl44rbM5TKsyS945bR5k/tvywIiGsJXOrgJhwuB+Mh2zK7PWVnoNSuREzGfiDR5uiUWoGngt1H7coVQ/E39pnaVB+8ZD2eAH7zspk8SD0uX8uQ6+VaTP/I2K2Vgc2fXr9MHIpCrlWQivYw3iqO1GBGLOAbAa6VzX58YMymtraf+FNfCdfZ8ULSryqu/ChlTkh6Bxa7NcR1QI7aynLKoUfy
Vnusc6kfrdWkSjvMWAvyNxA8j/x8PwhRQwFwuX10eg/yjLOrBw/Gth1DDZ3rOqc2iFf17YyzN239NQSQJ9rlvxIkGkhP7o8v09eCZVZdbu8ihnEGyFWzz5DElUVlf/N3hVEAMv0RpDDRq8Z18wXTalf7UOxSJxm1Q5affAgNwLK1kxn/6Yo8RHf6h4xGP8i/fiWd02EkOYoz1bv3s7Sf/2vR85D6SW5BjLzgrTopwbZh8DDvVinJDR1hqF42Oli9zRITLPJgBGo/fqpzZ3aN5Y/ByPGEQI9M85WZ1+6JFJMthOgk5kYu+HdTwhWjnJtsnONl7bh6aeMfk06iiWr5zEHB+zafLcTqQDbmvxbaGd5JdipFM78PbVl9Hn9mZ5jqX+PU5+JwhSTKBE1GnOXKkYFFZxPcUdtIJFLcf8EeWCVL7HFvCPyiLBmRnQpNq5VcNXrtustSitnorECZBR3wxfn3dXSyubDfG78udC2uVF2pdAylih7tAKO7PW1vMV/7ki65MgoXXTPaaX3stmTVlRZry071TzLO9sVW8ZeA+zYeWQdj6hUjNpW47Cv0SE8ZzHC+A2BUKfGFebvCh+fAOEcIcxUk+Rq/cA5IQc/JhEYyv+e5f5nejRUlQW5GgKgWyQwtcKvOy1KvpGkhv+4qXk3r70jv1b8eRBpdaIMVTkdgrMIMylfkm4XzWPnUzo4vS4O4fLW99Nx/cIXIyj2eGyhDfkHZ+5DcPdiMyrgIMlL6mvgq/dxf8N7t4HyZ2IW+BATCshuS5jVTE2GVc88gOX+tdVLGnYAWMd+MHBzXoDgBlJoxYLOYT1DCE2kLcxved6g+yMvxFkpOdmapM0N0Vm0PUyzKqL/UIhi5MADOH9nXZiy83juCFQFSNirY0zBIk+txXwdZIACeb5Nkeb5H/QQjVBmDVI3zTxiIuxYIOru8zC6WyIAxSVX7sGSqOyRvmFANfSaEfh0aIyQxWEXPBlAN3z8TzOmhf3wDORu72J/cUo48C9TVQMQ1C62NFL02dgKuH2PZdb9eAwjp4prvJQ2WcBqJeEgIFyvplMokJZgVI14lAwoEqlaBzVikKTk7QpddJWZZfV4Mk9UDciYm+LGokPW9PyKpT5le0LbTbHPcGI/gYH9xgucFyAaEtfH8Wvvx5kYL6JhjK+ijMxVyO5R1FGucPhdy6m+C9MPYkOY8AzX+Vyb/pawYchh7abwXo8wCoCyS33Lqe1aLiqQrJGaW3JG+sADPpdfm2dbqkdOdEedfFXbxgkK7TGdV+qwr+O/Sq1WMGhwoIu1zhl/C7SFn2B7kVyMfJ/f1F2+VkFCAW2wniE10t9fiogce+FpQqjh0pf8innuiqgvvndybtskvFlIo+gw4h/pWLdaRhDuKAZGYe6dGtzjAiDW+BHLJ5PuFhp/56gO6SDSIsbT2dLRj/WUstNKkAPaX3D17mjWGDOungZFeuWSW9cCIj04RHfwulbAbPfNmo8OS4htlulritWNv6McWCadMVs+Vov2txAeHRjks/rwGrishz4woftxqusio6EP8+Fed1iagOead1sDmvy4dGPCIItn5sfFAEuZ07aDn0j9XZJVjxr/NbS93cG7sWEKiI6Nmb334xkyQDMw/Y/lBAnuj2Nx/cYckSWQdhxVDf/aQIoQYgTJ/zn7peo8opOmRpHyQne0AWeFWtpihl+3Ooz6IysZwmosr6wpRoRVgIl809ZkFv8n+k24NBKAgAANBs27ZdP9u2bdu2bdu2bdu2bd4QN8gbsiZ5gbJcJ89QAVZSswHhmD24ujBV87+rwRUJ/9cCL+glPfdWbOHkFZS7wrUZu870pxkowQXbrM21KIqvPgd19hd751dmFzy0cfZX7RBlGcTrfJSOjYqufio5URrykM2FgXfTvT0kVpUIvOkErggBQOMcN8QYh0avszS0i7VL1cLkgMxjcwdcG1yKt0RWEPIOj9Vw+yMaPQWFeQwAVLMYxtQlQL6PSx8KFsZ8rSZVq5FLTkel73fL4mN/zt//l+B2VSK96tHHapPfljFi64q3SaHiHe4x3c0g+Su5VnfEgSO5Vlbh9VOKo884gCGhN3wp1R9nZrrq2nsBIbaY9pNk+cJFHHt7HMerVwaZoDt9L4LAtJWUDZBUfYThI8E34lrtGF15Pm0TvJYfUQKlAoLYNoRcsEgGTREMj+QyXZ5LsnHGRLcM/CtKkag1lv4QBD4FPfLJOkFr4K7WaDGTpZDd5GnIUJmpOXR752YMH9baKNEvHzV1qxiGikpRqFd1wnnF3PPVmAy8LQnmkZQ5+eYy7SCwNl+38Fwv/+rHXRqhPRc6HpzRySi8FgWzAgapLlJihXiDDSka47deXZzwBPZNCPtqOnZCrJpOuJh5hBNjoLbaKzcb2T3KPNG89uV6dDo/18anporjJt2xHZk62eLhwFUrRAZhynBYhJ1N+sn714SIPHULlTNZOBYFtMwc+eMQhCKR5S/FSJ0jkSHTx/HqIcnfkpiyYyx4dYqSNNu2Q0fGH2TeCUbzs+Q2StgfkplNGhlttwYMwZCmRtyIt9GoLIO9caOKFwVRAPENmA6QsV28xCESZjh7SY2R4wXlKnkJFA5zCF9z+9rUAvlckKkM7Upa3YjoyZIWi7UVo4zOOLwBgxF1Tq01zFeRgyYD6msm9/QYlm5PG1zN0BrLwzSLZ0Zm1S8yW039DrdbXmQtfPpy5wTAW99TFWvdHWOFZA6e3LwWm9j1lWIrwRjSTKRmH1uDMlZOpkqLIDFhulX1bK5uTJ98oCkK2rkLXC3YVHf4qmzlhIRnrlmfKnHCKYBmzaCQ4oaWEpc0SDst1kpclsN1/wSsP+oFGd3Dm5X+c64HR+6iQ2o9RHLOSII3BODvCrm39Jxg3MChabcqZp60W6A82QSelAzkLZHkUYT3jjb9ZA/yUblCB5wM8rMwaysr/i/cNNikymWE46serAAzJVIkV2VC18jWQxPAehTnTHq0QBdBLijIBK7hflGUmSXh3i7+db2rGuj6qLHxcuztBu34xvS3/GPrYqPpqrhGumzDUXGVISBUtf6yvX8RKHFA1VcUJpbpuL4dh8SATRjdEZ9B8gOaLgVdawBqNCywEmIbheyqmlkL+ygBZ3nqSfVkc70kLyhCCxowGEg51xklCzr8xxNst8T5hCObCycqV30vx/SVF8SxHSR0z8bGQv4n2vKWBqR1ciWYMg2bSpSSGo68CeiVx8RBBzm++94ODw4i5mQdjcAJJ97kgJ52N7ox8lK1GNhD1WPnt6B9uoTa9zVe8ixL9XIR8pTE2e7Yffemokaw7DsltWEXdNXxUpvnSXcXMQjZTG8Vms9N76TkvLSHGygP21mUsczMhSgd/WSk9Gf1yhwmVc4lp0UOFF1ryhuIDMmKVXPeVayljySIrOPrbhUTxEBU0aYU5eMNT8gzlEDQdVme7a8dyc7WHMWpJC8pPO3z/unV/a/L+/oKOsGqgPdyiN6i/bc1YzHtL4wIfm47Li3Qyah7SHUg4CDZjVXzT4v3zLz1XLClDPjl4Zlq2YG5RZEkx1jZqi
8WpUwQlRHzyLUlfojai60H4x/a7b/eBrgK/5IwSs6yEO6AdO6OKXt+T+AfFVXnwcZFbg+TmaaXY6/fx/nm4RE1Th8cp2Xnh56cTSETbmrKz2Mo9euitL3H62LrpVDH3RRF90jKbdgbEDXykBdnlktjguYRA7PuTUePRPsVUVY4IUQge5bAG5KznzIJN+fVhoztz+NHAtwjoW1wTr0Qn79R6vXcI7c1giE+TVa8fftkcxGPz3iIzKcaeCs29Fu4H7ZBpG0yRVPekv5PrDjL+p6LXqzozqI70/dZn2kPAMXP7Nz0n5g255GVyZZoaDB74U0dnE3HVKS6/Nn5gB7B6vlwRczr9R44O5eDtrlLcGUQMg8pPpJ4mfSIptdYWT2zeC72MP9kbFSUhkdOICn8QXkhPELEvUF9N8cwN1svI6ohuPRNQ7qgzvOHlxWOM+DasVwdMgU7+3Sob1eyiDF9wOfKAamUrctZnDbQRf2JeUaLyC9DzT0CjI+Kw/uL9Cah23bSMmYAP61TItjiOIh/Ymxlzl8HoJ1H6ZRthzfPPxM+w2fEW2KyD+eRMxZTDO8krmDDARe/w7NwiknwRS2f0B1E/Q66SryB08kQy8TvZU9YAgIiUm0HSf0ezKCVpDs8yCKDnzwx/8bNwwyVjiJYZpTUeNg3HXB8qYrBmUmhvkr5Gr0PB7nDjHc+K5E5tyr/wdfXEndJjAy7I2uD0DHX90lupvGZKfMCER/7btaMBIu2ABuL8U9x0UdJX2752DwYf5tyP5sSeFh+TWkXCWEr5vmz9wqcIwkqw/Vgx2Ola42KiOliwlxJ+1FqdzMdPMZT2YXbbZKBzy91ww8tozmRoLsj4m9tBrMhEkWODcZCWTxPFNZdqvd8BLAGDa6je1tss8JkIWXtJULSnWZv18J++4myKHSUMiegc/1o0BRY9acpJBKeIIx0ksfBgjmm1ShoGkMYPclw+uwjGo/qm1t8Ru80T9lMP8uS4hpL12jSXGazoTe/4e+VWXsFnI95V5NBjlh2sohpDT62VN0GlcWzflPO3Kwevanlfix69BMMay4mRjlO0nNhzEcRbidooLQ3fBs8vvLnfrcIimRjU+NS1XhGtz9I9Edr8xDq19+9XxyihQxVSD8RPeQ0I86D68HoZhkisqmeU64Anj0BHK59DZdAqOUxnREE1lnD9UkAmfhc0S6ihkd3DRFJpTOc/DmvXdLLholNTWRqQ97VxPhgywjaZCMd3HXMgNaMck0fQDBuKEWLZT/M693tQDQKx9gL0u2KSMNNiTCs12Wd1aNK4/+KORCLyjIJlxUuj9bkLTKXJ7CnudIrkkD9ANZ0XnFvmxj26Argx67aGV1BS9LWpS/kuVfQ1ITZxFomAWLK8lheNd7klPpsd/twkl3I62UftFOB7PSHqrx1KgiPd5so8Hc5hnMK4O4b+MT1FeM2CTNDCP4JiZr3REEq2l8/JJEINaBlT22Qizjr1HDNm7Q9p53bvDmqfeytNr8AzxIN44uzNUGLb8+4vt/DwSnWUTgXcH8Eh50hMLuyiSz3SL6UQI0iuYDnvvYWwIKhZL2CzESY8imRJ7+znAzwp6xNcshAXhMkAwUvbBbbw6ViwzmDN7qpIpNYWG+7qeLBcSvG1k5AwRQXrSVS2jLwtR6cbkoCMGblaY2jtPE+HFOvgwffXOAVaq1+b1FQStMQLLw8LtD/6yOmSlpBWvBvctfxknlgvmzOq15cuq6lg96zm4O7cx0G/amjMA2zo5jBSqxENoULRbP+DCXzKAy6LMxrpOvstEE6OeC3B8CBVhv6EwvlWb2ZX9W4iD+HnNC6C66I+tD/kiV6/6unqyB+fnNo+tUjzYqporofnZSOi2a0oEazXP9GrEcyN4kFtm7N9Irf6isEgYexGu6Tn0Qi+xcUqweZeYFUzo5JJOfgoNBahRCYLKJKqDcw8+58l9DCFaGIG223NaY/4MOJE1AMDrlJcVV0FSfWu0yUWDDuHI+kQ0mMyuHOAFrBbfsEN9/9KoqRWmc7nSQ0/7gN9feWZk2nNXd0kzRpf7VJtS0w5rcWDNl/kaZ9KyVGTysEojDE9XiQZ92qWF9NtppDmXnPaF2WhR/8wHzchal34oJIxleK3i4eN+gs3tKw5KVt1zhmn+nowikyYZuo2/+AvjdHhOi5tXiKp4z+s4Gtv96yLkj7ixzPCYdUke//sbyiLpnBve55yAZGbWKC2ECurhGvIbqq3g7Z537hiywHq9dj7M+EgMVFdqNaLSVApJMEKYZBkMOwGIse0R8jMPqwsxrut5DGCnvRUxmXmTQz1CrD8PX1bQlnpOcFKewUYZj+PTV9b0t+L7HkYfHVutBYTLoLkEirgMXmy89JAvzeXpAwQybq3y/spSvSgNnVQoFUQQuN+wDW5CCJxbnFCHQlevAY7/uhvNauzQA9XtuHe24p6ZVih5WVNeoCtTnHR9HRTj4g5CHHwIDmCgJwFzV9QMyKjhBL/kAJ01eXbz9roJEVLiZ9cPkG+GYihhuUXUYpU2IPC5UjuTHk88svQezNhT83Q5037alC0j6vuA3lBBlO5Iet+ahQmYl9CbSr8nliCUIf5Zw7Xuf5a/WRWXGXh3xYHNEdNOXLlck96MQayEq5OO6kvRLjmAvpl/p4jjJ9kqyS472heosG12XA0yYXUaeZJQbUz6NSr9ovdWxvY64moJp531Gp4uSh6xxZrtsxCVns1VFLJ9vQ2vd562qRnO2FXbTUnNwPgnNFMqx1E1pIocz/7uZN/coRERnbf6xSKR/B9nWIE1COaOCtS+Bfw6JRLG17gZ+jUZwHrZ0xeoAekrEJpGCMTzY01HhSQUp7pc0/p80VFr+8R+A0r0gIsQY3tclP4KRCpKAAdwuCYtA73WItDWj4UkrG9NxY2M0XgaKNuwwDu551hHFO4rvnIu5/j5wMD9rrUBAA0PzeRhFRsKW81D36Lcw4jj/MJDLosoOOLMsMGHKW7rNY8kYYHbJMEZtuecGzHv72KCLYeoFhvrsW6cs7s4IX6Ufm7T6Wf2U35VUkfm/sQDf9tvDaASzFdNtgvkdLDyExXI0VPIaYlLxea2dM2MYhkas+3vI32HUZ431ZD3g8fbLlmG/ZRUQLZ+4FLw0RlRpSIO8pNobMIlmoLU14o90p6iaubHFDDCzfcNIbTWjqvWg+ghLNcUqZqQzQ4zrlER1dN8b0EjWkocKBINUd10T0LzljoL77JGCptoO0K5Ctyq5DPcLNBHVOv/rDbgqJiuni9pGaB6OlD4P8chRpFYKFR4OjWAxtd9U/CNKARt/BNkG6hLGwWbEenGYWqZh8uZtQr3+ActVkraPNpHapuRg4dXFKVq1W2O2ASIS4KeQK1zs1yrAXuiLizpSSnW/n0yBsx940p4frpBDIbit2ceyfkbsdvCjaSkI1uw8x8K0S0AyVyGpl6hUjEfYBF3sLk6D17bAcyECi72cBWBAtxwo9QnjLEoICIgxna91jB8JhTn+5WTmjPF+24YiQL/e1DnN8YkZCa9agbFaXWy0T8TM9muuB2D5epan08
2T101WJwkVqEphNBmolhYhzyKTKbjySo8olBWRXcvgn60WnwmzBnhlvOH1Fyim2xYPbDyx7RC5pngEkabCyx2wygPFRjuH8q8HNjfyNo3V0qLbPC68yLYASxb5i+V1vuHMu9l91B79gxQTNtkyY0s1g3vKI0bUJj5kzuIJUBBO3FAI9vtuhI/fY8jZPr2sKeNDt2htNdDBxBoIDFfgZRUp2o6zaI5diJbpEgLRcF31J968K7g2QH8JXWeMVDxaeDOYOAkFT1kIDSSq8IPdQ1hdTybeKfh5SILjMtXZk2fQ4LN/8Jb0wDBfArpdoqhhj1TAKbT7pIRNVeIALzFqZd1PsuMR9hoQiOQMDAmhCISgj/7Zwmcf0vzRmjNVIu4NH25hh9gqY+GU8xXVl7DHGHI3mzQdYyEYYx5D2ePva1MvlbYletNeCIuhBgeaLqIEYIHLpnJyw7T6AKzuS8cHxwvU4MaLk44HhMtMp2Ciw1g5LM2w/7BlsOFNZWPzAuDyIkyEvMyAEGT0XjOTqI5kXWXzT72X/yDamQX7z4Zgs++J5YFVy0A+9IhXnzdjxy8tDE2oKpNNr6GX7ExEUo32e4ZiVDRRKiArcOeaHm8V33S0ZwrnxVDy+5XkLv9gyvW25npkvhzZzR6trQ7LqgK7C0T+peg3dWdKiSPudwmw6zEpWJeCyHgW5PyGFPgY3GK0H51V2UjuszFUmI0K+X3Oe6JIpwci6v/7GTM5zfoCVa+tFr1XfHhW3bNezyXRTzk1efcN8IaNWpVHRjXl+IAeJYciAyOPhDH54flYEYKVCQJL1MswHJPuF0JJSIHuU353XiDX1bIacL2PRPZv8HF4tf8yPFiZ0ArOilfUV7G+G7bop1vWS3+ww4i32Tj9LcaS1XnLhPJJvGn2JLfWSx8qIzV9VwR+gHr2yxt+cdD8c8sqZzvucWk/CIc45MZ6blJZes2juV+GnzacpEFX+DAhzdkuIrvCL07frCQ0giKKWCPsMFYg9efDwG46ps9Gnzsko9m1m7iZeGnwUzgtqS2M9rLac4EE5hk+97Or7rAs8sRee8eculxaUNT17n2gNBkgRTWsOtjcPxbmmAYeFjW6QG+H3LzYXuo97OWbplpvoYQr+SP8Xa0/MoDS6EOCkjJg69EX8OqWMDiZ5Ty1y8wPcQ9Ns8oNOSJFljxrlXIU0nekN3jPG0xxtWpXZp1COZK6QKOKLFrEUN0vFxkIid0a+R3ZnxsCKVlECysFVTVsJ8Bk1o1/QZGfixUyA1RriUlXiyOfS4z1who+q/HME3ZeS8uZRGz0aEBcREWmWv7o5XOJOgvAgcO5ldCeWVRNmVC6r+koOUc7rz6S4BAcTTpIdYMfxCQruQOtW+Q906Ic4WUG9LaxuR4RgItPrRvX29bgkq9uOScPO1zhxdRLbQ/kBtevEg655SkFbPPJiTmsLPH7lEJeBr28hclZrVT70jsjtHQf1EqMlzCkLPyBX70y1424G+G/a2guxo+xFsjrigq6a+WSiyuHjQs7JyddV+UJ3a8Sz58zvlVvTBQA9zcFlwPUWz6n2bu7g0Dw+SCo7pFI5TX+n6s4g6XYTkaf6FS6Lw1Tb38aIg54vZFhP076a66ZGT1Uq85JizUSyn/Vo+dABvXFVM16+Xn9K5P7kugGdAc87Y742qiCWmcWsBjaklGTdoGNNzcRvvQZeoB3oN6p003fWGjQvWKZcfjDLBGNZ7lvBhnVjzhJoX2iPNka2uMCjLX9s7eAtEDbn8AX/p6sbr2278PoyX8k803VQgVtc1l4VDvLVvEowQfCtF2fl8irYbqiWAlaYN1+Fub0zpr8vwaGHZQIlVeTXZOFmzraGNhsqvbS6/MuiUszywSxDUNj5xc7PlVD3iGMjPIwI7LLWVEezY4chKHitGYP9jiFL7Oaqt9qwVD0mRcKjDix6HO6qcCtr7AeN7TNF+YTjCzLOGq1c09ASUzw2Y01SobFr/whpKBt54856ahPBiQ/lsNgwKzIUBat6oAPNKvdB8t/Xm9yofwWkC3LErLJJgefMl3kPfQYILs8q294hzyTjMwfXc/3TIUyi4LLsMZCQEdUyoQTw8p0ADDmYlG2v9zLNNUVyIBptMiCw9ByZJyKCgPS3oJtUE+SUEsBCqtC6wXN8unlC/GtYZ3nD2xLAV7UyWRyXss6Bf3cxQLVUFP76w1+jozMCubVVhyJEO0Hw918LlubH5oPAzI8K9Lxo2ZPzoqc5f4rjqH2Y/C3SKgKrBKTnYDHsrHJqCTvkLOWZ7y8aBMkeXff+ZK5oi3VdY2c+ksqzNK0hk3a6sDMeqH8fO7wQS3TzIhoh8fiCfgJMrdWUdd+bFSK9dqGDj8o+gz5VRyBpU3NKUpJXatBwCLr4n8Xq76PCdZaZY/V94ZvIOlD8LdgZAHGOGlbC+pu++YLPJSSDNrYgsLbOQ4tDapOWw95nA0eKpY3XDqxzI2bWDEISeSBp2UfuKlZQNzG9FQRSa1sdoGBHdKcTP5WyRJmf8GPJVfgpct88SBT3CdDOwtODJBj7kuKfhNUHft9W39LRA29ARjGDGYfwHL7iFEZ9GmqLu6aCdMQtqKmt/EgpzhRRjMQ9sbcy+CRqydC7LFpi7UT4jt3W+i0Q4v3JOLu5zTHIgMP7AZhE+DfhB61GgeToK5cIYQDmW8O5nhc110Frqz2Dj8fus3rE+fAJbeJubVYYO25hcS6iIFepeHQ+Xayp5Lr5oSV4AjzUiegp9xwHioYGcGG+QaHoFbdwgOcaMuz1VbMn/utUtaiBaffU3/pWeysOlL7eR0/BYyTvzmeezgCeJKL3revxWuSyINSSst083N9syjH/eB73Bscy2t+T7eXYDX/ak7Gb/bOujc66++WWNWFRHMvydeUO8xpeuHnMNg5vsdO9OnQtxTozzqN51Y3PX/lJz63DqBBuM8NFg3eP2lsRlxWelA3fMetms+avn/WtgKd5aLmc6BzWxKztOHQgqk98RnGF7IyqRYlXnn6UphV6dg1AlDZaES2F7ZSFezaxepGFmRFsPqz3bW7jxwwcwcNzvV6wB96hhuEKCAXrirk4pP7R8Mh2xAaQBBrrI7FCzpwcE3EENEl4nvEwqGm6TuisGblO0liaV4RXoxXBZVngTb0yzCthfDQUWtgjjFQT62Y+u5SPnQjkMS97YSzx3TZRXmUUP0vPXUndFGOqOW5wXjWDnhfjU/MvHF2b0Dja3v4mfNAK/Yqs2PnRXw25mlg1jsPju7KhOBeEVRwsZ36fzKLWimMSE5eEu4kch7dkGIIbK2n1IOGyWHLks8gqfkr36gmuEeO8ZeDCM1YiN8oulF0TU5k0irVrCpNvoe/Om3yPklqVeAWhhPrBV5teF4iwwioydnaiPJdQn5NgebBdKBrhoavy0NlHo350GLk3HHd0m+tDj44SFsba9UdFDpnP+4tA2+RdUMGCUl4B/Ms7rW29iErlfKaVsZlk+Uyjz+brq+66u6QTtCCYyxAuoM4meizEgcfmuAvTrrY4BNEjF/C4Nie8eBqGrGBekhYSmLZ3EDih
f6dCnwfFgHzCxi9STWGcDNbVwYuELCsc3qDr6gchvMZ6Z1p/Qmz7rCzEq7EZqnjxD0R3ZdS3c9ihnxlPPOXEwDR/w6DV2lW/V4nXzcGjW9oVyGtZ4InBMi09G5I9Q2dYDESCawNPrLvkKKOLpwq43Deh4wHlO8+lasW/76T/zGon/zCupOzjOgbk760bS5FgJ9qyHhyEKoHnFsgLORJCyCX4ValYK2lRNbnyCvQZ8F1aOMGAYLPtp89QwEqaAF+IxAU4f7ahe9r22Z/jKcsWcIRFTEGE5mCWdxsAy9TcGXh5OSvT1iX8tUjSyMMxqC8QE/fkM+IYgPU/bfIIMRKbfN1aEgGgmuEj1sBsWHbNfIAdidl3qFtY92Xdrzu0NJ7oOOyuEtgZ/A4U6rCdwYeulCmiSmP3U1JQENOwRQI4dkUkt7eU+xYJVQ80KDJc8MPGi1nWUTtE/5H7t/C46Q7ZqJyhTqvnvW+jdNbg0T5cXcW6gw/FE85C1yPHjJ3R2hOyfPhW09O7yI6np83jbWhW4QqDWsO64Jrvb/x110x1HlIclAF9mL3eAlqU0Rn3eouTVUiTnyXWAK6DZ4kzl8Mmld8cIRcNbwZriYapbpDaUjoMVBze9ytj6lBr8UNb4Eg4OhM6sHlmVPDLKOB+lK/9j1civ/hf9zKNZdvAGInkKvUxmn84q+JfEVukLXHwZz/0RFXKFkZ6yzRMFG3sPrnO9F51IfX0Itt6Z+cH1LuXHvV+3R5HSerFKInsM6B3ZFCu9NuWcsnBK70PSpA0Tz/L9wwh/LavMmqVmDmAPP54aYgAbervhX5sCBB+S/MM3RfnR7NJtl3+QSzV2Qczo9avEZc/wA3vF6Gh/yM8DhXZbx+a8Mi32Q1pCqdBTmCvvdnhBgpyvJhZY6Aabsi3UpTo6Br3hK5KGOfiHemdp8OlcEmnsXASofXxbKzLwqFtUjiNYTXoAZs8X2EfK0TBqPTQOLQTgaS7Xil1knIoRRghpyaxXc9T/kLp8VLKWS54BanHiQYkYIBH8t0ALGH/s2WuSCyhqPhSqPrk4GwwFjJwKyb+7L1+wSgdNAJxMmNESttsCNsAGcRFmUH+mha5zy9eK2pzmHUIqI+UODJu5zqBhGP83+NFxrtBDApmUdnWX769mtcXM8tSyEof0PZO1JSPCZJyu0mlbgf5nqKhHZp0oFh+rvAq5K20LAjeWg2g2/fnSVtFyJaeEaKwJ6sBQaWQ67TQ6W7ma4HWYxF9Qn+K3/EwkneqaCpdgN5r9jtS0DrMr5Y1Yo8xUWWNsNMxW1fMgqzSZdywFV/EgEUAKTXTJn4LgbQ4eShRDwNEmHIr2dHrYLnEkfW1ZYPhZj5gNjcKBKpSmPfF7U6b0RNmuVOvxZAsSGZSBeFGBwn5u9uiXVZO2l5ULG82JxSKx7oCo608FaUTp7QgjMlMDkdlvr9vVYOdNJ+CpTpnQGY/FbnYlEMaSFgq7btisiL0bQ845VdT7kNFmsDxCk3h7l8sZHYLRWly8CiTZmtaHRmYlPlit7JIcOTGKg8P87OhhKwrFJcoCeCE3uOHE4G7sot7f2Pde7BQmaKShw7nb9fUQI4PWbOKbA40f79LI1w0QFqWqQF7zP3EYSnZZIQVtMIJasMABjEngo9Z0JWr+OeDX/en8EKS3iJaJabVi/Z9BiKXsTx0MbOm1+DhvCrWeldcSqemuXttO59kTfISjSWvaoy1d6DW7pn8TOt6Zg6Xr6oir7/NUOLv5th1of/DiyrspdTj1RBSyTe+13q94dTagzNcTHYp7vaZDVgoTnLyd8oyRtqxcQx6CrxMLbywAPI92QShxfxZmUtr2+D3kIAdctWY+daq5pTcBCILT45crBb6OjKJR65PWCNV9tTTDME4SlJFV8uUpGqPzL5wGTqP1lblczJ0ATWaSjVxZBq77KgZAiOJY0nduEfneb6h52HAAb9Xd3ZVAX5EYcrkCLwhZcdphGSeAxLyTHWpSYs5rz1L0IPosQsZ/NnJd89ymv7zKW9TDuPOU1jpM8Q/mn1hZon3RIuLDrhnihhc4QAhfssj+jIbVlYgPmSmaNHbV01o0RJ5D8nbGxQLb0al4/sJLxs+dSG+H7Bcop0NcULfU7hrVYhzcmQ4YhfjSSuofVob7xmbOY0ULlkvWe+tMNDxSRU6I/DZdFmoBiVGH55zFg3NAc9ujpTsqcknfZtZoV1jHMEjrMt2nHiLaxDuH0wr0opg5AWyJRZXFUVhyF2Iqa2WDwbSoFOcax+7Wl13w77316vVpScxDc2Oqv3x+f4+g5r2VlYmr3AbXLwyTrmX75tJL+yjM4zYp74qBPZg9oOQTB+l8wwDkqGUSUEa0NVGbO67McuB0+8MT1D6zox50C+MuX+xHMerNufcrhwurGjAVIpxAN1B3q6hPnI1RLoX4nqJH7ZX8ftKIeaQYkAdhrS4+IURIkRXh0/gUrI0G6/Kh1O/95UGJgmD7j6vIFula0KRbh3czl+b7BHfgVjCJRWvEkKgnWKMPPUZ2xU5tQQS1xXfgfCD0DepIgIYT53oBB0oU1YIiGzKSlu8/F14tEGvzsBbd7C2RH8jaB2bYZmxJybniq7zOtAnSEbbOKVacR26couyswBnqhQoqs6tVK5ciS6w6dCJ/pBWurFHDxOr857BMGQNYQOO71mGOJxRiuPDDoEt+zzOnjpyyWG1QEmwoCPZKtA9NLnTtRLycDwBGz8JmH3Cd/oPDMnmqB1hZxEIi1hoTZme8pU8cJoUsBgT1MtmrmUz1APz7bDlZlL4jwGXxOtbzH5CYWI9e0mRZRKgd5hGs/rxyO3PIVeCVEq+hwW7yA7AcfiFHZpvWVQy401q0qAkI+DqZkPdPSi438zxVcH08NbwosNM3hOHt2UWFql2GOh+CZTIJsAbhZAv3ElPswsm4PE4c4KELN1s3rbF23Y6ZRoUa3iKYivbVlbWstATyHp28gef34125HBgEKuPYwtQaKHh+2dqu23fuJeZycXywfhEIO9Ccv+42T8CCK1X2d4/znpc7FIrjfZsk+THO4/Weg7LD7ahRyRyddNd3YFPtsNZ6T10Q5Z0SyCcgu2KFKIM5PQujXADwaS/7cezeoFLQ85x2slHEwtpUjbB/V70xgLt9PevTGXK8jDNbkMv8R5LPGztvdO3HSta0E/qbV5/GCSNMY/4gVw0Yk7Og5eZcYSfRqnkYldAhUPnsFkM7wjF/bpkUK3v5EsQC66MiDCbBhTWbfv41DHgpJwMqsg1ePjvKAF88ek9+gvbrvcN2kFodfN0OMnaby/9OvOheoCvi9TCtv5IVVa7EE88S9xQTWHCFuPbtHtBu5toLzWQOOJ6/TJfUBo+0u3w+v6JxCJiewJ4+FIXu08JbVxeJ/cA1a43I9JvPLaaO5oi6/+WdYC9chVAssLpfWMTQ8Zb+f0Q1CKcXFOhNoi28ZCjojXzBIQ1nhf24qYFNgWcHBkAhe7+OgXx9AG3sdKfY0UHuuFf95ravQL9Fsu+LyhTUrhJlgDJI9lYLXa7FS1wmuI3ZOph+dfTq/Fw9Wa
yjCv8GqH/YsQA5OFqO1fZDoRYijA/S4HBPnA8CNkZl3oOyx8hU8+WFkjN0++7jsHfFh0UFOJNYCXwMkCcVpwHtO8gEWj2v/cjsjXJcWuBfVwDZguAJnKAqnfFdiFjtfM93A2j3MpPIg9LOArHaWm9Itdp29E2haxcAly+2uINwS+nm3UuUAdVQgYZDIaNV6N+Gl/WFK0a2Vc5IJa4mEZY5ImmEdc/hoh4U7PUG0hp5dDlLkgWZYoc+1BGJztTZ7dwDneSF2fJfI1AE2hL8GLTNIuDl5luldXrOgXHC2lNuLEYBhMz0wbljeQz1qaqQWR13G0hBp9Kc0qBP8lQwa1XT/CQztYQ/+EIe9Rz1qGYQPQ4RPIs6Y+SyiX7RCXd2RS9ByAPmCzUQoe7mbrx1gS/lRVLDw+vuem1lVBFZdYEpcKJp4xLeKEc5cdCJVwDsdY3TR7EXvePiASpbs/PesCn+GTf3aQCXU6+Nrq+y5q5gyhrQQbcKLW67SyBg0hGF37ICm4XF7PXTQKd6uxFsoCuw33ANeZFmhT+rIu/RA7sOPh+OBTPYzCKOSvgf5B9IIt8jDrMwOTaBXHawCwkmHlJzqfOxjU0HtJtgK51YQq6Afaqams0tgvKK/uS0cXAWprd6zeYJYL9RtICCq0ScsXzu65eaMAlZnLWlFfIAywhO5S9MW1jRV5koWiUMSCTQm1uOCJfLhnlqnurFGfuHrYuED6sLLQeDBBUhIhV916s9FtBGJGcFt0QLtJzoNo91TWAZaBfzVPnujeAImRFMFwPEccXC6NeZu7Bd6HlLHECdn0xojNK6j4JjOgb/qUXX20nXI2BsQs95SDJAfjbVMRnSqsVYIawqbHh+uTKNgzFm16XageM7wFr+ZdcYKMhKCbBkFVTLHHHl8bv2CQwar16OtmbcJ29P3uA/qRj5zZmQzkjMHrYE6LzikDT3aPtWeWtoDPu+ZX4Mdf0umGeXO6zgYr0r08uMhAgaeAdF4hqzVXOdFwRKibS7AJuMX9hID/dnkE/gOd+uoxxqvBqgPKyU4D7mNmoI8qosqdNsyyU/nMWyRyjt3EvyFjJnQL+pQV7eAO54/uzOxl0se2SsjqPM4yKPy7knaXluS/HreZxVgfZC1anJwaSZbffHEn3kqYfqOJ7blnvJKhfVtGhXu/PbSUG0+igt49a3uq3LWNtcGi/A4rERrtnEJRW4h4OloFZst4S3kTHDUnuP2IoGEfiJeMBiichPfkb3tWiooiPeEigxbobUZpbiY11JueZXDi+A4U2H9ggJEcIudy62WMsCXFjcKvgCOSyzCnK+y6CsTUvGut9vnXUXsFSt/xqa2lNWuivg7aC0r5BT9o6pPap5dQRso1eH/mDXMGNtrHzLpukpYGbB78H3k55pzDNqBIhZEi+TYwix/PraPzss2mSF2mqRDRPKHvYODPUaRl3VvyTM5kfSMBBxfb/bV5GeVGA6VhfAnFOw8RvMzmTFBSndJLYcFHIJxMPM81uFnh/YutqaCwP1tetuRkQiTEp6lj9y2z9OmvMRd1sbgQUTo+jp52cp93DUOXYCLCEawVOonLvJuymYulPV6sXTI7wPNRttrFaJ3cNDGeltNXa7gQqAD2xnHrdLa8pO3IW0poVMI0fJHR5ltvzOUKfujgpr3cInaOLJmxjooSjBkZAcbYj6b0CXiMpdWhDvvzuKxNo9s70dXqudoiwxDAgb6xiFekliiEc4ey8RDXCNDGUCghWhY607wlxZB+khyoOy0JhtjNtz+dpLffQGtgyRg2fhDLuhku4ItytNonH0oPNL5e9iw/r0ecLRV6w7+17pJwYwhdPLwuoVai1gEi+pQvYzISkZ8/6ImXWuWHKcxYD3DRTEq/miUr9wdaXFbJJf7o0E3+9DnxBBaoeVPFClNT655+XaztD0Qz4Dtt4l0ned3Xo/30i+7hdSquMFJIuOAoWFSxsSBffLVrZ93IvVFDqGtFiQkpgArpWG/CkMe808JS5ccFPS+g39wwDu21NGbVzNIhXHA18fQQVlnUVn/RelqCp0212HDRN0M6PatVgoMnDftMMq3J1jA+/02qYW45PYXr9qhE8p/eb83AmMqrLyxl1JyUmh8BTq3NM83BHvHVbfsKxDZhSt8NptoSAsY5VpQadS9dxvZMjY8XaeeysIiEZEj47DY41IA6/+Qys06eyx42eyomcCTCmseEuvbsuDymDLWcCWUHPwLG7mSU3HpDAIrnsGU3gJOjEAhf6Wtgqb2OIKRmUbVkvpjXTZRAz7PlqQl8nBNSN13v1x0RNiyVD9jyRX5VPzAiRJ59f1M3b5+tXjPljidYJX7U01buZLaYi+LLJCt1MNXA5Xi349f4AYwMUtEuCgrxHokCg1H/AKxzbctlRU7g16ooTGbHTDxk3kOlDgCiOmah2Hi8nwKTQXrtvcx6YZ6Fte403eYuVjnBT+nvHSz7pscr8+xiKlRIy0WppLDY+YzzNTTBxh8AS3xLJQNgER751Ud7iREFJEn0qekFfLlQLqiwFi5RGQrF+dZvRDNxZivIUkpWkRBauwp2IVTyrfzT/vSy3irOsxnzNCwAi1m+EyoQZKH+PqGbSwIkhSQB7ljNLGaPrjZGcGQlukPPsBwhcQ4IKdnaeEsw+/JLe+NpCVBEYa8isW2mZH0NgSTIKproqHk6VM6ALV3zQsLzmtuZpMH9gfRVKauizFxtjJ50nJ5/iLEZmHYjsXy63dv0mzYM+2wjT1ZTd13Y2CgqnwDP/4aE0mUgsbj0D6sKEpFYTIyXXAddldIPuDZW1IRe8yLgRScgUFnVkVtEJRjTPu2NV64xopBO7blcFhBr5cgFU+y/dnrn+WPrhEIpc5RPnugGwDJTqXHMAR05YP1cC5S4ylNZ0zm+n9uRD219A9klI3zQV0vtLVrOIgvWmBi3eRe1E2JFxfrs2OXERLZMLualxG7yV1K0MD0ZQXM8z3z8wOMbWa/n8kuBr1ap+uMtJnu28NaS8x77MYQm4J2Fx6FJAx97MemlV5ED7bX4TJpKnWZZ8+h7e+F+/q3xEVS+7BwoL16EUSL3xI1/ohw96Ok28NUiryELPfGgNWKVU2pST4KEwGfu/lJgoIPHbPNTFy7I3yhIEQcB79xxjjOzXTrBaTmmG/cYojnT3fJGlNmIQ+RltqQ4C5x/6PyQKus0IQcHqGf+kwWsLyMLdQjbZ6FFd9VfI+CW+htiWg5S6CthsfkRulgmnjwyrYqBeIoHH56GiRdFMaakKw6QCRyYxA71KwQ9t0l6c64ZPpw1JhrZq1jwzZwOThrTk1XAelLOQqu+TPebk5dtEVtZ3oBpPdGTnLw5b7ddvsxyLvOt9MBDOsCJpgkOYMhvZ4nJcO6IzGO49ksmg+r4isOIv9mQIaMppQh5QapI+oj8HOwVIRrP9hm0viKnOxNcNqZfp2TqALIDRuETWB8NpE6maCB5yaO8gX7oZIuR/nTXgjckkOo5i0IO3opprBMKlpk4VI2J79Uy1JfB49BaU1hHtHlgAwr6csXEGHG3wW3k40dcjfAGO+I
JZb0ykTOtuFGkdzvvw3xvFVjmcRtMMOO4cM3s97VH3n5FRg7DZ38akKZvhVEaPYfmDRKq/d5T1aLKcN+WlnCD+wrsSdlbImGopHEFVBoAaUL8r2dd6X2i0fc+qmaK9xnZLvlHBoB5q5nw3C08koB8rFQSGoccquHDe+P0vKYDddI4dmew36w71CLKZu6fpCRjz3ZF51ZMIGnDwgWlLkz+y8Y8wQEp5c1iuuIQv5IVbDcA0bmicmx7T5JNAm0+hxk09mVEP3r9A4b1yTHYVLGjpt42Unu2I+nnGi9jfs4MwLbB2ixBT15QSBt2fR1j0kxPCpJUqACtJ3T8M4GmibUAK+flN0QMKaT7lV7vdOaaC9Sm3n+fVAbI8pYsnsx59a/cGwH5YKAqatuqe0cUZh5VzEXkuoKwnz2LnpqWsNzQR99b5EBh5jFfh81CGzPBxHLp9J+oLrupHFEufQZjDUJvQXJKnSRKsv4w5dKvtHtMjaU412c8oPfWjtNFt2/HwC865ySMNAAKVSALY++dM3Cls+y2BqXm1K0qMO7yZE3ADG1vhuDQzAK99IrC1qJs2SHo8tZL28NyioZ6Awm7wMcp6NhKJCNf2NFXcQNB0sH1plJNb16zK6dHbXzttoxlwP5TvRGZo5rx4zXh+28PfN63dlidLGMLji3pFGziyH1E63bzKg+T1QM/qySHwz1yojato4CE48gvHC1EE8T6HKxA9cpLIn4JJWfZWFdO5At8b+6QVcKxBCoxZ2hNkdUnOpW92euM+3i8/yqjKtUKvU+Fu3POSzLOU62frrCXQy2GByo9ktGtOFRUS47T522ESAT8jNvGE/pIyNnDA77ii6Dj6NxxZRCD5flefLRsvTKTZwPlQ57yAED1jy97kXdC5juBhcsquot3tj3/ve040hzrOCOXFRYEMvTtUToICaJS5j7WlBS0izfegLl8ZGLu9i/9GIluQp2ZOo5/fAv8QP59Gz12qQe07TjB/68JkIv0ZTKtPzhtekRqttnwpRQ/tMJNMepRvgmyY+A1O/b11ZxMdafn7KWi+Cv4YZH0gH2O8zAR1SpZij7F73E74VstA5M32ekKzKjbC3lg7kDGWxNxb4cHv+r74XUeXybDloFcPv5gxuQNSvfvjLbtTC0yYB8mMwjgfsGLJ3kQ+1aqakpaPrgY8cSvPQvYd4lHAS89bczkgLTrB34pFMmmd+P4+5ATquj3ZcvzKWSq4Z8rJzQ07KEn9mxBeSteP8x23KV/vy+FTsYPJMNgzR1UcWhjlRPn5aXaVnhItOSg4SC67IOZ5kr4fU+PVEqepNpFwSeOtkuNje9Tbx+SyeYUgGfWuFXc0ReRGJtmcreYyE+BMWyG20Ooq5u6uZlm2y9mzcXeCoZwx4HtOSC8LaP2jWvld5zq/3OOPw26w0zdX0xm/Hg79CJZhTO+ODz4oZmqKyRms3eJyKZrOgRzi9B0GYL91l9JEmJhuB8HX3vzW2tCMVDsj3Ql6OFBvozA493HFTGUPxUNZsJefYWLsj8RSKyGbsoYBgsgDRoIYhb06BR1uKEiiquiu5tgdCu7jYkGMA8zN9dKOeafAOjK4ixufQDJlapw0+LC8jX+2p2/h/v29CGpzL7k1Jt26N0WkOuPAU9+S45l0tC7gFebtdbH8OjoR56WwZ0uq4kLup4LCWUYqATxTONFkKwRCljTisnCNUIapgFLH4VK/L6hUAwdKQXECRjGHWhk6QfwpsFuPp8lrPp+/akVKBnzW8rhR2VLY8pZug7MY5Jr8wq0p1aPZKETCI8Lz53bAcpZxUVuTowZnfdOktRxoMHeMPLa/yYIAOOGmT/Fdh2D5defQlXbkcdX1T1205hQanoz9gP5RVsHjzyvWGbaeO1+0STsgU1fCuErElhaSdHj5M8OWAogICLrHZcwQhRDF8OR1pvyE5wrF+v7FCCe2TfefaBNRFhprCOkAPWj2XnAsaqGpZY2p4wJ7N2h4EhRvPi2Zk+Z9BT0weQyM1kkPsGdORDY5ZT3FsE/25xTR+25ma0lSD2CzKlJZxLuArMVlT8hvvjzsESfKhefwcdsDqgy/dEfLriNUKH8AZ1nz/G0OXatbKH+BfJv6iF34DoiMzYU2hX9miVHIuvaOUP+iLpyFlGtnsKmfbZNProNuPsk4uJkY1BhJejHkO9ekKeWyaYMRCHhYamMJS7/K+DQ9jm4eUUnRxYInT+jrlY7Q5na56p7kB2Kl9NMTBzl381Vo+yPtocTIblsL+Gp6L6S3QWLI7W69x+DXCyB4/AMFgQIF0qYeRqRPm1A6DCXmk/FDodse87U8r1rUZBgWO+8hwD/K96RtVahiXMOlwG3W9ZSx5JXWKnOE8ddj/kWCxZoGV2wjb/9IT8/0WpwqAGnBVCFd+Wu9Yhl6HrH+eUYYUHdGdd0gJrUbwOiPfBC4gHX5FQyFQ381gOjyS130jG7mbu9HGglDfK+3kvzCnaqOuFj1HSwL4vB5vV2rYkG6Euy7RT6AwZM09Q9CO1MzB/IGcjSH3rZMVOmMW+m1vkRvGSci5f4RwBHGdq44KR0Q6SfkaH8QhZkHOAqRdgvbeW/xycMA/qhoZQPXTh/TqCfNMaHOkI4BvRvhjRGHjo7vBMl2PETNTq1Vc6MZYCl6Tx8pTeIlF4GlVykjtLfCrRaSsMjOe7krBO1kd1910+gYDL3kFbab4NUkHraT8OpVc62hkdrcfQiS6AXRKabwxwUTyqqOGz8XA95uyG3pskhdL8umTSPZCJHwuAMaEZLvkQaOZPqEu3Flb4uD/GESMImiACHYV2VfwIrmxCE3y7qoH2+YHj9eCkWZx+3miwR/QcAG7+ghFQzVUXzWs0g02AUnve35Oqg6HjAddGdv902mQoO3bXjDwwwzSRYNG10ZDmdx9JybK4BUJjbE8Lq9hKaNPWu7fLYjTPg9c4z3UApzkwr97kUeOZWBcVNWu9gSVyqbkjsWCV88Zlf6oyDunrP4iQqwaaSszc7FA0LBD3GuRlU/k/Q/FfhNzglQd0/V1JvyIQHbBrcnWr7PvD11sWkoyty3s9n1CKbq7bzI4mfXNOENgjwz9UBwGLSC1IJ66BlEAs49KINI/PE/+5iW4KZJS+BFR0dXsz/tUkPlSQ6DkbFx/F4tWmAk4IWh2cq+d3qF+9nWxZcxv/nA6d9M4NjFbNnmO+APETa8ImyNtSqVm/ZDGSunfHdjA6wiLHydlZZRxCX305leBbS9mxr5Ucup+nMVly7Hsw2okshrpZ4Bhvw9zw865G1LBb5sJ6yqiC4NfrwojcfZh1WHltH2eJ5HKR/Qr5zwJAC1Ubympiii9ALNIovAY10/yFux/+Fvp6+u1VQgAXPQAeZ4UIoQCVtUpWp+to7X7at86/FhiBhvbzVojxwwCRtc+sf1f/0/G0/8slg9THbGbDVoKxndvpIwjq+KCv2volVNXNCfKD/IoVPNtxMRYQqXsFLaExujSEd5PARm7pQhALcf123OhTR2uulNpm+Wyj+n0QkuoD5owy9Aur2WnEUHKu26xBGBgbfEJYLeSHi/7
jalWdXT7FHFy7Vj3oYax+Gnn9G4fQJhRn6x+JG/xs7gYv5++WRBuw/N1stJkH9SEUhoryc646n1A4KnSqbvvxlBfqqCv2ullpRtuZV08owbwrVbR8RNxRAcbp1BDdSKmfTc4P4tskzMN91YFA+s++OP0b2VTwv8F69Iv3fdHu19QatJBk2HBIBnarYLd/EN1Eoy943KHblEvzGh1uXeSIZCKx2EEDlVXj5iUGG3UHsX4jr22MTlP/T3F5NItAjofnxAkNmozP1vExJfCjjXb8XIIxDBN7UnHPDnanpkNW2JkvsrARyL5ojkrg3YS1K8LQepzPcXlR1+607xYc24WhbIRonSsYB2HbI6z+aMscOdAmz8gY7w8jGvK8CzVDF0OxanYqAMNjO6J/h62s8uO84QfeBY9seo0kJZSVzDtG5OIJVOvBIjsv1MFLeRNb97dFpvlCmQweUgowKL0f0tnLK8WPSq43a2sOFQIoLAHdFLW5oe/EucKb1BpNT0pfM81xSCA/pi51gP0eXZb2wVQrMpnzaiqZ5HW3/BKghpRswxLwFJgKSNmx2EgxpTRPC2dZuus1i9iGCPH5CZLCiExhyfLluRsU9LEz7x3FbaAlISNcp55btGb8W9PN8mHl9CIxX6EPERnM9ABiY3uZyNp4lJ7q3/zFtXd+6sz4lHUQkX4xk4JhUXM+zp2xcQEHQlZDaZ5ofEAzGr943HdpNhw2KykfN9ACVfsb2RjqlE/7YfGtmANVgkjjwSgRXNi5j7rtb+bfDTiFY7QOfxB4NFcgpIJdW8szEm5T0dF91k3g+l3C/oWOErRMYtlW38Ce5i3bBh33YbrO5Wcc1BtFXA5Yh+y94BWoOkQz/atkX7BZ7pFpsWvUzZrzvDXBO8+1QiRDMTxZbHMkLBnzr/QHO13PKmJ053WhkZOw4wj3Yw9bFH2l7Ecw758CPIaQ8FWEj2XrL1W7CMPAFm5urhSNcyeHimbxXyKxRzXLpoe4ERJBfJrp5Hxj3O4ZnimF2G1Ol+xhsmyiO1McCcXd2Gi9uwcrqdmW7BCSmqVzP/w5bXDzIO53Wlp4SS3/hRsDyFsSI3SP6jwebt50gwWgNb0U7ccHdLlLhI+E0DwgThhpbrmkS7LHpAIDH0iJLSQ9N5GuyNNhRuaRMxChc6Lgso0JrBtnjLVNn7eYeRrIGyM1+CaG76WgvgrN8E+ThiUzYwvFVtursf1Vv+YUP7CRrMkjhjNF4qzzF1Cmw1zA4ejVxstYhv58F1vfRSBqlGruFGJULN20+pTdFpkYmiggU3Qff2gAvVfzMKjwShoS3uj7R16qf7sfVBBjBgvc+IL5CGgqvGpzCAeC7M3/e71dylAH2RiYwoZqgj0c3+M3Blm3tw2Zy1WuICUkR9TMM/O8nJzk5+ro2lmKeA449YOIBgHtGlgH4Gqo4wGj/L2O4O3xCIjg3vz087aZ74fPI+VaU6NXQ4kUjgFPMQs5HYvFWPwcq2MbeXuQgnWCRO4/DjyWk4w8j5CaSCsHeARdGZKZyp7xt5VFWv4jHdjZlS0V0nz1R0eaKZE+yzrUTCz1nWOr5XvupkZ/UEhErNh9hicO++NifhUt8BwEEVscjETcLmMZRfiPCYnbSU5EboiXpfF0yLfKP3H1os8ZXbJKff/UWJfTtN8WsvkAS/r2nwJr+UUCIVU0AZeAY3sM9/dxVDKevAeqZ25c9gFHTvD4KvOJ2IwqC4CtCwSTF228V6PXs1B4FGM12IHrqAHpsGingL+vf6GaHXFprW4ntd4w1MVSr6TkRf+fvvlLuOwvQrBSpjqAiXAfkNY6L/YJ7jZ5RXsZaDKQk8JCzkV58yX5qrme36zU4J0BZZZjhtf4+Stsas+qpL8WjPQTDy8Lzdq7eRvo91Y+VeX1hiK7chT4+gd8bUfgwPTkE/D6HfR9c43Ah2Sizr6+opQcftR+RDlQENEMDkSpYEQikP1G3mLKWcSNwzWCH5PXq9iMzPQo+ayEivnbN+OSvw5rNExpAlhuvW7qaXGTXZviEy9cOdsJcMRrUGLtaHGX59U9JBMOQXJmZyX9h+6bJvzBZdJr9mo+uBBl5sNOADsy1zRhyg4GQTtFNVRqrQgMeySD9dAiLUurU7lxLsbTH3MuOkfRFABd3CHLE+F9nIJFlYHpTFu6Q77CKrR7D+d5xqkGNa0BGHFW3uy9hBg2S9gh9OZnt44eyEpxh4z9lfdz4HBAicNZitKRRy/I9nGn/ZkQqIxHg6VL0bdbMmieFQORm2cPoiMdxGxtuinT4vyFsm2PtB7uI/0u0BoRYFAQBotm3btm3b7mXbtm3b/Nl2N9u2XbOIWcgx8g6s0BaTmXRL4i1vxz76PR6s4JXwd78YJS+DY/NR8eJicoORqLxSz5408OvrvFVuv4CgPNHNSSZz+TFkaOfZGqjC8AHZiK2IbstcVq3SMsc6Kc64fO93UnJZlCmCSQgwQ+scL+rzMAAEDCSG3FFBZsty65HqQ9tr/A7JiIF22KIoyuLgDCzYoF2e9EkiNH2uXD1uebS5zrlUMNwB7Fdv4PSOD1iSnGIPfP0n+IyRnBrMboV9s7y7BSmcJNWP+0hwCsZ7SMV7+/nrAnT8ywOlN7jJ3fk3m0w/Pu0LfZ4Z1dokIGpnQZ3JcvyqGeq6OuWq8X//02FAkX/tbC62Hm12ZbABx9nf8F95K/TfubRHKg/6b9v6eNiUcAf5RRXNKbwMSTj0NsuazYmk1GiWoUsHeUGmutSPYZf8EvVT+Bb4GVYGXg4K1SE/TAaOLWp2hlGJk1YItLYhqpgHpJe1xpf1b1Iou43H02lB0pZ5kJPSdVuFeCHo0OsihoBIt5c6emFssjks6uH0EP/eQoxX2o/+w1dp0v3GSEnsonGlIM01WrKJ7uqw1KBGIpPuYoqZk90GYPnv8ZA+ErEaKohiM9dzElzR+GzGuGrE/APRhxHrXEbT9ZFFoYMSwc4G7oKcOxD/J4Kw5gZvKqIRgiOUiT685vNlCLokhczmuI8fR12rTLywFGVEGnH942fe8ZPFP/783sUqJu04+Dq7WMTL4EDQiQ6rEObc3z8izDbmz17QLsv6QDMDwU2xuxL2VoRw9ux53UMc22RaHK0YJM/324poxH1ao/qoIOEFc2+IlefBwrfQkb6JVRbDQtRCIQ4pqNNmZo/0QM7j36aExMw30CiXbxtNa/dWBQkk/KTo0ZIcGUxNTLRzeZdLUMLR46Iusa1E43AUsZK9j5X4ONqZbMzAB2ZwAA1YzTECL2Jb3BXubi24ONbw+BSbazsy819ZJKW+HiIM3Dv0DQHvKAlwqfKORuhVCTb9Xonp73Swo/VHgkvvX1DM83FaVRDG9MPiJNfEZE3lBR14DJak9vhVOYvYmHo8GhvVt+TMZ57VAr+icehL1eRscdKAU3VB3FqDTszjLfzmVxL3ze9+AiLox9wQ0fj1+kVdKAFlur2G6JpVGh+3oH+jIDGkNw5DPozRf2veyngbNGiqoH/ABB1oj+DNv5z1olCni/dg4n+woR8buY5d3/uanpKi6VV1E2vHgkxM
uuqfSOl2bpZfHS0IqFc0kNFKpPcf4fzQ++7MomcwTovT1h7styNpwagKiRi4dEu3NdbJcF26wVMTFN42Mb+6VR2aN0Y83mludh++RevFhPXHjTv6dad4EC5ps3adEYcdxMrq8yyYXN+M9KPig66m4CWfbQ8BWHZggc8bueEe16yRuNdQ4CWNV8u8c2cUqLR40rwqgjpilib/SGC0yEyojKKN+F9vmiZxEhuNHJuMrJznTrrKIDb6MYDfEFf9ySq/5ZdhHWmwEucEooW28Wt1UYBuk/0djRGnEboru5Y3/pDTO3Yupt4y+Ej4mSRS55pvnXe68EE1eysyHdPjkB3DS8jpNSeXZ/iiutBZiWsNrBype72T/9gZYa37Vge03uL5xSxJ1emZDguLy4JftFB2Gm8/N3/OWwB+q2oyRxAhtZKb5yKizeN8znhjNjj01JTDsuG06k2ky1wPOtVlG8F0mC03XBKpl+jwIMlxdpXGv2ra7iI58Bs9+JyAYt5qcY1PRsNTDvKNDTraxRTp09oDzaxS05kgn0+KnlHRu/192Kk8UiLsqHM1E8TJpr9Bmd+cunEGl/QZR1Nba/AiKA5F6OVgjqeWVGOeLs56a2mxYDhzk5wDdGbQjqKWv33/AaYR6PkrQVTJHOSxnFBqnrrVuwjxxESXJSuQiHoGyQ+HC4rVwrb5DbwI5uAmnDPUthS1Be1ilSL6KE5+GA+d+c3/iFpLs2NUQ59JvsK2LSPHCtpyjRkTofgFTnMrHRJhqtuVGFEDpGcXMhMU+pu7+0or2xV+5LeFs7n4zKsK97e3c0OP2j8wUDjUZjFfyP7zmTSqNv7PrvQxovZWT+zgpZtk9BeWGue0PYfc1s/GZRgCMVzlUJAWGyxOtMlZI+2o99v7+QrQr60IzNsBbwd3jWTKuOmWN5n5+uwW63EwFpP+AYkuluXbytcxeJSoKVWTs6eqtLjPp9npxz/NGonQ5p26GoWlDp0CXHy7WU2hugtWUVojNjOZfzcoCUm/kx4XYl2E8Jx69kxW2a4FN30wEL5YylzaOHpslFe6DBucrix603T0dT/c3xNz2ckH0W93z9lvGyWnbxnU/2vvtE51j2YdOU7OggDJCL4uKyH7WK4QPpWKt73XGAZ/55pgBXcXKnYXdx8hrixe6faQiBot3QyIX2A77Igjy20pVrsLi/EDAJIlutg2ZFRx7L+OVRAbDoKaWpon1Ra/7DO/P7jvBiNBGa24o4IeclleTnSZCLWvMHpHM2tOVsV+rVWALwWIjkz8x5qslhnsUlgjjEqQIZAgAEkttcZ1n/HJwFmCa9xbm6TYI/Cv3tYlcc6F5GZTvXUcdDf9Dgf26qUnmfgUWNJoyvo2kaxagn6eY74W3qvPNB9Lz5ValUXeT//kXvMWxCxV/0An3P0JcIdz0BflgrIuyie7rmT7cSvDFEz9tiiNYC4Py8gOSa61spQppUW1pFwnJr4IzQv5L/dhGnzM4F73zJxi/xSX1disJ9RzdF1GEn9OZ4eVWiuB7S7Pi97xxdALZQc9eYgCU/bMI2LEysjVU6Lx/5DdhCZeb/1e6fnonphbwlJQ0SmHM8HGNiIkVb46fRnbEa1Ed38ISfc0C57dDHdnaBiVnVPkkzqtURIXw93lvKZEAt8G5muEmsf331x/LQeeFfUwE0posjvm/q6Gp8ZLEbj9xe5NXIaPg+Y8dRJ+WEJlhVWTP9ybibf2JkY+rxtRR+ROU1ua9ARDHj7PT1jUNOzBVeaW208nIOcmamag2y4Q8qc1vxlAxq4Zm/hzWZrDOwwYGKKC9jZq0JUcM4p4oEkNrEalzyjfLJ+xH04rVPlkKPHn83qBehKlSwoEW707XVyFpqhqTmB6Dn5TpT5+ghPFWdGN/dvaGV5xr6jFGgKHIBch56K+Ei/tqmnG9FCNlPg4Ruu8QlgTRemOu2WG1kEy5+G/R1xWETbzJ59yI/u9CH+1CkT3ESClt9APNVBWu3t/TpDJo6QmD/2VsSTiPJsVRKEVYyMIKmhEp7d3/MQ6/j2Y1lbhjaTuf2TiZsM6Vsvnp79S5dzdrvW2iVTrS4lBg1GJ8v0lMrkqSuTAsONo0YA09lw/I8DEdPDCukcwCkyQJfrpMKlQU4TE+hbkNL9MtKNj85lhB9PrLmx+yBAlf+4wMAUXBq3h+l7F0uVKKxLGzz68daGZH7Ql8qpMP3buYpnodp6QA8WXAzbZo5AhrIimg18mh9e8mCoe7qajwz+kM8xbEsrit2/+eXB2XEhvgz9iw7Ln5QMGqsLlOWN2iHdTHbripPSMZh/va/HsnClxZgz0nW1AaC0Xf678lBIKjXh1Yqqx96Y4PrAEUFGkwfooXDYxEoT8JIjkTqOMRqLupuXuGmBRibTZA/50cXZ57LTIzeO8KTEfoiEh4mnFL1UKGdK5lfDagCs30kw67/cC7kSJWgVXSSade9SJejq8KCXozGCorvCP4M97lfcGV2EIlXN67X+i1GVPdP/xOFXBiMSY6QGeVsm+6jzuR98ggWgM3XoccNIppnhP9JSaexoFkAUsj/BXKee4XnAIbmw8fETRFZsyZkSosdJmUD/S2IqwvPnNF6UgrO4XBvpbaWkKcEyT1/3x6eylHMrABdHMsuqJp3yXwMy5lJ23qw9mmstnHUzQUfofSSsiuE6PpOoW/ThWSunhGp2fZQ/rhCGBY6XVC6xCkdNVmW7aFZ8e2lQYRK/NpnO2OX/YCZAvtGZtTyy+I7/CZfgP6q4ECjI4xMqT8sRVp71Az9+Bn0MKbabSikn6J/oH7fJOwckIL6WjjQLbTNGcA7pziT0o7uMFo4W0lXB5GkHG5NnO4ZmWpuJefOMEnZhiyCGBz5CzQwJLruLh1UWC7pfowmzU/nuXN/1Rx55YJMZlEwoL1nzod8yokSj3vQ+2m7Zbr6qqc0of5OOQkNHr/BKuXm+cEeHN+iwOGl0pn7OvC9e5SvfMVGFCWQvTtW9C4fH1U4qv8eZXfxqN5EHCV2bmHqOaGBLdyssWeVyVtVaNZ2VOKVetmp+5kqJ4bYGZcv9ZI59dCx8lN5K1rOGCAXqS5YQ2uMglXOGKAr2jTF9qyp2/mbo81csCyhOGXp93M9IU7+uS1tC1lp7TCOmVsl/0ZZR34rdmmW9HIQsigmVDJiO21LbSpilWtNO+cbvQj6viYYOaCNkVw2ff4/Kfl6B43niyLpB1NxFRchT0jY6W7pR/jvW26zsA6gVFr33a6nJY95gQxMk2rfbLXpUOFSQITJP1BIsgCU84ikyid9/rWnVvPfMCPtHe0Q45voB64E3EyWExtMoaI9wOXO3EAKy2Id80ss2Q1/5r8vBVVIUBAqxQSkoL/omZOz1ZhJvMt0hfBcavjRbrwOrl+nXtYkw87b78FLP7qg87RIzGDO06Yw9Xx6RTJdbaIpq2B7LmMsRhbLsaz5h0dVwZ5Ws3Katq4+d9/ytCf6K9SoKX8FK6nbrKKEJWFUG2I15pxGk9KMfWI5OscHyN2i8nxxOh+zmssAjeIBNPXuo6Hi2/GGodnsWVyCZoH7sc+GY
BnC9GgaanNnNJmosMv0WWgOyG7h3afSjfOWCx154s13aCQUx7EohhCwSTtvkljIULJ8vtrvOArUPM5w3GeN8bISmYYFx1oY+Sg8QlCi8fJjJ4u7ACKuUnxERXs9nRWI5zavmhFXA9ISf3wYg8nSc9L8nj7mqFMPbPvvZtPxebBTPM1Y4m8Ma0pzJPMSTPRrFlYHzpRumdf5N7Y8ico5ZNbJzNKJiPm7h7qfs/fqjMvbxm/q6s4RRUr3lSYEdEEeoI3+QoE4BmVgjNVidFqQLJce/tDYsihr6kRtL8Mj5Y7XTSx7X5Q3ru3WmgiQavmzsgTBNhDzGdBzdZwtUME4t5O/zTA3/2CK8Qp3vq/wFSSBwSSmAQE01e1kaw8y4Za7vMeFJBDdwyN2ywCiRte5qy6LO56ubzcG0vbCPA48m0xNEK9yQdA9dcCSQBhkt6gdml9JM94kJDJUAjKesbxswxLygRgtqTNLx+kw8ZPsDyDV0lu8LUjsAEWNFb0H6kGTQsE6nV1zFuPuAmrrSsCBWZ/O6XTwzSkzquhkVS3vtsnfAmdoHDFlZa+oID5XxWRO8PNoU+yYs5qFEvluAH2bE/6ouBGPKZI0FjoSSI9GcIXAbNtEOW+ZEvUR2omqjlawM/hpXwtyBWOS5mHFZu+/8tnkFrRjc3Lp+5MDFbeck47iqp+gNcBYgrJpeRbg8Wt7ktLSJ5ruKnOGEHMlG9oQJt/Bhv5/QfoqDLyrbgq68PacZR6sFuVaUkUxzW2Q0LXv5mamwr4nI/EJ2vmZefpJ9H3BRFCjvlkFZ8g0hx9pdkmHvDYTOvOoVW4/WusFu08gQObnzzjasRQv0FNKlLmsRuDvK7a3OA5lhsyYPHrP3oEO2dbFDew4T1TJzxCZ/kE+l/FO5ZtZ/pkGie/e0lZYrNOrBKFPx3GE269cZIzbY3+z+agn6GNJV4Wm1/A209Pxf0OhwOaeTQleizaeYpgu6BUkrD1n6okPdt8sPTUuB1pq2zGkuMG2D7wXlsw7PAbJMTT0DZ2pW1JFwQD4YOIMcjBrqvUJF0OHvjLLm11Tik1YiRvhlsHh75T/s9cr/XzbbGW42gZwFMNviz8IfD5k9A9hr6+cQ+MIa3MUBOIRwajnUDqwForb+T1tDbDhemPAZ0LyzeudZDLTTrqT28oWzC6mr5x9uORwvnqNQCuiqH5rZ6JGtyCKIlA0zl7dStDzbCciEhEFIXbOmNB853WbpMzvBDItcmIYETms0aNgMgXihuN3Ku3r21RIKR0hxuuKxJtWfju/HOqL6qPwsz8chyGRwVNzXGemWaamyPzIbE9jDYMk3bIvVn4FvzwA92eIz5Pgt3S5WM2kKFOIzDr9cXLkWD7BLYQHPnzp8wsflRHxyf5C+B/GCsfjMSWXCMQqDUR9BR2U5fbGaNRnBQAWfQEJTJ5e6UIQhKhf2tLteJaUxMXsnBwN+ZG2x2bm5uQCjz4/9iI1d+k0IPRTDhkiXvKFl6ez4qEdTRz6FqoZZ3SD3OxnF1DAXlHCZdem41dBeNyvQqKNAB1ZVsIf6VGG03ZDq6ykf1qNaYpt0OQCEUC6dvc2YHdprGcSjKRE610iuo/6mV0I/tqP23MTZi0GYjKAwzA425Nd5zMmfSYKod7AxuuihqJrWoghAkcLTphQ8sG1RfmhU+rLZ+p2JFSoaIPuWbnUcKq/tllxUga4WXP4Cyuut3fNkO50i9jYzgCpt/7tPnWeUvRKLSkupEIIKZ0dn4hpDkuZvQ3GT4lC3lxF5sarN9qdnNg3Y3TP1SaDP3GfDbAK1TYmxZtF9nJh7Cljxa8se8xnYFh/ELKDTKomQu6rp8RArKT5R7bC0MK7zcb7yktnsg71SHr9waD4rmXS+SGM2P5AugXTSy5Zo5o6/BQ+9aNKHECYYuWCgmkgbpKnAxh+n5fL5Imnq+G/o0k8GsDmGarpDYAEQ8KKAnjW8Fo/EvSZI8PyZ/CybxwwXAEVwDDCuZhEfHUmvjwA2N7XUBW84M8/pu/7kjJsEh7SWsVQ24LC2OWUwFkOb1YmnSGF/7xgRdHR4WYs5XZwtM4poenNoIu1hkOOCWrvMoYGDgEDrQuw9lEV8J8qPoIXdC8OMMuReDB8q2nEnJOteDhRy3jGL+R6mpA5ztdNIcm8rg+C6jssYmFU0x7hTSiEo5zolmfiATlrXEB0lmeJxXYiAeoUQJDbzE1zDh947y7wyetnE3mIqyZHDO4T+i5JzxaxCJx5+NbuhanRdKcX14lpuDf0sis9myEabCwqLslT9HfKV72Avj4u5d9DL/FUM0VeguXE7di2t5FZ0/CgSpRdanCBTGAFO2xN84mCraOzS01fzJzDilepepUSBbGc1XEbqrvdQTx1+zsSfYBuL9wHJCchpCd4uLI1TUlG7d2ua4WH89dDb8h8oSq9+9ig4Bbxz05l0rnellULEOzXlcc4tqi6alK9S81S24NIE4MYYrPouvfe7iH6SGfzfmWi53UfCTKWghDm/c4t70SqHzZiODJmzGPbuWkcb+sfeiS5UfFLUucKIVHb6jWmR25s2vjz9U7EhGIMEg2/eUvvivzMz7PGtMRkwrc3/ogikfx9/6nlbwArpJP+XRIl/ln6bNr/A11w/bPqm+LMjz0QiSfThwuzXFFEZYACHvGMIyqmB8E7w0ziazIfeA6g+wfAe+/cR6PVSvvglkRLdybyDzjL64Xy9XGOA4fQmsWwUo1doFJoYDljonms7x1YTnKWD2mzX9NNn4NzsC8iPtF8ykYs6vm9ZZNdTWWgrBAzdR5uB8Y2nz4W9xWi6ye3E3U1x2AjNNQu2+rL1Dx+1Fwuab5cUh5XD/aUBqaKVdQpBBQ2RKzqIjX2s7eE2cJoHPyt09YPJevzjy+FsmRKHYXE4/uQbfpjMs9Lt5z0zQtqIj+HvXX4rUHK5Z9ljZcuyS0P3vQLogtN4ACAy8EE53rpI2UEJVUmwZ5qDGehjM7b5fC8Aq9eyiEY8VuAgGNE+7H/+rSDTeSwtCbQSk8egVxX2LpPh2raldQakpLqmhkrx+d2Dm/Cg5HDB0xh6byJAMWAaotAZmYAEXW2peisHPjtkGZZsXsuSfoIQleQhcAU8XOZ1d1oow+fxum9+vkrjyqX4aO5/GZfLR5ST0aKUKStVBVKpMhoIesBczrZ0s7DEUonyQuRPRO7xx0K08wd5j+wd57QVe2IWfN0DXgSxdpGzrHmnv005xVVROAWW1kJV0wlT5fWJKA9Z6jH1JT0OhaM0+r0F8/C33vVCDXtdg93sUQwwMaWQ43NodtzeTCojb+HotFaqo/wZf3gOtzvA7gIfXdDjUHd3ajOneXqu9Pa4tfTmLiwY0caJaipRRd6M6UHPopXQam8Y83yZKXULhP62J3bCfYRHpc62G0MuXbwIjRcK/X+T8MJk3DnKSnOaMgNnmYn4bDqpz8VtzVWaRRvt9OUUz5cpiH0R91v0DGtQRp8+DGl3x6mGlTvdbhjdycLHJaJJ1KM5lfzv5rhV4RojqCJHQyLsCKDN97V+twFwBL4xm9JURTVvpEU
okbDL0IqNnKjXcHhn6G7hlu+Y9vmqUF/fH2cuwdSpc5qegu5I2iLI+QeQqhbKWRbPIh+dibZdtRuR+ptDd0aj0U4DHCRccWM7pqZ+W5p1gRdJZAchXhfrsxaa3VN3m5obbo872jx0qxXCBMES1SqFnN3pVM1QIyuEgvEDmlJRoY0shbKMxvbVnVZOYNbod6UcD0PCHRpcndNnGgI+Wv9QTKwpl4dQZ1PCmIXZzt+OZPJ5ap+AwqZXnjeO5LWGaWPSny+Fpc93lkFQxmnZldLrQgtzdBeHb1zxM+SBhjKjMaqE9gh48xjXqe0WVYFgA+MfJby9Msz48hVEGCi/yHKyhaPMiuUvabiMMYeSlMlbkJBEWsuWGM5Kqgj1yIhJkItvZ/ke19qdo2OsxFye0L8Y2yJ1GP+ZQRjrdbDNOuxAHkm0CAxnLkIC1U+4qp7Ncdo6UzaTJxNtcmSO4pwkHBl5tOtY9M/niW0FugBDA32VnYek2mZI6dpvD78oCv9liG68j/9FMb5nUwBARlIUIedd3kARm73fQWHTbRvIZ5TSxua8GlgczAwIhfMaxzyL0bH9X1SIcQ61L/KEIcyVOQAwQMDdlcwAiXCJ7hO6zncnuh74nDIfnn4YpoaJpxzEGybCVCGUorBOUqgOMx+VFb2XGW/Qy/Pv7aU5wA3hk1ykPIJI5ZbnHZetL57n1/qbp5ptMxtjttREJkVQ9G+LKIGe/Clm50RFyjYRAXT/ynVpmNphxrA8+YuDle9Oj+snJ+QQa9bavZzbohkXEOB22kVb3DQ3pMX86KR8za19f7xPYQsCsBhO6DEbyJpIysV9VTdjj/K2S2o3y78NSZZNocihrUen2CP+8e58ZsPyw3SBX1gpaqmd55UkNAOXp3ONt53/8SyB3GExjPldqaLdj695FHKFwx+E4kvyMAjQMUM6swW+MkxjWLfhqSZQBULerbgDPhNIb1Kn9ESyiL5x1ULap9FlYfa1NzW4SNr+KU3TDinQ3qgtqPMOpXyiWP/kTZZZtK6Qkg6aLu4OiXBnIkai7k6c/WsWPQSHNtvNBQAJhKxb4541fR1Xce0Qh6PfUv+I1iyynyvHm0UwM5M6x1XKMXVNn/6L7KhL7STyJ/oPzzb8Z8o5KffVWYagbEVxbQrBUIzB9H/k5XmgHHEfIJ1C/BDjj51MSKyN+zwJchjQ571xdQ4EPgxuM9oj5BnS+xQ0r5god+Uwz/1uYUgub9aLMpYijA8WzxL+S/wixKBhcfD7EWVvtcYoZw/pFB77Mccnuz/v7TzBfeDr5mRZEvdhlOgggkv1VDI2RVWhrpRjkBh5fRoot2QBkRH3aXQD7XxEYM2c7NSHSJQgrw1825RFCLtE3BuFoEyXFgC8z6D0Hd3l1b4UeRvWAVbxIKG6+Rp06DwTsQg8g8znxzGlM2nFEYJOhPkzEJuk/tPw66EjGyJucy0hFeoaUVaZUadC1YoFm9+lHaNWS8R7NMWfRgViRrzLa4qDwcerNPkMfZnxO9sTDpSmWgZt6GZgOXp2CzcL47UbaVLVLdp5MJ6Pg4UHR7tqlXsrBbgZBpp3bJeXSUrM+4MQKWJZtqNPoB6TLZRLqEVqUexf2bd0gMs4z4GWfoeHq+/5grIa0OHS9Q2j4XqRgCJdnP+oytXzUaNQwI8SIQZCiWGtFKx/UvjDlrfjLeYenLMSsveb2679ZFr/MTRcc1buRnRsPh8jpDfb7X6ggWy4eqdacD4y/J9aHasr82OKyz2rhvJCoB4Tg26EbbzCdGKzSBh+jHlFJ3LO66EOFQJNuF8MKSuP3deuCye2UA0nF3CkNjU+w7xBi1EiO7KWcpd/GVfiv24e1cXDNM9LNiJYEUw6oGX+SAa+DCukFT//HNCNaxjYL+4JTEN5Eg2+kwZ7XnNt3uUQqMu7AWhEGR+GE5tc48PkdSl4SUDYqxA0FXNI4AfFqhvP54eJ/4hKkwgmYH8xClJrtEhJUfwHDRyx1OluVsznYoVmvnqPV8WLEk7VbOoXtcj7Ih7uS8TYKW4YB9pa4vBfRyEaS8MXqE7sHk0T+UgLaFbhZT2+xW+EaySyn2MBS3M/h3b8iJbhcZVpDSJDoCrtT/fWam0AHPcvX00/rGuCPlGLq1xQD/dA2k3hFORkEwiYDDIw98lcxvxS0ke/v+ZrC/7mgFBwrnrFJMPUI2fBvyUtLh/x3jsY+Cwp9WfRWxUiBeIZ57pMKVpDgZYENKpCcah7+S2egMMooZ/U9LKTmv89fGvylHuWnjmEtR0k7GllNcfLpd+zq7OJYq+WzVok3qTdYtuOrNJqmFcU3TXFxIEcEGabdH7UOwTUHH8GgXcsjcxqu33qMZ/jsXp995ros1XPvUDnhGaIjX9QQ66JOmYRn7V2Xl9IMfqfa3Iowu5yKSSE9vUx19O+OjOHwYkf2lseIBQyRTYH/z5sbhYPTHJhwThNCuY1iNt0XSFQrMC6yVk/9lkhotdKLv5dGcG52dZN8KQPGFVeeAZUp9GOq/RRBF2n9CNTXuDznBzrYnAYZopCidP9+J0/eY5iwDwMqFieLxI/IITlrIxbl5fydcwsmGn6639cFxsXUvHGVfD1dyTV9vr7wzcj4jvL8SSCVuVaA2zouS6YgcfHM2MyhTdW2fj1rI0D88ebETeH3j0giYrqFRPzBG3bnqjY3loqQNF0Jf8aNqhX0PE0p6zviOy9A12X8WXDnFAfG4+3LimbFDVbxv6kNHChcDra8YrK1fa3BU0NWS+673mb11bVmY2WgZn2IsythY/p9KqcWtk53E7IxXCJH9AOfVTSWo1XRgkOjwhko2vLuU6ubNA5rNFLOilJ402Ws0nz6GjPFZL9xzltLscbNPMt7l2l+Z91noZuegVYJug76UkOOZ4n6J3247CndJ9R2tnLWrru2c4+n8i4/sPVbngHog12e79JdPw+MCDhQ/lifU14JID62roPr2OA1Es8rFS6GjPsJl9gwV3XTNoZ0GvCe8LlAkd3kFNIEjeH0SLqK6TZtcTXuKhzCtwHKh9aygm5c7lY5UMfE7d+JlpdWhT8gMBWt+HgLXVVrWfDepXgd2xChjWRcmQWU6IXkdaO8FvlU3HgZTvx2vAQEUPQV/KY7OEQLMISL/NhlXhVPIOsJ4RdYQxA3KxT+oNszv+IHUUcMJ2VTL1sMGB0T2ZH7i8sHkKeZdcWiybDJEBsbwDuu8ZYr20wNuY3nj3Vc/Z48oomLAKF7IF1xWcJtlhWno23fUa0J8JiGQhj3ZgxTssrX4xyLcAwpiNyzc4pna9E7fZsag3ye35SoggLOZMorfYTtSi7VQyfGD5HhFzj+jqmQf5cHU1mylCIb1wT0OT0NtMGpg9kzUgxuB1fGZYd5557KPgnJaJNOV3dM02xILzlNKLf5j7rTwGTMObGbx9bIdO/rITUc4eXi2Nf2WJMbIq+luo0ssJcn7BuWnqP/HJCNw3SdjRn+Lezkcnh9hUd+D5tX5C2lG4x6RX36cQdjvzGarbu/rDu/BCz3TCgpXlnThc8o9Uay0+XLaqX0Lam1jHRhB66X/
saGKp5R/Yhc1i3+H64Lf/IONuhCLDJCyXOeb/5j1xiGSHXWUMMGB2UCMu/uKho/Mu4/onolqsTqO93oK31KNpuun8rEibE3zOlDbkIMSywcYPkbOi8YWgplaQ8QdjeLzPsuWVYFbWKIPrVtRAnYKG+dqXVRwdjuAzWTIfnEsxMPglp/zFiss/y53QZkmJPKc1QohzOrlt4qOBaobVuApjheRJhhBXlBlYuw0FweVwRZwmBtMDT4dCWfUjVdzGW/Gj3ImDp2z03GmkIRdDNCrHeOHDoktdTbDD1X5OOxPJJZ4WCqKg0PoriekBjWVK3oJBqCfUthM5xink9xuqllBXEBaZxkLWVaYL5R1DDVg7rj0VgFBUTW15qHTstGEC3OWsRssnyTgme6SK3eSzJxxWRObZE4sgSrczArcuf90NWCNrMdcGsN0IvZoTwryBqOZYbTxyQjvYWkI5BfzTsjH4EOyGwyiFPQVfzL00qgEmhUDSBUoZHDnnAhTtNXNX9lYl5XlajHTlqKg8ucRydylXaqklC9LNpJA8nnbQKJif9X1pVcgXuxjWInZ0koNe8tNyKY4ueSObpvnwkpqzNeFuEsC2Zm/eWdmsTIzIJotukr2ndjkRrmiq+UkKCuGFD1Cdh/gajibEDEMrkMgXOuKTMF8W9jRr2mF7gByUAfczYNP8bkrp8tqFCkuazb/IykxqN+ydzAGmRqPOM7KsyDZDNTrAMn+UQU/URk3bJPVBxlRiuRAxnxdzeFxu3fUZM8Rk4fAN3HZfM8wUmREgsNpfuGwzupl3QQUPG5g92dX/pI4SdJe78QnJj4/POs4+X4G557JM6/IBR1T+I9xUR//nwAQmb9U2KN7PGaQLwNefV/sWWtNpC/FpZoJEymaRHRuINvv7Lahnerx2wmIT0PTreS7V40ZlNlMuhkKUzr+W9LIG0+Sx3QlHbMA7fLQq3veoCwht52TOperPJxiUWlgyISO4p4uoDudPwOpSqqiUEW2SlaePWxaX7b47u49iHQ2/zZ5ao7ixs6u2XnSqp/yhAZAA40oJqpclIx3bCemSj2HyfPBXk+2WauQDzhopQ7ifzUV2XMjWpjIT+MmpFYQW4vJeM44WnJ1om8FIfEZoQWaBza9Pf1NxQPpyhAfOV9fHQ8nVzrvWwTV/s80OFnlMt5wkpXixCDJ28HcMBn8Wj6wwfReM5pUUE0k+JNT3xYA2ieabii9D62/SYslibbizUu2cExpE+/nxSszLsRx6bfs4c49DIVtdozAfxquryho4sNx8pq2QluxTQLA3qevTjkAYWnaFaTcAVbOdSGgsqQzq/136a+sjFWmgcFfRmrJL+vMNzROVYtYf4HKSiA1hoJJ2FA/Engg1dV3b0SiZMHb/Wuc6+h1n+RZK1wZtmSrnpIDrkwwQfKbdNIIvJ97l96DwWoyj88vk9JUolXVySlnmxvQ+Hi7XArk0J8xwI0plMaZbdhvILk52SMvssJ/hf4T3Wl2NctnU5p6XqMFj5581HZryuljQVzLcTBLiNkIKSNDU+x9Z1R24qAAmVVVMrq0xDhrJoHt0qqNxfuyZcJhG9/Wx3Ey55rEX/aTDVR3KoxnHGg6hWWUvZmjK72s2BHIQnLENt8aJ0o5XdnHxmC/6qvLeyTE9kgfP9kQZSAY8o+e+weC+XPksUvMTObyZaP9PsH8eURBWd79DaUfrpKMOPtwxxzjkxLFsg1fK1el9iMUmBVP87ASUg5Moy2loItv6Ui5ZYtmhAK30zuF0jYPEd8Uf8RdNRmLbiKP+e45YtPIZB53cRzf+EpyveHl8sKhwRIGiL5NKSSAKvp+RiHBkMKzKhi890aEEHDB0Xp0OPsjCHYyZ9V13R1u0kvKotwkYfz/ISM/2Iseo1n2jy8l9V5zgye7izXqpuFUPCKb5od277/w7XeXHVBwzDW3d7C2G76FFFrhZila4t6tWj1S78fQmerwl3I4MYQCP7QdollmZ+3JhtXkZ3S8RS69f25luZ0ODQ/SI/lslj2u93xWyUOtWbLsPaQMoGCthYq0CDwho8FgEoXxpD/iNX/Qh0xZUhPMRnFS91hY5YwImKEPIuK1lCxelaAQQkaora/ycTiI3Lxe3zKpnrWiN9XTtuZV237n6nNMw5dI1p4Ul/vSOXTsIBNPEgV9EZuX6zl2umyWZVv1w16W2rH18cEeWeUHWNpSL7/F9JWXfkU51pZNZOr1AfnQjzZGIS3ko4CwOz/Bnehw6EJ1Yzk6Wz8W64cnklH4UDsujSq9aCOJbWxtri6zXTC34eTuH7IfKIs5xZ3VIPG0C3v1BPhOHzUNBIIJ7ZcCYOdTwa3ZjIXYBe7+KNb6sLjOECoQkmN49Lg/SUkksMbU20gkkik+69NT1F7J5r0Lhy4xGIgKcZnzfAYNNHrBeTQ1mm2Vhy8HPG+q69FL4ZB+8hnEzdUH4E59cqA86ixCJYTjahKGiOc6qxYNmPLwQ0O03QagRBbjBA8pyPlZKX3zwt14DE/NTMRR/3pt88xxsGVdM8YUFxizCYrg2Mex3Oc7w0eP2Fw9gai2Mm1XeY7nko0gonMxctyzYlyIqHLQAp+Uq6ZmSYrhdvxM5dP/EAM6eioXLe4PaBsHPqHL4JTLsxn1cA4xj+v/bhoDQszdQlSwQlqUdEo7f7vpj3uRCDG+xmyelFKb9pmbG2n4HSRWLiLKz3iUcHkuXJNVi0PR893S9Zbe9AoPXaw3gyhlPxg3xLa2PAQ7njq/jkmvsGjYxfabfc0gt13L+B94XrDrb2CA4i1DPxUK8cAOaFBwRoW43QQXRhl6yg5j2ntNmxcEl99JXXA1wVt2Ni0m8HjyIG9JoQCPnwdry+0yVm5PgQ252FPWG3ARvY7KDvn0t943BNl87r9RmkWcFAsdAE6H2QXMeWSrFh05JGQkEIPdmTXLIy4n7KnN+fo76HEvDdUokVunZsywN6sQ63MCJTNXU6msW51M9KSHQ/q6YcPmlm6m7rCFHSSVIX5tstwd15QQWXU6M36e1d57R+THglyidYGb766mc0jm5SACsr2D0PzY/QDZ0zmsXIKF6yuBHGrDsogriL+B06IPm/MGAguGEjzaH1VezWxOCvq7oc6XDPiKd+QM/PhV7uCT5rpsfFPZuioy+oQkt7mJAuRMhgWRgtfNze8ki/mH8baFigrYra0g6nRbDeAdPpb2i9BqQTf+FaopNi4EGXWS7PXEINCeQOylQLCBI/OFkUCVNnHq+hJDj/5bH4H+Fg/XHoeufJw+Uwuh8PEHu9qWi/VXwazgakGdfGl1yqi79R4K4/EEzUp/soXM6rccd85zfh2vqutA/y27tGkTjRPj1CWoEfJa1c1z/YuaZa4FkJQPRSkiQJEUK3sp2uWE+x5kYzHHIKGl8dRrzv+2rlT/m9izdD/+KXp2cnqZVL5X5h+e/T3HMmFJwGGsZ5Dxz+sla4v54yxLBFQoQZTHCqxnAE6itWbBDj7SGrD9UbcZ57/bAhIK2afY8G3R0Msa7fyhH+afZqsWjhXZFdtEKYQcK8T1sFN5lK4tJjR
L0JCvXo92Uec2VlMm5xSUlV+bdG4qq8Qu9aUwdaMqvb9fhz/nXDoCCuTxwReP0gPj2Q1DixkerIDHlBzs0sASej7xk9UxTS992HmYl+oZKRKqh7DTopnQ32pXZzR9TxrqVaZk8MKbVg4FQ51YamfUzFEV6as6nWF+XcmkwN+YuaL830OMvAG6f9gC7UuihYDMrg+wDIl4Hb5Ad10jZbCZHL0ojo2A3GMMSYOjrAkuxKb5OEDcioHovR/kr7K2qSHybDuvLQkJoqsJxmXSvCtvVLhaOTc7YXsPX5ZcXfkaradu6DPhU5n+ghQUt4C/CjXMnwqCH1SmNcsbBZiGaNwBX91KB6GRJoC/sHWEwcLB19bLa9pX11d0HoCcoSGeT2QLzYIAcb4ZEmqRfwcA1SlPcnnWBSt72+jybiHU/2JYdUSKisx0pXHnG2zm9ylae0aye2Q9FE1SC7D+dxqYFj1gPO7/6LifLK+0Fb36M9S6GPosC04UPpaMjDMy0Wze9jEB1wIQfksFeICv/fxoqRtPeJT07WR5G/fmKFHlJaia+5Db2RAqcpjA3/2VWPd8gr+o74Zr45NxDpL31JDb9knTOsPky+1UNA/ZMCLlwJ+JfAaZapKHxkyW3DE2C408+l7PgwnowzZrslNxqKfNVtJNt2TOI/WP5epJ7ULqrYcLQQK8rPqUpoHliP4oiN5uizkBUcSTZYThUsq6RVYRSrhzz+81iJv6sjJPlSJuQVZfXgSsGfB8bqiGIf3rjTas8Y3GKJtznMzzG5GYuzK9BIF8ecAfJvp6DjtkU3csUci3Tw9FqCDczNYBJVrb7Pdn05wUKe1Ozk4SBO63qmehaNV/u7YSrDpdouX/Z6yhRUgTSo0ko6Xrlo8nasYgwIQJKgv/vkGb4TAgVIFOnjQtP83zmub0p9ZgGzO4xe2DozksG1VvhYToKU1L5oUD/6yZiTZ5bwdFq6RDJ2FTaxPnIWfv2NszL3klaAgnDDZ1XM8/jCPXD0colB/KikyD6PiarQvX4UrNVYxT/if2piG58CZvi++s/mF+fyXIBgw1MU/BqOv+D7+YgWilkNgkWdEKknI1hrRQ2KWuiu83KNUvRnnTio/6Fpktm5MFnd0Wq4Oa8qM4LnM+wE9EIUFYqyBdclhIvQ/D4KzM+trUr3qnXOdkQKB6dMEJAAdSMeHkNYXPXi6lR3oz2vNO/82yse4+I/k1a0IS1lPSl2UIDjWKgsHUn2AwuQFPOtBEs92KmbtWM0fxVz5jqoveSurhaMlLAkuejNK9+npE2vK/sA4Uo4kMSbDi+x9nB+ScKYK5RJSYiTLltPXHY/CD9a8Hkzu1tFAYT03TImuSBzGV6TlIb2rIhXYQ1SHTTS1Fz7SJVk/RZnSqvZ/GMad198/6IL2iSQr5Vg8H7lr+NvyF7uO63cVwW4BfYyo2uvahZZhkyxKfhSflwewxkJOwqKqCtaTOivJKLQzmyD7AWa4xWZFwx5DsUeVcSSWnmzzCBoZP39a4hBcrmVLl359hmtR3hnek3sxkLejWqxGCQD9Io/yh4kzkGy/78a/nNn0USHl1cVdsAiYVLD6klxdyqsg355DeMYR+lTRsIOjp/LrubsVbmx6gq9VTTrwv0zXqXwMLozjUPs5AAa7U/IZLn5dH08uABzG2Fdw2BBpzN/vgTOYYkW7LhAmu5d6xSgzfAifyvf6DgnD3nttVyNGoJ/Sf6Ys45lLSQuMg8BaYh5yoOm2nvp7WGRON/894vt3lsOu7XjgRkHuPuOucoPhXSTQ7xfXXUc2m1o6mHzRVqQLD+9nI4oAq0iHz6JY1u6WvwTPmAg1ftStBtGWn+yc5uKZt9ar62BFqfafMtOehJXQW0vfKhJrWLKBzWzFXpbif3SHYbgVTR5SUsCJde1eqX1LlV96I85OpoG9jGV7pZDSuonF+3idKNnFYs9KNG8sKaudJd0VaKhy6sahiVLo1KkyKq3WmLWiODXm+iKaWS3yArw/MsgzrdDfP9FxxN6M/HFkm9Wlk2pAuS5VmrpET6/lB6RFpWxZHYvA5EfGWYgKKMV+TIJr5MUsNy8okjt0VIlQlDWdQgx51CYrUptZ810y7DIWRrNWh3ufsGQJNPvuTx4kc/LjLTzq/whRfLCTez/o65T+QekIPERovG6dI1Mt75bhutMCLyVFEBR0GR8cqVd0ek+p/qr4vS+ijrIRmMaFmjjd5maUxWqPgwmsrL4+HqNTRHDF+vTFGv7zL+TbfCnuB1JhvUHcn5ZurgyWNgAJZ/ELeZoR5DrYuT8Ltft68AhpOju7e+Q1j3N4fWHpNGtRr1mD3y2p4K8+LCw1RSATIjDBLZUG4GpGChNCaXKYsgqfCYfjn6WjCQf0SHUGowqaU0o42sNY4oYslxhMeUggIDYPc0lfrQ2thyh4RpfGW/Ga8j6mK4uZ4/wtjtLNpUCXHWAAgO6ObL0laiakIPvediBnBwt1d7hV6CCVXktGLCgqfgBYXDTkt6UDF6p2HgpwXJGHfBNgLHxrDjiZDfLMFf5KhuPnSfyX9X7NJjpDEeXm29BBVlAAsdP/ZAQLSMgIJo0yNXUhmhK4xgMpVadfv5Tb92HcFKdBhJkIYCvm1QokTq0LZYPnjrGF7ngkihITOcMrunaY7kPqYUvs/hKGTFPSuPJfBYMhZpoF8mQlYNAPIKg3ZzReiWFIpvWpqQ0Nbr2m/N1ZnGDzy+0n4a7/ZWtjcuK1cEr8E73GNTRV7sO7kwPaUfa2mLpZuy0e/Udgr49sa1354maEPUtC6c8+9cJt1ZsFoo3jXMws11cBnIpZAHK5kOEd0cHywAb3uLgv853lCxRkDfnbY42OHbA5QV+bPZ40RIAqydNa40prG39ew5eOlIGvPCxz+H6nyn2Ivb3fE0LP3yile34Y8NN2+IO/RWyLRC98FyGb+NWvLbEcUHEpZMVBJOdMcuBcFtfi5B5mWJsZBz4Qr1c6B5eIT0GaZ+SCEt6i4gaIRSzrKQ1HumPFKzTJsbluouKLhNAI++cuM1zr3S6ktVXGt7F9qzY7RLAdZnQfZDbzV4wRGCKktIX11k/mkr29N+8Z7SD+Tc1Vg2rxAFGxKyHyZ6ekbhURP1WFb620b3SBiK4mwYJzZyqkvflRznUQ3W2HSQp8mXkKOnhLZgC7KpC/Fz4W0yNyhTOq+AfeQWhBMWJMyx0hwMsTE8iiQb4V144hqm5W6XdhL+JkQ8VveckZLNhbmDv0KJhMMy7e6sikHHzDwuqg3ftLjB6PwUvZmiIgcKiUc76xU4OTY9aZtgsLM1n2w+9DokhReciZkXPdRV+frn8zgFu9Li9c3y+IxAdQYq2TMAgq4XhDC1AobktdO5p6KqKDgRyGZNY+vyoJSwENmHID7NmrQSarZ2yt39e70+7N/qXcuQbEJ7I+x+b2NtQaZCNcgeN71CR0hc6NuOVy92Jnj5boIHZlcongU/eXxbczq21J9Yc17D1Gam5JkGc3yzpiLu1I/Ydu53nMs1o3YJPU3xR9x5i9DOd+wQF1HCS4O9ZovC3ccy1dNZw53bg
/hmMbtqyGTIbpgjawhpjH6ImkV/70BlYlyK3d1Vws3mxTJCi6D+BDiFP3DkIJy9Hgix6IVT1xkpZHC6wha94UG5Y7b0vDoSSDyk6b1Q+pupRFY1hZwGclBMFsCRu23Oi0kjNRciYLDv1SNxji9F7a0C5l0PFmYLmhFBMSmlfSVmuH07mQ8ESsKs9+J2Iso0Uaj5nRfeJa4oh0esScf2VMSTYXSwK7E/SkCiaZcIK2s9Ep4dyIapTY4uWTzWlF/fPUNROZUE9NmrW9xyl0VdMnne/4Hf2eaHCEYFAFRMu32lNRHH8KcfpJV6+6yH9hsX+89vUTEGg272c0c4Z7t5f9v/4/OJiCBWUMeuOSxl4Ak/6Qa304t2alFq8IQuJQcOUFHQr5vRI6THPv2CBY45ypHWbOZBW1ltEHs28J177h9dC2QULVsTmiouApUSfMO/cDawVma/LfxoH2QrI9HdAdW7E8+wlzS8i/geDShpjNn9uhQa146Gb0JW+k4vcvohauiOdgk3nGe69cOfhspvfaqthC6MCkpJLxg9Dvb7jOE/+4z5CftENopHwFi99DhtEWRTbN9htWWrQRT/b59/WvWk5rMsKrjkO+Xo9Vs6IEizoToepr+9kJ1MbIlU7g5sMwVEJlVs+Nv9T3pdURol7UTPz3WaUfxF64toaallMHSiUQO+xgvRj8wp0VjCDwZrmL3AjfUtxlHxfZ1ESV4xgQmy0Hk1RmRHi5iAQWEOOZ1JcC26uL25nY16Xytj3H0JHc7xwM1crSODpSuzH4JRxaS8xA6cyRvpqhyI1F38FHX0cTOFAuamj2hgDvy88sSYVPRzxgmjepvaVtFdvic3xmXZZZB5R1W2QjbDTR0Rf7saGs/L2yeDYDdLZTpRtoFf7gYDk9xbdnmZb+wgjt1KcfE04rbrxkYSi1Ht0wr2EpiCu2BX3cLYUi50+v7+MZ6pD1941v5piwZ9Jd+JtpU4icZDAnKtFgSy/OfAYv2c5UrmIKDa8rgqz0UXsfR1YIi7cOZlAinliHbJhqfTQKSoxinr5X8fntbUTtU1BIu22QLXEOal5EWNVd/Y1Ojhn3RUNhCJR/dlfko1QGy9pO42GC7wWhkjKSLEOpTQfYpyWmZObKoOrMWcTeHdPinWLmmk2ST6upJgakKBD3Cjd0w3DDpdElyJ8BmMAIqEDbt2FUVCYxpfY0U1f8oMJBEwRvrFIWB9d5PDeCJ91yF57MTDvHtv7T+callhl3t2h5gSOb919mPJ/jEJ24lsp0odJEBLc9EUTL+cdfNqbPfEaZ9SxGbZyCaku/0lEn04qSy7TlZ/g1s+DMR8nQZdLwaxP+FeW4TxoouUopKH+OzKfWifPAFqBDKxVgyRcX6DYNxk7+0dIwWpSSOzFvbo/AV45YT/elaP55bRQvWzVRY9/S0InuBuAszyMboV6WLwHxsnwdrF/mf7aDSPmge1BZ0t02KttXz0Mo1XwDS6J3vaabCf9auYTHGjRwfnOhZd0Rf/htYKj/rtdyRAXoE21fOl14JDrVLuS5RpGzaw5iKsFJ78aY6DbHiiDwhpZMFcUVdlpt1m/mxk6ld0jCNlxoLGKbNykNqRipimyDadWZysXtYb+wWbegMA3QC5xjwnWcP3LHxr5D7fY7uVh/SEhuXL5P2pRP/uNeUrKHz+fzuiq9yygjfCM0qG1L09khY9Oz/3T4vfhdTbubLFFx08LGGkbZILEs6MNNEMklNWfIPbFRizntZUDosuYObNVSOd98VuJmt7K+nfQHxbnGWuUV0UrH/KFNEBhfyoDjD9ncWD8UJ9WSkxCjVkUarlW6UTObvMcD2YBr86viaEbgj0A2o9qqM9df70VwAh8f3ACxHKl5uL6qi29QAfKLGSKnJshgkWUBdo016pKkF2dNfsoZs2B6pMJrT2zv2AmWZt6RWkwqWh0IYwk+XX2+5zh9qASpzYZZr1Tpo07hoboLZatUfJKt8POCnM+SVfkHhc4SM1Ojd60ZjIdc0m49Ydzq3293HXrxMebd1vzIcCecgLtNAFTdAu1T/PrWWVGw0yBdL/Ngix1puc4+Fuu8Tm/sf8DPtVwSsr4ck3iw3M0AqDSGSnmvbzYearaogN9W3/6CCxnyMgq8KG+FLYbZeLrXBdqA8wsJFkPblXAHRKBPHvPBYP9HnFGtqQSjO6ASjkE+3GSJMYAEYkjq7h2FmUYlVB31xF9vJIVldkLS2v2RIIOFgRP1kTr4tpQJS6MRitwuCshOcMfviYfHIWvVI3zr16czYuWVmXv58lY0dg9dbaIgerjskYAtFVs1TA3DK0XLd0MQK1zHPMdzQ2VoFpm0PiZUfYLphIsNjqboGO5oKPo9P7n38osMfjKItdhzUxyLAeu/ghHkZJMPpJzYrILOCYpgwHlXIN4Ui5nPEil5V9RokV6im710ha3R6qe25sQRpITYzRlyFJHHNZ5HnTrpXYzjo4IyAitZdahzGNlh2dk1LE7tQ3Ae+fMmEaITUParWZObbQcNDosjgH0TXoBrg0BW+CmkR03r10EbZrrVUG/lh7OwXleEWv25T03cf6IhyPkgszyAjguaELhuN0FmrZwf31Z+wjxEU4+YKPudzLTwldkASjpK5IYbkl+MlaMqE1Y6Dg9L1SlGlqJl/Mhj133DPkmWqGSTwYLXaCKlVW331KCXirvHZW8dsvnpwAFL5j8d+dO1pNabfrTTEc1XrxvvmDt9Tph3krureCoCcfEVwjzdqfpTUtYUYh0GHjem8yNL5j30GLqZ/NzRmZCpWCs8e7x37IDAuZPklr1op8s3npXrt4HDeeV8wH+RLEzx7vxvnlwso8lRIBOmLIOfkYnimYxOOHyT1u5/pNuDYSAGAADA2LZt27Zt2zY+tm3btm3btm11iA5yWYrSchvJo7vIgW+3J5yDjCMdC5VBFWYHiQJkVI2QTKUAJ4BYvZak9CDGF1UGVdFfuZMoHvfnDbwmJiIXRQK3vqBOZhqo6xHQ2BwkhiF6NstPNyYQj51plliIAiEaUthBQlyA/MB/TGslf93jB5yjhAw/vy2jm0jYsx4guWLd4qsdWPvwBwcbDgXoed6dr01jTrCxQ0ryALkLxHa2USqRLIKAeJoejTpfPpeQhHwBWP8C/jVoFYKqR0c/KbsfQZZew7W5qDWea37jrMOcPT8CGoamSsMImDvAZPJtu7cWoaUcbho7HjeIuhEt097u4tZTt+GgRvBsOdjZV9M3nsyZX9PeV6XFNEVXNBZGvysdf9gBWoRH7oXLU0ufJIczMxZqsStPsMZtAApA0/IhJzPShjPQM8N6L+jfqIU8E+rX9V1oVa355Le1xX+x0KvM6kC1ZAAlaS5nvnZznm1LGgAYTozm68bek4vss4Glit7qatN2H2sSozoWQknBYETxgeivJNIV7ZM4UPhznM1b6nraRLt31+xt+7kfzVoZ4TjVxUBlPx0rRK5+EH6WZih7MZeFGgHpk5mejb41P/l0sJl1kEKgam3IBCrajt9E995xYr7xKAgrgf3dspvwIDTByHnvKS3DDLh+OW
X28BoTJPNkoKLBYXq/aIGWUvH8mTjkRjNe2A+ZKHs5xE3dTxUTk0LiikWDV4RhLZcXR5ClfdD+dYDrWgBmePcEUoBSTBSxqQlUdxSYJ24LITCvkqQ3e9XwM9XGMGv91mRFxmouZpJPBEHi0D1YTwN5cc5axVbYjdn86v00Mznq33x0NdjDmu68ylx0IaMUJwxx/+52TomD3GERT0DlK6dGoYKRY2+v7PC4KnOK3WyqBS2klvYih+bDDXrzBLA4lpCENxgqw1GoOJqjDynXbgPIuWcOwDfHSo7vITAikN+EHT5n188mYNqbFtiTVxtJb3c60vFZPvj1EmPtjq0/3mTcAthYgu0J8a/KleItPAIPU2Cw/H5Shti8lizCzbxBy5N7FmIlf6qfaIrLstIqFZsVaKE8k9HtnrdOU/RqJfMWwa55OMaRASj7lrLRbpdEfYTv79vtHp25SnIIFDhvET+qhXomeKLNEuBwaElaNFPSgz69/rsa22jGjj8CsavwYuGjGJJqP/cmg8pCK/Lp4vihV8SpE+18X4jl8ypdC4NEFVVVPaepgKWfhhPsQffcvMbXVVcRrNbZKWYmLLVaerh1CPSDMwNm3GaoC9/i2j78ABhiDg2nnAXocZLkO353XqSyOVVN5BY0edOjC873a97i7RwKV7Ly917UF630mKQdTwP+4GJdUwtA9Rc5+Yz3EMS0OxBy31bVp75IrhWOl50YD6KPKrpMMtQHAxfvE68sgxYLTchvVZNrFqx2wYaa4ApmgU2VDiy2PUUjC7Ro4NznXNARB6Oc2k7sEZlnDpte0Mapg54sADvbcqtrP0LIGToEYmGSN839i990ZfQmfKt+RorSvNumv1Hid2Z9DaYOsjJte38YAUFCc37PF+tzWrnfwzzEZqH1G72o7Pc685rd3LxhSrdzCHEiPRGu41KaOCltAZi7NxNFoYks35E6QqOVdLN3u6ogFsI92WLyd/7Ia8//9M5rVjLz1oLNb1g/9K3cav5SjfaogXkdt7NShla87hlDKxjpaGqNrQy6VmJGJviBoDdLvLavI5Wyu8XBu2d0+LJwfQusQtBMtDFsJKAvYwMpsIl2u+iegQVrilIQeZmTKobap2uIx9HAsbud/rxXXNWS5Ode8vvwjp+33aMacaDXn1WaoTCf2h7J1OQLkLGIxHBxEhmCMH3iVCYu4tVJXgetpMCIaLrPdOpd6ZSRZOc9pawnDEDjQp/2owH4alRVw286936DnDlIPWGcm6+VJztxHVWyfdvwFR8XzTO/0wEjjGSSbOkTjOyR5ewaFjhsBzgKGKNDlIKU971taiKD5VkdexHTPYe2FJ6yRBAOGBALb173B/Lqsxq1g7sRQpMm2u2Njpy3ztckvf7ZBD9zY/ockh+lvhjovwX415JxnHAnERYNmRDjN+rqRrMx7orspNFwPseTXwa/RknDH94Lic407TL4mclBYG8kTEHGN1nxp/CQNwO2tYi+0gG2X45EytipQo+GZJzerwNkwdB9nJHoPhcuCNXUuNtf/shVttSxGB4GaUkneifOXpZYsqr1QX0SZyyu3NDCdixSznw1jnmDTc04Plj/41n6Kblb4ZPo61Yknr+25k+OczIvfvsk0nSE2ufGDofz5x/SxyepEX5daF9q8bk/rCjQh/SxNlXL0OCFY840ZrQ0rnt42H6vRHcwsZwxDFlmAo2duTAhX/Rg6H9I/96uASJxZMm5DWt8woWgY/9ceqrl0XhX475Jvy0oo3TmBkt+vCG02q07zc4dJl/vGDWWssQfDJgEL1EHKfw90h7Nu6bMYTtwuoQ4DssgHaBP31TPT59WvGkMhcwppT7zJDWxsspDbmogxPp9xzyV2Umhhh9cvO1heCqhRtyKfL4Xowi5TEFzVR4DuCsQJivQJw9rM5FmiUBkK2xVtFU9ZiXCymU3AEEpUl05AQvbmG9aN5nZ12mjlO92UrPLk9sP7hES2IWpyUPr6moRp7U0Pi2T5gXr0z9ppzifZe7tNdfvRx4LI6yodprOdNEOSY8E2kgJ1Suu25svwTFNV545lC2GlR5wnCBcMIMnCdcWA9zq7e6yR9HxrsidGd1FbDSSHDOnG5BOBBkBMtZhBIsDzy3TEqQvG4vNXYJv2KCAFx9Fl5kWdKjc+o+lkhki3Vm07QKVxpfwTnE6A88DP6huuPLIm4HZcDOVD/md6EN3X38TEddKtBq/AxQi3surezXMTXLPjJ6bj1yQgfb1LRgtUAosNQFHPLbuqwNWYusPTFPp3gEgInVzUqaiAHGZEOJiHnFmqSdArc5Gr9T+cOchg+7eLGuZ5/G43glFbts4GcHhhmmPLo+IhS/5h3lqgOhPIBquOzWXM2M1STd1+tkM/9lACofk+8YeT2U4dH6diop3XhrcteP2GiAuFklE7JUdMlY6kn3Tfz9U3kF+5tWNLMA+wluHI/slyQiPOzSFUCg6CfVjO0tr/jJQM/Ha3BWbzN6O8qZJptczX/qIffT2FLk/ssjvYOKg/Ywq+TiVCxBlMb+g1DucWktF77X0eM/GM2D4hktlg7ELxMfmsFF6cCCuPFq/CVNAm0cie+MYcoiskCJgWxjsaVtWrq4rZ/oRDHMPxFFd9Wsm7JDBnc1vEqpPrXjAD/vZ5Vo0tPH2WyAEte3hYeZJsHEkI+lhUdw7NxAz4gi2BlKAvrft4RqOsxlqAtmAXSVufQmxXKU8YINbs5ih1Pww9l9v7J+vwMxxTsS0uyuFdJ01j3sALiSf+EfPoFAohwaUzR+lvFa+anW5fF/T8frYzZTOS5dEmV13goSZCQDQ4eVC8alFaOF7omAT7poTMCuwaBhzFSau/4xq1xh67eRrwGnyM7IEAJdbP1S1+xknFhZfE78ElyxfU4tTGS9q9ttbVgqkYxuO2ZORygJ70MidebtsKe6MzDNTZTjFLRZf6NAEqoCJp8BI6uJ2/Q8JNpVXfSEi2LqYrz3AzB3x6bV3FDadUiyT38n1BKphpxSIvfiG9UOIshOzJnVBPbC9zJyRzHYI3r+5TFkdlgrchSH2JR+li2GjcuHPmJBlXg+vax51XhrLmUCg0TNWS5h4kh3WtZ74SSKkULOVlkEj4q3BTygfgy/ChsJs5a8ayYdhhPDEP9jfaMGw91UZsMH5+JpiC6y6kyYwV1g29CQCBsAmeaseKKx8kDHGaoy+aTQPFyFQlTR/e0ECzLHGYrBvK0DlagWZBS7CCOeD7iTRJKC5jF99OdRYBFTcHBX5Ol413cdF3149O0BEgX5cMwCSLmc3n9gtNzs5pXmivrktfJyl3Z+NAsU8SYuUirB8EM4CgPtb+G+BUelN2sd4he3RgUbkFyJQiRkRDErERqamtgV2PiSKf4RqVxvU8a2UK/k0EZIY3yvDbFoJFOZbqCbQ67Po/0BcNvPZ7XFwBdTwOnGIVq8NSfxSaJEtTkpC36mxn8fgQSFQPpHO0nmOp0JphQFrycE/AVWmCNtGgzXwFO/P9LXGk33cblOFOKffvWP+NT3BmOUWVAgpIrNXZVOeCgUqS9HZf8UIVlGhY
yudSBJmAdHyrPcPp6ai+OI4jjTHpkmugTqPREsFXuGRhj0MWCMr/dSj8Dkem1YwSCUMsZy/fMKQIz88Bkq3Rnuy6PG3ocuK44bx2wyJcbQU2x89iu0WaItzU8k65SyiouwlzwRVWG8a65GRe3FG8EKBnK/8CwyX/OmTQDWCcZ6zL6PB9ywFJH+nSZjqd+hl+Iq00+cJGRlhEuWdZKdElCx9WohEalral0Cf1nDIgETU4IXmH9zcOIJ9KeCocu+jphPgreFwjvK+m6TWaL0oW6BSGLqSi6ImxGG/HiGSKAZRNip9gtIM2Ld6ZToevx7cK0w+tCHrFBItYDmnhIDNlP/pxfmghgdz6grej5htC6F4L7oAF5lAoJiWQH3NMX/9gaL27qEYCLpU34foYy16yysnSRF8/VwU5qzrSLN9SkNyabGh+Kh2lwizyBt/tykFoUbsFYACNZg5NHR2Yyw2rqfPTnInwIn3Y1vpaPIjV7CLkI5ju8ok91SyMuIudvROjmJDihHd4SgYJeuAYU2sjWalQtOekySeakcKYiu5Jo91PdRI0+z3XboAu9y7Kj9b6Y2dRwfdVjqRab/tPZhP0dqrfc/DChJIVTa7cFK/BDBpAxrYk48w+BSBEGvBUICwsGGEJtrBY1G6K/YM0SoKOcRFjCFDAtxZ2ib9gqyO5F1Cd6sbtwSbeJbH2T52nmRYA0ehcLLXHBxvSm+kp2oZmjwHLpKp0oAUz/KWDcKPjYYd/6l9mGl6vZW5bMrFzxLdTkOAAoz/nByZMn/gfQsRCpS9VyQrFjLZciebm7zryk1OnFK5EjHDHH9+2tJSL6Jy9rhRKCPNLGpw/BW2RL2pCs8mNj2xrO90tKV/+LebLVg6Ni/Sd+havSYve23oogpZ0xtJPG4hJRV6ZhZqE+ckkmaeRJaywJ0ACCWKeonnOhcNTE5KK1NQXamBUKeMOArpSQKatPk4flGz1Z3Ah1ZDuHeMIRomKik/fuE6XxX3AR6dh0UXCKDtpJAZeBGnEvA133IR8NGUeIlfOc9ZE5u4cxouEuKBKUkXQ0Oj6kb2rS8kTRyykEDtgNYyx/VC9i4jcGrWlqqhSodaA/HcAAgksjkLXN+7/6o3UeYQ5Kv5IYnCeMYtNpKT86ok0RV8OwxyfbZ41FYTKdrE87qRFKB4YSvI0PTae0SQkDRNUMV3LUV9Qrvl1jfNVck9B8hlm9SeW7vjfiIITwVl2qckEmcrdwTvPNyfN+e5zJ5Cd63WpEf/kXsUkQBbL2+J7vozGyWmE5lTXdnVcMc8WyjektTqRG5nHXOKbGERv+OBkrfIJ6HMP8uhoCR4O0iX8sOLv6Zz81cI1t+QCmg3vvubKFn7YHca/O5NBpJM2DNLJYuRihacwnD/AcQvlo1s0rY4dN4Gh4kKu6d4XwWWAsokz2PU2R/Hw6AVl0d4yLJD76fZxLx/qM0UQo+/bSEe2VPhhnEEeQsDfbsAQ1tDGGsQoJFDTSSZpCos4HSBmM/ews4uvBmpBirUmkKpRs9/L0tf4Du5spWCiI7gsX09UXvm9hS1A+0emXPPzI7PfvH0jihj2iJH0IURl5YzsP0cWjebYks2N+DdI3RMpcvagEnUleCmxeI9y9hQBJc08Ym1XSskLbAWow9Qi+2Ep/IhYNZaULieq/2HQfrALgX/E4yWZjWFD4GKMwFiEuKdwO/VcIkpjXICL8mkMKudr9vhc8aZpnFwBHQ4eYhKrc45nDOujkJ3A2O5xugW8lnHXFd5BALaUVUuyuLwanzK4SQiiXv2sliP6C1bk00+d/jcVjK1pKWZiOYNw4OsIVDem+DO14ArwaAPc+AdoJVnHcdsPiKg2ruExq1fTFgToRpo36Qv5OvRM7tnOilksKuuspLgETnDD5zQMPKi02jOT9IxyRoqBhtcvdoicDQDtyoRLj3I1jb2puHh1d7gwzNCGso1AiJjGFVE6Odsb+EKSxCROSFBzFrRnrKMYTart2oQ+GsDbcS9ubG/VnyJG5bIy+JOL9rjlaKu8IMUVLRUbbuJbOimqyTEIlQj3AykGEMsZT0Ps7u6YbOK5tZbgLtQ4okm89UoEauiquujSrwzXgKiMpwrFbp+eRYpRZUoViecJ4cofq7WAllyCmlP7aosj/1PqTrXLE8tOo5H3+voQHdXbrv4CuShp8Yi+/sHjquYV54XtbUYBtkUZWMe+KK4lZOkP/YoGTI4t8UikDoWxYFat7D4K/1cs/Wzbb5ItkW4/+3ebllUeYWfkZmuJ3XtA7MjE4oJuYbFwJqsfVCIwIGtPttTTpXbU6JaUakEpUFqXlrvNVbeQleafoJviGtjR3sYzEW+hqlc9x/am+56iuAJOvggQ39h4gl6PiAsrhDoFbuIzKUNFi/nQq576L4aQrvGhAEAKWSw262o0B100Nk/jYX9RTw6VsYl80W15pKhY2zrzZ2aZCxsjAmb8MKKg24a53pcJ4wndjKvbMddIq53l4919goUG2ybT94d1nRQoLSHUg61LkfB+TrEmVE5ohed+uIio+AV+k+YlJFKEFQJRTpTjcfBHvOtGeFslG9+HmXWhYSPN8nAW0cu7MJZ7eRrAqZc8t8UTeeM4Z7AEhp0xByX8giLQS2wTqunG8ymmhh/Ui8aeSZBwHp2qkHcIOIpnsvuGejvhdMQU2cY/HUuPoAODz57MXMJZ6RB6FknZ/9XL8n9z3oDlaasfBM+ePN2X+McbCSO7Ct/QqBahBaZPF+HniCdl3U5qW98xhKzJLsgr/gsmWwPhR1hcpF89yOMzZPOLbuzE1L/zH/enrNIRTEaL6a7sj/r1WlSRaeSgStqp8t0vYsRzPWPydrArJqOnva7rQEeDpCyFr0o2vqDNe42q2iLS3Kb/xRmeZbo+IAxbjsd43V0IVbRaGqi9txG9u1pWFytNJWBX+NBk0104E4ozTe048+GDIE+rfD9e6Kal8IQqYZnaBZsx/8p2VHqHAsVmpmzehcGNz9vcogiyj22qRMG/Sa3L7uaFhctS0GW69AbOckLEU8hZIPnHC+1sEFbCXKgwePe8sc6QUPQwggvomIdPn7guRY/xsVNYZd6gYauZBggThakLVIlzImxqRjVrvgLh53U0j2FXVNo16vIAMD2AyYD1COjSu/UMpTgHMhorx232ssasM64gCNRV7OlfKmRoeel8ZyjQ0oAkLMdr+z+unSKzbcNK32MSnIjaTbSkY/yCay9i/4dVSTeZBz406FY/DypyfjZ9CJnFMCbfH1L/IsBB/p+f4joqdPtyHVt/aK6Aa237S20Ek/UOdUyrya/d4oS96rdQZ6kGj4nCToGu+adDZ7CxmkanTirt7ee7c3ygtkR9Z0QnHAcj8xYxK38S5D8DFxy38koLhNx/jeNnXJ+jERBOf5yPIPLbpfXd6/nxSZmBDOt6MyGtvf9+J7xPeWB3FBG5EUyrdysgRmiSVUB7ohM8UUFiivpVMhIXoG85xKTDQ5+OhxBAnKSVG+bRXQD2Y9eo+rwLxLoFxF8rMb4Mlpi5GZSc9qc6Ur9Da0VJh1J4SxqhTb1
zcdwRDSySvv1CopOI1bxlg4ySzGo4OYY61Ru1KCn8Q5bSBg2GMsYaul0hqJGjyIxBWQsDpKmh8O6DkI6CKRotMYl9DcjEoCneZ4gbnvCEgrxKzPQZrblZP42+6S6x65P+wYrLrNwkMU0SGJ6mpsbEUEKpA3LzNDo/hF7feUl7HInLHzVBXb2gwrxhr7/IHBXA3FqHAKuZ/I6/2vXxI6h30RtFTXoTm5nHnBF/0UkGEhxfXUOm7/gFyWyHDZfhE5EKhw5P/pQu6j8i1HISQv44DiMCUKzWgMGWl85T6o3Q8jFO2CWaIVLuxqA0r5TD2fF+sVF/mMxFKDzp9oB8dde0eelEV/rQJ3NB7RpKPZCrG5gwmgCKlr5m38OnNivnu8RDuYKuikhPeSHbe7mLJEsiti9IXVrHz+gu3b+hepdl+M+cRTewocJAnvfdAa8+DwralLtWmGeiwvejbcTPG2em9a1CFEX1KBsHrkzp8k8zKubxlQmkFzZ1UZ5My7XR9CRG3LBkyoDjAk1RUcKlx8HvKFYQPPOnYHI9aXChwMuvouB83NsFFtTSsp2wmtsvL3GU7e3u2P7zym+NVLvTU6tSm9nH3t2zVUpA7EezRjPKZnv442z/1kd9wJmS0hcqLqxL86iLGbVdrESmc0ki7rbKtJNkYIxFoyXR7499NuA8WIajRM5JD0niz44G8l1FrnDOEYddD46LgIddGukd/l3tLFwIeqMG0EO/N1Or5EL1ImO/h++CcFkwqVAV4px4L/Xh0vkJW3Vp2FZKcxHD3u3oey/s37Wt3vim8m29SZ2VYzOVaFHYgM2GuLdVeMZ8y1Ewj1mvjNLVWsw8QBxxzaVN1JO3h3E/GNRAuHdLKM/lLA2sS9cZvd65zUMhmSVEHVMkMH3QkZl4ULYSyuzAZK6Nos6psGUiFZkfULdpzprXqEbMcbl/f3rVurLACYax1PVsVevlbzR4s7IzbeMkZDD94ETQ1b/e/9getowgQCQuvAD/Sji2GURVx5yJEY6LBuromNxKninWAX+gcJgv65Moq5krsw/22UXEsa8fmdoQ6MalMw/17XgP9PxF2KPgMoll8vZQbVVep5flSeoybN511OY95ZBm6H+bh4tecVEVu5sUTbfy9PgsFRz7o2faNn1rYmbkGNgllCbtKVUAA3HkqwffNbwyYCfbrKb7rYBraGe8268bVy8oiGt2dT42t6ejC+mXDbRvLu91cZ4GwliGd4WgfKEXCOZSQadzldDvejtSI2nihO6tFuCpzsI8KRZBC3ggoelxewA9SarsJljKWLeUf+cDyEVAoCVG6eJznbqKChXV/TsDeNoKGQsO/5J56PJovmqK9b9OqCkFLzrQG5XcJMw4scpxd3cjvaZWseYlLzhuFlh3THSSXCPM52gchaM8yP6tdWaFKEYnWyNw+NZON33AO0y7hDrtUg/nEglHTwhyIGGBaRj7u3uGktYaH+FJYnuinFc7LUdAOplfKPjzY9VAEtnm2rT61CD9CjZP0FmWHxuDPWQs2ld60Webxa38N2XjH32Cyt5khn/K1etcMhXs4TW4oVBQRcg+hF3FC+S54tcFohR4yPKmlfMfkfyEElGeKYktK0K0sJ5ycmzGNMPw2WQP+DWkv17AvJDpuZ96V7+K6vkN6PFlPWSKwcPs38ax33+I5zsAEP2YPmA6S4Y0VVZJmZDBmDLl4bPK5+rK80SZtLcuqoHMf3w1aQAkKCqI9mblApSBFC1jTloHDWf3P5EQIutUbIr6Gz/1m4hJjTkKR60p3Dbq74blhXOExlLlEFm2Uy91wE5O0FLrIQAO5ITMqv47ERBbvVWOzQKoGFthzoMbJw60fOJ2i7Lo/eDCeO3441u7T30/B8qduD6WSyartmdfS+klL7E0Fyr3Vr80mLz0sQce4mM9oKeANRmNJ+WRaR3zQJkBRupE0P+7JZeofaeaqszFjnlcueKc20RoIRJ9krv0Xilq1KqMSOltb+xNhVmc2yMoYCj+5XFN/kpW6AbIkb8auDEZC6r31UYDrzkYiwXDsnTZWIrDsgkGWJxxvBOhhQ9BsZTclZ3jc+DVkVQOCvoTNKQZfdVXfFYjUjHfnKO7S3hsCcJMmI4hX2lfjtrfTzOmOKsc1jPirRv1nQBMoRiEK1qBi/tV4HjmSrQOUYgX8UbHPaYMYRjZ403z0ZjD9j/HLXXvI9c2F3UEqxzMJhwiFsQJ6Ojh7csfr3M68NE2xWRJdL8TR1aPTEALWKQiT5sWU7ZbG3I2a+DylJkH4dv6WeWrfqGVErhD0hILVfmTBnyREpAsmFpvgeBcUYp44Svrt1aJ89dIZ2Xm+533JBtIv/Rd4jPsYcR/9vM86Hlq4dY75zfcIevHXVRwyUL+qgNO1SHpY1DErnTctzwvV93GAeU6QR55zd5LOBvzJTVAplUMnuMeLUKJBA647u8D2Hhs8wN9qhjn0lENaZqteX1q1wd1oVG+YqYGjuw9+3M1x2MDeqB4Nv3+lyJokZ9ovTWYy2mAM3r4axCtcWXHJX9+4iPK9DrURpuZ8DfQcEjE0addecXZwZ5exqSrB9pZtJRpiF97Mtyc9gUBJd8mT7CkXijwtvMNNH8mExL6vG02h8AWPN4MhHtiqpUWNeUPozbv8SjRWNIIwV+n2uBbqq4S4D/6JrgN+k9tk1qqmDyKfcH5z8RfYVgt+xDFMUn8ozLPxrpw0crwJpBWvhqC9r7iRwit5yh8UA6MjN09FGO2W/eUE1Ii2DcBcFW2Icj0mkPXVUiRo+jw4jMC2SzCt9CmHGF1KVybjSOa2tX572Sqg2WnmPX2fgIoUKgZlnPR4rKaNPEtGHkCKJdtpWTN5NEZ9mqsJuYeJ8BjDvHfpNu4l14N/Jz3Z+P3fJcyHPywmgkO29s2IfpX2Hu3Q5uaWj7hKlbKZleSN2aIm+7ueak5Rymd8dblNCrCX1fxJeKdXeYfjrCPdjyjQ05iXnBhF7bCxthDLXoi8Fw42fNYH5wbl4qhea0C9KiETRW3Q72heFhLDVIaQGcx7hjuWHVe5ffC3T5PPGBlF8ji4GqhHvXIAJEO/H6ahss1ZLPGzcUW73EBmk5FtAfJ0MKtFBC8QUD558lX4Lgq7HKGEPeR9r8BNRRmiXVhHdqMOKXCKJZt9LMBqIreyYNIRNpv1kJSrQvf7xMud5EVXc6kjqVkT3Sr9DgvtuMiPYT1W5ac3Hx9xMNqWAWE17UBuK9eSzET3Yt11lzXyO07LMdZh5MRd/rSV55RXZr3zISFSB8t0MYwN3Kp80rUhelcbu6Bxumx42Vd8V9p/vdz1CiYbWkXPgxU0F8ucB2LxFvbnoNUFkuNVOC0hEPjpST90FznWC+Xe31Omi5E8krtyqTFpIScQL8Qbv82jI/AoRaChu/JbZO5iQAvLCBZvjT7+PedYwzav5GYH88lKrErkXvwmSeIQ7wIX3ia4DH3GT3xGgSWLCV6qdvY23CPItEAK9utd9sveu6x2pEd2qN71HTm2fDrYfnSEzxWtk8h88mdWum7w8k02YLNzY76LW79eh8kMv
+ZvXEpDD1xkBQC0tq9QGn7G8V6DM1XHttGAGuGlD6dCkc4voinMwFgz1wcXrAAXvPhZfl7Munq11rPIy4G0rbUSSXe0MmXvuju7RUUhM/79MN66snP/o1jx04hg49RoDnFcngBbkOeHa6+6f8DqGUTc/AMoIs3CXqbaHG7tptrgRKOeyyoUDtU0Vl2HtkJtLkmOjPwmOWOCCHtxCRaeCveepfMScMFd3mVPxME9JoS4Sxo5xRkWLAafzEyHPyir+0FlkU4taEWXXgX033AfN1Lr33Bvzeard2OWmLyLG9cm5Gy8HJ9SLkq+hqoWUDWjqun/qP2+o0CziDC374o10PSVrMIgiy60G68bybnK9x7m5Xlxdpb1t9pzXGCZyHaNU+Q+1rXlS8nvPnHDPwjkxO3/9s8u1vhzo8s3eNO9G/0rgJfweW9s6Augp2PRLVsWpeiRwyvNi46tDyWyJvyvSq8D6R4YFE22PGQxd2l7gZ85oA4j688n7NwyQh0Kjtnp1mnLDvIT4uZ4ebzdRchEObAGr53mvI5sWAfIrYCRR0qyubvst7J521Bcf24lxX4cfTFZwWQ75DabFpw5vsFYkEI5/5ZHlAX8AwyxgfKoJcfP7C/I2hEIBttd6LDMKdPbOFIgdBZERNeDHqEsk7PQf3NMeJa85jEO0i1FQXieKPZ3vEZNkC5oA7R9db+PB2f6PK5Gpm75g4Xt4tn/WBRxcA9Q9G2sfyZJHAx8NxZIk7ETOIc+kq/nEM9c0xMBc1+hwdKcFXIanmfDZTKAiHIJAtJAThY9CWwTBBzfooKCY1+RWukYpENm0hBCsM3eZDzqyTeOrh41L9yvHpIEyILboybH906CURNiMyaUDmTr5O1MNQLo+E9zsQixHTOhiNpVIEg4Zo/c0VaRn73juUl6b/miDmgTihxX3Ueaxiv6hBFV9F5h+nU7XZdgEfBjQhikzz/Y6ShhX7KNxvY8eVWqjALoN+nRHhTC0drNTa7uofbomRMg936WGqJ2H49tbj7jw5QSzdEkWS/4uYr5IOspKmv88K8IQXe05Liw/XoCteYJIbpHJkyFDdm+Y3kyA/R9Wj0wbl1t0xHjjG9B3BbS2DKU1ONm7EB4QqwUzPM4ic0y1EJWd3lcQUC9jFEhe4TcfGUIzxr/rd9NkSb/vKHE42+8ve2oWOSxgMvEF+Y6llnHYsLpyUgR62nrQU+RCf6FhJDxXUR23+vWmaJI/9tA1GJRsAyAhWGuvKwa5kmRGGWCOWMa/3jxieqoackEUc/qdPs3Tj96ANv6D6wigZAFuFOJzZZRdw2bsZBSF3fDneLJDTR4bHBQv6PN9DjbvOuCiXPYXQ1ZgopxuD4+hCmZXe0/tUucd41WCUAWSEmuhBJMC+1XFMUtxYHLHdXx6+N46AATi0asdnUrLWG0G0uKmQq3Ck2OpGsQNNuJuQv4xcwDd1Xu0ZY41koLmZdfnr60Zk6/v8NIoEvPbid9697R0WA0yDLfbWgDy6Nvtr7w7Mlwf5SoTz3YpImDOWqDjdx59K6S9BNcX5CuQ7NuM7mWXkbGN1cJjT52s/gz3ePJ75nDbOD3WweUcXPKtYl9QBnz4uRyXMuNWA1TnchrFxAb/v3279i4RCilRcpXS5FBWanAGnyZNSG4de8BaIATqQwQvH7USexU6YNJg1WzEp+TxhrlzQatL1C5nPbQtJaGau3M0HDK8uXA54vxf+YmI/bRSAU2rVOwtIQJOpoIbGjkkmQnyGy2JCuxldfAW7dcEXPjf59RUOoPh3VKM+SlSWn0a8OWib42y1qwhSRbqwkjyVlHXJ7OS5IlhXwNvxxo5G/QJiWYclp/tlSD/w+ltRVicHCk42J1LIful3NuE29OJlXxiMIt6XyrFK3EQityNvTzfT5oGzgAqCfiYVSZ3kta+a3SWZ05g0faRngMz8Z6wCZuf3gvFzYZZoY9w1836P8SET2RW+P7XrQqSfm/pWrCBpXrxRevVxbStBgDA3mf7SmKxvJHqdQzHjK7BCBlFrzm+u40e55u5K4DlwLnhKoCIvp3aVDXegKwRiQVcfyc8yX44cB7IIUUjmVxKIAZwg4PRivmqGki43CRbd3TTb+qqAB4zDZcdDSh9VwFrw+b5nLdsO+cV5C5m3kJZuKCh5WfIOWXWCgcaUJdJW/0JWdmzLodsgqWCKo6oIFJH5dIe/f1+JPXdfS7hCkT0u0XzojEwNVloIh+ttS7vPob1CTjGKC+Uc1O5xjQhFju+nLAshk5ijlc83n3HvSNbbNiI1imEWOobT3/Adn+xzyam+qOVf743/NnGAqna+YCDy9RYiPTmElRd8MXnrai7Hxv1H3PyFaifh+ZUdddrn4u8PG8MvjrmKDs1HSTUbkCafL1/jvRlBCTjUapMY2QydNpTN1gwH49Nx7ZiR7vCyHlgVtC4vAk2F/2pazyy6qd61hVs0h/m48oR0ydBXKow2rrp5L9B2r1D6jz3kn3BlEVP9Y4D9LZ+weSOYg3DINk92nFeWahmf7F1w4YRoKwQlWtJMBvcrdPhgryqHZLomm9IG3JAj+j5S0A822WRreb9fyIeBSdS28MC+iuIdINng0RsdtzgSNMenfuXAFySb78OrHLY96OWrsYnO8pK5rYhUU+Knt2eU3YuVeM78BSbLgNCXutxMDMUgGc/pY1/JOVtPrON0KjEgODR3JiGr+9AAlHAJcgGwNI6GvAs+42m7Qh82Myz11LcIgYLmZJsPko9QtG9SoDrUv+UzX4YS46LdYqrA9kYBP/Zyj/kVC0TH9DjwX9TQIxBuQmiGMxpq8KE7Bm3jeZzuluNCDHj5TZUkHYszBqaECRU2hSAJfkZ4pM/rvmezTjYWx7Rq8DNbxdgVfWPSFS4wIt2CFY+N3MMG3teM54zp64xK6Szxt6fPZQrJLCTTqNOiSjTustRAVpKMuFpP3axZn/RoFIbDILPGjyeu3IwlsQ8A+VVgLffnOEM8B7GxsE+xmiUQvKD4AuqqTpwXJulJoa5bfZUl5GcRcYywS9R2fAu8yE73VlGNOvsSOsxYv7pSW9ZwKEG/I/RIXQU/0Qpi+dANCMYbmJrR7yaSYgZeZQVJkrr15kwtlV+ZqH/1ynu+uuJUlk5r7ccKqwlpKsWh6RXiqS11L3ZMhwZyFxRyBeMV+WXab71cOkAQL7SOLV/GAsoCeDEQL0ur3cIXCmWRsoWOW+syo9eQ+ELqnho1OuI34HB29BCCaMcZhYaO8D0IhP6+LRnuSoyA3Zwsj03ujVDB6VmJQ2HR+8bEcGSgTbg9/yp2VBKw/vwKPOvPjgkuD215dmYS4VZb+DgVHvjuz8JIDVLVQ1ElpgVC/X1/HNWIGiKGWet+3UHMy7gv6JsipIKFjjtzoNEdop4P9XSN+MQkRQHaEsniIMeFcdL9jYl3OlQAaM5EXF4yIG72p3HkGs1/smXkAaPetVe/oNMVPxdnUcEfW9pwj1rucUkcDh5qibrIO4MOi+ZTnU4XV9W7CmC0tyUpiXU82R1yU1DyjJMCXyZ0peKv3k/eLKxW3t85y5dvGo
fertU6YjqQCd+k9U0Tf4nOV0PxLNLlU7ee1KynsyPBKz4G++4E1dgxMFKkmB7WV8U9KP3YFQrZjdYnnR92yGgk5OgDo0pcw+dlvMq5H36NZtaFRigH9MXQ48lJ0jsGDn6MM1pIRC/jGp9lTdcHHX2k6cZxFaj6jNCGLEK2C7MQq7cFwcvGa3/gHy+9Q8jfcCa+vUd6dVDuJUwqfPk0aYDOoZ4DkBHOPr8bNCtEFEuQ+KeuutO5gtjozT6oRFwVCTOS9L9b2TWRozRjonFPrImMdtKX1rexqbRXDsZRD3xdgziiMzEsx7qmAwM2XVNVOmAoJiejr6E2yGfHVqJtY4oJSHySfRh1aa4DKF5nWt0uiZuuvdTsOh2EPLFFL8H5FFN9wHdEaKJOtajTa08swy9QXj6TCXNJuQUzMA9eep3+xInL+O47VsfMUyVUm5QorNP09Uh2yii5dUDE0xMPUxnXfXrjaTEvLKje97yzfpDf5AEnAgJOql6Krr9lZQCTEzSAREi6Y/NTY5kLyGvuR1M4dMhk1awBNLkPt6te1ZbpVckLSpveo79Z1MHlTfcXZ1VsdoGcomN5QFh08nGI5oSREI/2QTgZHhbCDb25aGLJ++Eu7yCK62MjFJj4p0scmhPybbVTiqjUfmGxA59FzaMyxJ5KvYSPrpzjFnqXB/x0XglIQ6rryXVJdnXYnL72UitdJeXuNynh9ZEaNU9RyhQYX6RiQJI6vYK4iVPDeRjosKGQaTo/VvBtE8Q7p2VPbGat8ScQTioE8Yb8+2jmftbMvm7dUv/2bQ02EB1ixeNz5EiqWfTGds1K8BEMvYRxKpL6o8Y9r5dJ2+5Tv3lDbm8lInQJLy4m8yx/2hSTpnAwMPgo249KACb22cwcm0gVNUIOlBTJvFRrhr9rJHb9Poqh4Fwwg44l9mG7Bn7HLpfg58PLCNYdfUufW4eI5eazx+9cb+/Pu+EzYdjCuiyNfA72HL9YCPdJqY54UTcZOAsBhK+ZXS4JDQ1XONpGay8QIm7S5x7DeLH+2XpiSk4kuSZX/Y0YCDheD/+oDfYDhomi9j8R4rVegt/5rMKITLYG6vppaLKEivwagQRB/HplrZfKioFetQsxdwk5Kbtvl6ZrIx6lrCjBJS4VGcCoEZvk8MWTWaELJOqnqGiC3ucjOgopoZ98UrDjDnsEbFIJF3S7dFZtSPd1W+pUw2/kR4JQ2e/wIBANeJG+u9AVEJNYa716he5GND+uxYdRyZKIDN9m0IBjUtHZc16CvcpHogL/JeU8JcaiRjjpa8Drz4OVVeGHz5LeAH8LQEC99j/1gLrW1vA+T0tQb9spXN47oKP1cc1lM3HelOMVQK+K0bqn2HTZcrmO1f1col6Ex5j6QY4+ikdmFM97cdVNCL1+tLw6kBk8CYyqhgpUpas36kZaGtKqL88xbSw85KG4zzIVKFgfbHZ0vHQSi1t6TGD0jhvwxrtuPEjcgG0Suk0LfFwhD80bMN9GQYkZXhP9djPV957JUe4aK50/0lBQsvqk9SYlALZpdP8lJDytH38emYXor+LqTGY/bBszOpDp4l0PxDEP5wkJ9EbXiChh6Cs54Q9mFOwvGjjQ2uEbEVbGKEjCly3KlS+qt1P4h9LvNoBDMRyVJOsnRsJZNokJ8J6HDJTAiLKSgDbsTI9HhSF0bo4MfrgoRgG2QMt9jnyvHSsZ7q5Bta3tsM/uaerzAljwCcYeWXhsmI5cg8cMGsOeFND24Oxcbz0wMp75gUVVn9bxrEYxNLjycE+jtvyyrHvrt2QDnTJclIRSb89ioQhPWE/fgyST027cZAw2z2+fjbyJrwF0RwxTphwqoqC1UvPNLj3xNSY78GZqD4cAo27pWEktAKJEGz3sh9SBX/YL4CcL+Gq4XSh5gXVDGS6lgqIT/c6dmR0F7JHMNrTenBmcsLgeQG/0aGEH+N6lxPdm9D1Bwb+i09Yp28G+gJ8wuTIxtNbnYQ/32XHRmE7Ea+2gCQ5/C7MVRHvQ/uZIK+Eqa4SCYfIuK0OC+5Mg942baBHYwdSd6bgayAs0h+2+UiVOE1pUZN+5m8d5hXSnNyVr2N7DqBRRFqXLgTy+Gt9qW1ptz0alopIHi8Bd0Eorj5elbW2y9gUXNLCf+Blc3WK4hr54a7kZQnPu3aGzP/rtO4dUAZRMnVe72bkvjd3ZVH0dY21SpDhoUX7MPqJaEciPHsq8jxjCxiibdq6Btg15wpJuAkWhJA78JGKhBMTX7IY9Do9J/uD133dQfIx6Un7U/CI7NJoRioAkW2+eqz4eqX1IvuUOVcLXCOMq4s8WC9ZZolFDTkGypuGnC41awGLUv1CQy9C1CNGJM6rBlRvVTItgWnTpc2n3zaGxW55OYQklBFI2Nh9CeAYFWzidLbTNlmD34+L/U+sszinPm86reftyZRjOl8H3x6Qu6JwzEZqosJFK/F674rxENi/HeMw4aebPWDrAI30A1VlGoPpjtZyTak17eYIdkqV6oTdVHy7+Tsb9GUVk0g6UfVPWy0wSJDt2wfYnC20m3ef9cAyljeuSjOn+5nexAQ8lWRWl8lyfLZY8TkzPGu7iF5kf6auNx9YowvUlh/6gnDRwarKTHfxlqpk90i3N0NbGNSBSpmyN7ZWZvrw/Udq5NdZs+zFEmJMHJo8zJzEQ5UiYURwA3gDuIdbRRBcmqf0QANkS4aOtJVCr/7c5mYpV5xTW/anzcKtW8ZahrHwwgfPDsfw9iZkgh7WzSNfTczgUnMrkrxYjmVOC/psIEdcr/WoI3sYTWUdGKwDKLbUV/pBF56BLjvlpIKskZePtJQ40xgjrxxXkgKMa9KCrkM/kxSMk5TYrjHxHRxYSsIBJWmwweLL0Pcsi4u2Nwa82peYTyKIV16ObR31XVmZSQFnCki7uMiuYBYnEO6tuPcoR9I5ijv7iCZLSXEn9UOcNY9u15NHASRsRRGdiulNSikkuOOs6T08Ciu/JqNKb69pBH3FZAqnBtyV643/kP7htf+tPLnmYOPIuN72STFo+z/SEW1b7Q/wEDQIUQfhlxj0saK8wTSvHbk49LMCbnAE/ucX3kpvXqndhCOqb3nP4bd0T2wZ7mqBoSLgNMLuEAu7MYE+eUfU7Sf2JqesZuDmlDJ/cXeljpXpPZ0sepzHY4wCbJDjvhYo4zg3TGCXwD3z1KErgwhBeYQQho55TLy0nGdZKU8mMQit4NYDF0O9gSuUe2CsnFVU/Pw2WwuwVL76u7yD4RiM5vfpXi9+nr53cST0EvtcloKlTQod1/pfWfoesjbSwUvfhNoC5U352/xs0Q6DM/aw4vRINfYcj/TcZCaUA4XdPlU3EzLp41atZrgzaPscmnoSb1LkZ5F9erle8QuEu9rP4Nne6Bt4lz3PcE7acXBSKkKgOL4L2lDUzpLYMeJAptwjPlljz0I3uy3Ulto02cpzm4oK9XiPQxct4ReeSsfWGd8x+Jmcs8AxyY//5XsVuy6ioXfSnQhaV/MX69qBcCfHCnEMiUfEfXPUAgNDAQuiReOWxGPsAdhQDSsFrljUzk1/nu
H0qFPN7ycUJYhCKvQhpvEawhREUmqXmCwrotzCMR0kBOF1EjzWCLG62BkcdzvICq2+sfZZDMDtqn7O1FjkaP5Fl7EU05x9g+Q6Goxzh3E+NhibyVP1mc4DXjZ2GcxmzLY80FXum6uxg2F5VphPF/QpzkVRKHZciELZqF5MQZQwCltkVG03a5XAUkJ8m3S12Lcrrk6AQlQ5jPcnX8kQmuEUjuN3OrhxwoKh/1zRw6+PgyT3NrKW5BiV3b+hBssKfvuzlys1eLy/Lteh1RwpVo6FH0VtMt9kJHMrNh6kqYYu9w1efDRXyLmrfkPw+ocBwHoLAw8i6HsGuIcSCCfqigT5U6irwPgavFNiIWNWEnwa1VfrDwhPQqLEfTk9YQMCfA9A9OD6HWbfaml+vspEgFd1zu4+TTFjQvd9Ask/9ia9O8M1PXvSqVvpiA9z8sKOqizTQVYUyxp8UzsSyQS56VndjQlzXdwgjZg/6ZDcUFAzXK0/gU9PYsRXFs+4txy8HVI0BMrHoeiJWBP2bMPJ1d3hGIhpDhWsXhvAawb2W/jA90R+ww6XLHyXRcCpheFVUXMLYI8d0pVtB9uZayai3b+ALp8R5oNob5rr7RBARJQBa4bhCANZU3TkSS0s3OiP9Xo74Z38OiP4iadN7H3UlirJ44Ju8adnTxrRDA+eQ2AcYO9i2VktZIdO/oLsblITMmDSyvTM+MG/5BlLqGR9zlsRkUJAb+5lkDSBSgKgsgL2Ol6biSMYPxa5Yl6H6sTISokxEvSbiyi5zjFvo7xUnLp0nWmi8g/aUoOTyc41tsMimKlLkTH9hnb7hqf/r/z17zXw/truqVfONUmbDoMGBsr63aGcpNjQfA7TR3tHTJbg23cmKhk11i7++4RAhyn/QmyVKQ1Qws8Xn474QlttHQfySBW98bBlEBexS+v+t67jHSkff8ZRVVUN26DjknmAdWPyW8g5xFo0EY4rZXN6+vTKZMpnNHxCbMFYozJ1QfTBEAiZqDCFIssiHvOcoUbYwjosG2WfUhwI+IHfvs31wxHnDG9Oeo8AfsZ9SsT+pmQJUZLAMEmPbdPy4BrefOh0nbT0MwPo/aeBJgiRBo4BM2yDHw+27Brt8JledD88gK95k8OUvq7apCR1CxpagSGyY1zL39fwKzF14b4s7ZxDRR+3KYoETJKk2l5FC6ShSnEcF92wbZrZNla9YNgrcHa9XE5GxnRytLm9V7JRODJO9hZNjV8FaCGyX+3CVVhut/n14r7jq3RYTiHKhsCqxLgW+itOHWBRE0YN6oRhQuqrVIrl7VX+1Ejz1pgL6iY4Y7PrfvyrGHefzhMGw1bs+pun03JvY6TZfFqU/mT5eesHxCmb5AjMcZV9Q79xMDn90W11NCnSSahoUocfYhmKpbJUjz7cj/5y1lECQUUA4mWzSA54c6Eyy4tY9BfO3wfPQQ2HvgJ7UhUeae8g9HUBRAerIlpcJKmdIUvJCUCHTIjcPHiKvdmq7obds0rTZ9FyAVC8HH96PvSpD1qBH5qlm9uynpabJqJ+hDOkjwn6TT/dOM0cQ1pdt/JhwQDq6cRte1KyHF7Prx8KDTpNRjcpns+cLy+aKP40bsYr/88Y6rdVlEVrOwhQebFMqjaUSPxQfi+DqFFLB7CYvah07TSqUsbT3lKFbdy/Q5Ok94QZ2tlhzlzp716gQxNN7JrF2GNWqVpNMhz4Z8EjoukaP0M2HmNoIzme6stbDBBLeUVtrazJlp2043PtYLWfAdZmYUB7FnkdEXZE4U8NGHYoDkHUWY7b49yyYSgCpPVL/7OBQd65V8ST/M466v1beKsp+4L2O6kkcrbtRbqtkDYqN/AxGTP4Y5jy44UoRNbawCDrkEqpY7/o1aginMbN7sxeHKaBVDlUfTxcTDhXhGoh0ZTpTJ86FBbaphF+BT9kypbQFfBMgmbeXG6HnmP1jcuoHvhTLRFbpbWweIXl6nEWCZXBOfDw5+DrcIheiGH8vclzWtGwiJuQ56MWf0j2qL7AMXMCvvZYoK+8jfJmqJaZ1BKn/jMh4kmzPE/KWK1LT/X7oCwGngUfhVG7jVBoUTDdIMW17dylQR72eulQyArKd91porXEKP5vUkps7woBY2U04yJfWWIRs2e6ldTEsd55LZXsN9Xak4JxxkpKY05HH8uLJjoCUGoEnssgPffBEM2tpaEG4+3Mps2XurlBM8HsOfjlHeJWeXuvZ4RZmdvyZL0UrxSKEWAWWbKxocZFWf7lYwVs7u/CcL5XS/zrXkPGaq3blrgaoDtPsRxxDDRS6JZUBn//USbFQhPlyNqc6WCgiWzY9wDawDdPbwXBxmA7CGsGKG7C+3+1sdVjwgkmnFwWGq0Etl1+WViy8mHSn8yciL+l5bzV3tXZuS+7iamrIwaFtE3Ke1c+erJvUJAzBrYUTiowF+AqjQJZm3OIwrnLSn+Ulc4A5YUdfpE4ArZpF77NSck+rFF/3Jg38dOzY6EyIk4bUSbbdADdgpFuKXwAVLGNuaJQc6LhhaS4KFH6byLzNxUrdJyWzg6Qmn/TJTHPEzICbuQzk+I7gGMACdUfaq7nChNcMrizxwrKSjUGas8mHL4XNTefYAJxt3sMbNReQES2Erz9RiGAqopP604EDg0dPwuiC4+TILZ2xM8Voz3jxTR3/YIu42dOK9rrlS6SLh+LCwNLkTcSIukXg6LUwYx5AX4gga82xgA3EAAiqGZZAckzVvnf38ke5nb25Izc5zQ+0i2BBqbtu2ipqyay9suOPgetw/R1OJiHEXWatTO1j/us0zCrIy7wc2Z+dYNgZBWnBhCqcY9SWDArUU94IdDprobgL5vcoZbFUJb6f45QRALgdfmQtlUOUFrv4Nacx0pcoQZ4F2bwaU0VYUW92sxlExsMbvJ/idGg5CbP2IsqO4gxo7/3IUrmri1WGvVvY8Mffv0sV7on0dyLFNJs2xQJkFjbT0qqwovEM03M3fgyZ1s9/0FjVLhCup1MT4AgjE8m/AtSEiOjRgIVf7hyo4wQA935Cec/86NREF9SoE1RjsnkjT31U3kce64BMX9/bpkSvDoci/Yra75thK8J7G0ZZmjMr5WOTfr5cUF3u+74GwB80yPBUV0ZWJXBCFN+Lo6p6Zb7WRhzpUPZpb7KyzRNozahsW3rFIjuUafYB8uFp4ZNgJd7YEiAAjecY9QxCGiKQxj2LkwJKO2+RRy/JfTQ+LIs0BqYbQBc5R7E21ZAXj3i9cfHzGFB/WtRy7kN0tkWIWWwwy2Sz7TasSTYR+FMNh2AXirdnOJLPCcYPB2cSaEsNoZfLQbcAbfnNiAX8boIz90eoLruf6IpcnJNpEQlQ3U7XwQxZgzPGufPJbCAV5G7EUQm98kcS0znshf4U/mVFov26bRkCMp5JMLe7CqGMSm2pzDbob9yKYzq1aHn6HwAhON7HtOAiz0LCvUpwiz9RJvFqJK2LM7pI0EI7bv1eY0gQDYqC0wGZ10+1oi1HtuBU4vN4ztrP/LiZBKYdddDb1AcUPmWVA42dCuPVw7PW3snpg4BlH3YMw2Tt
gZYFs/8fJBBquRaX/6b9neYnwHfj0v5UEITOEzALC+wRRvla5X8NC9alucOY8YaAb10nVhUG5FV0M6LPFQCeSYrnre6dY9AbI5Yb5opaAo8mAdDoDUaMNEujMdYRuWw1i2Ay6LTaBUf8Eb68R1Ea3nMIs8piZflAB3nAdjKcdRRrxv8YIvXR8w55YFvbAZmBcYuYbQ8H6DTLDZ3rg2D/t/Yd+YI0XvJ6dHkCly0t5PjSBoGnjZPslv4jwmv66E+xTbQ7AeHpBWuMSeA5LPoziq8vC1bFY+Pl5npdX+/jOI+F2QyJnaIPAZj5dtRYZCosk63uejR6XkaB+btbEPS9iqUe39bj9Q59o6xi0U2rQVC/SXQC/cRaIV4Q3LAcNUlZsQz+Mcfp2H/zPYD8Br2MxbAQmAiKGVDu0NsBqhMpZ6tHIjX1ZCSDsl6vtpmjDm4YTN+oisFsfYC//Tji4fxwWCHqac1f3xaR//fVBXAMUdhgzloJbuY36+072dcmgSbHaha3RbwxilGrBIDGCtAEDyfa3ltLBsQI5WssUOn1a5uLQl5VXC+GMItvHDgwPxzFww8m5mutf+cvcKZe7Uy+cCTrGqAfVkML1XqR3i9rtnsRE58OISAwQb8IDqz1mjPZnNnxkpFBuwtNC+ue9sAx7tMd4056EAn4clFVsDFYICqdPpqaLoMSSniZepN1rWFnrVSqdMY0jMVNNEzF3JSH2EcEnV6rsPoDyPsNDj+p7KhyaQlVng15JpBGvcWeoQxwzisC1JnsIk3H9/6NHrG8s0MrHbWDP/XyIm2cteZc7kMFWMGO06PIbOGvB7FY8kTNDeITSsfB03HP+/rk20CAGiVy5THFc82ptbTVKOzZtpa1TJB6ntBFRHp5u0COw5to31BLtR9Fm8P/ye7PQSUxNuX5mWbyGIH9ZW68Q0VAXFINvAY0ZbUhQJkzXQRYVzMtW55UFRRzFiKvjWDBgBZdlxph4GtwqbODR2XbK6lygC1F/S3surjQmKjCodHzR2HGzyYYeafKvwb7aASTcKS19DQS6iX1FOjs2tBSZhFeSn7LE4cjGAj4tFljdY8/Qa/W7UKLMIGwUS/WKlTD86+7W2dktlMFKhKUnJr4sLA9RBEiWfkDs9EA45UNmA/d6cfoVwHBa67ckCNXF72OncMB2a3Ja+GIOzYvEjvQqYaSiseZclF6tcJGJCIDd/Iwk27ojLvXPfkw1kcC74gdgR4OFJji8cE+sfw0XXSVjvQqeIa2QAoIJm6bobN4lslTMNlwd8u+jrPHZxxg1m7KM12WRTB9tEFlbvTmW0v8V5snRSvrwZvYcKoRGOXD1QEjtXp6BxBZL1eQdLh1oDpJajo7+Ow0SYoveAWghmjy2smRzEzkuepEiH5cfAvOWMgAdKudTbrrKFMbdzn1MODe3Js7L42tTaHzLewlMd+etfvrBZjk/RoRfbpwOjw3KXV3dMxvO+HV8XdK76jwLlC8ZEBCMOemfo/eeb1VmprDnQczfdySaKDrpFp6QYdtFRtCPqxAM7VQvSjBfEcBNEY7PZ9mJiH7G0vo+74lIBJaIWzfydrj3ry6SFDGkNRbXtQw14wpbDVDEUMG3YYi18Yi4nyXCh3jHLN3sBr3MH4rIu/aMkkangKUM5yj2nYffTbPyIFMGynKcxkhAl/2lX7VIDJwgKn6TBZmL8E0+tzHBIhG1GreADC4P9Ga3IfN8W+AQkTkbioJIy1K/ztZl2uY+sFDKKLRoySWtJmY8kfO2r2bgiA/fhPSBvzfTjPZKmxTwm5czibWV5lixOMd9k/cuPpjnO5EHIMx/Uei5f80R74kUAynpSSZcQZ8HVBrJaUgi2dL/UyuTZHrVVAQqkIsyCQJ1QeWkPN0PQD9rvvK91QnLchFY7A1K3oUMRr1XSRTFazifGU2nhX3FR1B0ctHRmDvNfXypBtkP6XkYFdfT6m3liIypcMvkIMYoER2vOdN5tX5jcAndAwrv/Bbaq/l/Y0QP4J+SIlKK9NGARdb6h+hekGxFVJaPn2/DY1aT3zlxvxzpvTyFlEkF8n2tCYAyksoLLmG+F0PFwmXnYtEoJwHy4bXwSRJe8hvCCEgQRm4h1PBW9amk7A/zdu0XVrGvnSrXw70SwvpR1p+S0BFVi8DNpj6yhVAy8m/liz0dkwe9AlO5LZ0K3YcSbRtO5QD0RorQZ7arPkxv72cMqJkVYUINivF/JkCXsYrA04CdjXKctfoQywzjsU80SIgvAHEhoXiT33IxgKndGmY5DUJG2SWCUidYT0dXlkPeObXTs/AYdVwSLrZ528wLFucYQElfpOnilt9j+DPKP8diCI1DHeKyKMOV9Ks/E1fuMGSESTsEcd9NW4ELa0VThEW7afMaSRPKVFmtU49NBkEoad1TjI58yVXDNu/Sofe7opmt/KzNHnUaPvBpQdDv1bMGAZz+4blvaMPs80O0wTqSaAj7jPQGXppXQxdmAk8U2aDEIRDuUjRVTWEonEzP3+9BDxNZqkXBu3svwf2iUwE8plOe12ro0+8+fBVUg1NuMSf1gKC0yrfylVbBEA9uVgan+PmvRBxoelq/1eFnILVcPAzCABmHRyVL5/iaUaR2bnoWcngu6OU529AwKpYN5o8tqWiKDmT6QxBZwAq9JION/W/I8FULRbqtwsQUer4MoI8/ExvHc26/YHjeRjm+HTvi1yzhm/DMANzetkooLpTkuTFIvxUgR4veE3Dfv5mgx2fGGtIPQVqAzgmrnPYj3MphcTMsfMro19LTMR/3PBKpus7lS0yxnz3fi4XRG4gQYCrIfuAkIlROLSkp5f4boCIiweSZMS0v81/46e3uStsQ586Wzq80pGJv3tU14H/A9cib28OB4Tq2p+SmZ7pxDjUfqosgGjxhFGX4wccdLBjxeHfnXCmuYds5UTJhWH0vk9yVELm0rc32ckagyFyTOCWP1XrLa/YTWzNX4YreF3DnUqxVyInIO+dohe4xFzyuiQXcOa0SirGCDq2c7rLXLfKVUt5W0FucCwcmAu3uHx4uOlJujIDBI4G2EFzhnq1MkvkCg0OZDbaFnVP+YsqrhcM+F7dXMI8Ueh6SXyoFdXc4PirUlVPhJ7YmV3mTF80znFD766cjbwUEwZh6M7AMMia0ME8ZsDReikFzccGq3OZqGjAC+iDCZXR5E8UIN07kNNEH7ZvaGHISjX5WcSVo75UQOz5Zl5uukVyDeZSo0BgDHNW7+JNIov+bqIEPneRxRu3KQeYwrZHaDDaEv4CKH6ed3qj+6HDVbF6ci02L4RhT7rjjadtzUQGIZiNALz3mvJdWynAlNsqIxRtADr9QZQ+eqjyyU4Z2dgp7LSxf4T0LYwemI1pPfSh+2WQMoZf9RVMPx1y0lOgHfxGLiHzNqiXHfrT2hXWke+k+d3BToa+Hr+vPEI6ldn1swVHVO+hUKw5zSlVL+NgJXNi397O9AU7RfmKP9gx6yt0mpOw5vNc+0DIzVqmZ6FVgbwv7e1Jj9nGJ3OED89OnkWcBSzrv7t+nf1rByxPu41Hbje5PFxHiED
MJkeYMOEDcHTqqHaqQWz1mv1Iu8Kiz7SdOpUyTlQ8SXZtuyTRNOdFzMqijJm/J0kGu2aybc+Y340v7VDYhCSadEsRUurL5eONzIh5gNo4uJdYSmX6Bs7dLJJPl8usWZaWUWqPZXdCdSeNDXQgqI3wrES9ZMe64Hw/45fFszALI72s2aBfuftzEIwKmdVn+6my91qzYgfhVX9r8lXxM3/wWdv0b4yUdZuiEnfDN4T0FkX3tE9sWWpYzpLmZhkavj7FNK1EF1+8MA9Ops5kgwmgQs5zmnkZeYzYXiFsSjB/bi3fe7z+EU6Afv41Tbn5X+cOflVb2RFJULs9PZhwWMiu7JPknIj8hWUudUYDtCa8DZDzQISRxHC06xPxmTmfTsnxOtqVEDxFNzBeaR506aSo3WR+cRqV5zy2LcYBjP2HuOE4F7RO4U3mY79LyAg0HzJpSeIaanXCVwd4QZva9XM96WI7vvG/kOwNfHRO00ZZ8BDm5jhjcWEnDs0x4U+H9i44qXQ7S8KZ6eD+lTqO2qkp/mBDyd1ohE5TAT6vpE7nN3dUtPoOis7QbUFnosWcykzaGGnAbrrftkiFUAFaiFIsMOk+hk5mnYIiS5Z8MCC4+o0eTjULbLzFN6SLV4QRY6LjW8a7GwWyEwo3wo53HGy0vomzl7RUyLWeWU/swTtZ3yGGiHTMLLWnnE0H1WoZjew1jrTmajMbdsEot1fN2SrLlI3K31mQ4EiqOHTUar751wGFj5XegEHSpoKMmeImJRnkkaQlN/bll9XSijIksbwl35FU770u8dLLuiyuU0fVfofAHifYTaLb0zMKfwWlhhSh7Nur3PzNOeVia6r2gSN5kOmZGnMEIGpk72hkSCO+ikIOdoWeWeVODl7JXuKh7O55YkFEUMdsFonHtI+CAuw3+yJM9awL+hVwMoqKPXx3qxALYQ7E/7R2DVwfwTPvb2sSUHoFHwSsV2CvpbKduDQ3SXyDw5x25W0HD7kGvO1ECZ3i0OIiV18i5HSWgt5G7qyYB9o0Gil+6EZTpVdEn/iyeuaUk/2oZvin7XlbkjuXIjUUCHg0lktUsVsWBkTZFUhaFpdyJbiEe7DdCFE37EfK7wr7sgViBUSOFlLEjoEn/GA553wVwjeLfn0VoN4pMxGorzhOeUkw5j2B0+HvZs2rFtHP2hsFTaX3CGNTqAxE/qJNbQft4c7eyzJkjEk6996vmRXwN3kG8eOrgj1lgcwo08wwrWtIPkvHnOLJ3YvFSZPpR+uN94e0QpQ9P3tSfq+LN4oi2CcnjYnF6b6PlQiuPCrUYpA1hjCvs02wTu3gpOtJ/Z9RBGn0/lQOSwZkQQ8hbLvJDZeu1iry5gYRDMkZbmON96kQW7w02xT5NwaJHV1kBsv8124XTEgTJKbIkSsWJNhHgHMoYM4cePy/7+CLKymCPbkT9kk5dboP9IR2EGhlHPm22pA+CRREjGzZ1MqJaHj1KCvkyMwG4qQEB4V13h01y8YwjMW0NduEohYFp9OmFy1lEcaL9mOdSchwSBjQPOC473qoqiqvQ8a05OvxgjrGYuBS+id11eHwIB8d7XM86A2pVaiiaQLJVjPmKphRkkdLWvIH2m33tP1fxhzU9UNaK9K6331IWk2Ljbn8zj4e+OAgHkV4xUZzvB3F6h2E3y5uXoTN/yQSDKGGSJYawTS819bzWRm/bc1XEOaC8h0dIBx8KVJcxTOcfZeOO3SJNebVfzAAKnmxbVRRZ2JeFdlEElSUUGTPGhwzu9O0n4lmxQkhceaZ7yWN+rgcBFzVbrMLaUY+cjT1E3tzuEF3Q0gAus6/xoX8N7vp7+QHDT8OgOV0RW2Pj6ZOBEsuty6iLrU/4e/jpEOTOsp/jLDjy2bX1ylsvJHpC5wISF0Bs5UBJTqmW1OkywKJ4upfQE+h0Pzy7J3mdbYSE25c5w0nHCFBfJ0riqqsE4WZ0HF5QO3Xf1C8taPKsukHNzCWZ/JkFGKlQHDz1RkhLTnBl6YpR1L4VIYggxE5vxw51l8ZTRkREP0ukSKr1jerY++R5bTIDx2FJlm0GG7BnpziauS9tvNrFPv1StFlTz7lyebBwuIvvhXAr4zMV4DAc1+E2+mnkLre6CRcBsfKNzMcs2mACoVbq61c1gyItDCmn1Ohp6OCiRVJZHmb5D0G130x2gbtO6ziPF4XZS4XTINftyv1PuMGs0BqJokCIbTO91APiOV6b1cSiMz/3Npeinofh7+XvRe+CEiV0PzhetXjNShuMRCR3Zhg6OIO4TRzwRVfX2ljXjlHBOqnQAlFIkbPqbNm22Vvh9HwT//R8EeIwGgwhx7Fpk/u5oW9gPCxKG2DdDBJ9//HHXRL2b7W9C3Gool046G50wZx+ADl1GOJEDx++ue8FhhfwaZ/hnCdAdK2bt/v3v0SYrykxwMvmUcL70Q0wf0E0wG+ibMcBELT5qwkiDkotFqTQHgYZ9QjLsLb3wlBRoXpextZVL1G9zAXQTbVfxzutIdEPcUyGFe6c6vuGIDJrTL7sH4ftSpojZ1v5tsN+Q7NTJSDID9jbuAJJtyiDAls82Nnvwt/HJdQ4or+mawLJOOpDCcObbgVVIRzzPIZ6l1rVHAcHBTeW1V+tCqNE/OrvNCK/8wqXmsEm41jTLlZV+g9QLD3OvRUTMz2sfSwjTwHsn+42MKWKkAHZtEfx1fTg+QlB3IxDbNMHnnziIBDsXvL6RSQx/BPisy3JbTQ5+9SHByBkpf/COdN8L2ahZc5vqmzfi+A/V3BXna917h0D3So+6qx9oPbj0XeJlQ6VJC8picIcjaRJGmgtEj6d5aBKNnJLuEBDzN61HF+/xXbXtPASbNBHgMV6qRZO35mC+2gIOnMIDkc74U3RXaFOw8iuJJdXYHFhFu4NMIeLRFKXtsyDMmAZSfR2vG7uWqP7cb56OrUIervLHII5Aky0xwiL49vDvhvfJjTozNerVvNMqvdlCif5NMJGqgkv18wMvWJIVbjnM09eRW54doubayebybpYRGNFLhGLhZhO7muwfHrnsf6c1/E/NCrfHsA8CSpcoCH90XdqkJYPRGa4CR7I0IYDGr1ud1KZgDXzRTZ3XZNRyYh7huu8rsEzXRh83GRSrMmcQRaFV2YMeqHk9d81ZWSxDQPhsvBg0e1jsC6I4uWvsDL3dyuCqRCVC7uH/UMmG6ms5hHs8wJL/a8gt1z5RYdugiEo28ggNaC/8J0v2L8qPnrCq34Ko1zPOXyjDoESxysVKm0v3GfAt+uk6C65ziXMB54YQT0d5fw7gea2x3xhgB0NWolqjggam1xayw+Z0U9KHaYuteystaNwkEqr+O+XExfN+Dsxfn2pEaYmvBMTiXrqsR57ByMHrdPDBZbSpQ1k8wzkJbWatWI1rEwsWaV5b2V7FV6k+q1ZG+ltfza0tO8MGybbBVJSpCrRU5l5eyeO5jpYR9dwggW7JPkR+yD8EZ7GDAu5iD35l3jMYdMnccr/YYWup2mtV5GMRUOYvtA0OjCp8pYBKOdWBhqUOSXE5Ymd44lsGdN/GBA/iRWgs34Hu
4ZOjrzVe9sw3TOFDhGkwb+f4lX9IkoMSpVdl0ENfZWo1S5ySaE7YvVF0Bzc/1dTgdeMoCHheql+YDCRfbwOJeCkK+nC2vYhViX+YtqpIW5YGXVN72Z9l9dPiksLggZpSg5HJlGJNeEhp1+bqZ/sYBBAk7cB9ynwOpzldh4UlPehIM8z/2zHcgfnDK7hKU+wXddD62uAKJr7MXqGBnTP4dv9FlhqDwZ4WuJzpgocfWFE/j7FSvOB4yUB1RKwTJpLuGRCgJDmViuBIdxUTQuqWxLazWkn1Hz4DGRaeubhdVAR5VTml5o+h+9WDwGTSRitnXxlKPdtN1bpnz7gHgkvT/yXp0WZ/qJ5wIOjgvst5yqAPV0rcgm1rg3EW+on6HQ1uGTqq9aVkJCXYQpBUXylKB1OpFbZExJT0viGaGncBbc2DhDGuoAAc3IrnpvxohyZXjZcChXpUIkKpaSg6XzObp/B142uir5Jc4w12ftblX0sDGIDVYsE5rkahEz8fnSPyXvug9t2Hv33a9Wil+54KO/2nMXYjcAmV5VXIqr0mP9I4gzoSGIuKX/vrSAug4+bsSBqJkUcqWYqAMNpthsa8fy9RROpUnSghHqWPqnDr9MHmZcH+m1IEtdb4SCLDB+S0pxQHN67CP8Y7ZbNfhqEAJtvS2GTmv5C4nVO9xySmhbuMEkGbv1uP/qUOwIQiqaewmxApRx42cr2YfH44O6GqoTLXX3UV8aZSywhu5F1XBIwT/rR4X2xBkWNlq7K10JGS9fLb6v7CCPAg8SVwoYTbrcPlhZWZcE8UNR0Ah++knSRbMomEmJ1cr5RQ80v8DtFulrWPqZE233taehJqYRLh4LoBrRoPifBV3TwcflM2k4rTLElLMZniWZtCCYJTWykM9FMlrwtKge+mtBSlg+6wlSCpgf3zKEHWBjpYb6R1pyRgTaA7xbkqLg0uCjas/kxzpVFHQogQc4Z/WcBlnoGQT5ocdUNpjmJWLWdEwWnOejbeAajWFUNP875AM/egZpbD0J99FnHLz1XNcAU1ZVZHB3RWdpok5dnRFiOiNsVT9mGdiYMgmMVd8Ww+j4toGEkQZ6c8wvGl6VuzmaaXHTdgJpR/ib3i8oKzOBCSi+Te0RKsTXDiDVOfI/NmgJCFvuv8cRgTE3damcmu19i6OLFhOEx0YBKGWd2pRAQzfbFHTmfx1wUvOYoOcO0gXF6U1ucxsGxvUc2ABlpYEo91Wvrm+uOO1tLsQwagoR/dIrDX1fpJIp0xZpNuRkGlOUj9R2w5+Kmf2XTl7AdWS1MV2L+ZRxvKDvL2bSQksbGr5OuBZCJ7CFyaqUVuoTST/VY+Bv1Rz1aUx4/IqTBG0iiXtANhvNFUMB/0t+08FwxF5E1EXSrirjiUfXsif30CEmPG8pyhm5EQwxW0Pd9HL7sWSR6QAtzLGvUM4DNlgep7+7spDBkaL1B6Qc9WQ4QhcOkU+uqyfCxXn3JurRIWhZX+G+mSUhKbblazOR96sUEM0eDvPc3KvS/aIk1fZgIL3Tg6h16LRtxakZJfF9Px9e6vtOKx85vsWblhiPjD7sGQ3z0REmOnXZMooch5uauOm3XUoXxd6ebzqiFoq4mBba39a2ofJjT88A7IQDeBAl7WiQxISXQnBr1D7eb9zvCfL20KBE5lSu9iQTb1A9RZw+iYRYWKR6kSnN0k7yX24nHiMd/V9fjSqvMPxtr+g1/soVdek/Zn9p0NlwGLqyGSt1MkWZ0pRq8qhze7aL4v8X8lLOvTae5po1tK8lHjPxp0bJxXeBMzRNkpn8qiCwk46Kz/JUS7QChLjF+5QGBmAlfdEbOqnI7WGNJO3/IwbTIPHQMBDjOn0gV/Oztg8d1ia5ciAZlUGoA1NbCdxvAyi8btoze16pN5E8zxCV9Gv7OJ/bfORChq4/EDogdOF1STQKRoMa/rd3Bfwl9RaFt8Hcs87Bt+5/MN7XndIKDtH0zBjLz8+kEDYf2PCPMT2Yqc3QJt1D85BI8lqIR/T8r60YldemM8b+08NemZURk1i/TmJs55qIjApUzkCLSPpKy4nNfZSHuS0vESkon6la8RAycE6D0Fcze4XMrLtis0kFogh+aiVVMKOdLo12OSnfn72NnkZQknX0sSXW1F1/vpo7h4p6+HKigAZhrF9RC0xP6Zx3WmBuLYhiF5E5/9ph3IYcAZ5VzSh26yon2EvvaKLHrTMkHWrcMbTngwA2teVxr5FP4BTJ+Btm/0cq2yAmMuNxBmdm6IQsS/1nxGwqviIaF20OLnhu9V6+KYh+oLHK4GsKhHo/6m5nGq+K39/ljKL3aBBcgIKy5En84lKuQ0EC7d4JWOxLGdkXpqKOa3LaGDSv8OZQ8ElQOils8o49EkjHZXIR0gKRzheY9JB0PxvIUoWOMtfv4jFfvHZxCZpI4Q7Py4H2RIDhk5PpDrk1K6qmPhRKfvHcZBhxfewE6M6vubPpoTbcf/Z+hAKVtJnfs7ITPkuhE/LkwOEh5c8hdBGMi216S0j21TTOXHlCLAhqY0YFArgnzmKjJmg8e3Mq80B38zEIoOZ52e6mqBz15wfI8wUNXfsphCmVIVIXxNE8U1vjYLkv0IVZ7ipxPgAS9q9cAd+PFjkEutHHz9JKROBH4ZtvWvuBx8ovaQgKUHZomgEJGfQnJcK3iaM7b7jVWRtGb4/GPbUPHA8caQE/mWiHuCRNLN7EF6/qj6PsyZsM+J7xQScYGX7xftCHbwLCT7sQrkbfBRt52hS2wwSrzdBSAei9WYpShPx5BnPnxfcCIG3A6G1cQacMHp/b6gFganpsnN0kn9/PrLmVhAAe3Sirurd7UQTpjo47NdQyKvSQjHMSsM+/42n3p7PNt9RJS8KCNwDP1lIYij6pjR9gFFadEgOuDBKKwApBCXYJsWOGvef4V7kI6pXl7rvvgrg/jXnsS4ZadaKnCQj0g6/p+WIx0dHCkUdTSKwNzlGvit2/K2YcimxjKGVeuWZRJSAH2MXqMb5K0ZbejOITH2dJ5nwCm4X1KgNOr9CWg2slIIGGgOJqyPPfvNtxdJcA4Z2NQWso7u+2A7fVFTdzIi8Se3b/JNe4QZ62mX3ujRelSwBW++tHGw/FNNQq7MRZDiJox9Fnl1cmbi0WP8g9mQDjrhl0VmygbMUW1H4PsWWmdKPavPyhXnIvDjRQ9FiJKuq+fkUxUQEWIX8Hv00HMA3pfkh0+B9TNCCnLusL1qSepcIYuYzJkL9yV8O1iOdbSgOpwBB+HWOOewomH5lxkKBJsfTu/Gcy505rDHm1N3Wfq/PpoGC9cL0knusoov6iWR7jVTE9mXIc9pk//tHj4wGZP/PtJYgCB16oztOX4kaw6lWKETwwvqYPxHdUYMeQH/CwU9Jcor3u6Y8aS+XRlqi+Z9QIFZGcXxPHzITBZtVeYD62dQB3rNehfBjxWjaZbbBou/VJkkMYm10Bf4/bdrqx0A2w28A6ZaKzH4gZbEcROW1V/aVMSfGaqXMY2Nph+oWE5HsISUUHfPAyVbUphJmMdKjNRawBKJ1YM6e+tHRP9TawG
yV75MV2e+qT6lekkVBQwow56/027L4ZjpD+caU1c4IqbpSzD3NK+wop85BRVZvjJbTW1t6GggkLbsCYh6VY2trgxYDBITFqje1CkzxdKhujnFoXkAAHOMngxAuNzf1UaHVLZAVUAVfgF6p6sihCeZlaNOL51N5zlvwYuA+bi5/dJw9mYfecpMgb0lcwiROielLZIivHdoOPc/V2pNBHEiqbGEsF7DKtUbA/bPSlsJVD/r5fkeNTH2bAN12oMz/qgr0fEmCrXvzo/kxnzIcEuKYKDe+jqZpymfLpw/vDla+wSsB4Ql74oXk8wDC91GA5/u0XiB0eCGLbNH+M85W3T8S83E6UC3TjrB8bE/uvTrW9+P6Tl6O4vFG/onGD/0rDSpZuuCIQlwB4CnlDNuf/UXrXn6tlcd4u2SvMBIIJ9lBv5MD/UsC7IF1lSp3FM+T36oVVqs2Xd6xcrSm5fdTe/MxT7n6iBBGbKImxl28KJWvF799Cmi+2JC8aQ6LZuSW7Sks6JruOD0XrWWCW9iiFzlX/e+KR3ZZxcVnhBrd77rU5INuCohinvacV57p064MXQW/o/WVt8vlZg8/yUr/mp7Deq2B+uFONHalUBwwU+PLMsn1vEJ9tCNA8yWppuow07sB7vPVm1I4z2mWTw1nWkjuzmPf9RHPhb+HChHW3QN0JgHXmwOhkkRPcOjilgivQbGc+IbQfMMTv18SRCfAMEs3BLFFWg1/lDJ0yBaO81iZFzrplSHcI6+tlLZ8ZtY+kWIy09bNfTtoTbQiopsvDBGvGpFC6wkeJ+hcRWkyXOe11+PIc5EH3LguQ5u8CGuf0aA6Im7EwYZ4sD5WjYM61c8cGAxOYYq/WF/qbnzKLpscMQESXpmNQWRRwSntpUzFWnXKXRbqh8c3L8tNZyOzChFkXt/+lGL3ExOgS5Ch9j4l7KxtHSm7+GnhvzlU0fW2aAsSJJ+v7hsE1mxyvEwo8AmfRHOY0Szn5nUPMOGDqf2GjU1JNHJh/bs7JLwhtVlq2h4cMsSaJ/x4xx0RI+VAtVAyn1asJ6onoOIn6S80LJ/fuxXUvT6B7GxkWDzuhDObbSFnYJcS6L70SgDp7toIvZeCnqTtrcd9bBcga6NplRdIYHbjZg/fcs3XvWGbUfGexvzHIz4Ve90N7cmVqSblnFatoeKeeePB85WgMXWZzBRtJNb2Jf+qCS3fcIIrE+ULqK1KozvBuldBmWkBz2cQZYfZo/baEZKtAcKsresa9PN9QsR+Tmad/f+q+ghPsveJkYWw/YTWS/IZwPa1mxAdHzJ0BLLGhO4FIMJxt5+wl1pRdZXFGudWZS4/rERFcpWB1O+KhL8bebK8stIlWXbk2FWuVFOG7XoMDfg+i1GkxdpTRWxbFSPwg7JhpHtaDgjCoW3YukvpPG8HaQPCC45pmVTrJ+dY/ViRCK7wKvNYafsaAJIB6Te8BYE7TjhROQsjsgsmXmMzzphC+hw54M4x/cTNIvTTIID70A381AnVx107l4PuSp6izOgd4M0GlSSFEzFS8xAqURzaTYGQ3eKiRqiI0pJiJahk+0Iiito/K6CeI9Qag8yh0Mr6zHbJpEqNb61A1y/z18lUnMC32VDebzdgAuC0wadYjufpDQzd1q3CJvGr4I6ui4a9wsjbSUcrbDIZ0ModSKhs7De3qvlLRB7z6Cdi8uXNqgoz6f6yNsXv3p2osQq2WFAkBjPcZLnhwIbHoPq1FcsVbSIYH3LB5NllmxUN2MsXDPky3IWyOXRSlAckm2EwizdYA72OBw21chxC7aL/R6Tv3HiO5zuMcv3407DedlPuBTT13WtlUWV0TFLqIRpWePoL3TiaP1koUwGtYnCa5R64rz2v6AUnvC+FO1NgPRIoBxDscbYJn9fNeePMNz1j3vGWiUU0MjArI0fa5k3SBjuiPXlJPtc3LSO4s3NaOzF416Xb3fsZyBeMecsFwIaspAtoP84G0+erqCbaRdhn26mpOtUZO/dtY/UWx6+/NwaAdCLR5hjnlyZJbWlSwdDOk8Yxhcm+LsTHAEspULrJKisByAGgqoHbKz5LMRnyU7XLaW5lhOV2s8aIksVdVkWnk6isZpgKiE8vRHyyILQQ+ID/juMQiEHcK0pagGtBEnULuN+ZfpNQmPfw9LfkjoBeaSJZtMM6E4lFa/D3Mwbd5B3kuvMYOYt+vB6D3j+sw4w1ilYU84YnhJ3YK1s9/rhZOEiogjVRCZbuvjUQqxRYqquGDamSquekSdp7zOT85VoGBX1vjnDFAuRVBDBISh6O5fjf9IjCdRVLSCOHn4rP6IQJFfPFRSEV1JIIPplpPjTXraTCFfAAyrMCkot5CIjPu2ol2EsQlp0irNnl52Yczw4ofocLn3GzAfbLWCprnp2qqSb0Pdz6AX6VjdYx7IYfuCnpOJQNJXzaGyTUYIrBU04RPFyTB502cj4Rj6jfsHa8gSlEoiswSkYmDv3TwC07roHGwWUvf7u8YgRAfLLEWRPOir9/CBF4ZkZFSkj82ehdnwRzO2uz7cFbPHeBC+UILqzlnk8DDkL3nS0u9/xaoOuCamnUXKW7vW91IOvHsQBp/oPXbi3WnrbGxWWJAdfPY7ylIRfOU3hLQUeYpQPmlFCGxVJvOKx3lstHTvYzPhdOT0/L4v21+9rF1Q/g3PFc/ROHziYNdiby8YGLO02yC34O4+uhZzUUOwzVDiYbf76EMV/6pvMUhoJWbLYC790sZDcpxLRAywAH1eFaI9iQW8K5O1R7IXY2zotxWuLHmMGWwmLuuLuZiuGmiIiezZXzFbPbkatH6xxRSpjJDH/z8yPx8MGMW51YF1ZECpaJoRQDTuZHy4/PIi5MiVUcDWvJa9m4EVB/1xUdUqPce5UC5tIcoRz4xT3KaxTCLBpODYJkOK/rRJdeg2ljQHcY1gS3fZNmcmurng8eX180gTse3UPczzhdSkSdw4GgAgDL28pKDQOKEZQvgQKjAYrdCu8AnOAdizvTrmGYPTE36oGor5FPVAgvJmgwuYKvjiEV91D/vse1iDGy5qIdtnLhSBgmvOt0mzSCgp82KlBETHOhfRBAAmSI+g47S65j0ZoU7SrUPaxAwzh2HQq86OQSa0PnPgDG9yGODbtRU2UyRmUlbqIXZuLgZ/JrbbFfVTJyu5wDqbGrjLxPoewJ9hRTTWTny5okrGZIhVFlpQYiDNtw1Y+O426lC/vWjxoBZpt7F1iQRS/HpGqwrnaVo1qvrJXDVVka4rfEhx22/eo0HWoi0JiIXlgNZZPCOo6pXXY9fLXyl2G4ZbtZyhrHVcWQB3K6b2XN4vUqiXwy9KilmVryPAYbM9Fxa7hYwJun9b+AXw48Gpb5vwSVCSzcc4nCOe/RJFpHldkoayyUPJV3WHEPZTMxnDQ9Y9Jrj+hW5ZiPlQw2grWxispPmUfLTrCZZ44tZulhXuDz+PzESXzvFvGoMgb2Qh37VkHMIsJgMRc7cNKcjUOpdQL8nWF9LGsHtjPg8VW1zr4SyaPXvmtUeTXjeOnUMLcibHmJtQ1BYZ5mZMuXbM689
gF8xDUs8mbnrsZ931Bwd7gGM8NNP1BkWFAA+lFoSFuQjzVo75BcX1laD/nqd6K5bS4/s7WnyaotV7u+8wudLdfv9Y4Hq8pZX/bE3P7uleAAe+pLDSx84yJ+cC2BbqVgypf8H8xGKnfA16frYXrbHv+QrAvAcqZzuo79//bqTxHUNQ4E21gPoWKOlxEDDjsy63cHSlCEU4u3dR0DQmM7R8NhsDR8XhOR/8EEIwdkEnfazZYqmSQ707xrB5Nv9kKwEJ++ai9FSX0hkQTvRbhdJ7hhFGacwoy4iZR9SYveIe3KvjYuYVcfAUqNBRv0g/7ZYvqS59p4Ngl6+wW6MPOK1JqHNtVnu3C15+Sa69E+4TqTUS89Q/uUdfOsw4uf0YwNnWNqbnpq2mcEc74uM29RS98XKbdnaep76GazAqJdYVUts7IWYxF058OB1u3qOcU6IXgH6r1Z1SEGFJcVgJXvpW9tS2wtThN+/Bb+p1HLXribf5LhRzsqRseulQmMYDaMrYww7mX0FDuXUI+uvZqbbn1ezGAyQKHVnnekktp2Ep7N1UlIN5gCcfknwgOpMIaKt4tL1giDDCnywVvxMCYrTA2mdCmsHfXHMO/TIiFIL0zlVTzAD7+GygCw1DVTP5Hv1C+YvlJDT1R2mOV79JOgJGcx9DynD8/pIxv/1Q77L1DNSbYmte6SHawsiwsooDGgoYvNCyFkfHa9RcOB7/ZzK+n19hngYVdsbF9fON2WBYHbrpxY8ufuggrtlNI93jbDlleK5mt/shvh3LlYuK+1PiDgpN4RODiXVMpmVHjq8cz3ofzDqJHazmbSVdQnIj3fQGRr6L/XLexT9bY59ZaqrEZPKXvfUsjHJTDsVOJpfYxHp2v97tnsQPpiA70v/Y3rK7FmuE0waQc/ugcXCzVGscntOd1lVLXKeB7JQMd8VZhgV4MKCFompJjW9ffMilqn8OIR6QUqEVSGNCc2n8B6L/CtQmP8eHZ8U+0aiEPrabJA/7w+3tKHdHZrGCgNhgAt6AAlzEUHYH8j4CE03xudUIAaz7htA1g3+U2KlsTL9F93A9KDKrZrSCZJRMbE4nv+70oYVQC6zHywILPZTH9aL79U8v1yZKGIi9GT+SGVrEBtYf9WjrouotLEtvXp2sbnIuTK3jH3p7lEqcolb5rnL76bi88TMzkRclrnyyB4neK1Y7hhJ9RPfceMu/DiW7igmDHO7gFR64idbpJcjd5TIdTy0AiajehpYbQs2Ctde5z+rrGeUc2EyCIKo+eDPuXRuzPfOkYQuSenUH41KPiGs5GlSJsDTzyTEr0jlJWIKEwYM4st0/dyOn7zQdu7yp8crDON9UPCCad/AdLWBVQVHpPfTqi7XHJ3SJHpi3wYnNusLrd6p42Ei0r6GQavLAFzEpFS+vJlvbCt8UqwyHqkwr6Zmwvgoypn6zFd+D/pDtPO8FSdtkUW5vK8r6+3kmOAo2AQerp8Rez40KjYZrQ3iROmEOER1ZtTXvtrcKZSWqzV0JgCaNfI1yxQb4Nmc0QrjqMThAgkWmApNpdAaxeatMwc07w15BmMcTyOQQmsu3XkfzqjZeuJMrrsruk7/mcdD3MrHMr4x4bYUNNZnuPrZYgED5rVJHrUhnR2gd1Q84Lfxsl4KeA/eoU7JB3T2uqfoNEFi1Ii4lAIDm1hMb8bDCIcFh9nEb2YLh4q2XILHEdZ+ZxY+kDI2KJ93byLCX7ijr8FzLyuEtOldvaR65aCFJklLJGSayW8mT9Eu+coKNLBo+EtvYkmuPX38nKoZ/zIVHsmJmL5MYTp6KrJxQWkKuFmBxDTTRt9eDGLzuJboNpeHP60/qEyvDzTw+Jv9Lc9U/BdddnrsPlBoTzukpL9faD0M0bx8tsxAe7LwymR8cYpzDPcYcufQ0qU/og783+YYJr3GwbDwYcVjJDBhV13Ci0ilYoR77BuItvYAu95bLHSkynpC2vhpTGK0CoHoUoJ7jmTSiCFVn87ccWQJzIurha5nTyYAowB6AFaMGnzgXQgWwfuEvxNweY8caVMvrrGoaymdIflBPzOz7fQ7JXAEWAtw21faCxgiWeCe7MaeDTilCLyJwvkZ7RWIhcNWAIRNSYkJBcebNXMiYJxFcGzmtRTTE/PN183MFt53b+w7WJLBFy0wVhEndvBvY/TOqMlRV9MvTaVK3nSruBuBWtH7OyY9gtbGknFLz/h9Kb1e6fCP+UKec4uOU7lo6ZAWwFlwd4LhT0U1NZ1YL8QWkQs+6odxRlGpy0rxTHismGYeOXI9I01WU3BT0BQPCGcPGIzivCi214NwBBIPqhGxq29+wPPx+QLHROQlvS3WaMH9YXzGMOTmcENThVVuWTZXE1hF02+7aTH3KwjLHtlU8ePOP/ZGL5vOYZiYY4SZNQETu8mxCoeNNdpwdA3GlaVZ64jbe35iRpwIk/wWYfoESSUM0qV1WLno1GEwwSUUnu2Y/hJJYPgDwrRNQ6AN/K72qCucfPgL6yM9vZ333wJ2CuqeCiy+IGIAPFHMMARoKQQzDk3ufz3NELAeWAvxlxsE1MrSl2IYi8SdYeVChXPUinThxlWxxhCQkFaCoNVVjtctzLm8BjmGPFzwNpaGeWD083hHP6/d/tU+oUUia1EEa/Fug5vBMHlEefK7d8KlvHQd7rca0A4yKL8euZ0fyoZEI9HBvpMURCOs40Ue0p4Puawq8hHLboSmAd9Zk8QZhZ//XfBKfBDfsWTDQl9ccaP515kgOPEGUiT3K7nR2QKm0TA3tHx9VCYdVpxzMKW5Q6vdSg1/L7HzXqOtxB9CIEmhIlF5YbUpdKN6HF6Ud1Wb3H8QF9Hv3VCaGcY+VmOt2ZnS+VuzsBX4BE9fy9zyHKJtbDT7gZeCkgXHt9Kny2ZBjBV7ESyrjB4IbQToE3RKZVst+NX4Ozzcb6owiMHTrqFFTjodX7NVEdZ+Y/plUtt0C0irptIwwheZ5lpmR6XL/Hie8BhLyO7RXfo2vKKNhZs5gjQ96pGTDNRjIqEZfcdM7GIqWZXV6eHaZJ2X2J5IaBF6Rlo6T4oY3lehQNNrPyrzQaIbUkywRDfzke1zERoTzB1N0NsWk0ZcIkBIDYlV6FkvJ0R65IW+PazKv6sODKe1ltqRjX8BB9U1J0yUloO6qzML/5XEBx/Z8bkCgzDGIeiqtYahfAg0+VRBaPzfceGr3P2PttOuQQR918oGXb+O13uZJDmYMAXNcuFtb5lqUxbTaWa7vRrrIa0i67IqEPAwNBfG2WT4zdffCsphSxDcFNElrEYGWSFKwaF5ACP1PEz1WgpjueIMEVHvqXEhWzlaVYqhUbEmNjqkJD0nWTIKNWIwVppO6jVdnavZTtf0WQcB5TZNRj4nY9iF+Vj8Zs3SKADwpIcZfQqC43e0n4Cvh8iXsGW/76zbDl0P1986MRqU3gYrJrgMYfvJdQi3mZJnHvfKf93FMQ8AWCrISElLncQasIZ2VF+EIF5qKgCP74UWBg38ug0vOvINlpIBkattzI8OkEHbr8ar0Ecw+1+KtueCDKh5hf39EnwFZ8b
1PsG7Y+wxWOWXxAv+dHyDzd/JEGQZF2A8Etrz2YYu8rJf4d4omu0k7SkeDsZwhwOlvVZ/Mu0DaoQ7pDwkT4mo+GxwjCXw4U2OLfdW7xOd8+1RbtfFBO9j2tOSLmbN+1hkPD/rLPrG+MuhsWoZuPIxEoRNyrCvAXShrWtfduOdst0klBDqx/medMWcuD0tDtX/FUPadwPpzGFX7xjJxjzWGT2DYxu+oAk+CWiULowz1XN/15Dg2wguNFUqpxau917fah+KEN/4Y2R4y7/xcTpfwNpIE3BKCPyCLV6gxot2jJy6/uUfl6BkeCtaOIWrWE5E3o55WYoBMGgqUHAzdfYC1BZmS6Gbf8m6tCxydHRjpx0IlrIvbaNVPgT2sBi0+myPOefzEdMXYpDQ4PGcpz8XcU2Zd6Iv+AATaTMXS8OIp069Z0bhOv4O711mWAyOI9M0ZMNC3ACAJwFdxH2/54FonyMlQcBBBWCsacggROjlkG5UaAEn/5KeIVjw18DV8mw3ke/vp5WXZU56KyRk9Sse5sE0d2nvVwcS/N0bkFPwkAOjSB+ndi6f/XyO81o4UJ4vvyYpAINzIiE7ePPc+9/rdswXMcG6MYsrDDLwgeIG9QYY+n6vWqPdgbaU7DpgutKiA9ju3IY/5Hat1c9zkXQNhuXIcYowURduJpiaQ8nP66QCnCAlOMKBcuVm48T85zIoV/O+Mn1m7YzJ0Esp64d9JwtIeNxDwZrv0x9O7gnue0eDTB+kscOHWKn9Iy8P6aevyEVqQNV7xG4qWG03ZBwtbHe8rJOPk1zGeKMaO1NsDAqyyUqOiebiLjaTCaw8TnhjrCoGj3hmddRu/qN3Hm5oq7iaE610fs8Ykjk9/E8bcAR4jFA3TaiYUjkKmVB2ef0Lhv0rOaho1+/CIVE0bho8pHJlhqa+2Gm0fHJJQ50jpfPCjbF+YNqGGkg5OaET4tkD4FR7smv7Gpz2zXx62Sr9N+Q8DWIwxkNZDZjlTlZ4CwiCTfGXjqKAmiDGfWSQD/tpjEyCX21fIxD7BFHIaTRbcxn2RWBXIZ6ZSYDZkX4it2sGGSf6BBQ9CWzMVJY+al6/pfP44obBVlSDQmiEPnLDAOl9UYj8ou0ERZ4JykNRR1pixLyK3lMbM6M2K8BzgBnPHIOMaSU8xt3MOWduLqFpOU0giUgedRs4kDpD9/EU7UUAtOrHCD/e/s4c8pxjUo1WNEb5csGAQDbtyTGmCuiJNT93/VPeSicIsmtvXacq0zlBGdr0CPkBAAD//5VYk3s="
+
+// Set accessor to a real function.
+func init() {
+	compressedBytePointsFn = func() string {
+		return compressedBytePoints
+	}
+}
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go
new file mode 100644
index 0000000000..c9d47f3078
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go
@@ -0,0 +1,1272 @@
+// Copyright (c) 2015-2022 The Decred developers
+// Copyright 2013-2014 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package secp256k1
+
+import (
+	"encoding/hex"
+	"math/bits"
+)
+
+// References:
+//   [SECG]: Recommended Elliptic Curve Domain Parameters
+//     https://www.secg.org/sec2-v2.pdf
+//
+//   [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone)
+//
+//   [BRID]: On Binary Representations of Integers with Digits -1, 0, 1
+//           (Prodinger, Helmut)
+//
+//   [STWS]: Secure-TWS: Authenticating Node to Multi-user Communication in
+//           Shared Sensor Networks (Oliveira, Leonardo B. et al)
+
+// All group operations are performed using Jacobian coordinates.  For a given
+// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
+// where x = x1/z1^2 and y = y1/z1^3.
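
As an illustrative aside (not part of the upstream file; the function name is hypothetical), the relation above can be exercised with the FieldVal helpers defined in this file: scaling an affine point (x, y) by any nonzero z yields an equivalent Jacobian representation, and ToAffine recovers the original coordinates.

// exampleJacobianEquivalence builds the Jacobian representation
// (x*z^2, y*z^3, z) of the affine point (x, y) and converts it back,
// illustrating that x = X/Z^2 and y = Y/Z^3.  The inputs are assumed to be
// normalized field values with z nonzero.
func exampleJacobianEquivalence(x, y, z *FieldVal) JacobianPoint {
	var zz, zzz FieldVal
	zz.SquareVal(z)  // zz = z^2
	zzz.Mul2(&zz, z) // zzz = z^3

	var p JacobianPoint
	p.X.Set(x)
	p.X.Mul(&zz) // X = x*z^2
	p.Y.Set(y)
	p.Y.Mul(&zzz) // Y = y*z^3
	p.Z.Set(z)

	p.ToAffine() // back to (x, y) with Z = 1
	return p
}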
+
+// hexToFieldVal converts the passed hex string into a FieldVal and will panic
+// if there is an error.  This is only provided for the hard-coded constants so
+// errors in the source code can be detected. It will only (and must only) be
+// called with hard-coded values.
+func hexToFieldVal(s string) *FieldVal {
+	b, err := hex.DecodeString(s)
+	if err != nil {
+		panic("invalid hex in source file: " + s)
+	}
+	var f FieldVal
+	if overflow := f.SetByteSlice(b); overflow {
+		panic("hex in source file overflows mod P: " + s)
+	}
+	return &f
+}
+
+// hexToModNScalar converts the passed hex string into a ModNScalar and will
+// panic if there is an error.  This is only provided for the hard-coded
+// constants so errors in the source code can be detected. It will only (and
+// must only) be called with hard-coded values.
+func hexToModNScalar(s string) *ModNScalar {
+	var isNegative bool
+	if len(s) > 0 && s[0] == '-' {
+		isNegative = true
+		s = s[1:]
+	}
+	if len(s)%2 != 0 {
+		s = "0" + s
+	}
+	b, err := hex.DecodeString(s)
+	if err != nil {
+		panic("invalid hex in source file: " + s)
+	}
+	var scalar ModNScalar
+	if overflow := scalar.SetByteSlice(b); overflow {
+		panic("hex in source file overflows mod N scalar: " + s)
+	}
+	if isNegative {
+		scalar.Negate()
+	}
+	return &scalar
+}
+
+var (
+	// The following constants are used to accelerate scalar point
+	// multiplication through the use of the endomorphism:
+	//
+	// φ(Q) ⟼ λ*Q = (β*Q.x mod p, Q.y)
+	//
+	// See the code in the deriveEndomorphismParams function in genprecomps.go
+	// for details on their derivation.
+	//
+	// Additionally, see the scalar multiplication function in this file for
+	// details on how they are used.
+	endoNegLambda = hexToModNScalar("-5363ad4cc05c30e0a5261c028812645a122e22ea20816678df02967c1b23bd72")
+	endoBeta      = hexToFieldVal("7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee")
+	endoNegB1     = hexToModNScalar("e4437ed6010e88286f547fa90abfe4c3")
+	endoNegB2     = hexToModNScalar("-3086d221a7d46bcde86c90e49284eb15")
+	endoZ1        = hexToModNScalar("3086d221a7d46bcde86c90e49284eb153daa8a1471e8ca7f")
+	endoZ2        = hexToModNScalar("e4437ed6010e88286f547fa90abfe4c4221208ac9df506c6")
+
+	// Alternatively, the following parameters are valid as well; however,
+	// benchmarks show them to be about 2% slower in practice.
+	// endoNegLambda = hexToModNScalar("-ac9c52b33fa3cf1f5ad9e3fd77ed9ba4a880b9fc8ec739c2e0cfc810b51283ce")
+	// endoBeta      = hexToFieldVal("851695d49a83f8ef919bb86153cbcb16630fb68aed0a766a3ec693d68e6afa40")
+	// endoNegB1     = hexToModNScalar("3086d221a7d46bcde86c90e49284eb15")
+	// endoNegB2     = hexToModNScalar("-114ca50f7a8e2f3f657c1108d9d44cfd8")
+	// endoZ1        = hexToModNScalar("114ca50f7a8e2f3f657c1108d9d44cfd95fbc92c10fddd145")
+	// endoZ2        = hexToModNScalar("3086d221a7d46bcde86c90e49284eb153daa8a1471e8ca7f")
+)
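
To make the mapping above concrete: in Jacobian coordinates the affine x is X/Z^2, so applying the endomorphism only requires scaling X by β while Y and Z are left untouched; the scalar-multiplication routine referenced above then splits a scalar k into two half-width parts so that k*Q can be computed as k1*Q + k2*φ(Q). The sketch below is illustrative only (the function name is hypothetical, not part of the upstream file).

// exampleApplyEndomorphism returns φ(q) = (β*q.X, q.Y, q.Z), the Jacobian
// form of (β*x mod p, y) for the affine point (x, y) represented by q.
func exampleApplyEndomorphism(q *JacobianPoint) JacobianPoint {
	var r JacobianPoint
	r.Set(q)
	r.X.Mul(endoBeta) // x' = β*x mod p
	r.X.Normalize()
	return r
}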
+
+// JacobianPoint is an element of the group formed by the secp256k1 curve in
+// Jacobian projective coordinates and thus represents a point on the curve.
+type JacobianPoint struct {
+	// The X coordinate in Jacobian projective coordinates.  The affine point is
+	// X/z^2.
+	X FieldVal
+
+	// The Y coordinate in Jacobian projective coordinates.  The affine point is
+	// Y/z^3.
+	Y FieldVal
+
+	// The Z coordinate in Jacobian projective coordinates.
+	Z FieldVal
+}
+
+// MakeJacobianPoint returns a Jacobian point with the provided X, Y, and Z
+// coordinates.
+func MakeJacobianPoint(x, y, z *FieldVal) JacobianPoint {
+	var p JacobianPoint
+	p.X.Set(x)
+	p.Y.Set(y)
+	p.Z.Set(z)
+	return p
+}
+
+// Set sets the Jacobian point to the provided point.
+func (p *JacobianPoint) Set(other *JacobianPoint) {
+	p.X.Set(&other.X)
+	p.Y.Set(&other.Y)
+	p.Z.Set(&other.Z)
+}
+
+// ToAffine reduces the Z value of the existing point to 1 effectively
+// making it an affine coordinate in constant time.  The point will be
+// normalized.
+func (p *JacobianPoint) ToAffine() {
+	// Inversions are expensive and both point addition and point doubling
+	// are faster when working with points that have a z value of one.  So,
+	// if the point needs to be converted to affine, go ahead and normalize
+	// the point itself at the same time as the calculation is the same.
+	var zInv, tempZ FieldVal
+	zInv.Set(&p.Z).Inverse()  // zInv = Z^-1
+	tempZ.SquareVal(&zInv)    // tempZ = Z^-2
+	p.X.Mul(&tempZ)           // X = X/Z^2 (mag: 1)
+	p.Y.Mul(tempZ.Mul(&zInv)) // Y = Y/Z^3 (mag: 1)
+	p.Z.SetInt(1)             // Z = 1 (mag: 1)
+
+	// Normalize the x and y values.
+	p.X.Normalize()
+	p.Y.Normalize()
+}
+
+// addZ1AndZ2EqualsOne adds two Jacobian points that are already known to have
+// z values of 1 and stores the result in the provided result param.  That is to
+// say result = p1 + p2.  It performs faster addition than the generic add
+// routine since less arithmetic is needed due to the ability to avoid the z
+// value multiplications.
+//
+// NOTE: The points must be normalized for this function to return the correct
+// result.  The resulting point will be normalized.
+func addZ1AndZ2EqualsOne(p1, p2, result *JacobianPoint) {
+	// To compute the point addition efficiently, this implementation splits
+	// the equation into intermediate elements which are used to minimize
+	// the number of field multiplications using the method shown at:
+	// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl
+	//
+	// In particular it performs the calculations using the following:
+	// H = X2-X1, HH = H^2, I = 4*HH, J = H*I, r = 2*(Y2-Y1), V = X1*I
+	// X3 = r^2-J-2*V, Y3 = r*(V-X3)-2*Y1*J, Z3 = 2*H
+	//
+	// This results in a cost of 4 field multiplications, 2 field squarings,
+	// 6 field additions, and 5 integer multiplications.
+	x1, y1 := &p1.X, &p1.Y
+	x2, y2 := &p2.X, &p2.Y
+	x3, y3, z3 := &result.X, &result.Y, &result.Z
+
+	// When the x coordinates are the same for two points on the curve, the
+	// y coordinates either must be the same, in which case it is point
+	// doubling, or they are opposite and the result is the point at
+	// infinity per the group law for elliptic curve cryptography.
+	if x1.Equals(x2) {
+		if y1.Equals(y2) {
+			// Since x1 == x2 and y1 == y2, point doubling must be
+			// done, otherwise the addition would end up dividing
+			// by zero.
+			DoubleNonConst(p1, result)
+			return
+		}
+
+		// Since x1 == x2 and y1 == -y2, the sum is the point at
+		// infinity per the group law.
+		x3.SetInt(0)
+		y3.SetInt(0)
+		z3.SetInt(0)
+		return
+	}
+
+	// Calculate X3, Y3, and Z3 according to the intermediate elements
+	// breakdown above.
+	var h, i, j, r, v FieldVal
+	var negJ, neg2V, negX3 FieldVal
+	h.Set(x1).Negate(1).Add(x2)                // H = X2-X1 (mag: 3)
+	i.SquareVal(&h).MulInt(4)                  // I = 4*H^2 (mag: 4)
+	j.Mul2(&h, &i)                             // J = H*I (mag: 1)
+	r.Set(y1).Negate(1).Add(y2).MulInt(2)      // r = 2*(Y2-Y1) (mag: 6)
+	v.Mul2(x1, &i)                             // V = X1*I (mag: 1)
+	negJ.Set(&j).Negate(1)                     // negJ = -J (mag: 2)
+	neg2V.Set(&v).MulInt(2).Negate(2)          // neg2V = -(2*V) (mag: 3)
+	x3.Set(&r).Square().Add(&negJ).Add(&neg2V) // X3 = r^2-J-2*V (mag: 6)
+	negX3.Set(x3).Negate(6)                    // negX3 = -X3 (mag: 7)
+	j.Mul(y1).MulInt(2).Negate(2)              // J = -(2*Y1*J) (mag: 3)
+	y3.Set(&v).Add(&negX3).Mul(&r).Add(&j)     // Y3 = r*(V-X3)-2*Y1*J (mag: 4)
+	z3.Set(&h).MulInt(2)                       // Z3 = 2*H (mag: 6)
+
+	// Normalize the resulting field values as needed.
+	x3.Normalize()
+	y3.Normalize()
+	z3.Normalize()
+}
+
+// addZ1EqualsZ2 adds two Jacobian points that are already known to have the
+// same z value and stores the result in the provided result param.  That is to
+// say result = p1 + p2.  It performs faster addition than the generic add
+// routine since less arithmetic is needed due to the known equivalence.
+//
+// NOTE: The points must be normalized for this function to return the correct
+// result.  The resulting point will be normalized.
+func addZ1EqualsZ2(p1, p2, result *JacobianPoint) {
+	// To compute the point addition efficiently, this implementation splits
+	// the equation into intermediate elements which are used to minimize
+	// the number of field multiplications using a slightly modified version
+	// of the method shown at:
+	// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-zadd-2007-m
+	//
+	// In particular it performs the calculations using the following:
+	// A = X2-X1, B = A^2, C=Y2-Y1, D = C^2, E = X1*B, F = X2*B
+	// X3 = D-E-F, Y3 = C*(E-X3)-Y1*(F-E), Z3 = Z1*A
+	//
+	// This results in a cost of 5 field multiplications, 2 field squarings,
+	// 9 field additions, and 0 integer multiplications.
+	x1, y1, z1 := &p1.X, &p1.Y, &p1.Z
+	x2, y2 := &p2.X, &p2.Y
+	x3, y3, z3 := &result.X, &result.Y, &result.Z
+
+	// When the x coordinates are the same for two points on the curve, the
+	// y coordinates either must be the same, in which case it is point
+	// doubling, or they are opposite and the result is the point at
+	// infinity per the group law for elliptic curve cryptography.
+	if x1.Equals(x2) {
+		if y1.Equals(y2) {
+			// Since x1 == x2 and y1 == y2, point doubling must be
+			// done, otherwise the addition would end up dividing
+			// by zero.
+			DoubleNonConst(p1, result)
+			return
+		}
+
+		// Since x1 == x2 and y1 == -y2, the sum is the point at
+		// infinity per the group law.
+		x3.SetInt(0)
+		y3.SetInt(0)
+		z3.SetInt(0)
+		return
+	}
+
+	// Calculate X3, Y3, and Z3 according to the intermediate elements
+	// breakdown above.
+	var a, b, c, d, e, f FieldVal
+	var negX1, negY1, negE, negX3 FieldVal
+	negX1.Set(x1).Negate(1)                // negX1 = -X1 (mag: 2)
+	negY1.Set(y1).Negate(1)                // negY1 = -Y1 (mag: 2)
+	a.Set(&negX1).Add(x2)                  // A = X2-X1 (mag: 3)
+	b.SquareVal(&a)                        // B = A^2 (mag: 1)
+	c.Set(&negY1).Add(y2)                  // C = Y2-Y1 (mag: 3)
+	d.SquareVal(&c)                        // D = C^2 (mag: 1)
+	e.Mul2(x1, &b)                         // E = X1*B (mag: 1)
+	negE.Set(&e).Negate(1)                 // negE = -E (mag: 2)
+	f.Mul2(x2, &b)                         // F = X2*B (mag: 1)
+	x3.Add2(&e, &f).Negate(2).Add(&d)      // X3 = D-E-F (mag: 4)
+	negX3.Set(x3).Negate(4)                // negX3 = -X3 (mag: 5)
+	y3.Set(y1).Mul(f.Add(&negE)).Negate(1) // Y3 = -(Y1*(F-E)) (mag: 2)
+	y3.Add(e.Add(&negX3).Mul(&c))          // Y3 = C*(E-X3)+Y3 (mag: 3)
+	z3.Mul2(z1, &a)                        // Z3 = Z1*A (mag: 1)
+
+	// Normalize the resulting field values as needed.
+	x3.Normalize()
+	y3.Normalize()
+	z3.Normalize()
+}
+
+// addZ2EqualsOne adds two Jacobian points when the second point is already
+// known to have a z value of 1 (and the z value for the first point is not 1)
+// and stores the result in the provided result param.  That is to say result =
+// p1 + p2.  It performs faster addition than the generic add routine since
+// less arithmetic is needed due to the ability to avoid multiplications by the
+// second point's z value.
+//
+// NOTE: The points must be normalized for this function to return the correct
+// result.  The resulting point will be normalized.
+func addZ2EqualsOne(p1, p2, result *JacobianPoint) {
+	// To compute the point addition efficiently, this implementation splits
+	// the equation into intermediate elements which are used to minimize
+	// the number of field multiplications using the method shown at:
+	// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl
+	//
+	// In particular it performs the calculations using the following:
+	// Z1Z1 = Z1^2, U2 = X2*Z1Z1, S2 = Y2*Z1*Z1Z1, H = U2-X1, HH = H^2,
+	// I = 4*HH, J = H*I, r = 2*(S2-Y1), V = X1*I
+	// X3 = r^2-J-2*V, Y3 = r*(V-X3)-2*Y1*J, Z3 = (Z1+H)^2-Z1Z1-HH
+	//
+	// This results in a cost of 7 field multiplications, 4 field squarings,
+	// 9 field additions, and 4 integer multiplications.
+	x1, y1, z1 := &p1.X, &p1.Y, &p1.Z
+	x2, y2 := &p2.X, &p2.Y
+	x3, y3, z3 := &result.X, &result.Y, &result.Z
+
+	// When the x coordinates are the same for two points on the curve, the
+	// y coordinates either must be the same, in which case it is point
+	// doubling, or they are opposite and the result is the point at
+	// infinity per the group law for elliptic curve cryptography.  Since
+	// any number of Jacobian coordinates can represent the same affine
+	// point, the x and y values need to be converted to like terms.  Due to
+	// the assumption made for this function that the second point has a z
+	// value of 1 (z2=1), the first point is already "converted".
+	var z1z1, u2, s2 FieldVal
+	z1z1.SquareVal(z1)                        // Z1Z1 = Z1^2 (mag: 1)
+	u2.Set(x2).Mul(&z1z1).Normalize()         // U2 = X2*Z1Z1 (mag: 1)
+	s2.Set(y2).Mul(&z1z1).Mul(z1).Normalize() // S2 = Y2*Z1*Z1Z1 (mag: 1)
+	if x1.Equals(&u2) {
+		if y1.Equals(&s2) {
+			// Since x1 == x2 and y1 == y2, point doubling must be
+			// done, otherwise the addition would end up dividing
+			// by zero.
+			DoubleNonConst(p1, result)
+			return
+		}
+
+		// Since x1 == x2 and y1 == -y2, the sum is the point at
+		// infinity per the group law.
+		x3.SetInt(0)
+		y3.SetInt(0)
+		z3.SetInt(0)
+		return
+	}
+
+	// Calculate X3, Y3, and Z3 according to the intermediate elements
+	// breakdown above.
+	var h, hh, i, j, r, rr, v FieldVal
+	var negX1, negY1, negX3 FieldVal
+	negX1.Set(x1).Negate(1)                // negX1 = -X1 (mag: 2)
+	h.Add2(&u2, &negX1)                    // H = U2-X1 (mag: 3)
+	hh.SquareVal(&h)                       // HH = H^2 (mag: 1)
+	i.Set(&hh).MulInt(4)                   // I = 4 * HH (mag: 4)
+	j.Mul2(&h, &i)                         // J = H*I (mag: 1)
+	negY1.Set(y1).Negate(1)                // negY1 = -Y1 (mag: 2)
+	r.Set(&s2).Add(&negY1).MulInt(2)       // r = 2*(S2-Y1) (mag: 6)
+	rr.SquareVal(&r)                       // rr = r^2 (mag: 1)
+	v.Mul2(x1, &i)                         // V = X1*I (mag: 1)
+	x3.Set(&v).MulInt(2).Add(&j).Negate(3) // X3 = -(J+2*V) (mag: 4)
+	x3.Add(&rr)                            // X3 = r^2+X3 (mag: 5)
+	negX3.Set(x3).Negate(5)                // negX3 = -X3 (mag: 6)
+	y3.Set(y1).Mul(&j).MulInt(2).Negate(2) // Y3 = -(2*Y1*J) (mag: 3)
+	y3.Add(v.Add(&negX3).Mul(&r))          // Y3 = r*(V-X3)+Y3 (mag: 4)
+	z3.Add2(z1, &h).Square()               // Z3 = (Z1+H)^2 (mag: 1)
+	z3.Add(z1z1.Add(&hh).Negate(2))        // Z3 = Z3-(Z1Z1+HH) (mag: 4)
+
+	// Normalize the resulting field values as needed.
+	x3.Normalize()
+	y3.Normalize()
+	z3.Normalize()
+}
+
+// addGeneric adds two Jacobian points without any assumptions about the z
+// values of the two points and stores the result in the provided result param.
+// That is to say result = p1 + p2.  It is the slowest of the add routines due
+// to requiring the most arithmetic.
+//
+// NOTE: The points must be normalized for this function to return the correct
+// result.  The resulting point will be normalized.
+func addGeneric(p1, p2, result *JacobianPoint) {
+	// To compute the point addition efficiently, this implementation splits
+	// the equation into intermediate elements which are used to minimize
+	// the number of field multiplications using the method shown at:
+	// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
+	//
+	// In particular it performs the calculations using the following:
+	// Z1Z1 = Z1^2, Z2Z2 = Z2^2, U1 = X1*Z2Z2, U2 = X2*Z1Z1, S1 = Y1*Z2*Z2Z2
+	// S2 = Y2*Z1*Z1Z1, H = U2-U1, I = (2*H)^2, J = H*I, r = 2*(S2-S1)
+	// V = U1*I
+	// X3 = r^2-J-2*V, Y3 = r*(V-X3)-2*S1*J, Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2)*H
+	//
+	// This results in a cost of 11 field multiplications, 5 field squarings,
+	// 9 field additions, and 4 integer multiplications.
+	x1, y1, z1 := &p1.X, &p1.Y, &p1.Z
+	x2, y2, z2 := &p2.X, &p2.Y, &p2.Z
+	x3, y3, z3 := &result.X, &result.Y, &result.Z
+
+	// When the x coordinates are the same for two points on the curve, the
+	// y coordinates either must be the same, in which case it is point
+	// doubling, or they are opposite and the result is the point at
+	// infinity.  Since any number of Jacobian coordinates can represent the
+	// same affine point, the x and y values need to be converted to like
+	// terms.
+	var z1z1, z2z2, u1, u2, s1, s2 FieldVal
+	z1z1.SquareVal(z1)                        // Z1Z1 = Z1^2 (mag: 1)
+	z2z2.SquareVal(z2)                        // Z2Z2 = Z2^2 (mag: 1)
+	u1.Set(x1).Mul(&z2z2).Normalize()         // U1 = X1*Z2Z2 (mag: 1)
+	u2.Set(x2).Mul(&z1z1).Normalize()         // U2 = X2*Z1Z1 (mag: 1)
+	s1.Set(y1).Mul(&z2z2).Mul(z2).Normalize() // S1 = Y1*Z2*Z2Z2 (mag: 1)
+	s2.Set(y2).Mul(&z1z1).Mul(z1).Normalize() // S2 = Y2*Z1*Z1Z1 (mag: 1)
+	if u1.Equals(&u2) {
+		if s1.Equals(&s2) {
+			// Since x1 == x2 and y1 == y2, point doubling must be
+			// done, otherwise the addition would end up dividing
+			// by zero.
+			DoubleNonConst(p1, result)
+			return
+		}
+
+		// Since x1 == x2 and y1 == -y2, the sum is the point at
+		// infinity per the group law.
+		x3.SetInt(0)
+		y3.SetInt(0)
+		z3.SetInt(0)
+		return
+	}
+
+	// Calculate X3, Y3, and Z3 according to the intermediate elements
+	// breakdown above.
+	var h, i, j, r, rr, v FieldVal
+	var negU1, negS1, negX3 FieldVal
+	negU1.Set(&u1).Negate(1)               // negU1 = -U1 (mag: 2)
+	h.Add2(&u2, &negU1)                    // H = U2-U1 (mag: 3)
+	i.Set(&h).MulInt(2).Square()           // I = (2*H)^2 (mag: 1)
+	j.Mul2(&h, &i)                         // J = H*I (mag: 1)
+	negS1.Set(&s1).Negate(1)               // negS1 = -S1 (mag: 2)
+	r.Set(&s2).Add(&negS1).MulInt(2)       // r = 2*(S2-S1) (mag: 6)
+	rr.SquareVal(&r)                       // rr = r^2 (mag: 1)
+	v.Mul2(&u1, &i)                        // V = U1*I (mag: 1)
+	x3.Set(&v).MulInt(2).Add(&j).Negate(3) // X3 = -(J+2*V) (mag: 4)
+	x3.Add(&rr)                            // X3 = r^2+X3 (mag: 5)
+	negX3.Set(x3).Negate(5)                // negX3 = -X3 (mag: 6)
+	y3.Mul2(&s1, &j).MulInt(2).Negate(2)   // Y3 = -(2*S1*J) (mag: 3)
+	y3.Add(v.Add(&negX3).Mul(&r))          // Y3 = r*(V-X3)+Y3 (mag: 4)
+	z3.Add2(z1, z2).Square()               // Z3 = (Z1+Z2)^2 (mag: 1)
+	z3.Add(z1z1.Add(&z2z2).Negate(2))      // Z3 = Z3-(Z1Z1+Z2Z2) (mag: 4)
+	z3.Mul(&h)                             // Z3 = Z3*H (mag: 1)
+
+	// Normalize the resulting field values as needed.
+	x3.Normalize()
+	y3.Normalize()
+	z3.Normalize()
+}
+
+// AddNonConst adds the passed Jacobian points together and stores the result in
+// the provided result param in *non-constant* time.
+//
+// NOTE: The points must be normalized for this function to return the correct
+// result.  The resulting point will be normalized.
+func AddNonConst(p1, p2, result *JacobianPoint) {
+	// The point at infinity is the identity according to the group law for
+	// elliptic curve cryptography.  Thus, ∞ + P = P and P + ∞ = P.
+	if (p1.X.IsZero() && p1.Y.IsZero()) || p1.Z.IsZero() {
+		result.Set(p2)
+		return
+	}
+	if (p2.X.IsZero() && p2.Y.IsZero()) || p2.Z.IsZero() {
+		result.Set(p1)
+		return
+	}
+
+	// Faster point addition can be achieved when certain assumptions are
+	// met.  For example, when both points have the same z value, arithmetic
+	// on the z values can be avoided.  This section thus checks for these
+	// conditions and calls an appropriate add function which is accelerated
+	// by using those assumptions.
+	isZ1One := p1.Z.IsOne()
+	isZ2One := p2.Z.IsOne()
+	switch {
+	case isZ1One && isZ2One:
+		addZ1AndZ2EqualsOne(p1, p2, result)
+		return
+	case p1.Z.Equals(&p2.Z):
+		addZ1EqualsZ2(p1, p2, result)
+		return
+	case isZ2One:
+		addZ2EqualsOne(p1, p2, result)
+		return
+	}
+
+	// None of the above assumptions are true, so fall back to generic
+	// point addition.
+	addGeneric(p1, p2, result)
+}
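+
+// The following is a minimal usage sketch of AddNonConst; it relies only on
+// behavior documented above (the identity rules and the requirement that the
+// inputs be normalized).  The points p and q are hypothetical and assumed to
+// already hold valid, normalized Jacobian coordinates:
+//
+//	var p, q, sum JacobianPoint
+//	// ... p and q populated with normalized Jacobian coordinates ...
+//	AddNonConst(&p, &q, &sum)               // sum = p + q (normalized)
+//	AddNonConst(&p, &JacobianPoint{}, &sum) // sum = p, since P + ∞ = P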
+
+// doubleZ1EqualsOne performs point doubling on the passed Jacobian point when
+// the point is already known to have a z value of 1 and stores the result in
+// the provided result param.  That is to say result = 2*p.  It performs faster
+// point doubling than the generic routine since less arithmetic is needed due
+// to the ability to avoid multiplication by the z value.
+//
+// NOTE: The resulting point will be normalized.
+func doubleZ1EqualsOne(p, result *JacobianPoint) {
+	// This function uses the assumptions that z1 is 1, thus the point
+	// doubling formulas reduce to:
+	//
+	// X3 = (3*X1^2)^2 - 8*X1*Y1^2
+	// Y3 = (3*X1^2)*(4*X1*Y1^2 - X3) - 8*Y1^4
+	// Z3 = 2*Y1
+	//
+	// To compute the above efficiently, this implementation splits the
+	// equation into intermediate elements which are used to minimize the
+	// number of field multiplications in favor of field squarings which
+	// are roughly 35% faster than field multiplications with the current
+	// implementation at the time this was written.
+	//
+	// This uses a slightly modified version of the method shown at:
+	// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl
+	//
+	// In particular it performs the calculations using the following:
+	// A = X1^2, B = Y1^2, C = B^2, D = 2*((X1+B)^2-A-C)
+	// E = 3*A, F = E^2, X3 = F-2*D, Y3 = E*(D-X3)-8*C
+	// Z3 = 2*Y1
+	//
+	// This results in a cost of 1 field multiplication, 5 field squarings,
+	// 6 field additions, and 5 integer multiplications.
+	x1, y1 := &p.X, &p.Y
+	x3, y3, z3 := &result.X, &result.Y, &result.Z
+	var a, b, c, d, e, f FieldVal
+	z3.Set(y1).MulInt(2)                     // Z3 = 2*Y1 (mag: 2)
+	a.SquareVal(x1)                          // A = X1^2 (mag: 1)
+	b.SquareVal(y1)                          // B = Y1^2 (mag: 1)
+	c.SquareVal(&b)                          // C = B^2 (mag: 1)
+	b.Add(x1).Square()                       // B = (X1+B)^2 (mag: 1)
+	d.Set(&a).Add(&c).Negate(2)              // D = -(A+C) (mag: 3)
+	d.Add(&b).MulInt(2)                      // D = 2*(B+D) (mag: 8)
+	e.Set(&a).MulInt(3)                      // E = 3*A (mag: 3)
+	f.SquareVal(&e)                          // F = E^2 (mag: 1)
+	x3.Set(&d).MulInt(2).Negate(16)          // X3 = -(2*D) (mag: 17)
+	x3.Add(&f)                               // X3 = F+X3 (mag: 18)
+	f.Set(x3).Negate(18).Add(&d).Normalize() // F = D-X3 (mag: 1)
+	y3.Set(&c).MulInt(8).Negate(8)           // Y3 = -(8*C) (mag: 9)
+	y3.Add(f.Mul(&e))                        // Y3 = E*F+Y3 (mag: 10)
+
+	// Normalize the resulting field values as needed.
+	x3.Normalize()
+	y3.Normalize()
+	z3.Normalize()
+}
+
+// doubleGeneric performs point doubling on the passed Jacobian point without
+// any assumptions about the z value and stores the result in the provided
+// result param.  That is to say result = 2*p.  It is the slowest of the point
+// doubling routines due to requiring the most arithmetic.
+//
+// NOTE: The resulting point will be normalized.
+func doubleGeneric(p, result *JacobianPoint) {
+	// Point doubling formula for Jacobian coordinates for the secp256k1
+	// curve:
+	//
+	// X3 = (3*X1^2)^2 - 8*X1*Y1^2
+	// Y3 = (3*X1^2)*(4*X1*Y1^2 - X3) - 8*Y1^4
+	// Z3 = 2*Y1*Z1
+	//
+	// To compute the above efficiently, this implementation splits the
+	// equation into intermediate elements which are used to minimize the
+	// number of field multiplications in favor of field squarings which
+	// are roughly 35% faster than field multiplications with the current
+	// implementation at the time this was written.
+	//
+	// This uses a slightly modified version of the method shown at:
+	// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
+	//
+	// In particular it performs the calculations using the following:
+	// A = X1^2, B = Y1^2, C = B^2, D = 2*((X1+B)^2-A-C)
+	// E = 3*A, F = E^2, X3 = F-2*D, Y3 = E*(D-X3)-8*C
+	// Z3 = 2*Y1*Z1
+	//
+	// This results in a cost of 1 field multiplication, 5 field squarings,
+	// 6 field additions, and 5 integer multiplications.
+	x1, y1, z1 := &p.X, &p.Y, &p.Z
+	x3, y3, z3 := &result.X, &result.Y, &result.Z
+	var a, b, c, d, e, f FieldVal
+	z3.Mul2(y1, z1).MulInt(2)                // Z3 = 2*Y1*Z1 (mag: 2)
+	a.SquareVal(x1)                          // A = X1^2 (mag: 1)
+	b.SquareVal(y1)                          // B = Y1^2 (mag: 1)
+	c.SquareVal(&b)                          // C = B^2 (mag: 1)
+	b.Add(x1).Square()                       // B = (X1+B)^2 (mag: 1)
+	d.Set(&a).Add(&c).Negate(2)              // D = -(A+C) (mag: 3)
+	d.Add(&b).MulInt(2)                      // D = 2*(B+D) (mag: 8)
+	e.Set(&a).MulInt(3)                      // E = 3*A (mag: 3)
+	f.SquareVal(&e)                          // F = E^2 (mag: 1)
+	x3.Set(&d).MulInt(2).Negate(16)          // X3 = -(2*D) (mag: 17)
+	x3.Add(&f)                               // X3 = F+X3 (mag: 18)
+	f.Set(x3).Negate(18).Add(&d).Normalize() // F = D-X3 (mag: 1)
+	y3.Set(&c).MulInt(8).Negate(8)           // Y3 = -(8*C) (mag: 9)
+	y3.Add(f.Mul(&e))                        // Y3 = E*F+Y3 (mag: 10)
+
+	// Normalize the resulting field values as needed.
+	x3.Normalize()
+	y3.Normalize()
+	z3.Normalize()
+}
+
+// DoubleNonConst doubles the passed Jacobian point and stores the result in the
+// provided result parameter in *non-constant* time.
+//
+// NOTE: The point must be normalized for this function to return the correct
+// result.  The resulting point will be normalized.
+func DoubleNonConst(p, result *JacobianPoint) {
+	// Doubling the point at infinity is still infinity.
+	if p.Y.IsZero() || p.Z.IsZero() {
+		result.X.SetInt(0)
+		result.Y.SetInt(0)
+		result.Z.SetInt(0)
+		return
+	}
+
+	// Slightly faster point doubling can be achieved when the z value is 1
+	// by avoiding the multiplication on the z value.  This section calls
+	// a point doubling function which is accelerated by using that
+	// assumption when possible.
+	if p.Z.IsOne() {
+		doubleZ1EqualsOne(p, result)
+		return
+	}
+
+	// Fall back to generic point doubling which works with arbitrary z
+	// values.
+	doubleGeneric(p, result)
+}
+
+// mulAdd64 multiplies the two passed base 2^64 digits together, adds the given
+// value to the result, and returns the 128-bit result via a (hi, lo) tuple
+// where the upper half of the bits are returned in hi and the lower half in lo.
+func mulAdd64(digit1, digit2, m uint64) (hi, lo uint64) {
+	// Note the carry on the final add is safe to discard because the maximum
+	// possible value is:
+	//   (2^64 - 1)(2^64 - 1) + (2^64 - 1) = 2^128 - 2^64
+	// and:
+	//   2^128 - 2^64 < 2^128.
+	var c uint64
+	hi, lo = bits.Mul64(digit1, digit2)
+	lo, c = bits.Add64(lo, m, 0)
+	hi, _ = bits.Add64(hi, 0, c)
+	return hi, lo
+}
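+
+// As a concrete illustration of mulAdd64 (the values below are chosen purely
+// for demonstration): with digit1 = 1<<63, digit2 = 4, and m = 5, the product
+// is 2^65, so the returned tuple is hi = 2, lo = 5:
+//
+//	hi, lo := mulAdd64(1<<63, 4, 5) // hi == 2, lo == 5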
+
+// mulAdd64Carry multiplies the two passed base 2^64 digits together, adds both
+// the given value and carry to the result, and returns the 128-bit result via a
+// (hi, lo) tuple where the upper half of the bits are returned in hi and the
+// lower half in lo.
+func mulAdd64Carry(digit1, digit2, m, c uint64) (hi, lo uint64) {
+	// Note the carry on the high order add is safe to discard because the
+	// maximum possible value is:
+	//   (2^64 - 1)(2^64 - 1) + 2*(2^64 - 1) = 2^128 - 1
+	// and:
+	//   2^128 - 1 < 2^128.
+	var c2 uint64
+	hi, lo = mulAdd64(digit1, digit2, m)
+	lo, c2 = bits.Add64(lo, c, 0)
+	hi, _ = bits.Add64(hi, 0, c2)
+	return hi, lo
+}
+
+// mul512Rsh320Round computes the full 512-bit product of the two given scalars,
+// right shifts the result by 320 bits, rounds to the nearest integer, and
+// returns the result in constant time.
+//
+// Note that despite the inputs and output being mod n scalars, the 512-bit
+// product is NOT reduced mod N prior to the right shift.  This is intentional
+// because it is used for replacing division with multiplication and thus the
+// intermediate results must be done via a field extension to a larger field.
+func mul512Rsh320Round(n1, n2 *ModNScalar) ModNScalar {
+	// Convert n1 and n2 to base 2^64 digits.
+	n1Digit0 := uint64(n1.n[0]) | uint64(n1.n[1])<<32
+	n1Digit1 := uint64(n1.n[2]) | uint64(n1.n[3])<<32
+	n1Digit2 := uint64(n1.n[4]) | uint64(n1.n[5])<<32
+	n1Digit3 := uint64(n1.n[6]) | uint64(n1.n[7])<<32
+	n2Digit0 := uint64(n2.n[0]) | uint64(n2.n[1])<<32
+	n2Digit1 := uint64(n2.n[2]) | uint64(n2.n[3])<<32
+	n2Digit2 := uint64(n2.n[4]) | uint64(n2.n[5])<<32
+	n2Digit3 := uint64(n2.n[6]) | uint64(n2.n[7])<<32
+
+	// Compute the full 512-bit product n1*n2.
+	var r0, r1, r2, r3, r4, r5, r6, r7, c uint64
+
+	// Terms resulting from the product of the first digit of the second number
+	// by all digits of the first number.
+	//
+	// Note that r0 is ignored because it is not needed to compute the higher
+	// terms and it is shifted out below anyway.
+	c, _ = bits.Mul64(n2Digit0, n1Digit0)
+	c, r1 = mulAdd64(n2Digit0, n1Digit1, c)
+	c, r2 = mulAdd64(n2Digit0, n1Digit2, c)
+	r4, r3 = mulAdd64(n2Digit0, n1Digit3, c)
+
+	// Terms resulting from the product of the second digit of the second number
+	// by all digits of the first number.
+	//
+	// Note that r1 is ignored because it is no longer needed to compute the
+	// higher terms and it is shifted out below anyway.
+	c, _ = mulAdd64(n2Digit1, n1Digit0, r1)
+	c, r2 = mulAdd64Carry(n2Digit1, n1Digit1, r2, c)
+	c, r3 = mulAdd64Carry(n2Digit1, n1Digit2, r3, c)
+	r5, r4 = mulAdd64Carry(n2Digit1, n1Digit3, r4, c)
+
+	// Terms resulting from the product of the third digit of the second number
+	// by all digits of the first number.
+	//
+	// Note that r2 is ignored because it is no longer needed to compute the
+	// higher terms and it is shifted out below anyway.
+	c, _ = mulAdd64(n2Digit2, n1Digit0, r2)
+	c, r3 = mulAdd64Carry(n2Digit2, n1Digit1, r3, c)
+	c, r4 = mulAdd64Carry(n2Digit2, n1Digit2, r4, c)
+	r6, r5 = mulAdd64Carry(n2Digit2, n1Digit3, r5, c)
+
+	// Terms resulting from the product of the fourth digit of the second number
+	// by all digits of the first number.
+	//
+	// Note that r3 is ignored because it is no longer needed to compute the
+	// higher terms and it is shifted out below anyway.
+	c, _ = mulAdd64(n2Digit3, n1Digit0, r3)
+	c, r4 = mulAdd64Carry(n2Digit3, n1Digit1, r4, c)
+	c, r5 = mulAdd64Carry(n2Digit3, n1Digit2, r5, c)
+	r7, r6 = mulAdd64Carry(n2Digit3, n1Digit3, r6, c)
+
+	// At this point the upper 256 bits of the full 512-bit product n1*n2 are in
+	// r4..r7 (recall the low order results were discarded as noted above).
+	//
+	// Right shift the result 320 bits.  Note that the MSB of r4 determines
+	// whether or not to round because it is the final bit that is shifted out.
+	//
+	// Also, notice that r3..r7 would ordinarily be set to 0 as well for the
+	// full shift, but that is skipped since they are no longer used.
+	roundBit := r4 >> 63
+	r2, r1, r0 = r7, r6, r5
+
+	// Conditionally add 1 depending on the round bit in constant time.
+	r0, c = bits.Add64(r0, roundBit, 0)
+	r1, c = bits.Add64(r1, 0, c)
+	r2, r3 = bits.Add64(r2, 0, c)
+
+	// Finally, convert the result to a mod n scalar.
+	//
+	// No modular reduction is needed because the result is guaranteed to be
+	// less than the group order given the group order is > 2^255 and the
+	// maximum possible value of the result is 2^192.
+	var result ModNScalar
+	result.n[0] = uint32(r0)
+	result.n[1] = uint32(r0 >> 32)
+	result.n[2] = uint32(r1)
+	result.n[3] = uint32(r1 >> 32)
+	result.n[4] = uint32(r2)
+	result.n[5] = uint32(r2 >> 32)
+	result.n[6] = uint32(r3)
+	result.n[7] = uint32(r3 >> 32)
+	return result
+}
+
+// splitK returns two scalars (k1 and k2) that are a balanced length-two
+// representation of the provided scalar such that k ≡ k1 + k2*λ (mod N), where
+// N is the secp256k1 group order.
+func splitK(k *ModNScalar) (ModNScalar, ModNScalar) {
+	// The ultimate goal is to decompose k into two scalars that are around
+	// half the bit length of k such that the following equation is satisfied:
+	//
+	// k1 + k2*λ ≡ k (mod n)
+	//
+	// The strategy used here is based on algorithm 3.74 from [GECC] with a few
+	// modifications to make use of the more efficient mod n scalar type, avoid
+	// some costly long divisions, and minimize the number of calculations.
+	//
+	// Start by defining a function that takes a vector v = <a,b> ∈ ℤ⨯ℤ:
+	//
+	// f(v) = a + bλ (mod n)
+	//
+	// Then, find two vectors, v1 = <a1,b1>, and v2 = <a2,b2> in ℤ⨯ℤ such that:
+	// 1) v1 and v2 are linearly independent
+	// 2) f(v1) = f(v2) = 0
+	// 3) v1 and v2 have small Euclidean norm
+	//
+	// The vectors that satisfy these properties are found via the Euclidean
+	// algorithm and are precomputed since both n and λ are fixed values for the
+	// secp256k1 curve.  See genprecomps.go for derivation details.
+	//
+	// Next, consider k as a vector <k, 0> in ℚ⨯ℚ and by linear algebra write:
+	//
+	// <k, 0> = g1*v1 + g2*v2, where g1, g2 ∈ ℚ
+	//
+	// Note that, per above, the components of vector v1 are a1 and b1 while the
+	// components of vector v2 are a2 and b2.  Given the vectors v1 and v2 were
+	// generated such that a1*b2 - a2*b1 = n, solving the equation for g1 and g2
+	// yields:
+	//
+	// g1 = b2*k / n
+	// g2 = -b1*k / n
+	//
+	// Observe:
+	// <k, 0> = g1*v1 + g2*v2
+	//        = (b2*k/n)*<a1,b1> + (-b1*k/n)*<a2,b2>              | substitute
+	//        = <a1*b2*k/n, b1*b2*k/n> + <-a2*b1*k/n, -b2*b1*k/n> | scalar mul
+	//        = <a1*b2*k/n - a2*b1*k/n, b1*b2*k/n - b2*b1*k/n>    | vector add
+	//        = <[a1*b2*k - a2*b1*k]/n, 0>                        | simplify
+	//        = <k*[a1*b2 - a2*b1]/n, 0>                          | factor out k
+	//        = <k*n/n, 0>                                        | substitute
+	//        = <k, 0>                                            | simplify
+	//
+	// Now, consider an integer-valued vector v:
+	//
+	// v = c1*v1 + c2*v2, where c1, c2 ∈ ℤ (mod n)
+	//
+	// Since vectors v1 and v2 are linearly independent and were generated such
+	// that f(v1) = f(v2) = 0, all possible scalars c1 and c2 also produce a
+	// vector v such that f(v) = 0.
+	//
+	// In other words, c1 and c2 can be any integers and the resulting
+	// decomposition will still satisfy the required equation.  However, since
+	// the goal is to produce a balanced decomposition that provides a
+	// performance advantage by minimizing max(k1, k2), c1 and c2 need to be
+	// integers close to g1 and g2, respectively, so the resulting vector v is
+	// an integer-valued vector that is close to <k, 0>.
+	//
+	// Finally, consider the vector u:
+	//
+	// u  = <k, 0> - v
+	//
+	// It follows that f(u) = k and thus the two components of vector u satisfy
+	// the required equation:
+	//
+	// k1 + k2*λ ≡ k (mod n)
+	//
+	// Choosing c1 and c2:
+	// -------------------
+	//
+	// As mentioned above, c1 and c2 need to be integers close to g1 and g2,
+	// respectively.  The algorithm in [GECC] chooses the following values:
+	//
+	// c1 = round(g1) = round(b2*k / n)
+	// c2 = round(g2) = round(-b1*k / n)
+	//
+	// However, as section 3.4.2 of [STWS] notes, the aforementioned approach
+	// requires costly long divisions that can be avoided by precomputing
+	// rounded estimates as follows:
+	//
+	// t = bitlen(n) + 1
+	// z1 = round(2^t * b2 / n)
+	// z2 = round(2^t * -b1 / n)
+	//
+	// Then, use those precomputed estimates to perform a multiplication by k
+	// along with a floored division by 2^t, which is a simple right shift by t:
+	//
+	// c1 = floor(k * z1 / 2^t) = (k * z1) >> t
+	// c2 = floor(k * z2 / 2^t) = (k * z2) >> t
+	//
+	// Finally, round up, by adding 1, if the last bit discarded in the right
+	// shift by t is set.
+	//
+	// As a further optimization, rather than setting t = bitlen(n) + 1 = 257 as
+	// stated by [STWS], this implementation uses a higher precision estimate of
+	// t = bitlen(n) + 64 = 320 because it allows simplification of the shifts
+	// in the internal calculations that are done via uint64s and also allows
+	// the use of floor in the precomputations.
+	//
+	// Thus, the calculations this implementation uses are:
+	//
+	// z1 = floor(b2<<320 / n)                                     | precomputed
+	// z2 = floor((-b1)<<320 / n)                                  | precomputed
+	// c1 = ((k * z1) >> 320) + (((k * z1) >> 319) & 1)
+	// c2 = ((k * z2) >> 320) + (((k * z2) >> 319) & 1)
+	//
+	// Putting it all together:
+	// ------------------------
+	//
+	// Calculate the following vectors using the values discussed above:
+	//
+	// v = c1*v1 + c2*v2
+	// u = <k, 0> - v
+	//
+	// The two components of the resulting vector v are:
+	// va = c1*a1 + c2*a2
+	// vb = c1*b1 + c2*b2
+	//
+	// Thus, the two components of the resulting vector u are:
+	// k1 = k - va
+	// k2 = 0 - vb = -vb
+	//
+	// As some final optimizations:
+	//
+	// 1) Note that k1 + k2*λ ≡ k (mod n) means that k1 ≡ k - k2*λ (mod n).
+	//    Therefore, the computation of va can be avoided to save two
+	//    field multiplications and a field addition.
+	//
+	// 2) Since k1 = k - k2*λ = k + k2*(-λ), an additional field negation is
+	//    saved by storing and using the negative version of λ.
+	//
+	// 3) Since k2 = -vb = -(c1*b1 + c2*b2) = c1*(-b1) + c2*(-b2), one more
+	//    field negation is saved by storing and using the negative versions of
+	//    b1 and b2.
+	//
+	// k2 = c1*(-b1) + c2*(-b2)
+	// k1 = k + k2*(-λ)
+	var k1, k2 ModNScalar
+	c1 := mul512Rsh320Round(k, endoZ1)
+	c2 := mul512Rsh320Round(k, endoZ2)
+	k2.Add2(c1.Mul(endoNegB1), c2.Mul(endoNegB2))
+	k1.Mul2(&k2, endoNegLambda).Add(k)
+	return k1, k2
+}
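+
+// A minimal sketch of how the decomposition above is consumed; this mirrors
+// the way ScalarMultNonConst below uses it and, by construction, the returned
+// scalars always satisfy k1 + k2*λ ≡ k (mod n):
+//
+//	k1, k2 := splitK(k)
+//	// k*P may now be computed as the two half-length multiplications
+//	// k1*P + k2*φ(P) instead of one full-width multiplication.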
+
+// nafScalar represents a positive integer up to a maximum value of 2^256 - 1
+// encoded in non-adjacent form.
+//
+// NAF is a signed-digit representation where each digit can be +1, 0, or -1.
+//
+// In order to efficiently encode that information, this type uses two arrays, a
+// "positive" array where set bits represent the +1 signed digits and a
+// "negative" array where set bits represent the -1 signed digits.  0 is
+// represented by neither array having a bit set in that position.
+//
+// The Pos and Neg methods return the aforementioned positive and negative
+// arrays, respectively.
+type nafScalar struct {
+	// pos houses the positive portion of the representation.  An additional
+	// byte is required for the positive portion because the NAF encoding can be
+	// up to 1 bit longer than the normal binary encoding of the value.
+	//
+	// neg houses the negative portion of the representation.  Even though the
+	// additional byte is not required for the negative portion, since it can
+	// never exceed the length of the normal binary encoding of the value,
+	// keeping the same length for positive and negative portions simplifies
+	// working with the representation and allows extra conditional branches to
+	// be avoided.
+	//
+	// start and end specify the starting and ending index to use within the pos
+	// and neg arrays, respectively.  This allows fixed size arrays to be used
+	// versus needing to dynamically allocate space on the heap.
+	//
+	// NOTE: The fields are defined in this order to minimize the padding on
+	// 32-bit and 64-bit platforms.
+	pos        [33]byte
+	start, end uint8
+	neg        [33]byte
+}
+
+// Pos returns the bytes of the encoded value with bits set in the positions
+// that represent a signed digit of +1.
+func (s *nafScalar) Pos() []byte {
+	return s.pos[s.start:s.end]
+}
+
+// Neg returns the bytes of the encoded value with bits set in the positions
+// that represent a signed digit of -1.
+func (s *nafScalar) Neg() []byte {
+	return s.neg[s.start:s.end]
+}
+
+// naf takes a positive integer up to a maximum value of 2^256 - 1 and returns
+// its non-adjacent form (NAF), which is a unique signed-digit representation
+// such that no two consecutive digits are nonzero.  See the documentation for
+// the returned type for details on how the representation is encoded
+// efficiently and how to interpret it.
+//
+// NAF is useful in that it has the fewest nonzero digits of any signed digit
+// representation, only 1/3rd of its digits are nonzero on average, and at least
+// half of the digits will be 0.
+//
+// The aforementioned properties are particularly beneficial for optimizing
+// elliptic curve point multiplication because they effectively minimize the
+// number of required point additions in exchange for needing to perform a mix
+// of fewer point additions and subtractions and possibly one additional point
+// doubling.  This is an excellent tradeoff because subtraction of points has
+// the same computational complexity as addition of points and point doubling is
+// faster than both.
+func naf(k []byte) nafScalar {
+	// Strip leading zero bytes.
+	for len(k) > 0 && k[0] == 0x00 {
+		k = k[1:]
+	}
+
+	// The non-adjacent form (NAF) of a positive integer k is an expression
+	// k = ∑_(i=0, l-1) k_i * 2^i where k_i ∈ {0,±1}, k_(l-1) != 0, and no two
+	// consecutive digits k_i are nonzero.
+	//
+	// The traditional method of computing the NAF of a positive integer is
+	// given by algorithm 3.30 in [GECC].  It consists of repeatedly dividing k
+	// by 2 and choosing the remainder so that the quotient (k−r)/2 is even
+	// which ensures the next NAF digit is 0.  This requires log_2(k) steps.
+	//
+	// However, in [BRID], Prodinger notes that a closed form expression for the
+	// NAF representation is the bitwise difference 3k/2 - k/2.  This is more
+	// efficient as it can be computed in O(1) versus the O(log(n)) of the
+	// traditional approach.
+	//
+	// The following code makes use of that formula to compute the NAF more
+	// efficiently.
+	//
+	// To understand the logic here, observe that the only way the NAF has a
+	// nonzero digit at a given bit is when either 3k/2 or k/2 has a bit set in
+	// that position, but not both.  In other words, the result of a bitwise
+	// xor.  This can be seen simply by considering that when the bits are the
+	// same, the subtraction is either 0-0 or 1-1, both of which are 0.
+	//
+	// Further, observe that the "+1" digits in the result are contributed by
+	// 3k/2 while the "-1" digits are from k/2.  So, they can be determined by
+	// taking the bitwise and of each respective value with the result of the
+	// xor which identifies which bits are nonzero.
+	//
+	// Using that information, this loops backwards from the least significant
+	// byte to the most significant byte while performing the aforementioned
+	// calculations by propagating the potential carry and high order bit from
+	// the next word during the right shift.
+	kLen := len(k)
+	var result nafScalar
+	var carry uint8
+	for byteNum := kLen - 1; byteNum >= 0; byteNum-- {
+		// Calculate k/2.  Notice the carry from the previous word is added and
+		// the low order bit from the next word is shifted in accordingly.
+		kc := uint16(k[byteNum]) + uint16(carry)
+		var nextWord uint8
+		if byteNum > 0 {
+			nextWord = k[byteNum-1]
+		}
+		halfK := kc>>1 | uint16(nextWord<<7)
+
+		// Calculate 3k/2 and determine the non-zero digits in the result.
+		threeHalfK := kc + halfK
+		nonZeroResultDigits := threeHalfK ^ halfK
+
+		// Determine the signed digits {0, ±1}.
+		result.pos[byteNum+1] = uint8(threeHalfK & nonZeroResultDigits)
+		result.neg[byteNum+1] = uint8(halfK & nonZeroResultDigits)
+
+		// Propagate the potential carry from the 3k/2 calculation.
+		carry = uint8(threeHalfK >> 8)
+	}
+	result.pos[0] = carry
+
+	// Set the starting and ending positions within the fixed size arrays to
+	// identify the bytes that are actually used.  This is important since the
+	// encoding is big endian and thus trailing zero bytes change its value.
+	result.start = 1 - carry
+	result.end = uint8(kLen + 1)
+	return result
+}
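+
+// As a small worked example of the encoding above, naf([]byte{0x07}) yields
+// Pos() = [0x08] and Neg() = [0x01], i.e. a +1 digit at bit 3 and a -1 digit
+// at bit 0.  That corresponds to 7 = 8 - 1 and, as required by NAF, no two
+// consecutive digits are nonzero:
+//
+//	n := naf([]byte{0x07})
+//	// n.Pos() == []byte{0x08}, n.Neg() == []byte{0x01}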
+
+// ScalarMultNonConst multiplies k*P where k is a scalar modulo the curve order
+// and P is a point in Jacobian projective coordinates and stores the result in
+// the provided Jacobian point.
+//
+// NOTE: The point must be normalized for this function to return the correct
+// result.  The resulting point will be normalized.
+func ScalarMultNonConst(k *ModNScalar, point, result *JacobianPoint) {
+	// -------------------------------------------------------------------------
+	// This makes use of the following efficiently-computable endomorphism to
+	// accelerate the computation:
+	//
+	// φ(P) ⟼ λ*P = (β*P.x mod p, P.y)
+	//
+	// In other words, there is a special scalar λ that every point on the
+	// elliptic curve can be multiplied by that will result in the same point as
+	// performing a single field multiplication of the point's X coordinate by
+	// the special value β.
+	//
+	// This is useful because scalar point multiplication is significantly more
+	// expensive than a single field multiplication given the former involves a
+	// series of point doublings and additions which themselves consist of a
+	// combination of several field multiplications, squarings, and additions.
+	//
+	// So, the idea behind making use of the endomorphism is thus to decompose
+	// the scalar into two scalars that are each about half the bit length of
+	// the original scalar such that:
+	//
+	// k ≡ k1 + k2*λ (mod n)
+	//
+	// This in turn allows the scalar point multiplication to be performed as a
+	// sum of two smaller half-length multiplications as follows:
+	//
+	// k*P = (k1 + k2*λ)*P
+	//     = k1*P + k2*λ*P
+	//     = k1*P + k2*φ(P)
+	//
+	// Thus, a speedup is achieved so long as it's faster to decompose the
+	// scalar, compute φ(P), and perform a simultaneous multiply of the
+	// half-length point multiplications than it is to compute a full width
+	// point multiplication.
+	//
+	// In practice, benchmarks show the current implementation provides a
+	// speedup of around 30-35% versus not using the endomorphism.
+	//
+	// See section 3.5 in [GECC] for a more rigorous treatment.
+	// -------------------------------------------------------------------------
+
+	// Per above, the main equation here to remember is:
+	//   k*P = k1*P + k2*φ(P)
+	//
+	// p1 below is P in the equation while p2 is φ(P) in the equation.
+	//
+	// NOTE: φ(x,y) = (β*x,y).  The Jacobian z coordinates are the same, so this
+	// math goes through.
+	//
+	// Also, calculate -p1 and -p2 for use in the NAF optimization.
+	p1, p1Neg := new(JacobianPoint), new(JacobianPoint)
+	p1.Set(point)
+	p1Neg.Set(p1)
+	p1Neg.Y.Negate(1).Normalize()
+	p2, p2Neg := new(JacobianPoint), new(JacobianPoint)
+	p2.Set(p1)
+	p2.X.Mul(endoBeta).Normalize()
+	p2Neg.Set(p2)
+	p2Neg.Y.Negate(1).Normalize()
+
+	// Decompose k into k1 and k2 such that k = k1 + k2*λ (mod n) where k1 and
+	// k2 are around half the bit length of k in order to halve the number of EC
+	// operations.
+	//
+	// Notice that this also flips the sign of the scalars and points as needed
+	// to minimize the bit lengths of the scalars k1 and k2.
+	//
+	// This is done because the scalars are operating modulo the group order
+	// which means that when they would otherwise be a small negative magnitude
+	// they will instead be a large positive magnitude.  Since the goal is for
+	// the scalars to have a small magnitude to achieve a performance boost, use
+	// their negation when they are greater than the half order of the group and
+	// flip the positive and negative values of the corresponding point that
+	// will be multiplied by to compensate.
+	//
+	// In other words, transform the calc when k1 is over the half order to:
+	//   k1*P = -k1*-P
+	//
+	// Similarly, transform the calc when k2 is over the half order to:
+	//   k2*φ(P) = -k2*-φ(P)
+	k1, k2 := splitK(k)
+	if k1.IsOverHalfOrder() {
+		k1.Negate()
+		p1, p1Neg = p1Neg, p1
+	}
+	if k2.IsOverHalfOrder() {
+		k2.Negate()
+		p2, p2Neg = p2Neg, p2
+	}
+
+	// Convert k1 and k2 into their NAF representations since NAF has a lot more
+	// zeros overall on average which minimizes the number of required point
+	// additions in exchange for a mix of fewer point additions and subtractions
+	// at the cost of one additional point doubling.
+	//
+	// This is an excellent tradeoff because subtraction of points has the same
+	// computational complexity as addition of points and point doubling is
+	// faster than both.
+	//
+	// Concretely, on average, 1/2 of all bits will be non-zero with the normal
+	// binary representation whereas only 1/3rd of the bits will be non-zero
+	// with NAF.
+	//
+	// The Pos version of the bytes contains the +1s and the Neg version contains
+	// the -1s.
+	k1Bytes, k2Bytes := k1.Bytes(), k2.Bytes()
+	k1NAF, k2NAF := naf(k1Bytes[:]), naf(k2Bytes[:])
+	k1PosNAF, k1NegNAF := k1NAF.Pos(), k1NAF.Neg()
+	k2PosNAF, k2NegNAF := k2NAF.Pos(), k2NAF.Neg()
+	k1Len, k2Len := len(k1PosNAF), len(k2PosNAF)
+
+	// Add left-to-right using the NAF optimization.  See algorithm 3.77 from
+	// [GECC].
+	//
+	// Point Q = ∞ (point at infinity).
+	var q JacobianPoint
+	m := k1Len
+	if m < k2Len {
+		m = k2Len
+	}
+	for i := 0; i < m; i++ {
+		// Since k1 and k2 are potentially different lengths and the calculation
+		// is being done left to right, pad the front of the shorter one with
+		// 0s.
+		var k1BytePos, k1ByteNeg, k2BytePos, k2ByteNeg byte
+		if i >= m-k1Len {
+			k1BytePos, k1ByteNeg = k1PosNAF[i-(m-k1Len)], k1NegNAF[i-(m-k1Len)]
+		}
+		if i >= m-k2Len {
+			k2BytePos, k2ByteNeg = k2PosNAF[i-(m-k2Len)], k2NegNAF[i-(m-k2Len)]
+		}
+
+		for mask := uint8(1 << 7); mask > 0; mask >>= 1 {
+			// Q = 2 * Q
+			DoubleNonConst(&q, &q)
+
+			// Add or subtract the first point based on the signed digit of the
+			// NAF representation of k1 at this bit position.
+			//
+			// +1: Q = Q + p1
+			// -1: Q = Q - p1
+			//  0: Q = Q (no change)
+			if k1BytePos&mask == mask {
+				AddNonConst(&q, p1, &q)
+			} else if k1ByteNeg&mask == mask {
+				AddNonConst(&q, p1Neg, &q)
+			}
+
+			// Add or subtract the second point based on the signed digit of the
+			// NAF representation of k2 at this bit position.
+			//
+			// +1: Q = Q + p2
+			// -1: Q = Q - p2
+			//  0: Q = Q (no change)
+			if k2BytePos&mask == mask {
+				AddNonConst(&q, p2, &q)
+			} else if k2ByteNeg&mask == mask {
+				AddNonConst(&q, p2Neg, &q)
+			}
+		}
+	}
+
+	result.Set(&q)
+}
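+
+// A minimal usage sketch of ScalarMultNonConst; point is assumed to already be
+// a valid, normalized Jacobian point and k a scalar reduced mod the group
+// order (the variable names are illustrative only):
+//
+//	var product JacobianPoint
+//	ScalarMultNonConst(&k, &point, &product) // product = k*point
+//	product.ToAffine()                       // optionally convert to affine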
+
+// ScalarBaseMultNonConst multiplies k*G where k is a scalar modulo the curve
+// order and G is the base point of the group and stores the result in the
+// provided Jacobian point.
+//
+// NOTE: The resulting point will be normalized.
+func ScalarBaseMultNonConst(k *ModNScalar, result *JacobianPoint) {
+	bytePoints := s256BytePoints()
+
+	// Start with the point at infinity.
+	result.X.Zero()
+	result.Y.Zero()
+	result.Z.Zero()
+
+	// bytePoints has all 256 byte points for each 8-bit window.  The strategy
+	// is to add up the byte points.  This is best understood by expressing k in
+	// base-256 which it already sort of is.  Each "digit" in the 8-bit window
+	// can be looked up using bytePoints and added together.
+	kb := k.Bytes()
+	for i := 0; i < len(kb); i++ {
+		pt := &bytePoints[i][kb[i]]
+		AddNonConst(result, pt, result)
+	}
+}
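+
+// Sketch of deriving the public point for a private scalar with the base point
+// multiplication above (privScalar and pub are illustrative names only):
+//
+//	var pub JacobianPoint
+//	ScalarBaseMultNonConst(&privScalar, &pub) // pub = privScalar*G
+//	pub.ToAffine()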
+
+// isOnCurve returns whether or not the affine point (x,y) is on the curve.
+func isOnCurve(fx, fy *FieldVal) bool {
+	// Elliptic curve equation for secp256k1 is: y^2 = x^3 + 7
+	y2 := new(FieldVal).SquareVal(fy).Normalize()
+	result := new(FieldVal).SquareVal(fx).Mul(fx).AddInt(7).Normalize()
+	return y2.Equals(result)
+}
+
+// DecompressY attempts to calculate the Y coordinate for the given X coordinate
+// such that the result pair is a point on the secp256k1 curve.  It adjusts Y
+// based on the desired oddness and returns whether or not it was successful
+// since not all X coordinates are valid.
+//
+// The magnitude of the provided X coordinate field val must be a max of 8 for a
+// correct result.  The resulting Y field val will have a max magnitude of 2.
+func DecompressY(x *FieldVal, odd bool, resultY *FieldVal) bool {
+	// The curve equation for secp256k1 is: y^2 = x^3 + 7.  Thus
+	// y = +-sqrt(x^3 + 7).
+	//
+	// The x coordinate must be invalid if there is no square root for the
+	// calculated rhs because it means the X coordinate is not for a point on
+	// the curve.
+	x3PlusB := new(FieldVal).SquareVal(x).Mul(x).AddInt(7)
+	if hasSqrt := resultY.SquareRootVal(x3PlusB); !hasSqrt {
+		return false
+	}
+	if resultY.Normalize().IsOdd() != odd {
+		resultY.Negate(1)
+	}
+	return true
+}
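+
+// Sketch of decompressing a point from an x coordinate, assuming x has been
+// populated (e.g. via SetByteSlice) and odd is the oddness bit taken from a
+// compressed encoding (both names are illustrative only):
+//
+//	var y FieldVal
+//	if !DecompressY(&x, odd, &y) {
+//		// x is not the x coordinate of any point on the curve
+//	}
+//	y.Normalize() // the result has a max magnitude of 2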
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/doc.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/doc.go
new file mode 100644
index 0000000000..ac01e2343c
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/doc.go
@@ -0,0 +1,59 @@
+// Copyright (c) 2013-2014 The btcsuite developers
+// Copyright (c) 2015-2022 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+/*
+Package secp256k1 implements optimized secp256k1 elliptic curve operations in
+pure Go.
+
+This package provides an optimized pure Go implementation of elliptic curve
+cryptography operations over the secp256k1 curve as well as data structures and
+functions for working with public and private secp256k1 keys.  See
+https://www.secg.org/sec2-v2.pdf for details on the standard.
+
+In addition, sub packages are provided to produce, verify, parse, and serialize
+ECDSA signatures and EC-Schnorr-DCRv0 (a custom Schnorr-based signature scheme
+specific to Decred) signatures.  See the README.md files in the relevant sub
+packages for more details about those aspects.
+
+An overview of the features provided by this package is as follows:
+
+  - Private key generation, serialization, and parsing
+  - Public key generation, serialization and parsing per ANSI X9.62-1998
+  - Parses uncompressed, compressed, and hybrid public keys
+  - Serializes uncompressed and compressed public keys
+  - Specialized types for performing optimized and constant time field operations
+  - FieldVal type for working modulo the secp256k1 field prime
+  - ModNScalar type for working modulo the secp256k1 group order
+  - Elliptic curve operations in Jacobian projective coordinates
+  - Point addition
+  - Point doubling
+  - Scalar multiplication with an arbitrary point
+  - Scalar multiplication with the base point (group generator)
+  - Point decompression from a given x coordinate
+  - Nonce generation via RFC6979 with support for extra data and version
+    information that can be used to prevent nonce reuse between signing
+    algorithms
+
+It also provides an implementation of the Go standard library crypto/elliptic
+Curve interface via the S256 function so that it may be used with other packages
+in the standard library such as crypto/tls, crypto/x509, and crypto/ecdsa.
+However, in the case of ECDSA, it is highly recommended to use the ecdsa sub
+package of this package instead since it is optimized specifically for secp256k1
+and is significantly faster as a result.
+
+Although this package was primarily written for dcrd, it has intentionally been
+designed so it can be used as a standalone package for any projects needing to
+use optimized secp256k1 elliptic curve cryptography.
+
+Finally, a comprehensive suite of tests is provided to ensure a high level of
+quality assurance.
+
+# Use of secp256k1 in Decred
+
+At the time of this writing, the primary public key cryptography in widespread
+use on the Decred network used to secure coins is based on elliptic curves
+defined by the secp256k1 domain parameters.
+*/
+package secp256k1
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go
new file mode 100644
index 0000000000..96869a3cd9
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2015 The btcsuite developers
+// Copyright (c) 2015-2023 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package secp256k1
+
+// GenerateSharedSecret generates a shared secret based on a private key and a
+// public key using Diffie-Hellman key exchange (ECDH) (RFC 5903).
+// RFC5903 Section 9 states we should only return x.
+//
+// It is recommended to securely hash the result before using it as a
+// cryptographic key.
+func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte {
+	var point, result JacobianPoint
+	pubkey.AsJacobian(&point)
+	ScalarMultNonConst(&privkey.Key, &point, &result)
+	result.ToAffine()
+	xBytes := result.X.Bytes()
+	return xBytes[:]
+}
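+
+// Per the recommendation above, callers would typically hash the returned x
+// coordinate before using it as key material.  A minimal sketch using the
+// standard library (the hash choice is illustrative, not mandated here):
+//
+//	secret := GenerateSharedSecret(privKey, remotePubKey)
+//	key := sha256.Sum256(secret) // 32 bytes of symmetric key material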
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go
new file mode 100644
index 0000000000..42022646b1
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go
@@ -0,0 +1,255 @@
+// Copyright 2020-2022 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package secp256k1
+
+// References:
+//   [SECG]: Recommended Elliptic Curve Domain Parameters
+//     https://www.secg.org/sec2-v2.pdf
+//
+//   [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone)
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"math/big"
+)
+
+// CurveParams contains the parameters for the secp256k1 curve.
+type CurveParams struct {
+	// P is the prime used in the secp256k1 field.
+	P *big.Int
+
+	// N is the order of the secp256k1 curve group generated by the base point.
+	N *big.Int
+
+	// Gx and Gy are the x and y coordinate of the base point, respectively.
+	Gx, Gy *big.Int
+
+	// BitSize is the size of the underlying secp256k1 field in bits.
+	BitSize int
+
+	// H is the cofactor of the secp256k1 curve.
+	H int
+
+	// ByteSize is simply the bit size / 8 and is provided for convenience
+	// since it is calculated repeatedly.
+	ByteSize int
+}
+
+// Curve parameters taken from [SECG] section 2.4.1.
+var curveParams = CurveParams{
+	P:        fromHex("fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"),
+	N:        fromHex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141"),
+	Gx:       fromHex("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"),
+	Gy:       fromHex("483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8"),
+	BitSize:  256,
+	H:        1,
+	ByteSize: 256 / 8,
+}
+
+// Params returns the secp256k1 curve parameters for convenience.
+func Params() *CurveParams {
+	return &curveParams
+}
+
+// KoblitzCurve provides an implementation for secp256k1 that fits the ECC Curve
+// interface from crypto/elliptic.
+type KoblitzCurve struct {
+	*elliptic.CurveParams
+}
+
+// bigAffineToJacobian takes an affine point (x, y) as big integers and converts
+// it to Jacobian point with Z=1.
+func bigAffineToJacobian(x, y *big.Int, result *JacobianPoint) {
+	result.X.SetByteSlice(x.Bytes())
+	result.Y.SetByteSlice(y.Bytes())
+	result.Z.SetInt(1)
+}
+
+// jacobianToBigAffine takes a Jacobian point (x, y, z) as field values and
+// converts it to an affine point as big integers.
+func jacobianToBigAffine(point *JacobianPoint) (*big.Int, *big.Int) {
+	point.ToAffine()
+
+	// Convert the field values for the now affine point to big.Ints.
+	x3, y3 := new(big.Int), new(big.Int)
+	x3.SetBytes(point.X.Bytes()[:])
+	y3.SetBytes(point.Y.Bytes()[:])
+	return x3, y3
+}
+
+// Params returns the parameters for the curve.
+//
+// This is part of the elliptic.Curve interface implementation.
+func (curve *KoblitzCurve) Params() *elliptic.CurveParams {
+	return curve.CurveParams
+}
+
+// IsOnCurve returns whether or not the affine point (x,y) is on the curve.
+//
+// This is part of the elliptic.Curve interface implementation.  This function
+// differs from the crypto/elliptic algorithm since a = 0 not -3.
+func (curve *KoblitzCurve) IsOnCurve(x, y *big.Int) bool {
+	// Convert big ints to a Jacobian point for faster arithmetic.
+	var point JacobianPoint
+	bigAffineToJacobian(x, y, &point)
+	return isOnCurve(&point.X, &point.Y)
+}
+
+// Add returns the sum of (x1,y1) and (x2,y2).
+//
+// This is part of the elliptic.Curve interface implementation.
+func (curve *KoblitzCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
+	// The point at infinity is the identity according to the group law for
+	// elliptic curve cryptography.  Thus, ∞ + P = P and P + ∞ = P.
+	if x1.Sign() == 0 && y1.Sign() == 0 {
+		return x2, y2
+	}
+	if x2.Sign() == 0 && y2.Sign() == 0 {
+		return x1, y1
+	}
+
+	// Convert the affine coordinates from big integers to Jacobian points,
+	// do the point addition in Jacobian projective space, and convert the
+	// Jacobian point back to affine big.Ints.
+	var p1, p2, result JacobianPoint
+	bigAffineToJacobian(x1, y1, &p1)
+	bigAffineToJacobian(x2, y2, &p2)
+	AddNonConst(&p1, &p2, &result)
+	return jacobianToBigAffine(&result)
+}
+
+// Double returns 2*(x1,y1).
+//
+// This is part of the elliptic.Curve interface implementation.
+func (curve *KoblitzCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
+	if y1.Sign() == 0 {
+		return new(big.Int), new(big.Int)
+	}
+
+	// Convert the affine coordinates from big integers to Jacobian points,
+	// do the point doubling in Jacobian projective space, and convert the
+	// Jacobian point back to affine big.Ints.
+	var point, result JacobianPoint
+	bigAffineToJacobian(x1, y1, &point)
+	DoubleNonConst(&point, &result)
+	return jacobianToBigAffine(&result)
+}
+
+// moduloReduce reduces k from more than 32 bytes to 32 bytes and under.  This
+// is done by doing a simple modulo curve.N.  We can do this since N*G = ∞ (the
+// point at infinity) and, because N is prime, every other valid point on the
+// curve has the same order.
+func moduloReduce(k []byte) []byte {
+	// Since the order of G is curve.N, we can use a much smaller number by
+	// doing modulo curve.N
+	if len(k) > curveParams.ByteSize {
+		tmpK := new(big.Int).SetBytes(k)
+		tmpK.Mod(tmpK, curveParams.N)
+		return tmpK.Bytes()
+	}
+
+	return k
+}
+
+// ScalarMult returns k*(Bx, By) where k is a big endian integer.
+//
+// This is part of the elliptic.Curve interface implementation.
+func (curve *KoblitzCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
+	// Convert the affine coordinates from big integers to Jacobian points,
+	// do the multiplication in Jacobian projective space, and convert the
+	// Jacobian point back to affine big.Ints.
+	var kModN ModNScalar
+	kModN.SetByteSlice(moduloReduce(k))
+	var point, result JacobianPoint
+	bigAffineToJacobian(Bx, By, &point)
+	ScalarMultNonConst(&kModN, &point, &result)
+	return jacobianToBigAffine(&result)
+}
+
+// ScalarBaseMult returns k*G where G is the base point of the group and k is a
+// big endian integer.
+//
+// This is part of the elliptic.Curve interface implementation.
+func (curve *KoblitzCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
+	// Perform the multiplication and convert the Jacobian point back to affine
+	// big.Ints.
+	var kModN ModNScalar
+	kModN.SetByteSlice(moduloReduce(k))
+	var result JacobianPoint
+	ScalarBaseMultNonConst(&kModN, &result)
+	return jacobianToBigAffine(&result)
+}
+
+// X returns the x coordinate of the public key.
+func (p *PublicKey) X() *big.Int {
+	return new(big.Int).SetBytes(p.x.Bytes()[:])
+}
+
+// Y returns the y coordinate of the public key.
+func (p *PublicKey) Y() *big.Int {
+	return new(big.Int).SetBytes(p.y.Bytes()[:])
+}
+
+// ToECDSA returns the public key as a *ecdsa.PublicKey.
+func (p *PublicKey) ToECDSA() *ecdsa.PublicKey {
+	return &ecdsa.PublicKey{
+		Curve: S256(),
+		X:     p.X(),
+		Y:     p.Y(),
+	}
+}
+
+// ToECDSA returns the private key as a *ecdsa.PrivateKey.
+func (p *PrivateKey) ToECDSA() *ecdsa.PrivateKey {
+	var privKeyBytes [PrivKeyBytesLen]byte
+	p.Key.PutBytes(&privKeyBytes)
+	var result JacobianPoint
+	ScalarBaseMultNonConst(&p.Key, &result)
+	x, y := jacobianToBigAffine(&result)
+	newPrivKey := &ecdsa.PrivateKey{
+		PublicKey: ecdsa.PublicKey{
+			Curve: S256(),
+			X:     x,
+			Y:     y,
+		},
+		D: new(big.Int).SetBytes(privKeyBytes[:]),
+	}
+	zeroArray32(&privKeyBytes)
+	return newPrivKey
+}
+
+// fromHex converts the passed hex string into a big integer pointer and will
+// panic if there is an error.  This is only provided for the hard-coded
+// constants so errors in the source code can be detected.  It will only (and
+// must only) be called for initialization purposes.
+func fromHex(s string) *big.Int {
+	if s == "" {
+		return big.NewInt(0)
+	}
+	r, ok := new(big.Int).SetString(s, 16)
+	if !ok {
+		panic("invalid hex in source file: " + s)
+	}
+	return r
+}
+
+// secp256k1 is a global instance of the KoblitzCurve implementation which in
+// turn embeds and implements elliptic.CurveParams.
+var secp256k1 = &KoblitzCurve{
+	CurveParams: &elliptic.CurveParams{
+		P:       curveParams.P,
+		N:       curveParams.N,
+		B:       fromHex("0000000000000000000000000000000000000000000000000000000000000007"),
+		Gx:      curveParams.Gx,
+		Gy:      curveParams.Gy,
+		BitSize: curveParams.BitSize,
+		Name:    "secp256k1",
+	},
+}
+
+// S256 returns an elliptic.Curve which implements secp256k1.
+func S256() *KoblitzCurve {
+	return secp256k1
+}
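+
+// Sketch of plugging the adaptor into the standard library, per the package
+// documentation (crypto/ecdsa and crypto/rand are standard library packages;
+// the ecdsa sub package of this package remains the recommended signing path):
+//
+//	privKey, err := ecdsa.GenerateKey(S256(), rand.Reader)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = privKey // *ecdsa.PrivateKey on the secp256k1 curve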
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/error.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/error.go
new file mode 100644
index 0000000000..ac8c45127e
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/error.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2020 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package secp256k1
+
+// ErrorKind identifies a kind of error.  It has full support for errors.Is and
+// errors.As, so the caller can directly check against an error kind when
+// determining the reason for an error.
+type ErrorKind string
+
+// These constants are used to identify a specific RuleError.
+const (
+	// ErrPubKeyInvalidLen indicates that the length of a serialized public
+	// key is not one of the allowed lengths.
+	ErrPubKeyInvalidLen = ErrorKind("ErrPubKeyInvalidLen")
+
+	// ErrPubKeyInvalidFormat indicates an attempt was made to parse a public
+	// key that does not specify one of the supported formats.
+	ErrPubKeyInvalidFormat = ErrorKind("ErrPubKeyInvalidFormat")
+
+	// ErrPubKeyXTooBig indicates that the x coordinate for a public key
+	// is greater than or equal to the prime of the field underlying the group.
+	ErrPubKeyXTooBig = ErrorKind("ErrPubKeyXTooBig")
+
+	// ErrPubKeyYTooBig indicates that the y coordinate for a public key is
+	// greater than or equal to the prime of the field underlying the group.
+	ErrPubKeyYTooBig = ErrorKind("ErrPubKeyYTooBig")
+
+	// ErrPubKeyNotOnCurve indicates that a public key is not a point on the
+	// secp256k1 curve.
+	ErrPubKeyNotOnCurve = ErrorKind("ErrPubKeyNotOnCurve")
+
+	// ErrPubKeyMismatchedOddness indicates that a hybrid public key specified
+	// an oddness of the y coordinate that does not match the actual oddness of
+	// the provided y coordinate.
+	ErrPubKeyMismatchedOddness = ErrorKind("ErrPubKeyMismatchedOddness")
+)
+
+// Error satisfies the error interface and prints human-readable errors.
+func (e ErrorKind) Error() string {
+	return string(e)
+}
+
+// Error identifies an error related to public key cryptography using a
+// secp256k1 curve. It has full support for errors.Is and errors.As, so the
+// caller can ascertain the specific reason for the error by checking
+// the underlying error.
+type Error struct {
+	Err         error
+	Description string
+}
+
+// Error satisfies the error interface and prints human-readable errors.
+func (e Error) Error() string {
+	return e.Description
+}
+
+// Unwrap returns the underlying wrapped error.
+func (e Error) Unwrap() error {
+	return e.Err
+}
+
+// makeError creates an Error given a set of arguments.
+func makeError(kind ErrorKind, desc string) Error {
+	return Error{Err: kind, Description: desc}
+}
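+
+// A brief sketch of the error matching that these types support, per the
+// errors.Is/errors.As behavior documented above (err here is a hypothetical
+// error returned by a parsing routine in this package):
+//
+//	if errors.Is(err, ErrPubKeyNotOnCurve) {
+//		// the public key is not a point on the secp256k1 curve
+//	}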
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go
new file mode 100644
index 0000000000..8d9ac74d53
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go
@@ -0,0 +1,1681 @@
+// Copyright (c) 2013-2014 The btcsuite developers
+// Copyright (c) 2015-2022 The Decred developers
+// Copyright (c) 2013-2022 Dave Collins
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package secp256k1
+
+// References:
+//   [HAC]: Handbook of Applied Cryptography Menezes, van Oorschot, Vanstone.
+//     http://cacr.uwaterloo.ca/hac/
+
+// All elliptic curve operations for secp256k1 are done in a finite field
+// characterized by a 256-bit prime.  Given this precision is larger than the
+// biggest available native type, obviously some form of bignum math is needed.
+// This package implements specialized fixed-precision field arithmetic rather
+// than relying on an arbitrary-precision arithmetic package such as math/big
+// for dealing with the field math since the size is known.  As a result, rather
+// large performance gains are achieved by taking advantage of many
+// optimizations not available to arbitrary-precision arithmetic and generic
+// modular arithmetic algorithms.
+//
+// There are various ways to internally represent each finite field element.
+// For example, the most obvious representation would be to use an array of 4
+// uint64s (64 bits * 4 = 256 bits).  However, that representation suffers from
+// a couple of issues.  First, there is no native Go type large enough to handle
+// the intermediate results while adding or multiplying two 64-bit numbers, and
+// second there is no space left for overflows when performing the intermediate
+// arithmetic between each array element which would lead to expensive carry
+// propagation.
+//
+// Given the above, this implementation represents the field elements as
+// 10 uint32s with each word (array entry) treated as base 2^26.  This was
+// chosen for the following reasons:
+// 1) Most systems at the current time are 64-bit (or at least have 64-bit
+//    registers available for specialized purposes such as MMX) so the
+//    intermediate results can typically be done using a native register (and
+//    using uint64s to avoid the need for additional half-word arithmetic)
+// 2) In order to allow addition of the internal words without having to
+//    propagate the carry, the max normalized value for each register must
+//    be less than the number of bits available in the register
+// 3) Since we're dealing with 32-bit values, 64-bits of overflow is a
+//    reasonable choice for #2
+// 4) Given the need for 256-bits of precision and the properties stated in #1,
+//    #2, and #3, the representation which best accommodates this is 10 uint32s
+//    with base 2^26 (26 bits * 10 = 260 bits, so the final word only needs 22
+//    bits) which leaves the desired 64 bits (32 * 10 = 320, 320 - 256 = 64) for
+//    overflow
+//
+// Since it is so important that the field arithmetic is extremely fast for high
+// performance crypto, this type does not perform any validation where it
+// ordinarily would.  See the documentation for FieldVal for more details.
+
+import (
+	"encoding/hex"
+)
+
+// Constants used to make the code more readable.
+const (
+	twoBitsMask   = 0x3
+	fourBitsMask  = 0xf
+	sixBitsMask   = 0x3f
+	eightBitsMask = 0xff
+)
+
+// Constants related to the field representation.
+const (
+	// fieldWords is the number of words used to internally represent the
+	// 256-bit value.
+	fieldWords = 10
+
+	// fieldBase is the exponent used to form the numeric base of each word.
+	// 2^(fieldBase*i) where i is the word position.
+	fieldBase = 26
+
+	// fieldBaseMask is the mask for the bits in each word needed to
+	// represent the numeric base of each word (except the most significant
+	// word).
+	fieldBaseMask = (1 << fieldBase) - 1
+
+	// fieldMSBBits is the number of bits in the most significant word used
+	// to represent the value.
+	fieldMSBBits = 256 - (fieldBase * (fieldWords - 1))
+
+	// fieldMSBMask is the mask for the bits in the most significant word
+	// needed to represent the value.
+	fieldMSBMask = (1 << fieldMSBBits) - 1
+
+	// These fields provide convenient access to each of the words of the
+	// secp256k1 prime in the internal field representation to improve code
+	// readability.
+	fieldPrimeWordZero  = 0x03fffc2f
+	fieldPrimeWordOne   = 0x03ffffbf
+	fieldPrimeWordTwo   = 0x03ffffff
+	fieldPrimeWordThree = 0x03ffffff
+	fieldPrimeWordFour  = 0x03ffffff
+	fieldPrimeWordFive  = 0x03ffffff
+	fieldPrimeWordSix   = 0x03ffffff
+	fieldPrimeWordSeven = 0x03ffffff
+	fieldPrimeWordEight = 0x03ffffff
+	fieldPrimeWordNine  = 0x003fffff
+)
+
+// FieldVal implements optimized fixed-precision arithmetic over the
+// secp256k1 finite field.  This means all arithmetic is performed modulo
+//
+//	0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f.
+//
+// WARNING: Since it is so important for the field arithmetic to be extremely
+// fast for high performance crypto, this type does not perform any validation
+// of documented preconditions where it ordinarily would.  As a result, it is
+// IMPERATIVE for callers to understand some key concepts that are described
+// below and ensure the methods are called with the necessary preconditions that
+// each method is documented with.  For example, some methods only give the
+// correct result if the field value is normalized and others require the field
+// values involved to have a maximum magnitude and THERE ARE NO EXPLICIT CHECKS
+// TO ENSURE THOSE PRECONDITIONS ARE SATISFIED.  This does, unfortunately, make
+// the type more difficult to use correctly and while I typically prefer to
+// ensure all state and input is valid for most code, this is a bit of an
+// exception because those extra checks really add up in what ends up being
+// critical hot paths.
+//
+// The first key concept when working with this type is normalization.  In order
+// to avoid the need to propagate a ton of carries, the internal representation
+// provides additional overflow bits for each word of the overall 256-bit value.
+// This means that there are multiple internal representations for the same
+// value and, as a result, any methods that rely on comparison of the value,
+// such as equality and oddness determination, require the caller to provide a
+// normalized value.
+//
+// The second key concept when working with this type is magnitude.  As
+// previously mentioned, the internal representation provides additional
+// overflow bits which means that the more math operations that are performed on
+// the field value between normalizations, the more those overflow bits
+// accumulate.  The magnitude is effectively that maximum possible number of
+// those overflow bits that could possibly be required as a result of a given
+// operation.  Since there are only a limited number of overflow bits available,
+// this implies that the max possible magnitude MUST be tracked by the caller
+// and the caller MUST normalize the field value if a given operation would
+// cause the magnitude of the result to exceed the max allowed value.
+//
+// IMPORTANT: The max allowed magnitude of a field value is 64.
+type FieldVal struct {
+	// Each 256-bit value is represented as 10 32-bit integers in base 2^26.
+	// This provides 6 bits of overflow in each word (10 bits in the most
+	// significant word) for a total of 64 bits of overflow (9*6 + 10 = 64).  It
+	// only implements the arithmetic needed for elliptic curve operations.
+	//
+	// The following depicts the internal representation:
+	// 	 -----------------------------------------------------------------
+	// 	|        n[9]       |        n[8]       | ... |        n[0]       |
+	// 	| 32 bits available | 32 bits available | ... | 32 bits available |
+	// 	| 22 bits for value | 26 bits for value | ... | 26 bits for value |
+	// 	| 10 bits overflow  |  6 bits overflow  | ... |  6 bits overflow  |
+	// 	| Mult: 2^(26*9)    | Mult: 2^(26*8)    | ... | Mult: 2^(26*0)    |
+	// 	 -----------------------------------------------------------------
+	//
+	// For example, consider the number 2^49 + 1.  It would be represented as:
+	// 	n[0] = 1
+	// 	n[1] = 2^23
+	// 	n[2..9] = 0
+	//
+	// The full 256-bit value is then calculated by looping i from 9..0 and
+	// doing sum(n[i] * 2^(26i)) like so:
+	// 	n[9] * 2^(26*9) = 0    * 2^234 = 0
+	// 	n[8] * 2^(26*8) = 0    * 2^208 = 0
+	// 	...
+	// 	n[1] * 2^(26*1) = 2^23 * 2^26  = 2^49
+	// 	n[0] * 2^(26*0) = 1    * 2^0   = 1
+	// 	Sum: 0 + 0 + ... + 2^49 + 1 = 2^49 + 1
+	n [10]uint32
+}
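+
+// As a rough usage sketch of the normalization and magnitude rules described
+// above (illustrative only, using methods defined later in this file):
+//
+//	var r, a, b FieldVal
+//	a.SetInt(5)                    // normalized, magnitude 1
+//	b.SetInt(7)                    // normalized, magnitude 1
+//	r.Add2(&a, &b)                 // magnitude 2 -- NOT safe to compare yet
+//	r.Normalize()                  // magnitude 1 and normalized
+//	ok := r.Equals(new(FieldVal).SetInt(12)) // ok == true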
+
+// String returns the field value as a normalized human-readable hex string.
+//
+//	Preconditions: None
+//	Output Normalized: Field is not modified -- same as input value
+//	Output Max Magnitude: Field is not modified -- same as input value
+func (f FieldVal) String() string {
+	// f is a copy, so it's safe to normalize it without mutating the original.
+	f.Normalize()
+	return hex.EncodeToString(f.Bytes()[:])
+}
+
+// Zero sets the field value to zero in constant time.  A newly created field
+// value is already set to zero.  This function can be useful to clear an
+// existing field value for reuse.
+//
+//	Preconditions: None
+//	Output Normalized: Yes
+//	Output Max Magnitude: 1
+func (f *FieldVal) Zero() {
+	f.n[0] = 0
+	f.n[1] = 0
+	f.n[2] = 0
+	f.n[3] = 0
+	f.n[4] = 0
+	f.n[5] = 0
+	f.n[6] = 0
+	f.n[7] = 0
+	f.n[8] = 0
+	f.n[9] = 0
+}
+
+// Set sets the field value equal to the passed value in constant time.  The
+// normalization and magnitude of the two fields will be identical.
+//
+// The field value is returned to support chaining.  This enables syntax like:
+// f := new(FieldVal).Set(f2).AddInt(1) so that f = f2 + 1 where f2 is not
+// modified.
+//
+//	Preconditions: None
+//	Output Normalized: Same as input value
+//	Output Max Magnitude: Same as input value
+func (f *FieldVal) Set(val *FieldVal) *FieldVal {
+	*f = *val
+	return f
+}
+
+// SetInt sets the field value to the passed integer in constant time.  This is
+// a convenience function since it is fairly common to perform some arithmetic
+// with small native integers.
+//
+// The field value is returned to support chaining.  This enables syntax such
+// as f := new(FieldVal).SetInt(2).Mul(f2) so that f = 2 * f2.
+//
+//	Preconditions: None
+//	Output Normalized: Yes
+//	Output Max Magnitude: 1
+func (f *FieldVal) SetInt(ui uint16) *FieldVal {
+	f.Zero()
+	f.n[0] = uint32(ui)
+	return f
+}
+
+// SetBytes packs the passed 32-byte big-endian value into the internal field
+// value representation in constant time.  SetBytes interprets the provided
+// array as a 256-bit big-endian unsigned integer, packs it into the internal
+// field value representation, and returns either 1 if it is greater than or
+// equal to the field prime (aka it overflowed) or 0 otherwise in constant time.
+//
+// Note that a bool is not used here because it is not possible in Go to convert
+// from a bool to a numeric value in constant time and many constant-time
+// operations require a numeric value.
+//
+//	Preconditions: None
+//	Output Normalized: Yes if no overflow, no otherwise
+//	Output Max Magnitude: 1
+func (f *FieldVal) SetBytes(b *[32]byte) uint32 {
+	// Pack the 256 total bits across the 10 uint32 words with a max of
+	// 26-bits per word.  This could be done with a couple of for loops,
+	// but this unrolled version is significantly faster.  Benchmarks show
+	// this is about 34 times faster than the variant which uses loops.
+	f.n[0] = uint32(b[31]) | uint32(b[30])<<8 | uint32(b[29])<<16 |
+		(uint32(b[28])&twoBitsMask)<<24
+	f.n[1] = uint32(b[28])>>2 | uint32(b[27])<<6 | uint32(b[26])<<14 |
+		(uint32(b[25])&fourBitsMask)<<22
+	f.n[2] = uint32(b[25])>>4 | uint32(b[24])<<4 | uint32(b[23])<<12 |
+		(uint32(b[22])&sixBitsMask)<<20
+	f.n[3] = uint32(b[22])>>6 | uint32(b[21])<<2 | uint32(b[20])<<10 |
+		uint32(b[19])<<18
+	f.n[4] = uint32(b[18]) | uint32(b[17])<<8 | uint32(b[16])<<16 |
+		(uint32(b[15])&twoBitsMask)<<24
+	f.n[5] = uint32(b[15])>>2 | uint32(b[14])<<6 | uint32(b[13])<<14 |
+		(uint32(b[12])&fourBitsMask)<<22
+	f.n[6] = uint32(b[12])>>4 | uint32(b[11])<<4 | uint32(b[10])<<12 |
+		(uint32(b[9])&sixBitsMask)<<20
+	f.n[7] = uint32(b[9])>>6 | uint32(b[8])<<2 | uint32(b[7])<<10 |
+		uint32(b[6])<<18
+	f.n[8] = uint32(b[5]) | uint32(b[4])<<8 | uint32(b[3])<<16 |
+		(uint32(b[2])&twoBitsMask)<<24
+	f.n[9] = uint32(b[2])>>2 | uint32(b[1])<<6 | uint32(b[0])<<14
+
+	// The intuition here is that the field value is greater than the prime if
+	// one of the higher individual words is greater than corresponding word of
+	// the prime and all higher words in the field value are equal to their
+	// corresponding word of the prime.  Since this type is modulo the prime,
+	// being equal is also an overflow back to 0.
+	//
+	// Note that because the input is 32 bytes and it was just packed into the
+	// field representation, the only words that can possibly be greater are
+	// zero and one, because ceil(log_2(2^256 - 1 - P)) = 33 bits max and the
+	// internal field representation encodes 26 bits with each word.
+	//
+	// Thus, there is no need to test whether the upper words of the field
+	// value exceed their corresponding prime words, so only equality is
+	// checked for them.
+	highWordsEq := constantTimeEq(f.n[9], fieldPrimeWordNine)
+	highWordsEq &= constantTimeEq(f.n[8], fieldPrimeWordEight)
+	highWordsEq &= constantTimeEq(f.n[7], fieldPrimeWordSeven)
+	highWordsEq &= constantTimeEq(f.n[6], fieldPrimeWordSix)
+	highWordsEq &= constantTimeEq(f.n[5], fieldPrimeWordFive)
+	highWordsEq &= constantTimeEq(f.n[4], fieldPrimeWordFour)
+	highWordsEq &= constantTimeEq(f.n[3], fieldPrimeWordThree)
+	highWordsEq &= constantTimeEq(f.n[2], fieldPrimeWordTwo)
+	overflow := highWordsEq & constantTimeGreater(f.n[1], fieldPrimeWordOne)
+	highWordsEq &= constantTimeEq(f.n[1], fieldPrimeWordOne)
+	overflow |= highWordsEq & constantTimeGreaterOrEq(f.n[0], fieldPrimeWordZero)
+
+	return overflow
+}
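+
+// For intuition on the overflow flag (illustrative only): packing the 32-byte
+// big-endian encoding of the prime P itself produces words that match every
+// fieldPrimeWord* constant, so highWordsEq remains 1 throughout and the final
+// greater-or-equal check on word zero fires, yielding overflow = 1.  A
+// subsequent Normalize would reduce the stored value to 0.
+//
+//	var f FieldVal
+//	overflow := f.SetBytes(&pBytes) // pBytes: hypothetical [32]byte encoding of P; overflow == 1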
+
+// SetByteSlice interprets the provided slice as a 256-bit big-endian unsigned
+// integer (meaning it is truncated to the first 32 bytes), packs it into the
+// internal field value representation, and returns whether or not the resulting
+// truncated 256-bit integer is greater than or equal to the field prime (aka it
+// overflowed) in constant time.
+//
+// Note that since passing a slice with more than 32 bytes is truncated, it is
+// possible that the truncated value is less than the field prime and hence it
+// will not be reported as having overflowed in that case.  It is up to the
+// caller to decide whether it needs to provide numbers of the appropriate size
+// or whether it is acceptable to use this function with the described truncation and
+// overflow behavior.
+//
+//	Preconditions: None
+//	Output Normalized: Yes if no overflow, no otherwise
+//	Output Max Magnitude: 1
+func (f *FieldVal) SetByteSlice(b []byte) bool {
+	var b32 [32]byte
+	b = b[:constantTimeMin(uint32(len(b)), 32)]
+	copy(b32[:], b32[:32-len(b)])
+	copy(b32[32-len(b):], b)
+	result := f.SetBytes(&b32)
+	zeroArray32(&b32)
+	return result != 0
+}
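+
+// For example (illustrative only), a slice shorter than 32 bytes is treated
+// as the big-endian value it encodes, left padded with zeros:
+//
+//	var f FieldVal
+//	overflowed := f.SetByteSlice([]byte{0x01, 0x00}) // f = 256, overflowed == false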
+
+// Normalize normalizes the internal field words into the desired range and
+// performs fast modular reduction over the secp256k1 prime by making use of the
+// special form of the prime in constant time.
+//
+//	Preconditions: None
+//	Output Normalized: Yes
+//	Output Max Magnitude: 1
+func (f *FieldVal) Normalize() *FieldVal {
+	// The field representation leaves 6 bits of overflow in each word so
+	// intermediate calculations can be performed without needing to
+	// propagate the carry to each higher word during the calculations.  In
+	// order to normalize, we need to "compact" the full 256-bit value to
+	// the right while propagating any carries through to the high order
+	// word.
+	//
+	// Since this field is doing arithmetic modulo the secp256k1 prime, we
+	// also need to perform modular reduction over the prime.
+	//
+	// Per [HAC] section 14.3.4: Reduction method of moduli of special form,
+	// when the modulus is of the special form m = b^t - c, highly efficient
+	// reduction can be achieved.
+	//
+	// The secp256k1 prime is equivalent to 2^256 - 4294968273, so it fits
+	// this criteria.
+	//
+	// 4294968273 in field representation (base 2^26) is:
+	// n[0] = 977
+	// n[1] = 64
+	// That is to say (2^26 * 64) + 977 = 4294968273
+	//
+	// The algorithm presented in the referenced section typically repeats
+	// until the quotient is zero.  However, due to our field representation
+	// we already know to within one reduction how many times we would need
+	// to repeat as it's the uppermost bits of the high order word.  Thus we
+	// can simply multiply the magnitude by the field representation of the
+	// prime and do a single iteration.  After this step there might be an
+	// additional carry to bit 256 (bit 22 of the high order word).
+	t9 := f.n[9]
+	m := t9 >> fieldMSBBits
+	t9 = t9 & fieldMSBMask
+	t0 := f.n[0] + m*977
+	t1 := (t0 >> fieldBase) + f.n[1] + (m << 6)
+	t0 = t0 & fieldBaseMask
+	t2 := (t1 >> fieldBase) + f.n[2]
+	t1 = t1 & fieldBaseMask
+	t3 := (t2 >> fieldBase) + f.n[3]
+	t2 = t2 & fieldBaseMask
+	t4 := (t3 >> fieldBase) + f.n[4]
+	t3 = t3 & fieldBaseMask
+	t5 := (t4 >> fieldBase) + f.n[5]
+	t4 = t4 & fieldBaseMask
+	t6 := (t5 >> fieldBase) + f.n[6]
+	t5 = t5 & fieldBaseMask
+	t7 := (t6 >> fieldBase) + f.n[7]
+	t6 = t6 & fieldBaseMask
+	t8 := (t7 >> fieldBase) + f.n[8]
+	t7 = t7 & fieldBaseMask
+	t9 = (t8 >> fieldBase) + t9
+	t8 = t8 & fieldBaseMask
+
+	// At this point, the magnitude is guaranteed to be one, however, the
+	// value could still be greater than the prime if there was either a
+	// carry through to bit 256 (bit 22 of the high order word) or the
+	// value is greater than or equal to the field prime.  The following
+	// determines if either of these conditions is true and does
+	// the final reduction in constant time.
+	//
+	// Also note that 'm' will be zero when neither of the aforementioned
+	// conditions are true and the value will not be changed when 'm' is zero.
+	m = constantTimeEq(t9, fieldMSBMask)
+	m &= constantTimeEq(t8&t7&t6&t5&t4&t3&t2, fieldBaseMask)
+	m &= constantTimeGreater(t1+64+((t0+977)>>fieldBase), fieldBaseMask)
+	m |= t9 >> fieldMSBBits
+	t0 = t0 + m*977
+	t1 = (t0 >> fieldBase) + t1 + (m << 6)
+	t0 = t0 & fieldBaseMask
+	t2 = (t1 >> fieldBase) + t2
+	t1 = t1 & fieldBaseMask
+	t3 = (t2 >> fieldBase) + t3
+	t2 = t2 & fieldBaseMask
+	t4 = (t3 >> fieldBase) + t4
+	t3 = t3 & fieldBaseMask
+	t5 = (t4 >> fieldBase) + t5
+	t4 = t4 & fieldBaseMask
+	t6 = (t5 >> fieldBase) + t6
+	t5 = t5 & fieldBaseMask
+	t7 = (t6 >> fieldBase) + t7
+	t6 = t6 & fieldBaseMask
+	t8 = (t7 >> fieldBase) + t8
+	t7 = t7 & fieldBaseMask
+	t9 = (t8 >> fieldBase) + t9
+	t8 = t8 & fieldBaseMask
+	t9 = t9 & fieldMSBMask // Remove potential multiple of 2^256.
+
+	// Finally, set the normalized and reduced words.
+	f.n[0] = t0
+	f.n[1] = t1
+	f.n[2] = t2
+	f.n[3] = t3
+	f.n[4] = t4
+	f.n[5] = t5
+	f.n[6] = t6
+	f.n[7] = t7
+	f.n[8] = t8
+	f.n[9] = t9
+	return f
+}
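+
+// As a worked check of the constants used above (illustrative only): since
+// P = 2^256 - c with c = 4294968273, it follows that 2^256 ≡ c (mod P), and
+// in base 2^26 words c decomposes as
+//
+//	c = 64*2^26 + 977 = 4294967296 + 977 = 4294968273
+//
+// which is exactly why the folded-in quotient m contributes m*977 to the
+// lowest word and m*64 (that is, m<<6) to the next one.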
+
+// PutBytesUnchecked unpacks the field value to a 32-byte big-endian value
+// directly into the passed byte slice in constant time.  The target slice
+// must have at least 32 bytes available or it will panic.
+//
+// There is a similar function, PutBytes, which unpacks the field value into a
+// 32-byte array directly.  This version is provided since it can be useful
+// to write directly into part of a larger buffer without needing a separate
+// allocation.
+//
+//	Preconditions:
+//	  - The field value MUST be normalized
+//	  - The target slice MUST have at least 32 bytes available
+func (f *FieldVal) PutBytesUnchecked(b []byte) {
+	// Unpack the 256 total bits from the 10 uint32 words with a max of
+	// 26-bits per word.  This could be done with a couple of for loops,
+	// but this unrolled version is a bit faster.  Benchmarks show this is
+	// about 10 times faster than the variant which uses loops.
+	b[31] = byte(f.n[0] & eightBitsMask)
+	b[30] = byte((f.n[0] >> 8) & eightBitsMask)
+	b[29] = byte((f.n[0] >> 16) & eightBitsMask)
+	b[28] = byte((f.n[0]>>24)&twoBitsMask | (f.n[1]&sixBitsMask)<<2)
+	b[27] = byte((f.n[1] >> 6) & eightBitsMask)
+	b[26] = byte((f.n[1] >> 14) & eightBitsMask)
+	b[25] = byte((f.n[1]>>22)&fourBitsMask | (f.n[2]&fourBitsMask)<<4)
+	b[24] = byte((f.n[2] >> 4) & eightBitsMask)
+	b[23] = byte((f.n[2] >> 12) & eightBitsMask)
+	b[22] = byte((f.n[2]>>20)&sixBitsMask | (f.n[3]&twoBitsMask)<<6)
+	b[21] = byte((f.n[3] >> 2) & eightBitsMask)
+	b[20] = byte((f.n[3] >> 10) & eightBitsMask)
+	b[19] = byte((f.n[3] >> 18) & eightBitsMask)
+	b[18] = byte(f.n[4] & eightBitsMask)
+	b[17] = byte((f.n[4] >> 8) & eightBitsMask)
+	b[16] = byte((f.n[4] >> 16) & eightBitsMask)
+	b[15] = byte((f.n[4]>>24)&twoBitsMask | (f.n[5]&sixBitsMask)<<2)
+	b[14] = byte((f.n[5] >> 6) & eightBitsMask)
+	b[13] = byte((f.n[5] >> 14) & eightBitsMask)
+	b[12] = byte((f.n[5]>>22)&fourBitsMask | (f.n[6]&fourBitsMask)<<4)
+	b[11] = byte((f.n[6] >> 4) & eightBitsMask)
+	b[10] = byte((f.n[6] >> 12) & eightBitsMask)
+	b[9] = byte((f.n[6]>>20)&sixBitsMask | (f.n[7]&twoBitsMask)<<6)
+	b[8] = byte((f.n[7] >> 2) & eightBitsMask)
+	b[7] = byte((f.n[7] >> 10) & eightBitsMask)
+	b[6] = byte((f.n[7] >> 18) & eightBitsMask)
+	b[5] = byte(f.n[8] & eightBitsMask)
+	b[4] = byte((f.n[8] >> 8) & eightBitsMask)
+	b[3] = byte((f.n[8] >> 16) & eightBitsMask)
+	b[2] = byte((f.n[8]>>24)&twoBitsMask | (f.n[9]&sixBitsMask)<<2)
+	b[1] = byte((f.n[9] >> 6) & eightBitsMask)
+	b[0] = byte((f.n[9] >> 14) & eightBitsMask)
+}
+
+// PutBytes unpacks the field value to a 32-byte big-endian value using the
+// passed byte array in constant time.
+//
+// There is a similar function, PutBytesUnchecked, which unpacks the field value
+// into a slice that must have at least 32 bytes available.  This version is
+// provided since it can be useful to write directly into an array that is type
+// checked.
+//
+// Alternatively, there is also Bytes, which unpacks the field value into a new
+// array and returns that which can sometimes be more ergonomic in applications
+// that aren't concerned about an additional copy.
+//
+//	Preconditions:
+//	  - The field value MUST be normalized
+func (f *FieldVal) PutBytes(b *[32]byte) {
+	f.PutBytesUnchecked(b[:])
+}
+
+// Bytes unpacks the field value to a 32-byte big-endian value in constant time.
+//
+// See PutBytes and PutBytesUnchecked for variants that allow an array or slice
+// to be passed which can be useful to cut down on the number of allocations by
+// allowing the caller to reuse a buffer or write directly into part of a larger
+// buffer.
+//
+//	Preconditions:
+//	  - The field value MUST be normalized
+func (f *FieldVal) Bytes() *[32]byte {
+	b := new([32]byte)
+	f.PutBytesUnchecked(b[:])
+	return b
+}
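+
+// A round-trip sketch (illustrative only): for inputs below the prime,
+// SetByteSlice followed by Bytes reproduces the original encoding:
+//
+//	var f FieldVal
+//	f.SetByteSlice(in) // in: hypothetical 32-byte big-endian encoding of a value < P
+//	out := f.Bytes()   // bytes.Equal(out[:], in) holds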
+
+// IsZeroBit returns 1 when the field value is equal to zero or 0 otherwise in
+// constant time.
+//
+// Note that a bool is not used here because it is not possible in Go to convert
+// from a bool to a numeric value in constant time and many constant-time
+// operations require a numeric value.  See IsZero for the version that returns
+// a bool.
+//
+//	Preconditions:
+//	  - The field value MUST be normalized
+func (f *FieldVal) IsZeroBit() uint32 {
+	// The value can only be zero if no bits are set in any of the words.
+	// This is a constant time implementation.
+	bits := f.n[0] | f.n[1] | f.n[2] | f.n[3] | f.n[4] |
+		f.n[5] | f.n[6] | f.n[7] | f.n[8] | f.n[9]
+
+	return constantTimeEq(bits, 0)
+}
+
+// IsZero returns whether or not the field value is equal to zero in constant
+// time.
+//
+//	Preconditions:
+//	  - The field value MUST be normalized
+func (f *FieldVal) IsZero() bool {
+	// The value can only be zero if no bits are set in any of the words.
+	// This is a constant time implementation.
+	bits := f.n[0] | f.n[1] | f.n[2] | f.n[3] | f.n[4] |
+		f.n[5] | f.n[6] | f.n[7] | f.n[8] | f.n[9]
+
+	return bits == 0
+}
+
+// IsOneBit returns 1 when the field value is equal to one or 0 otherwise in
+// constant time.
+//
+// Note that a bool is not used here because it is not possible in Go to convert
+// from a bool to a numeric value in constant time and many constant-time
+// operations require a numeric value.  See IsOne for the version that returns a
+// bool.
+//
+//	Preconditions:
+//	  - The field value MUST be normalized
+func (f *FieldVal) IsOneBit() uint32 {
+	// The value can only be one if the least significant bit is the only bit
+	// set in the first word and no bits are set in any of the other words.
+	// This is a constant time implementation.
+	bits := (f.n[0] ^ 1) | f.n[1] | f.n[2] | f.n[3] | f.n[4] | f.n[5] |
+		f.n[6] | f.n[7] | f.n[8] | f.n[9]
+
+	return constantTimeEq(bits, 0)
+}
+
+// IsOne returns whether or not the field value is equal to one in constant
+// time.
+//
+//	Preconditions:
+//	  - The field value MUST be normalized
+func (f *FieldVal) IsOne() bool {
+	// The value can only be one if the least significant bit is the only bit
+	// set in the first word and no bits are set in any of the other words.
+	// This is a constant time implementation.
+	bits := (f.n[0] ^ 1) | f.n[1] | f.n[2] | f.n[3] | f.n[4] | f.n[5] |
+		f.n[6] | f.n[7] | f.n[8] | f.n[9]
+
+	return bits == 0
+}
+
+// IsOddBit returns 1 when the field value is an odd number or 0 otherwise in
+// constant time.
+//
+// Note that a bool is not used here because it is not possible in Go to convert
+// from a bool to a numeric value in constant time and many constant-time
+// operations require a numeric value.  See IsOdd for the version that returns a
+// bool.
+//
+//	Preconditions:
+//	  - The field value MUST be normalized
+func (f *FieldVal) IsOddBit() uint32 {
+	// Only odd numbers have the bottom bit set.
+	return f.n[0] & 1
+}
+
+// IsOdd returns whether or not the field value is an odd number in constant
+// time.
+//
+//	Preconditions:
+//	  - The field value MUST be normalized
+func (f *FieldVal) IsOdd() bool {
+	// Only odd numbers have the bottom bit set.
+	return f.n[0]&1 == 1
+}
+
+// Equals returns whether or not the two field values are the same in constant
+// time.
+//
+//	Preconditions:
+//	  - Both field values being compared MUST be normalized
+func (f *FieldVal) Equals(val *FieldVal) bool {
+	// Xor only sets bits when they are different, so the two field values
+	// can only be the same if no bits are set after xoring each word.
+	// This is a constant time implementation.
+	bits := (f.n[0] ^ val.n[0]) | (f.n[1] ^ val.n[1]) | (f.n[2] ^ val.n[2]) |
+		(f.n[3] ^ val.n[3]) | (f.n[4] ^ val.n[4]) | (f.n[5] ^ val.n[5]) |
+		(f.n[6] ^ val.n[6]) | (f.n[7] ^ val.n[7]) | (f.n[8] ^ val.n[8]) |
+		(f.n[9] ^ val.n[9])
+
+	return bits == 0
+}
+
+// NegateVal negates the passed value and stores the result in f in constant
+// time.  The caller must provide the magnitude of the passed value for a
+// correct result.
+//
+// The field value is returned to support chaining.  This enables syntax like:
+// f.NegateVal(f2).AddInt(1) so that f = -f2 + 1.
+//
+//	Preconditions:
+//	  - The max magnitude MUST be 63
+//	Output Normalized: No
+//	Output Max Magnitude: Input magnitude + 1
+func (f *FieldVal) NegateVal(val *FieldVal, magnitude uint32) *FieldVal {
+	// Negation in the field is just the prime minus the value.  However,
+	// in order to allow negation against a field value without having to
+	// normalize/reduce it first, multiply by the magnitude (that is how
+	// "far" away it is from the normalized value) to adjust.  Also, since
+	// negating a value pushes it one more order of magnitude away from the
+	// normalized range, add 1 to compensate.
+	//
+	// For some intuition here, imagine you're performing mod 12 arithmetic
+	// (picture a clock) and you are negating the number 7.  So you start at
+	// 12 (which is of course 0 under mod 12) and count backwards (left on
+	// the clock) 7 times to arrive at 5.  Notice this is just 12-7 = 5.
+	// Now, assume you're starting with 19, which is a number that is
+	// already larger than the modulus and congruent to 7 (mod 12).  When a
+	// value is already in the desired range, its magnitude is 1.  Since 19
+	// is an additional "step", its magnitude (mod 12) is 2.  Since any
+	// multiple of the modulus is congruent to zero (mod m), the answer can
+	// be shortcut by simply multiplying the magnitude by the modulus and
+	// subtracting.  Keeping with the example, this would be (2*12)-19 = 5.
+	f.n[0] = (magnitude+1)*fieldPrimeWordZero - val.n[0]
+	f.n[1] = (magnitude+1)*fieldPrimeWordOne - val.n[1]
+	f.n[2] = (magnitude+1)*fieldBaseMask - val.n[2]
+	f.n[3] = (magnitude+1)*fieldBaseMask - val.n[3]
+	f.n[4] = (magnitude+1)*fieldBaseMask - val.n[4]
+	f.n[5] = (magnitude+1)*fieldBaseMask - val.n[5]
+	f.n[6] = (magnitude+1)*fieldBaseMask - val.n[6]
+	f.n[7] = (magnitude+1)*fieldBaseMask - val.n[7]
+	f.n[8] = (magnitude+1)*fieldBaseMask - val.n[8]
+	f.n[9] = (magnitude+1)*fieldMSBMask - val.n[9]
+
+	return f
+}
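+
+// A usage sketch (illustrative only): negating a freshly normalized value,
+// which by definition has magnitude 1:
+//
+//	var a, negA FieldVal
+//	a.SetInt(9)              // normalized, magnitude 1
+//	negA.NegateVal(&a, 1)    // magnitude 2, not normalized
+//	negA.Add(&a).Normalize() // -9 + 9 = 0
+//	zero := negA.IsZero()    // zero == true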
+
+// Negate negates the field value in constant time.  The existing field value is
+// modified.  The caller must provide the magnitude of the field value for a
+// correct result.
+//
+// The field value is returned to support chaining.  This enables syntax like:
+// f.Negate().AddInt(1) so that f = -f + 1.
+//
+//	Preconditions:
+//	  - The max magnitude MUST be 63
+//	Output Normalized: No
+//	Output Max Magnitude: Input magnitude + 1
+func (f *FieldVal) Negate(magnitude uint32) *FieldVal {
+	return f.NegateVal(f, magnitude)
+}
+
+// AddInt adds the passed integer to the existing field value and stores the
+// result in f in constant time.  This is a convenience function since it is
+// fairly common to perform some arithmetic with small native integers.
+//
+// The field value is returned to support chaining.  This enables syntax like:
+// f.AddInt(1).Add(f2) so that f = f + 1 + f2.
+//
+//	Preconditions:
+//	  - The field value MUST have a max magnitude of 63
+//	Output Normalized: No
+//	Output Max Magnitude: Existing field magnitude + 1
+func (f *FieldVal) AddInt(ui uint16) *FieldVal {
+	// Since the field representation intentionally provides overflow bits,
+	// it's ok to use carryless addition as the carry bit is safely part of
+	// the word and will be normalized out.
+	f.n[0] += uint32(ui)
+
+	return f
+}
+
+// Add adds the passed value to the existing field value and stores the result
+// in f in constant time.
+//
+// The field value is returned to support chaining.  This enables syntax like:
+// f.Add(f2).AddInt(1) so that f = f + f2 + 1.
+//
+//	Preconditions:
+//	  - The sum of the magnitudes of the two field values MUST be a max of 64
+//	Output Normalized: No
+//	Output Max Magnitude: Sum of the magnitude of the two individual field values
+func (f *FieldVal) Add(val *FieldVal) *FieldVal {
+	// Since the field representation intentionally provides overflow bits,
+	// it's ok to use carryless addition as the carry bit is safely part of
+	// each word and will be normalized out.  This could obviously be done
+	// in a loop, but the unrolled version is faster.
+	f.n[0] += val.n[0]
+	f.n[1] += val.n[1]
+	f.n[2] += val.n[2]
+	f.n[3] += val.n[3]
+	f.n[4] += val.n[4]
+	f.n[5] += val.n[5]
+	f.n[6] += val.n[6]
+	f.n[7] += val.n[7]
+	f.n[8] += val.n[8]
+	f.n[9] += val.n[9]
+
+	return f
+}
+
+// Add2 adds the passed two field values together and stores the result in f in
+// constant time.
+//
+// The field value is returned to support chaining.  This enables syntax like:
+// f3.Add2(f, f2).AddInt(1) so that f3 = f + f2 + 1.
+//
+//	Preconditions:
+//	  - The sum of the magnitudes of the two field values MUST be a max of 64
+//	Output Normalized: No
+//	Output Max Magnitude: Sum of the magnitude of the two field values
+func (f *FieldVal) Add2(val *FieldVal, val2 *FieldVal) *FieldVal {
+	// Since the field representation intentionally provides overflow bits,
+	// it's ok to use carryless addition as the carry bit is safely part of
+	// each word and will be normalized out.  This could obviously be done
+	// in a loop, but the unrolled version is faster.
+	f.n[0] = val.n[0] + val2.n[0]
+	f.n[1] = val.n[1] + val2.n[1]
+	f.n[2] = val.n[2] + val2.n[2]
+	f.n[3] = val.n[3] + val2.n[3]
+	f.n[4] = val.n[4] + val2.n[4]
+	f.n[5] = val.n[5] + val2.n[5]
+	f.n[6] = val.n[6] + val2.n[6]
+	f.n[7] = val.n[7] + val2.n[7]
+	f.n[8] = val.n[8] + val2.n[8]
+	f.n[9] = val.n[9] + val2.n[9]
+
+	return f
+}
+
+// MulInt multiplies the field value by the passed int and stores the result in
+// f in constant time.  Note that this function can overflow if multiplying the
+// value by any of the individual words exceeds a max uint32.  Therefore it is
+// important that the caller ensures no overflows will occur before using this
+// function.
+//
+// The field value is returned to support chaining.  This enables syntax like:
+// f.MulInt(2).Add(f2) so that f = 2 * f + f2.
+//
+//	Preconditions:
+//	  - The field value magnitude multiplied by given val MUST be a max of 64
+//	Output Normalized: No
+//	Output Max Magnitude: Existing field magnitude times the provided integer val
+func (f *FieldVal) MulInt(val uint8) *FieldVal {
+	// Since each word of the field representation can hold up to
+	// 32 - fieldBase extra bits which will be normalized out, it's safe
+	// to multiply each word without using a larger type or carry
+	// propagation so long as the values won't overflow a uint32.  This
+	// could obviously be done in a loop, but the unrolled version is
+	// faster.
+	ui := uint32(val)
+	f.n[0] *= ui
+	f.n[1] *= ui
+	f.n[2] *= ui
+	f.n[3] *= ui
+	f.n[4] *= ui
+	f.n[5] *= ui
+	f.n[6] *= ui
+	f.n[7] *= ui
+	f.n[8] *= ui
+	f.n[9] *= ui
+
+	return f
+}
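+
+// A worked bound for the precondition above (illustrative only): a value of
+// magnitude m keeps every word below m*2^26, so after MulInt(val) each word
+// is below m*val*2^26.  Requiring m*val <= 64 keeps that product below
+// 64*2^26 = 2^32, which is what prevents the uint32 words from overflowing.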
+
+// Mul multiplies the passed value to the existing field value and stores the
+// result in f in constant time.  Note that this function can overflow if
+// multiplying any of the individual words exceeds a max uint32.  In practice,
+// this means the magnitude of either value involved in the multiplication must
+// be a max of 8.
+//
+// The field value is returned to support chaining.  This enables syntax like:
+// f.Mul(f2).AddInt(1) so that f = (f * f2) + 1.
+//
+//	Preconditions:
+//	  - Both field values MUST have a max magnitude of 8
+//	Output Normalized: No
+//	Output Max Magnitude: 1
+func (f *FieldVal) Mul(val *FieldVal) *FieldVal {
+	return f.Mul2(f, val)
+}
+
+// Mul2 multiplies the passed two field values together and stores the result
+// result in f in constant time.  Note that this function can overflow if
+// multiplying any of the individual words exceeds a max uint32.  In practice,
+// this means the magnitude of either value involved in the multiplication must
+// be a max of 8.
+//
+// The field value is returned to support chaining.  This enables syntax like:
+// f3.Mul2(f, f2).AddInt(1) so that f3 = (f * f2) + 1.
+//
+//	Preconditions:
+//	  - Both input field values MUST have a max magnitude of 8
+//	Output Normalized: No
+//	Output Max Magnitude: 1
+func (f *FieldVal) Mul2(val *FieldVal, val2 *FieldVal) *FieldVal {
+	// This could be done with a couple of for loops and an array to store
+	// the intermediate terms, but this unrolled version is significantly
+	// faster.
+
+	// Terms for 2^(fieldBase*0).
+	m := uint64(val.n[0]) * uint64(val2.n[0])
+	t0 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*1).
+	m = (m >> fieldBase) +
+		uint64(val.n[0])*uint64(val2.n[1]) +
+		uint64(val.n[1])*uint64(val2.n[0])
+	t1 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*2).
+	m = (m >> fieldBase) +
+		uint64(val.n[0])*uint64(val2.n[2]) +
+		uint64(val.n[1])*uint64(val2.n[1]) +
+		uint64(val.n[2])*uint64(val2.n[0])
+	t2 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*3).
+	m = (m >> fieldBase) +
+		uint64(val.n[0])*uint64(val2.n[3]) +
+		uint64(val.n[1])*uint64(val2.n[2]) +
+		uint64(val.n[2])*uint64(val2.n[1]) +
+		uint64(val.n[3])*uint64(val2.n[0])
+	t3 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*4).
+	m = (m >> fieldBase) +
+		uint64(val.n[0])*uint64(val2.n[4]) +
+		uint64(val.n[1])*uint64(val2.n[3]) +
+		uint64(val.n[2])*uint64(val2.n[2]) +
+		uint64(val.n[3])*uint64(val2.n[1]) +
+		uint64(val.n[4])*uint64(val2.n[0])
+	t4 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*5).
+	m = (m >> fieldBase) +
+		uint64(val.n[0])*uint64(val2.n[5]) +
+		uint64(val.n[1])*uint64(val2.n[4]) +
+		uint64(val.n[2])*uint64(val2.n[3]) +
+		uint64(val.n[3])*uint64(val2.n[2]) +
+		uint64(val.n[4])*uint64(val2.n[1]) +
+		uint64(val.n[5])*uint64(val2.n[0])
+	t5 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*6).
+	m = (m >> fieldBase) +
+		uint64(val.n[0])*uint64(val2.n[6]) +
+		uint64(val.n[1])*uint64(val2.n[5]) +
+		uint64(val.n[2])*uint64(val2.n[4]) +
+		uint64(val.n[3])*uint64(val2.n[3]) +
+		uint64(val.n[4])*uint64(val2.n[2]) +
+		uint64(val.n[5])*uint64(val2.n[1]) +
+		uint64(val.n[6])*uint64(val2.n[0])
+	t6 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*7).
+	m = (m >> fieldBase) +
+		uint64(val.n[0])*uint64(val2.n[7]) +
+		uint64(val.n[1])*uint64(val2.n[6]) +
+		uint64(val.n[2])*uint64(val2.n[5]) +
+		uint64(val.n[3])*uint64(val2.n[4]) +
+		uint64(val.n[4])*uint64(val2.n[3]) +
+		uint64(val.n[5])*uint64(val2.n[2]) +
+		uint64(val.n[6])*uint64(val2.n[1]) +
+		uint64(val.n[7])*uint64(val2.n[0])
+	t7 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*8).
+	m = (m >> fieldBase) +
+		uint64(val.n[0])*uint64(val2.n[8]) +
+		uint64(val.n[1])*uint64(val2.n[7]) +
+		uint64(val.n[2])*uint64(val2.n[6]) +
+		uint64(val.n[3])*uint64(val2.n[5]) +
+		uint64(val.n[4])*uint64(val2.n[4]) +
+		uint64(val.n[5])*uint64(val2.n[3]) +
+		uint64(val.n[6])*uint64(val2.n[2]) +
+		uint64(val.n[7])*uint64(val2.n[1]) +
+		uint64(val.n[8])*uint64(val2.n[0])
+	t8 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*9).
+	m = (m >> fieldBase) +
+		uint64(val.n[0])*uint64(val2.n[9]) +
+		uint64(val.n[1])*uint64(val2.n[8]) +
+		uint64(val.n[2])*uint64(val2.n[7]) +
+		uint64(val.n[3])*uint64(val2.n[6]) +
+		uint64(val.n[4])*uint64(val2.n[5]) +
+		uint64(val.n[5])*uint64(val2.n[4]) +
+		uint64(val.n[6])*uint64(val2.n[3]) +
+		uint64(val.n[7])*uint64(val2.n[2]) +
+		uint64(val.n[8])*uint64(val2.n[1]) +
+		uint64(val.n[9])*uint64(val2.n[0])
+	t9 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*10).
+	m = (m >> fieldBase) +
+		uint64(val.n[1])*uint64(val2.n[9]) +
+		uint64(val.n[2])*uint64(val2.n[8]) +
+		uint64(val.n[3])*uint64(val2.n[7]) +
+		uint64(val.n[4])*uint64(val2.n[6]) +
+		uint64(val.n[5])*uint64(val2.n[5]) +
+		uint64(val.n[6])*uint64(val2.n[4]) +
+		uint64(val.n[7])*uint64(val2.n[3]) +
+		uint64(val.n[8])*uint64(val2.n[2]) +
+		uint64(val.n[9])*uint64(val2.n[1])
+	t10 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*11).
+	m = (m >> fieldBase) +
+		uint64(val.n[2])*uint64(val2.n[9]) +
+		uint64(val.n[3])*uint64(val2.n[8]) +
+		uint64(val.n[4])*uint64(val2.n[7]) +
+		uint64(val.n[5])*uint64(val2.n[6]) +
+		uint64(val.n[6])*uint64(val2.n[5]) +
+		uint64(val.n[7])*uint64(val2.n[4]) +
+		uint64(val.n[8])*uint64(val2.n[3]) +
+		uint64(val.n[9])*uint64(val2.n[2])
+	t11 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*12).
+	m = (m >> fieldBase) +
+		uint64(val.n[3])*uint64(val2.n[9]) +
+		uint64(val.n[4])*uint64(val2.n[8]) +
+		uint64(val.n[5])*uint64(val2.n[7]) +
+		uint64(val.n[6])*uint64(val2.n[6]) +
+		uint64(val.n[7])*uint64(val2.n[5]) +
+		uint64(val.n[8])*uint64(val2.n[4]) +
+		uint64(val.n[9])*uint64(val2.n[3])
+	t12 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*13).
+	m = (m >> fieldBase) +
+		uint64(val.n[4])*uint64(val2.n[9]) +
+		uint64(val.n[5])*uint64(val2.n[8]) +
+		uint64(val.n[6])*uint64(val2.n[7]) +
+		uint64(val.n[7])*uint64(val2.n[6]) +
+		uint64(val.n[8])*uint64(val2.n[5]) +
+		uint64(val.n[9])*uint64(val2.n[4])
+	t13 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*14).
+	m = (m >> fieldBase) +
+		uint64(val.n[5])*uint64(val2.n[9]) +
+		uint64(val.n[6])*uint64(val2.n[8]) +
+		uint64(val.n[7])*uint64(val2.n[7]) +
+		uint64(val.n[8])*uint64(val2.n[6]) +
+		uint64(val.n[9])*uint64(val2.n[5])
+	t14 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*15).
+	m = (m >> fieldBase) +
+		uint64(val.n[6])*uint64(val2.n[9]) +
+		uint64(val.n[7])*uint64(val2.n[8]) +
+		uint64(val.n[8])*uint64(val2.n[7]) +
+		uint64(val.n[9])*uint64(val2.n[6])
+	t15 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*16).
+	m = (m >> fieldBase) +
+		uint64(val.n[7])*uint64(val2.n[9]) +
+		uint64(val.n[8])*uint64(val2.n[8]) +
+		uint64(val.n[9])*uint64(val2.n[7])
+	t16 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*17).
+	m = (m >> fieldBase) +
+		uint64(val.n[8])*uint64(val2.n[9]) +
+		uint64(val.n[9])*uint64(val2.n[8])
+	t17 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*18).
+	m = (m >> fieldBase) + uint64(val.n[9])*uint64(val2.n[9])
+	t18 := m & fieldBaseMask
+
+	// What's left is for 2^(fieldBase*19).
+	t19 := m >> fieldBase
+
+	// At this point, all of the terms are grouped into their respective
+	// base.
+	//
+	// Per [HAC] section 14.3.4: Reduction method of moduli of special form,
+	// when the modulus is of the special form m = b^t - c, highly efficient
+	// reduction can be achieved per the provided algorithm.
+	//
+	// The secp256k1 prime is equivalent to 2^256 - 4294968273, so it fits
+	// this criteria.
+	//
+	// 4294968273 in field representation (base 2^26) is:
+	// n[0] = 977
+	// n[1] = 64
+	// That is to say (2^26 * 64) + 977 = 4294968273
+	//
+	// Since each word is in base 2^26, the upper terms (t10 and up) start
+	// at 260 bits (versus the final desired range of 256 bits), so the
+	// field representation of 'c' from above needs to be adjusted for the
+	// extra 4 bits by multiplying it by 2^4 = 16.  4294968273 * 16 =
+	// 68719492368.  Thus, the adjusted field representation of 'c' is:
+	// n[0] = 977 * 16 = 15632
+	// n[1] = 64 * 16 = 1024
+	// That is to say (2^26 * 1024) + 15632 = 68719492368
+	//
+	// To reduce the final term, t19, the entire 'c' value is needed instead
+	// of only n[0] because there are no more terms left to handle n[1].
+	// This means there might be some magnitude left in the upper bits that
+	// is handled below.
+	m = t0 + t10*15632
+	t0 = m & fieldBaseMask
+	m = (m >> fieldBase) + t1 + t10*1024 + t11*15632
+	t1 = m & fieldBaseMask
+	m = (m >> fieldBase) + t2 + t11*1024 + t12*15632
+	t2 = m & fieldBaseMask
+	m = (m >> fieldBase) + t3 + t12*1024 + t13*15632
+	t3 = m & fieldBaseMask
+	m = (m >> fieldBase) + t4 + t13*1024 + t14*15632
+	t4 = m & fieldBaseMask
+	m = (m >> fieldBase) + t5 + t14*1024 + t15*15632
+	t5 = m & fieldBaseMask
+	m = (m >> fieldBase) + t6 + t15*1024 + t16*15632
+	t6 = m & fieldBaseMask
+	m = (m >> fieldBase) + t7 + t16*1024 + t17*15632
+	t7 = m & fieldBaseMask
+	m = (m >> fieldBase) + t8 + t17*1024 + t18*15632
+	t8 = m & fieldBaseMask
+	m = (m >> fieldBase) + t9 + t18*1024 + t19*68719492368
+	t9 = m & fieldMSBMask
+	m = m >> fieldMSBBits
+
+	// At this point, if the magnitude is greater than 0, the overall value
+	// is greater than the max possible 256-bit value.  In particular, it is
+	// "how many times larger" than the max value it is.
+	//
+	// The algorithm presented in [HAC] section 14.3.4 repeats until the
+	// quotient is zero.  However, due to the above, we already know at
+	// least how many times we would need to repeat as it's the value
+	// currently in m.  Thus we can simply multiply the magnitude by the
+	// field representation of the prime and do a single iteration.  Notice
+	// that nothing will be changed when the magnitude is zero, so we could
+	// skip this in that case, however always running regardless allows it
+	// to run in constant time.  The final result will be in the range
+	// 0 <= result <= prime + (2^64 - c), so it is guaranteed to have a
+	// magnitude of 1, but it is denormalized.
+	d := t0 + m*977
+	f.n[0] = uint32(d & fieldBaseMask)
+	d = (d >> fieldBase) + t1 + m*64
+	f.n[1] = uint32(d & fieldBaseMask)
+	f.n[2] = uint32((d >> fieldBase) + t2)
+	f.n[3] = uint32(t3)
+	f.n[4] = uint32(t4)
+	f.n[5] = uint32(t5)
+	f.n[6] = uint32(t6)
+	f.n[7] = uint32(t7)
+	f.n[8] = uint32(t8)
+	f.n[9] = uint32(t9)
+
+	return f
+}
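+
+// As a worked check of the adjusted reduction constants above (illustrative
+// only): the upper terms (t10 and up) sit at 2^260 = 2^4 * 2^256 and above,
+// so folding them back via 2^256 ≡ c (mod P) contributes 16*c per step:
+//
+//	977*16 = 15632,  64*16 = 1024
+//	1024*2^26 + 15632 = 68719476736 + 15632 = 68719492368 = 16*4294968273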
+
+// SquareRootVal either calculates the square root of the passed value when it
+// exists or the square root of the negation of the value when it does not exist
+// and stores the result in f in constant time.  The return flag is true when
+// the calculated square root is for the passed value itself and false when it
+// is for its negation.
+//
+// Note that this function can overflow if multiplying any of the individual
+// words exceeds a max uint32.  In practice, this means the magnitude of the
+// field must be a max of 8 to prevent overflow.  The magnitude of the result
+// will be 1.
+//
+//	Preconditions:
+//	  - The input field value MUST have a max magnitude of 8
+//	Output Normalized: No
+//	Output Max Magnitude: 1
+func (f *FieldVal) SquareRootVal(val *FieldVal) bool {
+	// This uses the Tonelli-Shanks method for calculating the square root of
+	// the value when it exists.  The key principles of the method follow.
+	//
+	// Fermat's little theorem states that for a nonzero number 'a' and prime
+	// 'p', a^(p-1) ≡ 1 (mod p).
+	//
+	// Further, Euler's criterion states that an integer 'a' has a square root
+	// (aka is a quadratic residue) modulo a prime if a^((p-1)/2) ≡ 1 (mod p)
+	// and, conversely, when it does NOT have a square root (aka 'a' is a
+	// non-residue) a^((p-1)/2) ≡ -1 (mod p).
+	//
+	// This can be seen by considering that Fermat's little theorem can be
+	// written as (a^((p-1)/2) - 1)(a^((p-1)/2) + 1) ≡ 0 (mod p).  Therefore,
+	// one of the two factors must be 0.  Then, when a ≡ x^2 (aka 'a' is a
+	// quadratic residue), (x^2)^((p-1)/2) ≡ x^(p-1) ≡ 1 (mod p) which implies
+	// the first factor must be zero.  Finally, per Lagrange's theorem, the
+	// non-residues are the only remaining possible solutions and thus must make
+	// the second factor zero to satisfy Fermat's little theorem implying that
+	// a^((p-1)/2) ≡ -1 (mod p) for that case.
+	//
+	// The Tonelli-Shanks method uses these facts along with factoring out
+	// powers of two to solve a congruence that results in either the solution
+	// when the square root exists or the square root of the negation of the
+	// value when it does not.  In the case of primes that are ≡ 3 (mod 4), the
+	// possible solutions are r = ±a^((p+1)/4) (mod p).  Therefore, either r^2 ≡
+	// a (mod p) is true in which case ±r are the two solutions, or r^2 ≡ -a
+	// (mod p) in which case 'a' is a non-residue and there are no solutions.
+	//
+	// The secp256k1 prime is ≡ 3 (mod 4), so this result applies.
+	//
+	// In other words, calculate a^((p+1)/4) and then square it and check it
+	// against the original value to determine if it is actually the square
+	// root.
+	//
+	// In order to efficiently compute a^((p+1)/4), (p+1)/4 needs to be split
+	// into a sequence of squares and multiplications that minimizes the number
+	// of multiplications needed (since they are more costly than squarings).
+	//
+	// (The secp256k1 prime + 1) / 4 is 2^254 - 2^30 - 244.  In binary, that is:
+	//
+	// 00111111 11111111 11111111 11111111
+	// 11111111 11111111 11111111 11111111
+	// 11111111 11111111 11111111 11111111
+	// 11111111 11111111 11111111 11111111
+	// 11111111 11111111 11111111 11111111
+	// 11111111 11111111 11111111 11111111
+	// 11111111 11111111 11111111 11111111
+	// 10111111 11111111 11111111 00001100
+	//
+	// Notice that can be broken up into three windows of consecutive 1s (in
+	// order of least to most significant) as:
+	//
+	//   6-bit window with two bits set (bits 4, 5, 6, 7 unset)
+	//   23-bit window with 22 bits set (bit 30 unset)
+	//   223-bit window with all 223 bits set
+	//
+	// Thus, the groups of 1 bits in each window forms the set:
+	// S = {2, 22, 223}.
+	//
+	// The strategy is to calculate a^(2^n - 1) for each grouping via an
+	// addition chain with a sliding window.
+	//
+	// The addition chain used is (credits to Peter Dettman):
+	// (0,0),(1,0),(2,2),(3,2),(4,1),(5,5),(6,6),(7,7),(8,8),(9,7),(10,2)
+	// => 2^1 2^[2] 2^3 2^6 2^9 2^11 2^[22] 2^44 2^88 2^176 2^220 2^[223]
+	//
+	// This has a cost of 254 field squarings and 13 field multiplications.
+	var a, a2, a3, a6, a9, a11, a22, a44, a88, a176, a220, a223 FieldVal
+	a.Set(val)
+	a2.SquareVal(&a).Mul(&a)                                  // a2 = a^(2^2 - 1)
+	a3.SquareVal(&a2).Mul(&a)                                 // a3 = a^(2^3 - 1)
+	a6.SquareVal(&a3).Square().Square()                       // a6 = a^(2^6 - 2^3)
+	a6.Mul(&a3)                                               // a6 = a^(2^6 - 1)
+	a9.SquareVal(&a6).Square().Square()                       // a9 = a^(2^9 - 2^3)
+	a9.Mul(&a3)                                               // a9 = a^(2^9 - 1)
+	a11.SquareVal(&a9).Square()                               // a11 = a^(2^11 - 2^2)
+	a11.Mul(&a2)                                              // a11 = a^(2^11 - 1)
+	a22.SquareVal(&a11).Square().Square().Square().Square()   // a22 = a^(2^16 - 2^5)
+	a22.Square().Square().Square().Square().Square()          // a22 = a^(2^21 - 2^10)
+	a22.Square()                                              // a22 = a^(2^22 - 2^11)
+	a22.Mul(&a11)                                             // a22 = a^(2^22 - 1)
+	a44.SquareVal(&a22).Square().Square().Square().Square()   // a44 = a^(2^27 - 2^5)
+	a44.Square().Square().Square().Square().Square()          // a44 = a^(2^32 - 2^10)
+	a44.Square().Square().Square().Square().Square()          // a44 = a^(2^37 - 2^15)
+	a44.Square().Square().Square().Square().Square()          // a44 = a^(2^42 - 2^20)
+	a44.Square().Square()                                     // a44 = a^(2^44 - 2^22)
+	a44.Mul(&a22)                                             // a44 = a^(2^44 - 1)
+	a88.SquareVal(&a44).Square().Square().Square().Square()   // a88 = a^(2^49 - 2^5)
+	a88.Square().Square().Square().Square().Square()          // a88 = a^(2^54 - 2^10)
+	a88.Square().Square().Square().Square().Square()          // a88 = a^(2^59 - 2^15)
+	a88.Square().Square().Square().Square().Square()          // a88 = a^(2^64 - 2^20)
+	a88.Square().Square().Square().Square().Square()          // a88 = a^(2^69 - 2^25)
+	a88.Square().Square().Square().Square().Square()          // a88 = a^(2^74 - 2^30)
+	a88.Square().Square().Square().Square().Square()          // a88 = a^(2^79 - 2^35)
+	a88.Square().Square().Square().Square().Square()          // a88 = a^(2^84 - 2^40)
+	a88.Square().Square().Square().Square()                   // a88 = a^(2^88 - 2^44)
+	a88.Mul(&a44)                                             // a88 = a^(2^88 - 1)
+	a176.SquareVal(&a88).Square().Square().Square().Square()  // a176 = a^(2^93 - 2^5)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^98 - 2^10)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^103 - 2^15)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^108 - 2^20)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^113 - 2^25)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^118 - 2^30)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^123 - 2^35)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^128 - 2^40)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^133 - 2^45)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^138 - 2^50)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^143 - 2^55)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^148 - 2^60)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^153 - 2^65)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^158 - 2^70)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^163 - 2^75)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^168 - 2^80)
+	a176.Square().Square().Square().Square().Square()         // a176 = a^(2^173 - 2^85)
+	a176.Square().Square().Square()                           // a176 = a^(2^176 - 2^88)
+	a176.Mul(&a88)                                            // a176 = a^(2^176 - 1)
+	a220.SquareVal(&a176).Square().Square().Square().Square() // a220 = a^(2^181 - 2^5)
+	a220.Square().Square().Square().Square().Square()         // a220 = a^(2^186 - 2^10)
+	a220.Square().Square().Square().Square().Square()         // a220 = a^(2^191 - 2^15)
+	a220.Square().Square().Square().Square().Square()         // a220 = a^(2^196 - 2^20)
+	a220.Square().Square().Square().Square().Square()         // a220 = a^(2^201 - 2^25)
+	a220.Square().Square().Square().Square().Square()         // a220 = a^(2^206 - 2^30)
+	a220.Square().Square().Square().Square().Square()         // a220 = a^(2^211 - 2^35)
+	a220.Square().Square().Square().Square().Square()         // a220 = a^(2^216 - 2^40)
+	a220.Square().Square().Square().Square()                  // a220 = a^(2^220 - 2^44)
+	a220.Mul(&a44)                                            // a220 = a^(2^220 - 1)
+	a223.SquareVal(&a220).Square().Square()                   // a223 = a^(2^223 - 2^3)
+	a223.Mul(&a3)                                             // a223 = a^(2^223 - 1)
+
+	f.SquareVal(&a223).Square().Square().Square().Square() // f = a^(2^228 - 2^5)
+	f.Square().Square().Square().Square().Square()         // f = a^(2^233 - 2^10)
+	f.Square().Square().Square().Square().Square()         // f = a^(2^238 - 2^15)
+	f.Square().Square().Square().Square().Square()         // f = a^(2^243 - 2^20)
+	f.Square().Square().Square()                           // f = a^(2^246 - 2^23)
+	f.Mul(&a22)                                            // f = a^(2^246 - 2^22 - 1)
+	f.Square().Square().Square().Square().Square()         // f = a^(2^251 - 2^27 - 2^5)
+	f.Square()                                             // f = a^(2^252 - 2^28 - 2^6)
+	f.Mul(&a2)                                             // f = a^(2^252 - 2^28 - 2^6 - 2^1 - 1)
+	f.Square().Square()                                    // f = a^(2^254 - 2^30 - 2^8 - 2^3 - 2^2)
+	//                                                     //   = a^(2^254 - 2^30 - 244)
+	//                                                     //   = a^((p+1)/4)
+
+	// Ensure the calculated result is actually the square root by squaring it
+	// and checking against the original value.
+	var sqr FieldVal
+	return sqr.SquareVal(f).Normalize().Equals(val.Normalize())
+}
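+
+// A usage sketch (illustrative only): checking whether a value is a quadratic
+// residue and recovering a root when it is:
+//
+//	var root FieldVal
+//	if root.SquareRootVal(&v) { // v: hypothetical FieldVal with max magnitude 8
+//		// root*root ≡ v (mod P); the other square root is its negation.
+//	} else {
+//		// v is a non-residue; root instead holds a square root of -v.
+//	}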
+
+// Square squares the field value in constant time.  The existing field value is
+// modified.  Note that this function can overflow if multiplying any of the
+// individual words exceeds a max uint32.  In practice, this means the magnitude
+// of the field must be a max of 8 to prevent overflow.
+//
+// The field value is returned to support chaining.  This enables syntax like:
+// f.Square().Mul(f2) so that f = f^2 * f2.
+//
+//	Preconditions:
+//	  - The field value MUST have a max magnitude of 8
+//	Output Normalized: No
+//	Output Max Magnitude: 1
+func (f *FieldVal) Square() *FieldVal {
+	return f.SquareVal(f)
+}
+
+// SquareVal squares the passed value and stores the result in f in constant
+// time.  Note that this function can overflow if multiplying any of the
+// individual words exceeds a max uint32.  In practice, this means the magnitude
+// of the field being squared must be a max of 8 to prevent overflow.
+//
+// The field value is returned to support chaining.  This enables syntax like:
+// f3.SquareVal(f).Mul(f) so that f3 = f^2 * f = f^3.
+//
+//	Preconditions:
+//	  - The input field value MUST have a max magnitude of 8
+//	Output Normalized: No
+//	Output Max Magnitude: 1
+func (f *FieldVal) SquareVal(val *FieldVal) *FieldVal {
+	// This could be done with a couple of for loops and an array to store
+	// the intermediate terms, but this unrolled version is significantly
+	// faster.
+
+	// Terms for 2^(fieldBase*0).
+	m := uint64(val.n[0]) * uint64(val.n[0])
+	t0 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*1).
+	m = (m >> fieldBase) + 2*uint64(val.n[0])*uint64(val.n[1])
+	t1 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*2).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[0])*uint64(val.n[2]) +
+		uint64(val.n[1])*uint64(val.n[1])
+	t2 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*3).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[0])*uint64(val.n[3]) +
+		2*uint64(val.n[1])*uint64(val.n[2])
+	t3 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*4).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[0])*uint64(val.n[4]) +
+		2*uint64(val.n[1])*uint64(val.n[3]) +
+		uint64(val.n[2])*uint64(val.n[2])
+	t4 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*5).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[0])*uint64(val.n[5]) +
+		2*uint64(val.n[1])*uint64(val.n[4]) +
+		2*uint64(val.n[2])*uint64(val.n[3])
+	t5 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*6).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[0])*uint64(val.n[6]) +
+		2*uint64(val.n[1])*uint64(val.n[5]) +
+		2*uint64(val.n[2])*uint64(val.n[4]) +
+		uint64(val.n[3])*uint64(val.n[3])
+	t6 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*7).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[0])*uint64(val.n[7]) +
+		2*uint64(val.n[1])*uint64(val.n[6]) +
+		2*uint64(val.n[2])*uint64(val.n[5]) +
+		2*uint64(val.n[3])*uint64(val.n[4])
+	t7 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*8).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[0])*uint64(val.n[8]) +
+		2*uint64(val.n[1])*uint64(val.n[7]) +
+		2*uint64(val.n[2])*uint64(val.n[6]) +
+		2*uint64(val.n[3])*uint64(val.n[5]) +
+		uint64(val.n[4])*uint64(val.n[4])
+	t8 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*9).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[0])*uint64(val.n[9]) +
+		2*uint64(val.n[1])*uint64(val.n[8]) +
+		2*uint64(val.n[2])*uint64(val.n[7]) +
+		2*uint64(val.n[3])*uint64(val.n[6]) +
+		2*uint64(val.n[4])*uint64(val.n[5])
+	t9 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*10).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[1])*uint64(val.n[9]) +
+		2*uint64(val.n[2])*uint64(val.n[8]) +
+		2*uint64(val.n[3])*uint64(val.n[7]) +
+		2*uint64(val.n[4])*uint64(val.n[6]) +
+		uint64(val.n[5])*uint64(val.n[5])
+	t10 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*11).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[2])*uint64(val.n[9]) +
+		2*uint64(val.n[3])*uint64(val.n[8]) +
+		2*uint64(val.n[4])*uint64(val.n[7]) +
+		2*uint64(val.n[5])*uint64(val.n[6])
+	t11 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*12).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[3])*uint64(val.n[9]) +
+		2*uint64(val.n[4])*uint64(val.n[8]) +
+		2*uint64(val.n[5])*uint64(val.n[7]) +
+		uint64(val.n[6])*uint64(val.n[6])
+	t12 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*13).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[4])*uint64(val.n[9]) +
+		2*uint64(val.n[5])*uint64(val.n[8]) +
+		2*uint64(val.n[6])*uint64(val.n[7])
+	t13 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*14).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[5])*uint64(val.n[9]) +
+		2*uint64(val.n[6])*uint64(val.n[8]) +
+		uint64(val.n[7])*uint64(val.n[7])
+	t14 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*15).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[6])*uint64(val.n[9]) +
+		2*uint64(val.n[7])*uint64(val.n[8])
+	t15 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*16).
+	m = (m >> fieldBase) +
+		2*uint64(val.n[7])*uint64(val.n[9]) +
+		uint64(val.n[8])*uint64(val.n[8])
+	t16 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*17).
+	m = (m >> fieldBase) + 2*uint64(val.n[8])*uint64(val.n[9])
+	t17 := m & fieldBaseMask
+
+	// Terms for 2^(fieldBase*18).
+	m = (m >> fieldBase) + uint64(val.n[9])*uint64(val.n[9])
+	t18 := m & fieldBaseMask
+
+	// What's left is for 2^(fieldBase*19).
+	t19 := m >> fieldBase
+
+	// At this point, all of the terms are grouped into their respective
+	// base.
+	//
+	// Per [HAC] section 14.3.4: Reduction method of moduli of special form,
+	// when the modulus is of the special form m = b^t - c, highly efficient
+	// reduction can be achieved per the provided algorithm.
+	//
+	// The secp256k1 prime is equivalent to 2^256 - 4294968273, so it fits
+	// this criteria.
+	//
+	// 4294968273 in field representation (base 2^26) is:
+	// n[0] = 977
+	// n[1] = 64
+	// That is to say (2^26 * 64) + 977 = 4294968273
+	//
+	// Since each word is in base 2^26, the upper terms (t10 and up) start
+	// at 260 bits (versus the final desired range of 256 bits), so the
+	// field representation of 'c' from above needs to be adjusted for the
+	// extra 4 bits by multiplying it by 2^4 = 16.  4294968273 * 16 =
+	// 68719492368.  Thus, the adjusted field representation of 'c' is:
+	// n[0] = 977 * 16 = 15632
+	// n[1] = 64 * 16 = 1024
+	// That is to say (2^26 * 1024) + 15632 = 68719492368
+	//
+	// To reduce the final term, t19, the entire 'c' value is needed instead
+	// of only n[0] because there are no more terms left to handle n[1].
+	// This means there might be some magnitude left in the upper bits that
+	// is handled below.
+	m = t0 + t10*15632
+	t0 = m & fieldBaseMask
+	m = (m >> fieldBase) + t1 + t10*1024 + t11*15632
+	t1 = m & fieldBaseMask
+	m = (m >> fieldBase) + t2 + t11*1024 + t12*15632
+	t2 = m & fieldBaseMask
+	m = (m >> fieldBase) + t3 + t12*1024 + t13*15632
+	t3 = m & fieldBaseMask
+	m = (m >> fieldBase) + t4 + t13*1024 + t14*15632
+	t4 = m & fieldBaseMask
+	m = (m >> fieldBase) + t5 + t14*1024 + t15*15632
+	t5 = m & fieldBaseMask
+	m = (m >> fieldBase) + t6 + t15*1024 + t16*15632
+	t6 = m & fieldBaseMask
+	m = (m >> fieldBase) + t7 + t16*1024 + t17*15632
+	t7 = m & fieldBaseMask
+	m = (m >> fieldBase) + t8 + t17*1024 + t18*15632
+	t8 = m & fieldBaseMask
+	m = (m >> fieldBase) + t9 + t18*1024 + t19*68719492368
+	t9 = m & fieldMSBMask
+	m = m >> fieldMSBBits
+
+	// At this point, if the magnitude is greater than 0, the overall value
+	// is greater than the max possible 256-bit value.  In particular, it is
+	// "how many times larger" than the max value it is.
+	//
+	// The algorithm presented in [HAC] section 14.3.4 repeats until the
+	// quotient is zero.  However, due to the above, we already know at
+	// least how many times we would need to repeat as it's the value
+	// currently in m.  Thus we can simply multiply the magnitude by the
+	// field representation of the prime and do a single iteration.  Notice
+	// that nothing will be changed when the magnitude is zero, so we could
+	// skip this in that case, however always running regardless allows it
+	// to run in constant time.  The final result will be in the range
+	// 0 <= result <= prime + (2^64 - c), so it is guaranteed to have a
+	// magnitude of 1, but it is denormalized.
+	n := t0 + m*977
+	f.n[0] = uint32(n & fieldBaseMask)
+	n = (n >> fieldBase) + t1 + m*64
+	f.n[1] = uint32(n & fieldBaseMask)
+	f.n[2] = uint32((n >> fieldBase) + t2)
+	f.n[3] = uint32(t3)
+	f.n[4] = uint32(t4)
+	f.n[5] = uint32(t5)
+	f.n[6] = uint32(t6)
+	f.n[7] = uint32(t7)
+	f.n[8] = uint32(t8)
+	f.n[9] = uint32(t9)
+
+	return f
+}
+
+// Inverse finds the modular multiplicative inverse of the field value in
+// constant time.  The existing field value is modified.
+//
+// The field value is returned to support chaining.  This enables syntax like:
+// f.Inverse().Mul(f2) so that f = f^-1 * f2.
+//
+//	Preconditions:
+//	  - The field value MUST have a max magnitude of 8
+//	Output Normalized: No
+//	Output Max Magnitude: 1
+func (f *FieldVal) Inverse() *FieldVal {
+	// Fermat's little theorem states that for a nonzero number a and prime
+	// p, a^(p-1) ≡ 1 (mod p).  Since the multiplicative inverse satisfies
+	// a*b ≡ 1 (mod p) and a*a^(p-2) = a^(p-1) ≡ 1 (mod p), it follows that
+	// b = a^(p-2).  Thus, a^(p-2) is the multiplicative inverse.
+	//
+	// In order to efficiently compute a^(p-2), p-2 needs to be split into
+	// a sequence of squares and multiplications that minimizes the number
+	// of multiplications needed (since they are more costly than
+	// squarings). Intermediate results are saved and reused as well.
+	//
+	// The secp256k1 prime - 2 is 2^256 - 4294968275.
+	//
+	// This has a cost of 258 field squarings and 33 field multiplications.
+	var a2, a3, a4, a10, a11, a21, a42, a45, a63, a1019, a1023 FieldVal
+	a2.SquareVal(f)
+	a3.Mul2(&a2, f)
+	a4.SquareVal(&a2)
+	a10.SquareVal(&a4).Mul(&a2)
+	a11.Mul2(&a10, f)
+	a21.Mul2(&a10, &a11)
+	a42.SquareVal(&a21)
+	a45.Mul2(&a42, &a3)
+	a63.Mul2(&a42, &a21)
+	a1019.SquareVal(&a63).Square().Square().Square().Mul(&a11)
+	a1023.Mul2(&a1019, &a4)
+	f.Set(&a63)                                    // f = a^(2^6 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^11 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^16 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^16 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^21 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^26 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^26 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^31 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^36 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^36 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^41 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^46 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^46 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^51 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^56 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^56 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^61 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^66 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^66 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^71 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^76 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^76 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^81 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^86 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^86 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^91 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^96 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^96 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^101 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^106 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^106 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^111 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^116 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^116 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^121 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^126 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^126 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^131 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^136 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^136 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^141 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^146 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^146 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^151 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^156 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^156 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^161 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^166 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^166 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^171 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^176 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^176 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^181 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^186 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^186 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^191 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^196 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^196 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^201 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^206 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^206 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^211 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^216 - 1024)
+	f.Mul(&a1023)                                  // f = a^(2^216 - 1)
+	f.Square().Square().Square().Square().Square() // f = a^(2^221 - 32)
+	f.Square().Square().Square().Square().Square() // f = a^(2^226 - 1024)
+	f.Mul(&a1019)                                  // f = a^(2^226 - 5)
+	f.Square().Square().Square().Square().Square() // f = a^(2^231 - 160)
+	f.Square().Square().Square().Square().Square() // f = a^(2^236 - 5120)
+	f.Mul(&a1023)                                  // f = a^(2^236 - 4097)
+	f.Square().Square().Square().Square().Square() // f = a^(2^241 - 131104)
+	f.Square().Square().Square().Square().Square() // f = a^(2^246 - 4195328)
+	f.Mul(&a1023)                                  // f = a^(2^246 - 4194305)
+	f.Square().Square().Square().Square().Square() // f = a^(2^251 - 134217760)
+	f.Square().Square().Square().Square().Square() // f = a^(2^256 - 4294968320)
+	return f.Mul(&a45)                             // f = a^(2^256 - 4294968275) = a^(p-2)
+}
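The addition chain above is simply a constant-time way of raising a to the power p-2. As a quick cross-check sketch (not part of the vendored package; it uses only math/big and an arbitrary sample input), the same inverse can be obtained two independent ways and compared:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// secp256k1 field prime P = 2^256 - 2^32 - 977.
	p, _ := new(big.Int).SetString(
		"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 16)

	a := big.NewInt(123456789) // arbitrary nonzero sample value

	// Fermat's little theorem: a^(p-2) mod p is the inverse of a for prime p.
	viaFermat := new(big.Int).Exp(a, new(big.Int).Sub(p, big.NewInt(2)), p)

	// Extended Euclidean inverse from the standard library.
	viaEuclid := new(big.Int).ModInverse(a, p)

	fmt.Println(viaFermat.Cmp(viaEuclid) == 0) // true
}
```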
+
+// IsGtOrEqPrimeMinusOrder returns whether or not the field value is greater
+// than or equal to the field prime minus the group order in constant time.
+//
+//	Preconditions:
+//	  - The field value MUST be normalized
+func (f *FieldVal) IsGtOrEqPrimeMinusOrder() bool {
+	// The secp256k1 prime is equivalent to 2^256 - 4294968273 and the group
+	// order is 2^256 - 432420386565659656852420866394968145599.  Thus,
+	// the prime minus the group order is:
+	// 432420386565659656852420866390673177326
+	//
+	// In hex that is:
+	// 0x00000000 00000000 00000000 00000001 45512319 50b75fc4 402da172 2fc9baee
+	//
+	// Converting that to field representation (base 2^26) is:
+	//
+	// n[0] = 0x03c9baee
+	// n[1] = 0x03685c8b
+	// n[2] = 0x01fc4402
+	// n[3] = 0x006542dd
+	// n[4] = 0x01455123
+	//
+	// This can be verified with the following test code:
+	//   pMinusN := new(big.Int).Sub(curveParams.P, curveParams.N)
+	//   var fv FieldVal
+	//   fv.SetByteSlice(pMinusN.Bytes())
+	//   t.Logf("%x", fv.n)
+	//
+	//   Outputs: [3c9baee 3685c8b 1fc4402 6542dd 1455123 0 0 0 0 0]
+	const (
+		pMinusNWordZero  = 0x03c9baee
+		pMinusNWordOne   = 0x03685c8b
+		pMinusNWordTwo   = 0x01fc4402
+		pMinusNWordThree = 0x006542dd
+		pMinusNWordFour  = 0x01455123
+		pMinusNWordFive  = 0x00000000
+		pMinusNWordSix   = 0x00000000
+		pMinusNWordSeven = 0x00000000
+		pMinusNWordEight = 0x00000000
+		pMinusNWordNine  = 0x00000000
+	)
+
+	// The intuition here is that the value is greater than or equal to the field
+	// prime minus the group order if one of the higher individual words is greater
+	// than the corresponding word and all higher words in the value are equal.
+	result := constantTimeGreater(f.n[9], pMinusNWordNine)
+	highWordsEqual := constantTimeEq(f.n[9], pMinusNWordNine)
+	result |= highWordsEqual & constantTimeGreater(f.n[8], pMinusNWordEight)
+	highWordsEqual &= constantTimeEq(f.n[8], pMinusNWordEight)
+	result |= highWordsEqual & constantTimeGreater(f.n[7], pMinusNWordSeven)
+	highWordsEqual &= constantTimeEq(f.n[7], pMinusNWordSeven)
+	result |= highWordsEqual & constantTimeGreater(f.n[6], pMinusNWordSix)
+	highWordsEqual &= constantTimeEq(f.n[6], pMinusNWordSix)
+	result |= highWordsEqual & constantTimeGreater(f.n[5], pMinusNWordFive)
+	highWordsEqual &= constantTimeEq(f.n[5], pMinusNWordFive)
+	result |= highWordsEqual & constantTimeGreater(f.n[4], pMinusNWordFour)
+	highWordsEqual &= constantTimeEq(f.n[4], pMinusNWordFour)
+	result |= highWordsEqual & constantTimeGreater(f.n[3], pMinusNWordThree)
+	highWordsEqual &= constantTimeEq(f.n[3], pMinusNWordThree)
+	result |= highWordsEqual & constantTimeGreater(f.n[2], pMinusNWordTwo)
+	highWordsEqual &= constantTimeEq(f.n[2], pMinusNWordTwo)
+	result |= highWordsEqual & constantTimeGreater(f.n[1], pMinusNWordOne)
+	highWordsEqual &= constantTimeEq(f.n[1], pMinusNWordOne)
+	result |= highWordsEqual & constantTimeGreaterOrEq(f.n[0], pMinusNWordZero)
+
+	return result != 0
+}
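The word constants above encode P - N. A standalone sketch (assuming only math/big and the published hex values of the secp256k1 prime and group order) reproduces that constant, matching the hex shown in the comment:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// secp256k1 field prime P and group order N.
	p, _ := new(big.Int).SetString(
		"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 16)
	n, _ := new(big.Int).SetString(
		"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)

	// P - N is the threshold IsGtOrEqPrimeMinusOrder compares against.
	fmt.Printf("%x\n", new(big.Int).Sub(p, n)) // 14551231950b75fc4402da1722fc9baee
}
```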
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/loadprecomputed.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/loadprecomputed.go
new file mode 100644
index 0000000000..91c3d37769
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/loadprecomputed.go
@@ -0,0 +1,91 @@
+// Copyright 2015 The btcsuite developers
+// Copyright (c) 2015-2022 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package secp256k1
+
+import (
+	"compress/zlib"
+	"encoding/base64"
+	"io"
+	"strings"
+	"sync"
+)
+
+//go:generate go run genprecomps.go
+
+// bytePointTable describes a table used to house pre-computed values for
+// accelerating scalar base multiplication.
+type bytePointTable [32][256]JacobianPoint
+
+// compressedBytePointsFn is set to a real function by the code generation to
+// return the compressed pre-computed values for accelerating scalar base
+// multiplication.
+var compressedBytePointsFn func() string
+
+// s256BytePoints houses pre-computed values used to accelerate scalar base
+// multiplication such that they are only loaded on first use.
+var s256BytePoints = func() func() *bytePointTable {
+	// mustLoadBytePoints decompresses and deserializes the pre-computed byte
+	// points used to accelerate scalar base multiplication for the secp256k1
+	// curve.
+	//
+	// This approach is used since it allows compilation to use significantly
+	// less RAM and complete much faster than hard-coding the
+	// final in-memory data structure.  At the same time, it is quite fast to
+	// generate the in-memory data structure on first use with this approach
+	// versus computing the table.
+	//
+	// It will panic on any errors because the data is hard coded and thus any
+	// error means something is wrong in the source code.
+	var data *bytePointTable
+	mustLoadBytePoints := func() {
+		// There will be no byte points to load when generating them.
+		if compressedBytePointsFn == nil {
+			return
+		}
+		bp := compressedBytePointsFn()
+
+		// Decompress the pre-computed table used to accelerate scalar base
+		// multiplication.
+		decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(bp))
+		r, err := zlib.NewReader(decoder)
+		if err != nil {
+			panic(err)
+		}
+		serialized, err := io.ReadAll(r)
+		if err != nil {
+			panic(err)
+		}
+
+		// Deserialize the precomputed byte points and set the memory table to
+		// them.
+		offset := 0
+		var bytePoints bytePointTable
+		for byteNum := 0; byteNum < len(bytePoints); byteNum++ {
+			// All points in this window.
+			for i := 0; i < len(bytePoints[byteNum]); i++ {
+				p := &bytePoints[byteNum][i]
+				p.X.SetByteSlice(serialized[offset:])
+				offset += 32
+				p.Y.SetByteSlice(serialized[offset:])
+				offset += 32
+				p.Z.SetInt(1)
+			}
+		}
+		data = &bytePoints
+	}
+
+	// Return a closure that initializes the data on first access.  This is done
+	// because the table takes a non-trivial amount of memory and initializing
+	// it unconditionally would cause anything that imports the package, either
+	// directly, or indirectly via transitive deps, to use that memory even if
+	// the caller never accesses any parts of the package that actually need
+	// access to it.
+	var loadBytePointsOnce sync.Once
+	return func() *bytePointTable {
+		loadBytePointsOnce.Do(mustLoadBytePoints)
+		return data
+	}
+}()
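The pattern above, a closure over sync.Once returned from an immediately invoked function, defers all decompression work to the first caller. A generic sketch of the same idiom (expensiveTable and its build step are hypothetical placeholders, not this package's byte point table):

```go
package main

import (
	"fmt"
	"sync"
)

// expensiveTable stands in for a large pre-computed table.
type expensiveTable struct{ entries []int }

// lazyTable returns an accessor that builds the table exactly once, on first
// call, so importers that never use it pay no memory or CPU cost up front.
var lazyTable = func() func() *expensiveTable {
	var once sync.Once
	var data *expensiveTable
	build := func() {
		data = &expensiveTable{entries: make([]int, 1024)} // placeholder work
	}
	return func() *expensiveTable {
		once.Do(build)
		return data
	}
}()

func main() {
	t := lazyTable() // the first access triggers the build
	fmt.Println(len(t.entries))
}
```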
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go
new file mode 100644
index 0000000000..f66496ed5e
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go
@@ -0,0 +1,1101 @@
+// Copyright (c) 2020-2022 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package secp256k1
+
+import (
+	"encoding/hex"
+	"math/big"
+)
+
+// References:
+//   [SECG]: Recommended Elliptic Curve Domain Parameters
+//     https://www.secg.org/sec2-v2.pdf
+//
+//   [HAC]: Handbook of Applied Cryptography Menezes, van Oorschot, Vanstone.
+//     http://cacr.uwaterloo.ca/hac/
+
+// Many elliptic curve operations require working with scalars in a finite field
+// characterized by the order of the group underlying the secp256k1 curve.
+// Given this precision is larger than the biggest available native type,
+// obviously some form of bignum math is needed.  This code implements
+// specialized fixed-precision field arithmetic rather than relying on an
+// arbitrary-precision arithmetic package such as math/big for dealing with the
+// math modulo the group order since the size is known.  As a result, rather
+// large performance gains are achieved by taking advantage of many
+// optimizations not available to arbitrary-precision arithmetic and generic
+// modular arithmetic algorithms.
+//
+// There are various ways to internally represent each element.  For example,
+// the most obvious representation would be to use an array of 4 uint64s (64
+// bits * 4 = 256 bits).  However, that representation suffers from the fact
+// that there is no native Go type large enough to handle the intermediate
+// results while adding or multiplying two 64-bit numbers.
+//
+// Given the above, this implementation represents the field elements as 8
+// uint32s with each word (array entry) treated as base 2^32.  This was chosen
+// because most systems at the current time are 64-bit (or at least have 64-bit
+// registers available for specialized purposes such as MMX) so the intermediate
+// results can typically be done using a native register (and using uint64s to
+// avoid the need for additional half-word arithmetic).
+
+const (
+	// These fields provide convenient access to each of the words of the
+	// secp256k1 curve group order N to improve code readability.
+	//
+	// The group order of the curve per [SECG] is:
+	// 0xffffffff ffffffff ffffffff fffffffe baaedce6 af48a03b bfd25e8c d0364141
+	orderWordZero  uint32 = 0xd0364141
+	orderWordOne   uint32 = 0xbfd25e8c
+	orderWordTwo   uint32 = 0xaf48a03b
+	orderWordThree uint32 = 0xbaaedce6
+	orderWordFour  uint32 = 0xfffffffe
+	orderWordFive  uint32 = 0xffffffff
+	orderWordSix   uint32 = 0xffffffff
+	orderWordSeven uint32 = 0xffffffff
+
+	// These fields provide convenient access to each of the words of the two's
+	// complement of the secp256k1 curve group order N to improve code
+	// readability.
+	//
+	// The two's complement of the group order is:
+	// 0x00000000 00000000 00000000 00000001 45512319 50b75fc4 402da173 2fc9bebf
+	orderComplementWordZero  uint32 = (^orderWordZero) + 1
+	orderComplementWordOne   uint32 = ^orderWordOne
+	orderComplementWordTwo   uint32 = ^orderWordTwo
+	orderComplementWordThree uint32 = ^orderWordThree
+	//orderComplementWordFour  uint32 = ^orderWordFour  // unused
+	//orderComplementWordFive  uint32 = ^orderWordFive  // unused
+	//orderComplementWordSix   uint32 = ^orderWordSix   // unused
+	//orderComplementWordSeven uint32 = ^orderWordSeven // unused
+
+	// These fields provide convenient access to each of the words of the
+	// secp256k1 curve group order N / 2 to improve code readability and avoid
+	// the need to recalculate them.
+	//
+	// The half order of the secp256k1 curve group is:
+	// 0x7fffffff ffffffff ffffffff ffffffff 5d576e73 57a4501d dfe92f46 681b20a0
+	halfOrderWordZero  uint32 = 0x681b20a0
+	halfOrderWordOne   uint32 = 0xdfe92f46
+	halfOrderWordTwo   uint32 = 0x57a4501d
+	halfOrderWordThree uint32 = 0x5d576e73
+	halfOrderWordFour  uint32 = 0xffffffff
+	halfOrderWordFive  uint32 = 0xffffffff
+	halfOrderWordSix   uint32 = 0xffffffff
+	halfOrderWordSeven uint32 = 0x7fffffff
+
+	// uint32Mask is simply a mask with all bits set for a uint32 and is used to
+	// improve the readability of the code.
+	uint32Mask = 0xffffffff
+)
+
+var (
+	// zero32 is an array of 32 bytes used for the purposes of zeroing and is
+	// defined here to avoid extra allocations.
+	zero32 = [32]byte{}
+)
+
+// ModNScalar implements optimized 256-bit constant-time fixed-precision
+// arithmetic over the secp256k1 group order. This means all arithmetic is
+// performed modulo:
+//
+//	0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141
+//
+// It only implements the arithmetic needed for elliptic curve operations;
+// however, the operations that are not implemented can typically be worked
+// around if absolutely needed.  For example, subtraction can be performed by
+// adding the negation.
+//
+// Should it be absolutely necessary, conversion to the standard library
+// math/big.Int can be accomplished by using the Bytes method, slicing the
+// resulting fixed-size array, and feeding it to big.Int.SetBytes.  However,
+// that should typically be avoided when possible as conversion to big.Ints
+// requires allocations, is not constant time, and is slower when working modulo
+// the group order.
+type ModNScalar struct {
+	// The scalar is represented as 8 32-bit integers in base 2^32.
+	//
+	// The following depicts the internal representation:
+	// 	 ---------------------------------------------------------
+	// 	|       n[7]     |      n[6]      | ... |      n[0]      |
+	// 	| 32 bits        | 32 bits        | ... | 32 bits        |
+	// 	| Mult: 2^(32*7) | Mult: 2^(32*6) | ... | Mult: 2^(32*0) |
+	// 	 ---------------------------------------------------------
+	//
+	// For example, consider the number 2^87 + 2^42 + 1.  It would be
+	// represented as:
+	// 	n[0] = 1
+	// 	n[1] = 2^10
+	// 	n[2] = 2^23
+	// 	n[3..7] = 0
+	//
+	// The full 256-bit value is then calculated by looping i from 7..0 and
+	// doing sum(n[i] * 2^(32i)) like so:
+	// 	n[7] * 2^(32*7) = 0    * 2^224 = 0
+	// 	n[6] * 2^(32*6) = 0    * 2^192 = 0
+	// 	...
+	// 	n[2] * 2^(32*2) = 2^23 * 2^64  = 2^87
+	// 	n[1] * 2^(32*1) = 2^10 * 2^32  = 2^42
+	// 	n[0] * 2^(32*0) = 1    * 2^0   = 1
+	// 	Sum: 0 + 0 + ... + 2^87 + 2^42 + 1 = 2^87 + 2^42 + 1
+	n [8]uint32
+}
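To make the representation concrete, this small sketch (independent of the package) packs the 2^87 + 2^42 + 1 example from the comment above into the documented base-2^32 layout and recomputes the value with math/big:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 2^87 + 2^42 + 1, the example used in the representation comment.
	want := new(big.Int).Lsh(big.NewInt(1), 87)
	want.Add(want, new(big.Int).Lsh(big.NewInt(1), 42))
	want.Add(want, big.NewInt(1))

	// The same value in the documented word layout: n[i] holds the bits
	// multiplied by 2^(32*i).
	n := [8]uint32{1, 1 << 10, 1 << 23, 0, 0, 0, 0, 0}

	// Recompute sum(n[i] * 2^(32*i)) from the most significant word down.
	got := new(big.Int)
	for i := 7; i >= 0; i-- {
		got.Lsh(got, 32)
		got.Add(got, new(big.Int).SetUint64(uint64(n[i])))
	}
	fmt.Println(got.Cmp(want) == 0) // true
}
```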
+
+// String returns the scalar as a human-readable hex string.
+//
+// This is NOT constant time.
+func (s ModNScalar) String() string {
+	b := s.Bytes()
+	return hex.EncodeToString(b[:])
+}
+
+// Set sets the scalar equal to a copy of the passed one in constant time.
+//
+// The scalar is returned to support chaining.  This enables syntax like:
+// s := new(ModNScalar).Set(s2).Add(1) so that s = s2 + 1 where s2 is not
+// modified.
+func (s *ModNScalar) Set(val *ModNScalar) *ModNScalar {
+	*s = *val
+	return s
+}
+
+// Zero sets the scalar to zero in constant time.  A newly created scalar is
+// already set to zero.  This function can be useful to clear an existing scalar
+// for reuse.
+func (s *ModNScalar) Zero() {
+	s.n[0] = 0
+	s.n[1] = 0
+	s.n[2] = 0
+	s.n[3] = 0
+	s.n[4] = 0
+	s.n[5] = 0
+	s.n[6] = 0
+	s.n[7] = 0
+}
+
+// IsZeroBit returns 1 when the scalar is equal to zero or 0 otherwise in
+// constant time.
+//
+// Note that a bool is not used here because it is not possible in Go to convert
+// from a bool to a numeric value in constant time and many constant-time
+// operations require a numeric value.  See IsZero for the version that returns
+// a bool.
+func (s *ModNScalar) IsZeroBit() uint32 {
+	// The scalar can only be zero if no bits are set in any of the words.
+	bits := s.n[0] | s.n[1] | s.n[2] | s.n[3] | s.n[4] | s.n[5] | s.n[6] | s.n[7]
+	return constantTimeEq(bits, 0)
+}
+
+// IsZero returns whether or not the scalar is equal to zero in constant time.
+func (s *ModNScalar) IsZero() bool {
+	// The scalar can only be zero if no bits are set in any of the words.
+	bits := s.n[0] | s.n[1] | s.n[2] | s.n[3] | s.n[4] | s.n[5] | s.n[6] | s.n[7]
+	return bits == 0
+}
+
+// SetInt sets the scalar to the passed integer in constant time.  This is a
+// convenience function since it is fairly common to perform some arithmetic
+// with small native integers.
+//
+// The scalar is returned to support chaining.  This enables syntax like:
+// s := new(ModNScalar).SetInt(2).Mul(s2) so that s = 2 * s2.
+func (s *ModNScalar) SetInt(ui uint32) *ModNScalar {
+	s.Zero()
+	s.n[0] = ui
+	return s
+}
+
+// constantTimeEq returns 1 if a == b or 0 otherwise in constant time.
+func constantTimeEq(a, b uint32) uint32 {
+	return uint32((uint64(a^b) - 1) >> 63)
+}
+
+// constantTimeNotEq returns 1 if a != b or 0 otherwise in constant time.
+func constantTimeNotEq(a, b uint32) uint32 {
+	return ^uint32((uint64(a^b)-1)>>63) & 1
+}
+
+// constantTimeLess returns 1 if a < b or 0 otherwise in constant time.
+func constantTimeLess(a, b uint32) uint32 {
+	return uint32((uint64(a) - uint64(b)) >> 63)
+}
+
+// constantTimeLessOrEq returns 1 if a <= b or 0 otherwise in constant time.
+func constantTimeLessOrEq(a, b uint32) uint32 {
+	return uint32((uint64(a) - uint64(b) - 1) >> 63)
+}
+
+// constantTimeGreater returns 1 if a > b or 0 otherwise in constant time.
+func constantTimeGreater(a, b uint32) uint32 {
+	return constantTimeLess(b, a)
+}
+
+// constantTimeGreaterOrEq returns 1 if a >= b or 0 otherwise in constant time.
+func constantTimeGreaterOrEq(a, b uint32) uint32 {
+	return constantTimeLessOrEq(b, a)
+}
+
+// constantTimeMin returns min(a,b) in constant time.
+func constantTimeMin(a, b uint32) uint32 {
+	return b ^ ((a ^ b) & -constantTimeLess(a, b))
+}
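The helpers above all reduce to two 64-bit borrow/underflow tricks. Since they are unexported, the sketch below re-derives the same expressions under local names (ctEq and ctLess are illustrative stand-ins) so the behavior can be spot-checked in isolation:

```go
package main

import "fmt"

// ctEq returns 1 when a == b and 0 otherwise without a data-dependent branch.
func ctEq(a, b uint32) uint32 {
	// a^b is 0 only when a == b; subtracting 1 then underflows the 64-bit
	// value, setting bit 63, which the shift extracts.
	return uint32((uint64(a^b) - 1) >> 63)
}

// ctLess returns 1 when a < b and 0 otherwise.
func ctLess(a, b uint32) uint32 {
	// The 64-bit subtraction borrows (sets bit 63) exactly when a < b.
	return uint32((uint64(a) - uint64(b)) >> 63)
}

func main() {
	fmt.Println(ctEq(7, 7), ctEq(7, 8))     // 1 0
	fmt.Println(ctLess(3, 9), ctLess(9, 3)) // 1 0
}
```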
+
+// overflows determines if the current scalar is greater than or equal to the
+// group order in constant time and returns 1 if it is or 0 otherwise.
+func (s *ModNScalar) overflows() uint32 {
+	// The intuition here is that the scalar is greater than the group order if
+	// one of the higher individual words is greater than corresponding word of
+	// the group order and all higher words in the scalar are equal to their
+	// corresponding word of the group order.  Since this type is modulo the
+	// group order, being equal is also an overflow back to 0.
+	//
+	// Note that the words 5, 6, and 7 are all the max uint32 value, so there is
+	// no need to test if those individual words of the scalar exceed them,
+	// hence, only equality is checked for them.
+	highWordsEqual := constantTimeEq(s.n[7], orderWordSeven)
+	highWordsEqual &= constantTimeEq(s.n[6], orderWordSix)
+	highWordsEqual &= constantTimeEq(s.n[5], orderWordFive)
+	overflow := highWordsEqual & constantTimeGreater(s.n[4], orderWordFour)
+	highWordsEqual &= constantTimeEq(s.n[4], orderWordFour)
+	overflow |= highWordsEqual & constantTimeGreater(s.n[3], orderWordThree)
+	highWordsEqual &= constantTimeEq(s.n[3], orderWordThree)
+	overflow |= highWordsEqual & constantTimeGreater(s.n[2], orderWordTwo)
+	highWordsEqual &= constantTimeEq(s.n[2], orderWordTwo)
+	overflow |= highWordsEqual & constantTimeGreater(s.n[1], orderWordOne)
+	highWordsEqual &= constantTimeEq(s.n[1], orderWordOne)
+	overflow |= highWordsEqual & constantTimeGreaterOrEq(s.n[0], orderWordZero)
+
+	return overflow
+}
+
+// reduce256 reduces the current scalar modulo the group order in accordance
+// with the overflows parameter in constant time.  The overflows parameter
+// specifies whether or not the scalar is known to be greater than the group
+// order and MUST either be 1 in the case it is or 0 in the case it is not for a
+// correct result.
+func (s *ModNScalar) reduce256(overflows uint32) {
+	// Notice that since s < 2^256 < 2N (where N is the group order), the max
+	// possible number of reductions required is one.  Therefore, in the case a
+	// reduction is needed, it can be performed with a single subtraction of N.
+	// Also, recall that subtraction is equivalent to addition by the two's
+	// complement while ignoring the carry.
+	//
+	// When s >= N, the overflows parameter will be 1.  Conversely, it will be 0
+	// when s < N.  Thus multiplying by the overflows parameter will either
+	// result in 0 or the multiplicand itself.
+	//
+	// Combining the above along with the fact that s + 0 = s, the following is
+	// a constant time implementation that works by either adding 0 or the two's
+	// complement of N as needed.
+	//
+	// The final result will be in the range 0 <= s < N as expected.
+	overflows64 := uint64(overflows)
+	c := uint64(s.n[0]) + overflows64*uint64(orderComplementWordZero)
+	s.n[0] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(s.n[1]) + overflows64*uint64(orderComplementWordOne)
+	s.n[1] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(s.n[2]) + overflows64*uint64(orderComplementWordTwo)
+	s.n[2] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(s.n[3]) + overflows64*uint64(orderComplementWordThree)
+	s.n[3] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(s.n[4]) + overflows64 // * 1
+	s.n[4] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(s.n[5]) // + overflows64 * 0
+	s.n[5] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(s.n[6]) // + overflows64 * 0
+	s.n[6] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(s.n[7]) // + overflows64 * 0
+	s.n[7] = uint32(c & uint32Mask)
+}
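The word-by-word carry chain above is the fixed-width form of a single identity: adding 2^256 - N and discarding the carry out of bit 256 is the same as subtracting N. A minimal big.Int sketch of that equivalence (illustrative only, not the package's code):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	n, _ := new(big.Int).SetString(
		"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
	two256 := new(big.Int).Lsh(big.NewInt(1), 256)
	nComplement := new(big.Int).Sub(two256, n) // two's complement of N mod 2^256

	// Any s with N <= s < 2^256 needs exactly one reduction.
	s := new(big.Int).Add(n, big.NewInt(123456789))

	// Subtracting N directly...
	direct := new(big.Int).Sub(s, n)

	// ...equals adding the complement and dropping the carry out of bit 256.
	viaAdd := new(big.Int).Add(s, nComplement)
	viaAdd.Mod(viaAdd, two256)

	fmt.Println(direct.Cmp(viaAdd) == 0) // true
}
```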
+
+// SetBytes interprets the provided array as a 256-bit big-endian unsigned
+// integer, reduces it modulo the group order, sets the scalar to the result,
+// and returns either 1 if it was reduced (aka it overflowed) or 0 otherwise in
+// constant time.
+//
+// Note that a bool is not used here because it is not possible in Go to convert
+// from a bool to a numeric value in constant time and many constant-time
+// operations require a numeric value.
+func (s *ModNScalar) SetBytes(b *[32]byte) uint32 {
+	// Pack the 256 total bits across the 8 uint32 words.  This could be done
+	// with a for loop, but benchmarks show this unrolled version is about 2
+	// times faster than the variant that uses a loop.
+	s.n[0] = uint32(b[31]) | uint32(b[30])<<8 | uint32(b[29])<<16 | uint32(b[28])<<24
+	s.n[1] = uint32(b[27]) | uint32(b[26])<<8 | uint32(b[25])<<16 | uint32(b[24])<<24
+	s.n[2] = uint32(b[23]) | uint32(b[22])<<8 | uint32(b[21])<<16 | uint32(b[20])<<24
+	s.n[3] = uint32(b[19]) | uint32(b[18])<<8 | uint32(b[17])<<16 | uint32(b[16])<<24
+	s.n[4] = uint32(b[15]) | uint32(b[14])<<8 | uint32(b[13])<<16 | uint32(b[12])<<24
+	s.n[5] = uint32(b[11]) | uint32(b[10])<<8 | uint32(b[9])<<16 | uint32(b[8])<<24
+	s.n[6] = uint32(b[7]) | uint32(b[6])<<8 | uint32(b[5])<<16 | uint32(b[4])<<24
+	s.n[7] = uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+
+	// The value might be >= N, so reduce it as required and return whether or
+	// not it was reduced.
+	needsReduce := s.overflows()
+	s.reduce256(needsReduce)
+	return needsReduce
+}
+
+// zeroArray32 zeroes the provided 32-byte buffer.
+func zeroArray32(b *[32]byte) {
+	copy(b[:], zero32[:])
+}
+
+// SetByteSlice interprets the provided slice as a 256-bit big-endian unsigned
+// integer (meaning it is truncated to the first 32 bytes), reduces it modulo
+// the group order, sets the scalar to the result, and returns whether or not
+// the resulting truncated 256-bit integer overflowed in constant time.
+//
+// Note that since a slice with more than 32 bytes is truncated, it is
+// possible that the truncated value is less than the order of the curve and
+// hence it will not be reported as having overflowed in that case.  It is up to
+// the caller to decide whether it needs to provide numbers of the appropriate
+// size or it is acceptable to use this function with the described truncation
+// and overflow behavior.
+func (s *ModNScalar) SetByteSlice(b []byte) bool {
+	var b32 [32]byte
+	b = b[:constantTimeMin(uint32(len(b)), 32)]
+	copy(b32[:], b32[:32-len(b)])
+	copy(b32[32-len(b):], b)
+	result := s.SetBytes(&b32)
+	zeroArray32(&b32)
+	return result != 0
+}
+
+// PutBytesUnchecked unpacks the scalar to a 32-byte big-endian value directly
+// into the passed byte slice in constant time.  The target slice must must have
+// at least 32 bytes available or it will panic.
+//
+// There is a similar function, PutBytes, which unpacks the scalar into a
+// 32-byte array directly.  This version is provided since it can be useful to
+// write directly into part of a larger buffer without needing a separate
+// allocation.
+//
+// Preconditions:
+//   - The target slice MUST have at least 32 bytes available
+func (s *ModNScalar) PutBytesUnchecked(b []byte) {
+	// Unpack the 256 total bits from the 8 uint32 words.  This could be done
+	// with a for loop, but benchmarks show this unrolled version is about 2
+	// times faster than the variant which uses a loop.
+	b[31] = byte(s.n[0])
+	b[30] = byte(s.n[0] >> 8)
+	b[29] = byte(s.n[0] >> 16)
+	b[28] = byte(s.n[0] >> 24)
+	b[27] = byte(s.n[1])
+	b[26] = byte(s.n[1] >> 8)
+	b[25] = byte(s.n[1] >> 16)
+	b[24] = byte(s.n[1] >> 24)
+	b[23] = byte(s.n[2])
+	b[22] = byte(s.n[2] >> 8)
+	b[21] = byte(s.n[2] >> 16)
+	b[20] = byte(s.n[2] >> 24)
+	b[19] = byte(s.n[3])
+	b[18] = byte(s.n[3] >> 8)
+	b[17] = byte(s.n[3] >> 16)
+	b[16] = byte(s.n[3] >> 24)
+	b[15] = byte(s.n[4])
+	b[14] = byte(s.n[4] >> 8)
+	b[13] = byte(s.n[4] >> 16)
+	b[12] = byte(s.n[4] >> 24)
+	b[11] = byte(s.n[5])
+	b[10] = byte(s.n[5] >> 8)
+	b[9] = byte(s.n[5] >> 16)
+	b[8] = byte(s.n[5] >> 24)
+	b[7] = byte(s.n[6])
+	b[6] = byte(s.n[6] >> 8)
+	b[5] = byte(s.n[6] >> 16)
+	b[4] = byte(s.n[6] >> 24)
+	b[3] = byte(s.n[7])
+	b[2] = byte(s.n[7] >> 8)
+	b[1] = byte(s.n[7] >> 16)
+	b[0] = byte(s.n[7] >> 24)
+}
+
+// PutBytes unpacks the scalar to a 32-byte big-endian value using the passed
+// byte array in constant time.
+//
+// There is a similar function, PutBytesUnchecked, which unpacks the scalar into
+// a slice that must have at least 32 bytes available.  This version is provided
+// since it can be useful to write directly into an array that is type checked.
+//
+// Alternatively, there is also Bytes, which unpacks the scalar into a new array
+// and returns it, which can sometimes be more ergonomic in applications that
+// aren't concerned about an additional copy.
+func (s *ModNScalar) PutBytes(b *[32]byte) {
+	s.PutBytesUnchecked(b[:])
+}
+
+// Bytes unpacks the scalar to a 32-byte big-endian value in constant time.
+//
+// See PutBytes and PutBytesUnchecked for variants that allow an array or slice
+// to be passed which can be useful to cut down on the number of allocations
+// by allowing the caller to reuse a buffer or write directly into part of a
+// larger buffer.
+func (s *ModNScalar) Bytes() [32]byte {
+	var b [32]byte
+	s.PutBytesUnchecked(b[:])
+	return b
+}
+
+// IsOdd returns whether or not the scalar is an odd number in constant time.
+func (s *ModNScalar) IsOdd() bool {
+	// Only odd numbers have the bottom bit set.
+	return s.n[0]&1 == 1
+}
+
+// Equals returns whether or not the two scalars are the same in constant time.
+func (s *ModNScalar) Equals(val *ModNScalar) bool {
+	// Xor only sets bits when they are different, so the two scalars can only
+	// be the same if no bits are set after xoring each word.
+	bits := (s.n[0] ^ val.n[0]) | (s.n[1] ^ val.n[1]) | (s.n[2] ^ val.n[2]) |
+		(s.n[3] ^ val.n[3]) | (s.n[4] ^ val.n[4]) | (s.n[5] ^ val.n[5]) |
+		(s.n[6] ^ val.n[6]) | (s.n[7] ^ val.n[7])
+
+	return bits == 0
+}
+
+// Add2 adds the passed two scalars together modulo the group order in constant
+// time and stores the result in s.
+//
+// The scalar is returned to support chaining.  This enables syntax like:
+// s3.Add2(s, s2).AddInt(1) so that s3 = s + s2 + 1.
+func (s *ModNScalar) Add2(val1, val2 *ModNScalar) *ModNScalar {
+	c := uint64(val1.n[0]) + uint64(val2.n[0])
+	s.n[0] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(val1.n[1]) + uint64(val2.n[1])
+	s.n[1] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(val1.n[2]) + uint64(val2.n[2])
+	s.n[2] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(val1.n[3]) + uint64(val2.n[3])
+	s.n[3] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(val1.n[4]) + uint64(val2.n[4])
+	s.n[4] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(val1.n[5]) + uint64(val2.n[5])
+	s.n[5] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(val1.n[6]) + uint64(val2.n[6])
+	s.n[6] = uint32(c & uint32Mask)
+	c = (c >> 32) + uint64(val1.n[7]) + uint64(val2.n[7])
+	s.n[7] = uint32(c & uint32Mask)
+
+	// The result is now 256 bits, but it might still be >= N, so use the
+	// existing normal reduce method for 256-bit values.
+	s.reduce256(uint32(c>>32) + s.overflows())
+	return s
+}
+
+// Add adds the passed scalar to the existing one modulo the group order in
+// constant time and stores the result in s.
+//
+// The scalar is returned to support chaining.  This enables syntax like:
+// s.Add(s2).AddInt(1) so that s = s + s2 + 1.
+func (s *ModNScalar) Add(val *ModNScalar) *ModNScalar {
+	return s.Add2(s, val)
+}
+
+// accumulator96 provides a 96-bit accumulator for use in the intermediate
+// calculations requiring more than 64-bits.
+type accumulator96 struct {
+	n [3]uint32
+}
+
+// Add adds the passed unsigned 64-bit value to the accumulator.
+func (a *accumulator96) Add(v uint64) {
+	low := uint32(v & uint32Mask)
+	hi := uint32(v >> 32)
+	a.n[0] += low
+	hi += constantTimeLess(a.n[0], low) // Carry if overflow in n[0].
+	a.n[1] += hi
+	a.n[2] += constantTimeLess(a.n[1], hi) // Carry if overflow in n[1].
+}
+
+// Rsh32 right shifts the accumulator by 32 bits.
+func (a *accumulator96) Rsh32() {
+	a.n[0] = a.n[1]
+	a.n[1] = a.n[2]
+	a.n[2] = 0
+}
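The need for a third word is easy to demonstrate: the sum of just two maximal 32x32-bit products already exceeds a uint64. The sketch below (standalone, using math/bits rather than the package's accumulator) makes the lost carry visible:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// A maximal 32x32-bit product: (2^32 - 1)^2.
	p := uint64(0xffffffff) * uint64(0xffffffff)

	// Naive uint64 addition of two such products silently wraps.
	wrapped := p + p

	// Tracking the carry with math/bits keeps the full 65-bit sum.
	low, carry := bits.Add64(p, p, 0)

	fmt.Printf("wrapped=%#x low=%#x carry=%d\n", wrapped, low, carry)
	// accumulator96 generalizes this (low, carry) bookkeeping to three
	// 32-bit words so many products can be summed per column.
}
```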
+
+// reduce385 reduces the 385-bit intermediate result in the passed terms modulo
+// the group order in constant time and stores the result in s.
+func (s *ModNScalar) reduce385(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12 uint64) {
+	// At this point, the intermediate result in the passed terms has been
+	// reduced to fit within 385 bits, so reduce it again using the same method
+	// described in reduce512.  As before, the intermediate result will end up
+	// being reduced by another 127 bits to 258 bits, thus 9 32-bit terms are
+	// needed for this iteration.  The reduced terms are assigned back to t0
+	// through t8.
+	//
+	// Note that several of the intermediate calculations require adding 64-bit
+	// products together which would overflow a uint64, so a 96-bit accumulator
+	// is used instead until the value is reduced enough to use native uint64s.
+
+	// Terms for 2^(32*0).
+	var acc accumulator96
+	acc.n[0] = uint32(t0) // == acc.Add(t0) because acc is guaranteed to be 0.
+	acc.Add(t8 * uint64(orderComplementWordZero))
+	t0 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*1).
+	acc.Add(t1)
+	acc.Add(t8 * uint64(orderComplementWordOne))
+	acc.Add(t9 * uint64(orderComplementWordZero))
+	t1 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*2).
+	acc.Add(t2)
+	acc.Add(t8 * uint64(orderComplementWordTwo))
+	acc.Add(t9 * uint64(orderComplementWordOne))
+	acc.Add(t10 * uint64(orderComplementWordZero))
+	t2 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*3).
+	acc.Add(t3)
+	acc.Add(t8 * uint64(orderComplementWordThree))
+	acc.Add(t9 * uint64(orderComplementWordTwo))
+	acc.Add(t10 * uint64(orderComplementWordOne))
+	acc.Add(t11 * uint64(orderComplementWordZero))
+	t3 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*4).
+	acc.Add(t4)
+	acc.Add(t8) // * uint64(orderComplementWordFour) // * 1
+	acc.Add(t9 * uint64(orderComplementWordThree))
+	acc.Add(t10 * uint64(orderComplementWordTwo))
+	acc.Add(t11 * uint64(orderComplementWordOne))
+	acc.Add(t12 * uint64(orderComplementWordZero))
+	t4 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*5).
+	acc.Add(t5)
+	// acc.Add(t8 * uint64(orderComplementWordFive)) // 0
+	acc.Add(t9) // * uint64(orderComplementWordFour) // * 1
+	acc.Add(t10 * uint64(orderComplementWordThree))
+	acc.Add(t11 * uint64(orderComplementWordTwo))
+	acc.Add(t12 * uint64(orderComplementWordOne))
+	t5 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*6).
+	acc.Add(t6)
+	// acc.Add(t8 * uint64(orderComplementWordSix)) // 0
+	// acc.Add(t9 * uint64(orderComplementWordFive)) // 0
+	acc.Add(t10) // * uint64(orderComplementWordFour) // * 1
+	acc.Add(t11 * uint64(orderComplementWordThree))
+	acc.Add(t12 * uint64(orderComplementWordTwo))
+	t6 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*7).
+	acc.Add(t7)
+	// acc.Add(t8 * uint64(orderComplementWordSeven)) // 0
+	// acc.Add(t9 * uint64(orderComplementWordSix)) // 0
+	// acc.Add(t10 * uint64(orderComplementWordFive)) // 0
+	acc.Add(t11) // * uint64(orderComplementWordFour) // * 1
+	acc.Add(t12 * uint64(orderComplementWordThree))
+	t7 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*8).
+	// acc.Add(t9 * uint64(orderComplementWordSeven)) // 0
+	// acc.Add(t10 * uint64(orderComplementWordSix)) // 0
+	// acc.Add(t11 * uint64(orderComplementWordFive)) // 0
+	acc.Add(t12) // * uint64(orderComplementWordFour) // * 1
+	t8 = uint64(acc.n[0])
+	// acc.Rsh32() // No need since not used after this.  Guaranteed to be 0.
+
+	// NOTE: All of the remaining multiplications for this iteration result in 0
+	// as they all involve multiplying by combinations of the fifth, sixth, and
+	// seventh words of the two's complement of N, which are 0, so skip them.
+
+	// At this point, the result is reduced to fit within 258 bits, so reduce it
+	// again using a slightly modified version of the same method.  The maximum
+	// value in t8 is 2 at this point and therefore multiplying it by each word
+	// of the two's complement of N and adding it to a 32-bit term will result
+	// in a maximum requirement of 33 bits, so it is safe to use native uint64s
+	// here for the intermediate term carry propagation.
+	//
+	// Also, since the maximum value in t8 is 2, this ends up reducing by
+	// another 2 bits to 256 bits.
+	c := t0 + t8*uint64(orderComplementWordZero)
+	s.n[0] = uint32(c & uint32Mask)
+	c = (c >> 32) + t1 + t8*uint64(orderComplementWordOne)
+	s.n[1] = uint32(c & uint32Mask)
+	c = (c >> 32) + t2 + t8*uint64(orderComplementWordTwo)
+	s.n[2] = uint32(c & uint32Mask)
+	c = (c >> 32) + t3 + t8*uint64(orderComplementWordThree)
+	s.n[3] = uint32(c & uint32Mask)
+	c = (c >> 32) + t4 + t8 // * uint64(orderComplementWordFour) == * 1
+	s.n[4] = uint32(c & uint32Mask)
+	c = (c >> 32) + t5 // + t8*uint64(orderComplementWordFive) == 0
+	s.n[5] = uint32(c & uint32Mask)
+	c = (c >> 32) + t6 // + t8*uint64(orderComplementWordSix) == 0
+	s.n[6] = uint32(c & uint32Mask)
+	c = (c >> 32) + t7 // + t8*uint64(orderComplementWordSeven) == 0
+	s.n[7] = uint32(c & uint32Mask)
+
+	// The result is now 256 bits, but it might still be >= N, so use the
+	// existing normal reduce method for 256-bit values.
+	s.reduce256(uint32(c>>32) + s.overflows())
+}
+
+// reduce512 reduces the 512-bit intermediate result in the passed terms modulo
+// the group order down to 385 bits in constant time and stores the result in s.
+func (s *ModNScalar) reduce512(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15 uint64) {
+	// At this point, the intermediate result in the passed terms is grouped
+	// into the respective bases.
+	//
+	// Per [HAC] section 14.3.4: Reduction method of moduli of special form,
+	// when the modulus is of the special form m = b^t - c, where log_2(c) < t,
+	// highly efficient reduction can be achieved per the provided algorithm.
+	//
+	// The secp256k1 group order fits this criterion since it is:
+	//   2^256 - 432420386565659656852420866394968145599
+	//
+	// Technically the max possible value here is (N-1)^2 since the two scalars
+	// being multiplied are always mod N.  Nevertheless, it is safer to consider
+	// it to be (2^256-1)^2 = 2^512 - 2^256 + 1 since it is the product of two
+	// 256-bit values.
+	//
+	// The algorithm is to reduce the result modulo the group order by subtracting
+	// multiples of the group order N.  However, in order to simplify carry
+	// propagation, this adds the two's complement of N to achieve the same
+	// result.
+	//
+	// Since the two's complement of N has 127 leading zero bits, this will end
+	// up reducing the intermediate result from 512 bits to 385 bits, resulting
+	// in 13 32-bit terms.  The reduced terms are assigned back to t0 through
+	// t12.
+	//
+	// Note that several of the intermediate calculations require adding 64-bit
+	// products together which would overflow a uint64, so a 96-bit accumulator
+	// is used instead.
+
+	// Terms for 2^(32*0).
+	var acc accumulator96
+	acc.n[0] = uint32(t0) // == acc.Add(t0) because acc is guaranteed to be 0.
+	acc.Add(t8 * uint64(orderComplementWordZero))
+	t0 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*1).
+	acc.Add(t1)
+	acc.Add(t8 * uint64(orderComplementWordOne))
+	acc.Add(t9 * uint64(orderComplementWordZero))
+	t1 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*2).
+	acc.Add(t2)
+	acc.Add(t8 * uint64(orderComplementWordTwo))
+	acc.Add(t9 * uint64(orderComplementWordOne))
+	acc.Add(t10 * uint64(orderComplementWordZero))
+	t2 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*3).
+	acc.Add(t3)
+	acc.Add(t8 * uint64(orderComplementWordThree))
+	acc.Add(t9 * uint64(orderComplementWordTwo))
+	acc.Add(t10 * uint64(orderComplementWordOne))
+	acc.Add(t11 * uint64(orderComplementWordZero))
+	t3 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*4).
+	acc.Add(t4)
+	acc.Add(t8) // * uint64(orderComplementWordFour) // * 1
+	acc.Add(t9 * uint64(orderComplementWordThree))
+	acc.Add(t10 * uint64(orderComplementWordTwo))
+	acc.Add(t11 * uint64(orderComplementWordOne))
+	acc.Add(t12 * uint64(orderComplementWordZero))
+	t4 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*5).
+	acc.Add(t5)
+	// acc.Add(t8 * uint64(orderComplementWordFive)) // 0
+	acc.Add(t9) // * uint64(orderComplementWordFour) // * 1
+	acc.Add(t10 * uint64(orderComplementWordThree))
+	acc.Add(t11 * uint64(orderComplementWordTwo))
+	acc.Add(t12 * uint64(orderComplementWordOne))
+	acc.Add(t13 * uint64(orderComplementWordZero))
+	t5 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*6).
+	acc.Add(t6)
+	// acc.Add(t8 * uint64(orderComplementWordSix)) // 0
+	// acc.Add(t9 * uint64(orderComplementWordFive)) // 0
+	acc.Add(t10) // * uint64(orderComplementWordFour)) // * 1
+	acc.Add(t11 * uint64(orderComplementWordThree))
+	acc.Add(t12 * uint64(orderComplementWordTwo))
+	acc.Add(t13 * uint64(orderComplementWordOne))
+	acc.Add(t14 * uint64(orderComplementWordZero))
+	t6 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*7).
+	acc.Add(t7)
+	// acc.Add(t8 * uint64(orderComplementWordSeven)) // 0
+	// acc.Add(t9 * uint64(orderComplementWordSix)) // 0
+	// acc.Add(t10 * uint64(orderComplementWordFive)) // 0
+	acc.Add(t11) // * uint64(orderComplementWordFour) // * 1
+	acc.Add(t12 * uint64(orderComplementWordThree))
+	acc.Add(t13 * uint64(orderComplementWordTwo))
+	acc.Add(t14 * uint64(orderComplementWordOne))
+	acc.Add(t15 * uint64(orderComplementWordZero))
+	t7 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*8).
+	// acc.Add(t9 * uint64(orderComplementWordSeven)) // 0
+	// acc.Add(t10 * uint64(orderComplementWordSix)) // 0
+	// acc.Add(t11 * uint64(orderComplementWordFive)) // 0
+	acc.Add(t12) // * uint64(orderComplementWordFour) // * 1
+	acc.Add(t13 * uint64(orderComplementWordThree))
+	acc.Add(t14 * uint64(orderComplementWordTwo))
+	acc.Add(t15 * uint64(orderComplementWordOne))
+	t8 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*9).
+	// acc.Add(t10 * uint64(orderComplementWordSeven)) // 0
+	// acc.Add(t11 * uint64(orderComplementWordSix)) // 0
+	// acc.Add(t12 * uint64(orderComplementWordFive)) // 0
+	acc.Add(t13) // * uint64(orderComplementWordFour) // * 1
+	acc.Add(t14 * uint64(orderComplementWordThree))
+	acc.Add(t15 * uint64(orderComplementWordTwo))
+	t9 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*10).
+	// acc.Add(t11 * uint64(orderComplementWordSeven)) // 0
+	// acc.Add(t12 * uint64(orderComplementWordSix)) // 0
+	// acc.Add(t13 * uint64(orderComplementWordFive)) // 0
+	acc.Add(t14) // * uint64(orderComplementWordFour) // * 1
+	acc.Add(t15 * uint64(orderComplementWordThree))
+	t10 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*11).
+	// acc.Add(t12 * uint64(orderComplementWordSeven)) // 0
+	// acc.Add(t13 * uint64(orderComplementWordSix)) // 0
+	// acc.Add(t14 * uint64(orderComplementWordFive)) // 0
+	acc.Add(t15) // * uint64(orderComplementWordFour) // * 1
+	t11 = uint64(acc.n[0])
+	acc.Rsh32()
+
+	// NOTE: All of the remaining multiplications for this iteration result in 0
+	// as they all involve multiplying by combinations of the fifth, sixth, and
+	// seventh words of the two's complement of N, which are 0, so skip them.
+
+	// Terms for 2^(32*12).
+	t12 = uint64(acc.n[0])
+	// acc.Rsh32() // No need since not used after this.  Guaranteed to be 0.
+
+	// At this point, the result is reduced to fit within 385 bits, so reduce it
+	// again using the same method accordingly.
+	s.reduce385(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12)
+}
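The two reduction passes above are a fixed-width unrolling of the [HAC] 14.3.4 idea for a modulus of the form 2^256 - c: split x into q*2^256 + r and fold the high half back in as q*c + r. A compact big.Int sketch of that idea (illustrative only, not the constant-time implementation):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	n, _ := new(big.Int).SetString(
		"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
	two256 := new(big.Int).Lsh(big.NewInt(1), 256)
	mask := new(big.Int).Sub(two256, big.NewInt(1))
	c := new(big.Int).Sub(two256, n) // N = 2^256 - c, log2(c) is about 129

	// A 512-bit sample value to reduce: (N-1)^2, the largest possible product.
	x := new(big.Int).Sub(n, big.NewInt(1))
	x.Mul(x, x)

	// While bits remain above 2^256, replace x with q*c + r where
	// x = q*2^256 + r.  Each pass removes roughly 127 bits.
	r := new(big.Int).Set(x)
	for r.BitLen() > 256 {
		q := new(big.Int).Rsh(r, 256)
		low := new(big.Int).And(r, mask)
		r.Mul(q, c).Add(r, low)
	}
	// A final conditional subtraction handles the r >= N case.
	if r.Cmp(n) >= 0 {
		r.Sub(r, n)
	}

	fmt.Println(r.Cmp(new(big.Int).Mod(x, n)) == 0) // true
}
```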
+
+// Mul2 multiplies the passed two scalars together modulo the group order in
+// constant time and stores the result in s.
+//
+// The scalar is returned to support chaining.  This enables syntax like:
+// s3.Mul2(s, s2).AddInt(1) so that s3 = (s * s2) + 1.
+func (s *ModNScalar) Mul2(val, val2 *ModNScalar) *ModNScalar {
+	// This could be done with for loops and an array to store the intermediate
+	// terms, but this unrolled version is significantly faster.
+
+	// The overall strategy employed here is:
+	// 1) Calculate the 512-bit product of the two scalars using the standard
+	//    pencil-and-paper method.
+	// 2) Reduce the result modulo the group order by effectively subtracting
+	//    multiples of the group order N (actually performed by adding multiples
+	//    of the two's complement of N to avoid implementing subtraction).
+	// 3) Repeat step 2 noting that each iteration reduces the required number
+	//    of bits by 127 because the two's complement of N has 127 leading zero
+	//    bits.
+	// 4) Once reduced to 256 bits, call the existing reduce method to perform
+	//    a final reduction as needed.
+	//
+	// Note that several of the intermediate calculations require adding 64-bit
+	// products together which would overflow a uint64, so a 96-bit accumulator
+	// is used instead.
+
+	// Terms for 2^(32*0).
+	var acc accumulator96
+	acc.Add(uint64(val.n[0]) * uint64(val2.n[0]))
+	t0 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*1).
+	acc.Add(uint64(val.n[0]) * uint64(val2.n[1]))
+	acc.Add(uint64(val.n[1]) * uint64(val2.n[0]))
+	t1 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*2).
+	acc.Add(uint64(val.n[0]) * uint64(val2.n[2]))
+	acc.Add(uint64(val.n[1]) * uint64(val2.n[1]))
+	acc.Add(uint64(val.n[2]) * uint64(val2.n[0]))
+	t2 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*3).
+	acc.Add(uint64(val.n[0]) * uint64(val2.n[3]))
+	acc.Add(uint64(val.n[1]) * uint64(val2.n[2]))
+	acc.Add(uint64(val.n[2]) * uint64(val2.n[1]))
+	acc.Add(uint64(val.n[3]) * uint64(val2.n[0]))
+	t3 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*4).
+	acc.Add(uint64(val.n[0]) * uint64(val2.n[4]))
+	acc.Add(uint64(val.n[1]) * uint64(val2.n[3]))
+	acc.Add(uint64(val.n[2]) * uint64(val2.n[2]))
+	acc.Add(uint64(val.n[3]) * uint64(val2.n[1]))
+	acc.Add(uint64(val.n[4]) * uint64(val2.n[0]))
+	t4 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*5).
+	acc.Add(uint64(val.n[0]) * uint64(val2.n[5]))
+	acc.Add(uint64(val.n[1]) * uint64(val2.n[4]))
+	acc.Add(uint64(val.n[2]) * uint64(val2.n[3]))
+	acc.Add(uint64(val.n[3]) * uint64(val2.n[2]))
+	acc.Add(uint64(val.n[4]) * uint64(val2.n[1]))
+	acc.Add(uint64(val.n[5]) * uint64(val2.n[0]))
+	t5 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*6).
+	acc.Add(uint64(val.n[0]) * uint64(val2.n[6]))
+	acc.Add(uint64(val.n[1]) * uint64(val2.n[5]))
+	acc.Add(uint64(val.n[2]) * uint64(val2.n[4]))
+	acc.Add(uint64(val.n[3]) * uint64(val2.n[3]))
+	acc.Add(uint64(val.n[4]) * uint64(val2.n[2]))
+	acc.Add(uint64(val.n[5]) * uint64(val2.n[1]))
+	acc.Add(uint64(val.n[6]) * uint64(val2.n[0]))
+	t6 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*7).
+	acc.Add(uint64(val.n[0]) * uint64(val2.n[7]))
+	acc.Add(uint64(val.n[1]) * uint64(val2.n[6]))
+	acc.Add(uint64(val.n[2]) * uint64(val2.n[5]))
+	acc.Add(uint64(val.n[3]) * uint64(val2.n[4]))
+	acc.Add(uint64(val.n[4]) * uint64(val2.n[3]))
+	acc.Add(uint64(val.n[5]) * uint64(val2.n[2]))
+	acc.Add(uint64(val.n[6]) * uint64(val2.n[1]))
+	acc.Add(uint64(val.n[7]) * uint64(val2.n[0]))
+	t7 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*8).
+	acc.Add(uint64(val.n[1]) * uint64(val2.n[7]))
+	acc.Add(uint64(val.n[2]) * uint64(val2.n[6]))
+	acc.Add(uint64(val.n[3]) * uint64(val2.n[5]))
+	acc.Add(uint64(val.n[4]) * uint64(val2.n[4]))
+	acc.Add(uint64(val.n[5]) * uint64(val2.n[3]))
+	acc.Add(uint64(val.n[6]) * uint64(val2.n[2]))
+	acc.Add(uint64(val.n[7]) * uint64(val2.n[1]))
+	t8 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*9).
+	acc.Add(uint64(val.n[2]) * uint64(val2.n[7]))
+	acc.Add(uint64(val.n[3]) * uint64(val2.n[6]))
+	acc.Add(uint64(val.n[4]) * uint64(val2.n[5]))
+	acc.Add(uint64(val.n[5]) * uint64(val2.n[4]))
+	acc.Add(uint64(val.n[6]) * uint64(val2.n[3]))
+	acc.Add(uint64(val.n[7]) * uint64(val2.n[2]))
+	t9 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*10).
+	acc.Add(uint64(val.n[3]) * uint64(val2.n[7]))
+	acc.Add(uint64(val.n[4]) * uint64(val2.n[6]))
+	acc.Add(uint64(val.n[5]) * uint64(val2.n[5]))
+	acc.Add(uint64(val.n[6]) * uint64(val2.n[4]))
+	acc.Add(uint64(val.n[7]) * uint64(val2.n[3]))
+	t10 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*11).
+	acc.Add(uint64(val.n[4]) * uint64(val2.n[7]))
+	acc.Add(uint64(val.n[5]) * uint64(val2.n[6]))
+	acc.Add(uint64(val.n[6]) * uint64(val2.n[5]))
+	acc.Add(uint64(val.n[7]) * uint64(val2.n[4]))
+	t11 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*12).
+	acc.Add(uint64(val.n[5]) * uint64(val2.n[7]))
+	acc.Add(uint64(val.n[6]) * uint64(val2.n[6]))
+	acc.Add(uint64(val.n[7]) * uint64(val2.n[5]))
+	t12 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*13).
+	acc.Add(uint64(val.n[6]) * uint64(val2.n[7]))
+	acc.Add(uint64(val.n[7]) * uint64(val2.n[6]))
+	t13 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// Terms for 2^(32*14).
+	acc.Add(uint64(val.n[7]) * uint64(val2.n[7]))
+	t14 := uint64(acc.n[0])
+	acc.Rsh32()
+
+	// What's left is for 2^(32*15).
+	t15 := uint64(acc.n[0])
+	// acc.Rsh32() // No need since not used after this.  Guaranteed to be 0.
+
+	// At this point, all of the terms are grouped into their respective base
+	// and occupy up to 512 bits.  Reduce the result accordingly.
+	s.reduce512(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14,
+		t15)
+	return s
+}
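A small usage sketch cross-checking Mul2 against math/big (arbitrary small inputs; the import path is the vendored module path of this package):

```go
package main

import (
	"fmt"
	"math/big"

	secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	n, _ := new(big.Int).SetString(
		"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)

	a := big.NewInt(1111111111)
	b := big.NewInt(2222222222)

	// Multiply with the fixed-precision scalar type.
	var sa, sb secp256k1.ModNScalar
	sa.SetByteSlice(a.Bytes())
	sb.SetByteSlice(b.Bytes())
	product := new(secp256k1.ModNScalar).Mul2(&sa, &sb).Bytes()

	// Multiply with math/big modulo the group order and compare.
	want := new(big.Int).Mul(a, b)
	want.Mod(want, n)
	fmt.Println(new(big.Int).SetBytes(product[:]).Cmp(want) == 0) // true
}
```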
+
+// Mul multiplies the passed scalar with the existing one modulo the group order
+// in constant time and stores the result in s.
+//
+// The scalar is returned to support chaining.  This enables syntax like:
+// s.Mul(s2).AddInt(1) so that s = (s * s2) + 1.
+func (s *ModNScalar) Mul(val *ModNScalar) *ModNScalar {
+	return s.Mul2(s, val)
+}
+
+// SquareVal squares the passed scalar modulo the group order in constant time
+// and stores the result in s.
+//
+// The scalar is returned to support chaining.  This enables syntax like:
+// s3.SquareVal(s).Mul(s) so that s3 = s^2 * s = s^3.
+func (s *ModNScalar) SquareVal(val *ModNScalar) *ModNScalar {
+	// This could technically be optimized slightly to take advantage of the
+	// fact that many of the intermediate calculations in squaring are just
+	// doubling, however, benchmarking has shown that due to the need to use a
+	// 96-bit accumulator, any savings are essentially offset by that and
+	// consequently there is no real difference in performance over just
+	// multiplying the value by itself to justify the extra code for now.  This
+	// can be revisited in the future if it becomes a bottleneck in practice.
+
+	return s.Mul2(val, val)
+}
+
+// Square squares the scalar modulo the group order in constant time.  The
+// existing scalar is modified.
+//
+// The scalar is returned to support chaining.  This enables syntax like:
+// s.Square().Mul(s2) so that s = s^2 * s2.
+func (s *ModNScalar) Square() *ModNScalar {
+	return s.SquareVal(s)
+}
+
+// NegateVal negates the passed scalar modulo the group order and stores the
+// result in s in constant time.
+//
+// The scalar is returned to support chaining.  This enables syntax like:
+// s.NegateVal(s2).AddInt(1) so that s = -s2 + 1.
+func (s *ModNScalar) NegateVal(val *ModNScalar) *ModNScalar {
+	// Since the scalar is already in the range 0 <= val < N, where N is the
+	// group order, negation modulo the group order is just the group order
+	// minus the value.  This implies that the result will always be in the
+	// desired range with the sole exception of 0 because N - 0 = N itself.
+	//
+	// Therefore, in order to avoid the need to reduce the result for every
+	// other case in order to achieve constant time, this creates a mask that is
+	// all 0s in the case the scalar being negated is 0 and all 1s otherwise
+	// and bitwise ands that mask with each word.
+	//
+	// Finally, to simplify the carry propagation, this adds the two's
+	// complement of the scalar to N in order to achieve the same result.
+	bits := val.n[0] | val.n[1] | val.n[2] | val.n[3] | val.n[4] | val.n[5] |
+		val.n[6] | val.n[7]
+	mask := uint64(uint32Mask * constantTimeNotEq(bits, 0))
+	c := uint64(orderWordZero) + (uint64(^val.n[0]) + 1)
+	s.n[0] = uint32(c & mask)
+	c = (c >> 32) + uint64(orderWordOne) + uint64(^val.n[1])
+	s.n[1] = uint32(c & mask)
+	c = (c >> 32) + uint64(orderWordTwo) + uint64(^val.n[2])
+	s.n[2] = uint32(c & mask)
+	c = (c >> 32) + uint64(orderWordThree) + uint64(^val.n[3])
+	s.n[3] = uint32(c & mask)
+	c = (c >> 32) + uint64(orderWordFour) + uint64(^val.n[4])
+	s.n[4] = uint32(c & mask)
+	c = (c >> 32) + uint64(orderWordFive) + uint64(^val.n[5])
+	s.n[5] = uint32(c & mask)
+	c = (c >> 32) + uint64(orderWordSix) + uint64(^val.n[6])
+	s.n[6] = uint32(c & mask)
+	c = (c >> 32) + uint64(orderWordSeven) + uint64(^val.n[7])
+	s.n[7] = uint32(c & mask)
+	return s
+}
+
+// Negate negates the scalar modulo the group order in constant time.  The
+// existing scalar is modified.
+//
+// The scalar is returned to support chaining.  This enables syntax like:
+// s.Negate().AddInt(1) so that s = -s + 1.
+func (s *ModNScalar) Negate() *ModNScalar {
+	return s.NegateVal(s)
+}
+
+// InverseValNonConst finds the modular multiplicative inverse of the passed
+// scalar and stores the result in s in *non-constant* time.
+//
+// The scalar is returned to support chaining.  This enables syntax like:
+// s3.InverseValNonConst(s1).Mul(s2) so that s3 = s1^-1 * s2.
+func (s *ModNScalar) InverseValNonConst(val *ModNScalar) *ModNScalar {
+	// This is making use of big integers for now.  Ideally it will be replaced
+	// with an implementation that does not depend on big integers.
+	valBytes := val.Bytes()
+	bigVal := new(big.Int).SetBytes(valBytes[:])
+	bigVal.ModInverse(bigVal, curveParams.N)
+	s.SetByteSlice(bigVal.Bytes())
+	return s
+}
+
+// InverseNonConst finds the modular multiplicative inverse of the scalar in
+// *non-constant* time.  The existing scalar is modified.
+//
+// The scalar is returned to support chaining.  This enables syntax like:
+// s.InverseNonConst().Mul(s2) so that s = s^-1 * s2.
+func (s *ModNScalar) InverseNonConst() *ModNScalar {
+	return s.InverseValNonConst(s)
+}
+
+// IsOverHalfOrder returns whether or not the scalar exceeds the group order
+// divided by 2 in constant time.
+func (s *ModNScalar) IsOverHalfOrder() bool {
+	// The intuition here is that the scalar is greater than half of the group
+	// order if one of the higher individual words is greater than the
+	// corresponding word of the half group order and all higher words in the
+	// scalar are equal to their corresponding word of the half group order.
+	//
+	// Note that the words 4, 5, and 6 are all the max uint32 value, so there is
+	// no need to test if those individual words of the scalar exceed them,
+	// hence, only equality is checked for them.
+	result := constantTimeGreater(s.n[7], halfOrderWordSeven)
+	highWordsEqual := constantTimeEq(s.n[7], halfOrderWordSeven)
+	highWordsEqual &= constantTimeEq(s.n[6], halfOrderWordSix)
+	highWordsEqual &= constantTimeEq(s.n[5], halfOrderWordFive)
+	highWordsEqual &= constantTimeEq(s.n[4], halfOrderWordFour)
+	result |= highWordsEqual & constantTimeGreater(s.n[3], halfOrderWordThree)
+	highWordsEqual &= constantTimeEq(s.n[3], halfOrderWordThree)
+	result |= highWordsEqual & constantTimeGreater(s.n[2], halfOrderWordTwo)
+	highWordsEqual &= constantTimeEq(s.n[2], halfOrderWordTwo)
+	result |= highWordsEqual & constantTimeGreater(s.n[1], halfOrderWordOne)
+	highWordsEqual &= constantTimeEq(s.n[1], halfOrderWordOne)
+	result |= highWordsEqual & constantTimeGreater(s.n[0], halfOrderWordZero)
+
+	return result != 0
+}
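
To see how the scalar helpers above fit together, here is a minimal usage sketch (illustrative only, not part of the vendored file). It assumes the SetInt, Add2, Mul2, and IsZero helpers defined elsewhere in modnscalar.go:

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	// An arbitrary small scalar; callers normally set full 32-byte values.
	var s secp256k1.ModNScalar
	s.SetInt(2)

	// -s mod N in constant time, and s^-1 mod N in non-constant time.
	neg := new(secp256k1.ModNScalar).NegateVal(&s)
	inv := new(secp256k1.ModNScalar).InverseValNonConst(&s)

	// s + (-s) reduces to zero modulo the group order.
	sum := new(secp256k1.ModNScalar).Add2(&s, neg)
	fmt.Println(sum.IsZero()) // true

	// s * s^-1 reduces to one; its 32-byte serialization ends in 0x01.
	prod := new(secp256k1.ModNScalar).Mul2(&s, inv)
	b := prod.Bytes()
	fmt.Println(b[31] == 0x01 && !prod.IsOverHalfOrder()) // true
}
```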
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go
new file mode 100644
index 0000000000..81b205d9c1
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go
@@ -0,0 +1,263 @@
+// Copyright (c) 2013-2014 The btcsuite developers
+// Copyright (c) 2015-2020 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package secp256k1
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"hash"
+)
+
+// References:
+//   [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone)
+//
+//   [ISO/IEC 8825-1]: Information technology — ASN.1 encoding rules:
+//     Specification of Basic Encoding Rules (BER), Canonical Encoding Rules
+//     (CER) and Distinguished Encoding Rules (DER)
+//
+//   [SEC1]: Elliptic Curve Cryptography (May 31, 2009, Version 2.0)
+//     https://www.secg.org/sec1-v2.pdf
+
+var (
+	// singleZero is used during RFC6979 nonce generation.  It is provided
+	// here to avoid the need to create it multiple times.
+	singleZero = []byte{0x00}
+
+	// zeroInitializer is used during RFC6979 nonce generation.  It is provided
+	// here to avoid the need to create it multiple times.
+	zeroInitializer = bytes.Repeat([]byte{0x00}, sha256.BlockSize)
+
+	// singleOne is used during RFC6979 nonce generation.  It is provided
+	// here to avoid the need to create it multiple times.
+	singleOne = []byte{0x01}
+
+	// oneInitializer is used during RFC6979 nonce generation.  It is provided
+	// here to avoid the need to create it multiple times.
+	oneInitializer = bytes.Repeat([]byte{0x01}, sha256.Size)
+)
+
+// hmacsha256 implements a resettable version of HMAC-SHA256.
+type hmacsha256 struct {
+	inner, outer hash.Hash
+	ipad, opad   [sha256.BlockSize]byte
+}
+
+// Write adds data to the running hash.
+func (h *hmacsha256) Write(p []byte) {
+	h.inner.Write(p)
+}
+
+// initKey initializes the HMAC-SHA256 instance to the provided key.
+func (h *hmacsha256) initKey(key []byte) {
+	// Hash the key if it is too large.
+	if len(key) > sha256.BlockSize {
+		h.outer.Write(key)
+		key = h.outer.Sum(nil)
+	}
+	copy(h.ipad[:], key)
+	copy(h.opad[:], key)
+	for i := range h.ipad {
+		h.ipad[i] ^= 0x36
+	}
+	for i := range h.opad {
+		h.opad[i] ^= 0x5c
+	}
+	h.inner.Write(h.ipad[:])
+}
+
+// ResetKey resets the HMAC-SHA256 to its initial state and then initializes it
+// with the provided key.  It is equivalent to creating a new instance with the
+// provided key without allocating more memory.
+func (h *hmacsha256) ResetKey(key []byte) {
+	h.inner.Reset()
+	h.outer.Reset()
+	copy(h.ipad[:], zeroInitializer)
+	copy(h.opad[:], zeroInitializer)
+	h.initKey(key)
+}
+
+// Reset resets the HMAC-SHA256 to its initial state using the current key.
+func (h *hmacsha256) Reset() {
+	h.inner.Reset()
+	h.inner.Write(h.ipad[:])
+}
+
+// Sum returns the hash of the written data.
+func (h *hmacsha256) Sum() []byte {
+	h.outer.Reset()
+	h.outer.Write(h.opad[:])
+	h.outer.Write(h.inner.Sum(nil))
+	return h.outer.Sum(nil)
+}
+
+// newHMACSHA256 returns a new HMAC-SHA256 hasher using the provided key.
+func newHMACSHA256(key []byte) *hmacsha256 {
+	h := new(hmacsha256)
+	h.inner = sha256.New()
+	h.outer = sha256.New()
+	h.initKey(key)
+	return h
+}
+
+// NonceRFC6979 generates a nonce deterministically according to RFC 6979 using
+// HMAC-SHA256 for the hashing function.  It takes a 32-byte hash as an input
+// and returns a 32-byte nonce to be used for deterministic signing.  The extra
+// and version arguments are optional, but allow additional data to be added to
+// the input of the HMAC.  When provided, the extra data must be 32 bytes and
+// the version must be 16 bytes or they will be ignored.
+//
+// Finally, the extraIterations parameter provides a method to produce a stream
+// of deterministic nonces to ensure the signing code is able to produce a nonce
+// that results in a valid signature in the extremely unlikely event the
+// original nonce produced results in an invalid signature (e.g. R == 0).
+// Signing code should start with 0 and increment it if necessary.
+func NonceRFC6979(privKey []byte, hash []byte, extra []byte, version []byte, extraIterations uint32) *ModNScalar {
+	// Input to HMAC is the 32-byte private key and the 32-byte hash.  In
+	// addition, it may include the optional 32-byte extra data and 16-byte
+	// version.  Create a fixed-size array to avoid extra allocs and slice it
+	// properly.
+	const (
+		privKeyLen = 32
+		hashLen    = 32
+		extraLen   = 32
+		versionLen = 16
+	)
+	var keyBuf [privKeyLen + hashLen + extraLen + versionLen]byte
+
+	// Truncate rightmost bytes of private key and hash if they are too long and
+	// leave left padding of zeros when they're too short.
+	if len(privKey) > privKeyLen {
+		privKey = privKey[:privKeyLen]
+	}
+	if len(hash) > hashLen {
+		hash = hash[:hashLen]
+	}
+	offset := privKeyLen - len(privKey) // Zero left padding if needed.
+	offset += copy(keyBuf[offset:], privKey)
+	offset += hashLen - len(hash) // Zero left padding if needed.
+	offset += copy(keyBuf[offset:], hash)
+	if len(extra) == extraLen {
+		offset += copy(keyBuf[offset:], extra)
+		if len(version) == versionLen {
+			offset += copy(keyBuf[offset:], version)
+		}
+	} else if len(version) == versionLen {
+		// When the version was specified, but not the extra data, leave the
+		// extra data portion all zero.
+		offset += privKeyLen
+		offset += copy(keyBuf[offset:], version)
+	}
+	key := keyBuf[:offset]
+
+	// Step B.
+	//
+	// V = 0x01 0x01 0x01 ... 0x01 such that the length of V, in bits, is
+	// equal to 8*ceil(hashLen/8).
+	//
+	// Note that since the hash length is a multiple of 8 for the chosen hash
+	// function in this optimized implementation, the result is just the hash
+	// length, so avoid the extra calculations.  Also, since it isn't modified,
+	// start with a global value.
+	v := oneInitializer
+
+	// Step C (Go zeroes all allocated memory).
+	//
+	// K = 0x00 0x00 0x00 ... 0x00 such that the length of K, in bits, is
+	// equal to 8*ceil(hashLen/8).
+	//
+	// As above, since the hash length is a multiple of 8 for the chosen hash
+	// function in this optimized implementation, the result is just the hash
+	// length, so avoid the extra calculations.
+	k := zeroInitializer[:hashLen]
+
+	// Step D.
+	//
+	// K = HMAC_K(V || 0x00 || int2octets(x) || bits2octets(h1))
+	//
+	// Note that key is the "int2octets(x) || bits2octets(h1)" portion along
+	// with potential additional data as described by section 3.6 of the RFC.
+	hasher := newHMACSHA256(k)
+	hasher.Write(oneInitializer)
+	hasher.Write(singleZero[:])
+	hasher.Write(key)
+	k = hasher.Sum()
+
+	// Step E.
+	//
+	// V = HMAC_K(V)
+	hasher.ResetKey(k)
+	hasher.Write(v)
+	v = hasher.Sum()
+
+	// Step F.
+	//
+	// K = HMAC_K(V || 0x01 || int2octets(x) || bits2octets(h1))
+	//
+	// Note that key is the "int2octets(x) || bits2octets(h1)" portion along
+	// with potential additional data as described by section 3.6 of the RFC.
+	hasher.Reset()
+	hasher.Write(v)
+	hasher.Write(singleOne[:])
+	hasher.Write(key[:])
+	k = hasher.Sum()
+
+	// Step G.
+	//
+	// V = HMAC_K(V)
+	hasher.ResetKey(k)
+	hasher.Write(v)
+	v = hasher.Sum()
+
+	// Step H.
+	//
+	// Repeat until the value is nonzero and less than the curve order.
+	var generated uint32
+	for {
+		// Step H1 and H2.
+		//
+		// Set T to the empty sequence.  The length of T (in bits) is denoted
+		// tlen; thus, at that point, tlen = 0.
+		//
+		// While tlen < qlen, do the following:
+		//   V = HMAC_K(V)
+		//   T = T || V
+		//
+		// Note that because the hash function output is the same length as the
+		// private key in this optimized implementation, there is no need to
+		// loop or create an intermediate T.
+		hasher.Reset()
+		hasher.Write(v)
+		v = hasher.Sum()
+
+		// Step H3.
+		//
+		// k = bits2int(T)
+		// If k is within the range [1,q-1], return it.
+		//
+		// Otherwise, compute:
+		// K = HMAC_K(V || 0x00)
+		// V = HMAC_K(V)
+		var secret ModNScalar
+		overflow := secret.SetByteSlice(v)
+		if !overflow && !secret.IsZero() {
+			generated++
+			if generated > extraIterations {
+				return &secret
+			}
+		}
+
+		// K = HMAC_K(V || 0x00)
+		hasher.Reset()
+		hasher.Write(v)
+		hasher.Write(singleZero[:])
+		k = hasher.Sum()
+
+		// V = HMAC_K(V)
+		hasher.ResetKey(k)
+		hasher.Write(v)
+		v = hasher.Sum()
+	}
+}
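
The nonce routine above is easiest to understand from the caller's side: identical inputs always produce the same nonce, and extraIterations walks a deterministic stream of fallback candidates. A hedged sketch with placeholder inputs (real code would use a securely generated key):

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	// Placeholder 32-byte private key and message hash, for illustration only.
	privKey := bytes.Repeat([]byte{0x01}, 32)
	msgHash := sha256.Sum256([]byte("example message"))

	// First candidate nonce from the RFC 6979 stream.
	k0 := secp256k1.NonceRFC6979(privKey, msgHash[:], nil, nil, 0)

	// Determinism: the same inputs always yield the same nonce.
	again := secp256k1.NonceRFC6979(privKey, msgHash[:], nil, nil, 0)

	// Next candidate, used only if the first produced an invalid signature.
	k1 := secp256k1.NonceRFC6979(privKey, msgHash[:], nil, nil, 1)

	a, b, c := k0.Bytes(), again.Bytes(), k1.Bytes()
	fmt.Println(a == b) // true
	fmt.Println(a == c) // false
}
```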
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go
new file mode 100644
index 0000000000..ca3e8da281
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go
@@ -0,0 +1,111 @@
+// Copyright (c) 2013-2014 The btcsuite developers
+// Copyright (c) 2015-2023 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package secp256k1
+
+import (
+	cryptorand "crypto/rand"
+	"io"
+)
+
+// PrivateKey provides facilities for working with secp256k1 private keys within
+// this package and includes functionality such as serializing and parsing them
+// as well as computing their associated public key.
+type PrivateKey struct {
+	Key ModNScalar
+}
+
+// NewPrivateKey instantiates a new private key from a scalar encoded as a
+// big integer.
+func NewPrivateKey(key *ModNScalar) *PrivateKey {
+	return &PrivateKey{Key: *key}
+}
+
+// PrivKeyFromBytes returns a private key based on the provided byte slice, which is
+// interpreted as an unsigned 256-bit big-endian integer in the range [0, N-1],
+// where N is the order of the curve.
+//
+// WARNING: This means a slice with more than 32 bytes is truncated and the
+// truncated value is reduced modulo N.  Further, 0 is not a valid private
+// key.  It is up to the caller to provide a value in the appropriate range of
+// [1, N-1].  Failure to do so will either result in an invalid private key or
+// potentially weak private keys that have bias that could be exploited.
+//
+// This function primarily exists to provide a mechanism for converting
+// serialized private keys that are already known to be good.
+//
+// Typically callers should make use of GeneratePrivateKey or
+// GeneratePrivateKeyFromRand when creating private keys since they properly
+// handle generation of appropriate values.
+func PrivKeyFromBytes(privKeyBytes []byte) *PrivateKey {
+	var privKey PrivateKey
+	privKey.Key.SetByteSlice(privKeyBytes)
+	return &privKey
+}
+
+// generatePrivateKey generates and returns a new private key that is suitable
+// for use with secp256k1 using the provided reader as a source of entropy.  The
+// provided reader must be a source of cryptographically secure randomness to
+// avoid weak private keys.
+func generatePrivateKey(rand io.Reader) (*PrivateKey, error) {
+	// The group order is close enough to 2^256 that there is only roughly a 1
+	// in 2^128 chance of generating an invalid private key, so this loop will
+	// virtually never run more than a single iteration in practice.
+	var key PrivateKey
+	var b32 [32]byte
+	for valid := false; !valid; {
+		if _, err := io.ReadFull(rand, b32[:]); err != nil {
+			return nil, err
+		}
+
+		// The private key is only valid when it is in the range [1, N-1], where
+		// N is the order of the curve.
+		overflow := key.Key.SetBytes(&b32)
+		valid = (key.Key.IsZeroBit() | overflow) == 0
+	}
+	zeroArray32(&b32)
+
+	return &key, nil
+}
+
+// GeneratePrivateKey generates and returns a new cryptographically secure
+// private key that is suitable for use with secp256k1.
+func GeneratePrivateKey() (*PrivateKey, error) {
+	return generatePrivateKey(cryptorand.Reader)
+}
+
+// GeneratePrivateKeyFromRand generates a private key that is suitable for use
+// with secp256k1 using the provided reader as a source of entropy.  The
+// provided reader must be a source of cryptographically secure randomness, such
+// as [crypto/rand.Reader], to avoid weak private keys.
+func GeneratePrivateKeyFromRand(rand io.Reader) (*PrivateKey, error) {
+	return generatePrivateKey(rand)
+}
+
+// PubKey computes and returns the public key corresponding to this private key.
+func (p *PrivateKey) PubKey() *PublicKey {
+	var result JacobianPoint
+	ScalarBaseMultNonConst(&p.Key, &result)
+	result.ToAffine()
+	return NewPublicKey(&result.X, &result.Y)
+}
+
+// Zero manually clears the memory associated with the private key.  This can be
+// used to explicitly clear key material from memory for enhanced security
+// against memory scraping.
+func (p *PrivateKey) Zero() {
+	p.Key.Zero()
+}
+
+// PrivKeyBytesLen defines the length in bytes of a serialized private key.
+const PrivKeyBytesLen = 32
+
+// Serialize returns the private key as a 256-bit big-endian binary-encoded
+// number, padded to a length of 32 bytes.
+func (p PrivateKey) Serialize() []byte {
+	var privKeyBytes [PrivKeyBytesLen]byte
+	p.Key.PutBytes(&privKeyBytes)
+	return privKeyBytes[:]
+}
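
A short sketch of the private-key lifecycle exposed above (illustrative only; output values are random per run):

```go
package main

import (
	"fmt"
	"log"

	"github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	priv, err := secp256k1.GeneratePrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	// Clear key material once it is no longer needed.
	defer priv.Zero()

	// 32-byte big-endian scalar.
	fmt.Printf("private: %x\n", priv.Serialize())

	// Compressed public key: 0x02/0x03 prefix plus the 32-byte X coordinate.
	fmt.Printf("public:  %x\n", priv.PubKey().SerializeCompressed())
}
```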
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go
new file mode 100644
index 0000000000..54c54be5f1
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go
@@ -0,0 +1,237 @@
+// Copyright (c) 2013-2014 The btcsuite developers
+// Copyright (c) 2015-2022 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package secp256k1
+
+// References:
+//   [SEC1] Elliptic Curve Cryptography
+//     https://www.secg.org/sec1-v2.pdf
+//
+//   [SEC2] Recommended Elliptic Curve Domain Parameters
+//     https://www.secg.org/sec2-v2.pdf
+//
+//   [ANSI X9.62-1998] Public Key Cryptography For The Financial Services
+//     Industry: The Elliptic Curve Digital Signature Algorithm (ECDSA)
+
+import (
+	"fmt"
+)
+
+const (
+	// PubKeyBytesLenCompressed is the number of bytes of a serialized
+	// compressed public key.
+	PubKeyBytesLenCompressed = 33
+
+	// PubKeyBytesLenUncompressed is the number of bytes of a serialized
+	// uncompressed public key.
+	PubKeyBytesLenUncompressed = 65
+
+	// PubKeyFormatCompressedEven is the identifier prefix byte for a public key
+	// whose Y coordinate is even when serialized in the compressed format per
+	// section 2.3.4 of [SEC1](https://secg.org/sec1-v2.pdf#subsubsection.2.3.4).
+	PubKeyFormatCompressedEven byte = 0x02
+
+	// PubKeyFormatCompressedOdd is the identifier prefix byte for a public key
+	// whose Y coordinate is odd when serialized in the compressed format per
+	// section 2.3.4 of [SEC1](https://secg.org/sec1-v2.pdf#subsubsection.2.3.4).
+	PubKeyFormatCompressedOdd byte = 0x03
+
+	// PubKeyFormatUncompressed is the identifier prefix byte for a public key
+	// when serialized according to the uncompressed format per section 2.3.3 of
+	// [SEC1](https://secg.org/sec1-v2.pdf#subsubsection.2.3.3).
+	PubKeyFormatUncompressed byte = 0x04
+
+	// PubKeyFormatHybridEven is the identifier prefix byte for a public key
+	// whose Y coordinate is even when serialized according to the hybrid format
+	// per section 4.3.6 of [ANSI X9.62-1998].
+	//
+	// NOTE: This format makes little sense in practice and therefore this
+	// package will not produce public keys serialized in this format.  However,
+	// it will parse them since they exist in the wild.
+	PubKeyFormatHybridEven byte = 0x06
+
+	// PubKeyFormatHybridOdd is the identifier prefix byte for a public key
+	// whose Y coordinate is odd when serialized according to the hybrid format
+	// per section 4.3.6 of [ANSI X9.62-1998].
+	//
+	// NOTE: This format makes little sense in practice and therefore this
+	// package will not produce public keys serialized in this format.  However,
+	// it will parse them since they exist in the wild.
+	PubKeyFormatHybridOdd byte = 0x07
+)
+
+// PublicKey provides facilities for efficiently working with secp256k1 public
+// keys within this package and includes functions to serialize in both
+// uncompressed and compressed SEC (Standards for Efficient Cryptography)
+// formats.
+type PublicKey struct {
+	x FieldVal
+	y FieldVal
+}
+
+// NewPublicKey instantiates a new public key with the given x and y
+// coordinates.
+//
+// It should be noted that, unlike ParsePubKey, since this accepts arbitrary x
+// and y coordinates, it allows creation of public keys that are not valid
+// points on the secp256k1 curve.  The IsOnCurve method of the returned instance
+// can be used to determine validity.
+func NewPublicKey(x, y *FieldVal) *PublicKey {
+	var pubKey PublicKey
+	pubKey.x.Set(x)
+	pubKey.y.Set(y)
+	return &pubKey
+}
+
+// ParsePubKey parses a secp256k1 public key encoded according to the format
+// specified by ANSI X9.62-1998, which means it is also compatible with the
+// SEC (Standards for Efficient Cryptography) specification which is a subset of
+// the former.  In other words, it supports the uncompressed, compressed, and
+// hybrid formats as follows:
+//
+// Compressed:
+//
+//	<format byte = 0x02/0x03><32-byte X coordinate>
+//
+// Uncompressed:
+//
+//	<format byte = 0x04><32-byte X coordinate><32-byte Y coordinate>
+//
+// Hybrid:
+//
+//	<format byte = 0x06/0x07><32-byte X coordinate><32-byte Y coordinate>
+//
+// NOTE: The hybrid format makes little sense in practice and therefore this
+// package will not produce public keys serialized in this format.  However,
+// this function will properly parse them since they exist in the wild.
+func ParsePubKey(serialized []byte) (key *PublicKey, err error) {
+	var x, y FieldVal
+	switch len(serialized) {
+	case PubKeyBytesLenUncompressed:
+		// Reject unsupported public key formats for the given length.
+		format := serialized[0]
+		switch format {
+		case PubKeyFormatUncompressed:
+		case PubKeyFormatHybridEven, PubKeyFormatHybridOdd:
+		default:
+			str := fmt.Sprintf("invalid public key: unsupported format: %x",
+				format)
+			return nil, makeError(ErrPubKeyInvalidFormat, str)
+		}
+
+		// Parse the x and y coordinates while ensuring that they are in the
+		// allowed range.
+		if overflow := x.SetByteSlice(serialized[1:33]); overflow {
+			str := "invalid public key: x >= field prime"
+			return nil, makeError(ErrPubKeyXTooBig, str)
+		}
+		if overflow := y.SetByteSlice(serialized[33:]); overflow {
+			str := "invalid public key: y >= field prime"
+			return nil, makeError(ErrPubKeyYTooBig, str)
+		}
+
+		// Ensure the oddness of the y coordinate matches the specified format
+		// for hybrid public keys.
+		if format == PubKeyFormatHybridEven || format == PubKeyFormatHybridOdd {
+			wantOddY := format == PubKeyFormatHybridOdd
+			if y.IsOdd() != wantOddY {
+				str := fmt.Sprintf("invalid public key: y oddness does not "+
+					"match specified value of %v", wantOddY)
+				return nil, makeError(ErrPubKeyMismatchedOddness, str)
+			}
+		}
+
+		// Reject public keys that are not on the secp256k1 curve.
+		if !isOnCurve(&x, &y) {
+			str := fmt.Sprintf("invalid public key: [%v,%v] not on secp256k1 "+
+				"curve", x, y)
+			return nil, makeError(ErrPubKeyNotOnCurve, str)
+		}
+
+	case PubKeyBytesLenCompressed:
+		// Reject unsupported public key formats for the given length.
+		format := serialized[0]
+		switch format {
+		case PubKeyFormatCompressedEven, PubKeyFormatCompressedOdd:
+		default:
+			str := fmt.Sprintf("invalid public key: unsupported format: %x",
+				format)
+			return nil, makeError(ErrPubKeyInvalidFormat, str)
+		}
+
+		// Parse the x coordinate while ensuring that it is in the allowed
+		// range.
+		if overflow := x.SetByteSlice(serialized[1:33]); overflow {
+			str := "invalid public key: x >= field prime"
+			return nil, makeError(ErrPubKeyXTooBig, str)
+		}
+
+		// Attempt to calculate the y coordinate for the given x coordinate such
+		// that the result pair is a point on the secp256k1 curve and the
+		// solution with desired oddness is chosen.
+		wantOddY := format == PubKeyFormatCompressedOdd
+		if !DecompressY(&x, wantOddY, &y) {
+			str := fmt.Sprintf("invalid public key: x coordinate %v is not on "+
+				"the secp256k1 curve", x)
+			return nil, makeError(ErrPubKeyNotOnCurve, str)
+		}
+		y.Normalize()
+
+	default:
+		str := fmt.Sprintf("malformed public key: invalid length: %d",
+			len(serialized))
+		return nil, makeError(ErrPubKeyInvalidLen, str)
+	}
+
+	return NewPublicKey(&x, &y), nil
+}
+
+// SerializeUncompressed serializes a public key in the 65-byte uncompressed
+// format.
+func (p PublicKey) SerializeUncompressed() []byte {
+	// 0x04 || 32-byte x coordinate || 32-byte y coordinate
+	var b [PubKeyBytesLenUncompressed]byte
+	b[0] = PubKeyFormatUncompressed
+	p.x.PutBytesUnchecked(b[1:33])
+	p.y.PutBytesUnchecked(b[33:65])
+	return b[:]
+}
+
+// SerializeCompressed serializes a public key in the 33-byte compressed format.
+func (p PublicKey) SerializeCompressed() []byte {
+	// Choose the format byte depending on the oddness of the Y coordinate.
+	format := PubKeyFormatCompressedEven
+	if p.y.IsOdd() {
+		format = PubKeyFormatCompressedOdd
+	}
+
+	// 0x02 or 0x03 || 32-byte x coordinate
+	var b [PubKeyBytesLenCompressed]byte
+	b[0] = format
+	p.x.PutBytesUnchecked(b[1:33])
+	return b[:]
+}
+
+// IsEqual compares this public key instance to the one passed, returning true
+// if both public keys are equivalent.  A public key is equivalent to another
+// if they both have the same X and Y coordinates.
+func (p *PublicKey) IsEqual(otherPubKey *PublicKey) bool {
+	return p.x.Equals(&otherPubKey.x) && p.y.Equals(&otherPubKey.y)
+}
+
+// AsJacobian converts the public key into a Jacobian point with Z=1 and stores
+// the result in the provided result param.  This allows the public key to be
+// treated as a Jacobian point in the secp256k1 group in calculations.
+func (p *PublicKey) AsJacobian(result *JacobianPoint) {
+	result.X.Set(&p.x)
+	result.Y.Set(&p.y)
+	result.Z.SetInt(1)
+}
+
+// IsOnCurve returns whether or not the public key represents a point on the
+// secp256k1 curve.
+func (p *PublicKey) IsOnCurve() bool {
+	return isOnCurve(&p.x, &p.y)
+}
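
And the matching round trip through the public-key encodings above (again illustrative; it leans on GeneratePrivateKey from privkey.go in this same diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	priv, err := secp256k1.GeneratePrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	pub := priv.PubKey()

	// ParsePubKey recomputes Y from X for compressed keys via DecompressY and
	// rejects encodings that are not points on the secp256k1 curve.
	compressed := pub.SerializeCompressed() // 33 bytes
	parsed, err := secp256k1.ParsePubKey(compressed)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(parsed.IsEqual(pub))              // true
	fmt.Println(len(pub.SerializeUncompressed())) // 65
}
```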
diff --git a/vendor/github.com/digitorus/pkcs7/Makefile b/vendor/github.com/digitorus/pkcs7/Makefile
index 47c73b8684..07c78e14c0 100644
--- a/vendor/github.com/digitorus/pkcs7/Makefile
+++ b/vendor/github.com/digitorus/pkcs7/Makefile
@@ -1,7 +1,7 @@
 all: vet staticcheck test
 
 test:
-	go test -covermode=count -coverprofile=coverage.out .
+	GODEBUG=x509sha1=1 go test -covermode=count -coverprofile=coverage.out .
 
 showcoverage: test
 	go tool cover -html=coverage.out
diff --git a/vendor/github.com/digitorus/pkcs7/ber.go b/vendor/github.com/digitorus/pkcs7/ber.go
index 73da024a0d..31963b119f 100644
--- a/vendor/github.com/digitorus/pkcs7/ber.go
+++ b/vendor/github.com/digitorus/pkcs7/ber.go
@@ -5,8 +5,6 @@ import (
 	"errors"
 )
 
-var encodeIndent = 0
-
 type asn1Object interface {
 	EncodeTo(writer *bytes.Buffer) error
 }
@@ -18,7 +16,6 @@ type asn1Structured struct {
 
 func (s asn1Structured) EncodeTo(out *bytes.Buffer) error {
 	//fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes)
-	encodeIndent++
 	inner := new(bytes.Buffer)
 	for _, obj := range s.content {
 		err := obj.EncodeTo(inner)
@@ -26,7 +23,6 @@ func (s asn1Structured) EncodeTo(out *bytes.Buffer) error {
 			return err
 		}
 	}
-	encodeIndent--
 	out.Write(s.tagBytes)
 	encodeLength(out, inner.Len())
 	out.Write(inner.Bytes())
@@ -67,10 +63,6 @@ func ber2der(ber []byte) ([]byte, error) {
 	}
 	obj.EncodeTo(out)
 
-	// if offset < len(ber) {
-	//	return nil, fmt.Errorf("ber2der: Content longer than expected. Got %d, expected %d", offset, len(ber))
-	//}
-
 	return out.Bytes(), nil
 }
 
@@ -149,14 +141,14 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
 		for ber[offset] >= 0x80 {
 			tag = tag*128 + ber[offset] - 0x80
 			offset++
-			if offset > berLen {
+			if offset >= berLen {
 				return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
 			}
 		}
 		// jvehent 20170227: this doesn't appear to be used anywhere...
 		//tag = tag*128 + ber[offset] - 0x80
 		offset++
-		if offset > berLen {
+		if offset >= berLen {
 			return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
 		}
 	}
@@ -172,7 +164,10 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
 	var length int
 	l := ber[offset]
 	offset++
-	if offset > berLen {
+	if l >= 0x80 && offset >= berLen {
+		// if indefinite or multibyte length, we need to verify there is at least one more byte available
+		// otherwise we need to be flexible here for length == 0 conditions
+		// validation that the length is available is done after the length is correctly parsed
 		return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
 	}
 	indefinite := false
@@ -184,17 +179,20 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
 		if numberOfBytes == 4 && (int)(ber[offset]) > 0x7F {
 			return nil, 0, errors.New("ber2der: BER tag length is negative")
 		}
-		if (int)(ber[offset]) == 0x0 {
+		if offset + numberOfBytes > berLen {
+			// == condition is not checked here; this allows for a more descriptive error when the parsed length is
+			// compared with the remaining available bytes (`contentEnd > berLen`)
+			return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+		}
+		if (int)(ber[offset]) == 0x0 && (numberOfBytes == 1 || ber[offset+1] <= 0x7F)  {
+			// `numberOfBytes == 1` is an important conditional to avoid a potential out of bounds panic with `ber[offset+1]`
 			return nil, 0, errors.New("ber2der: BER tag length has leading zero")
 		}
 		debugprint("--> (compute length) indicator byte: %x\n", l)
-		debugprint("--> (compute length) length bytes: % X\n", ber[offset:offset+numberOfBytes])
+		//debugprint("--> (compute length) length bytes: %x\n", ber[offset:offset+numberOfBytes])
 		for i := 0; i < numberOfBytes; i++ {
 			length = length*256 + (int)(ber[offset])
 			offset++
-			if offset > berLen {
-				return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
-			}
 		}
 	} else if l == 0x80 {
 		indefinite = true
@@ -206,12 +204,12 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
 	}
 	//fmt.Printf("--> length        : %d\n", length)
 	contentEnd := offset + length
-	if contentEnd > len(ber) {
+	if contentEnd > berLen {
 		return nil, 0, errors.New("ber2der: BER tag length is more than available data")
 	}
 	debugprint("--> content start : %d\n", offset)
 	debugprint("--> content end   : %d\n", contentEnd)
-	debugprint("--> content       : % X\n", ber[offset:contentEnd])
+	//debugprint("--> content       : %x\n", ber[offset:contentEnd])
 	var obj asn1Object
 	if indefinite && kind == 0 {
 		return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding")
diff --git a/vendor/github.com/digitorus/pkcs7/pkcs7.go b/vendor/github.com/digitorus/pkcs7/pkcs7.go
index 0b74dff9b7..aca3c53f43 100644
--- a/vendor/github.com/digitorus/pkcs7/pkcs7.go
+++ b/vendor/github.com/digitorus/pkcs7/pkcs7.go
@@ -101,9 +101,9 @@ func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) {
 	return crypto.Hash(0), ErrUnsupportedAlgorithm
 }
 
-// getDigestOIDForSignatureAlgorithm takes an x509.SignatureAlgorithm
+// GetDigestOIDForSignatureAlgorithm takes an x509.SignatureAlgorithm
 // and returns the corresponding OID digest algorithm
-func getDigestOIDForSignatureAlgorithm(digestAlg x509.SignatureAlgorithm) (asn1.ObjectIdentifier, error) {
+func GetDigestOIDForSignatureAlgorithm(digestAlg x509.SignatureAlgorithm) (asn1.ObjectIdentifier, error) {
 	switch digestAlg {
 	case x509.SHA1WithRSA, x509.ECDSAWithSHA1:
 		return OIDDigestAlgorithmSHA1, nil
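
Exporting this lookup lets callers derive the digest algorithm OID from a certificate's signature algorithm instead of hard-coding SHA-256, which is how the timestamp change later in this diff uses it. A minimal sketch, assuming the switch covers the common x509 algorithms as the hunk above suggests:

```go
package main

import (
	"crypto/x509"
	"fmt"
	"log"

	"github.com/digitorus/pkcs7"
)

func main() {
	// In timestamp.go this argument is certificate.SignatureAlgorithm; a
	// constant is used here purely for illustration.
	oid, err := pkcs7.GetDigestOIDForSignatureAlgorithm(x509.SHA256WithRSA)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(oid) // digest algorithm OID matching the signature algorithm
}
```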
diff --git a/vendor/github.com/digitorus/timestamp/rfc3161_struct.go b/vendor/github.com/digitorus/timestamp/rfc3161_struct.go
index 04060dd020..c5692253c5 100644
--- a/vendor/github.com/digitorus/timestamp/rfc3161_struct.go
+++ b/vendor/github.com/digitorus/timestamp/rfc3161_struct.go
@@ -45,7 +45,7 @@ func (s pkiStatusInfo) FailureInfo() FailureInfo {
 		}
 	}
 
-	return UnkownFailureInfo
+	return UnknownFailureInfo
 }
 
 // eContent within SignedData is TSTInfo
diff --git a/vendor/github.com/digitorus/timestamp/timestamp.go b/vendor/github.com/digitorus/timestamp/timestamp.go
index 8069cad8fc..ba8c7517ec 100644
--- a/vendor/github.com/digitorus/timestamp/timestamp.go
+++ b/vendor/github.com/digitorus/timestamp/timestamp.go
@@ -23,8 +23,8 @@ import (
 type FailureInfo int
 
 const (
-	// UnkownFailureInfo mean that no known failure info was provided
-	UnkownFailureInfo FailureInfo = -1
+	// UnknownFailureInfo means that no known failure info was provided
+	UnknownFailureInfo FailureInfo = -1
 	// BadAlgorithm defines an unrecognized or unsupported Algorithm Identifier
 	BadAlgorithm FailureInfo = 0
 	// BadRequest indicates that the transaction not permitted or supported
@@ -268,7 +268,7 @@ func ParseResponse(bytes []byte) (*Timestamp, error) {
 	if resp.Status.Status > 0 {
 		var fis string
 		fi := resp.Status.FailureInfo()
-		if fi != UnkownFailureInfo {
+		if fi != UnknownFailureInfo {
 			fis = fi.String()
 		}
 		return nil, fmt.Errorf("%s: %s (%v)",
@@ -553,7 +553,7 @@ func (t *Timestamp) populateSigningCertificateV2Ext(certificate *x509.Certificat
 		return nil, x509.ErrUnsupportedAlgorithm
 	}
 	if t.HashAlgorithm.HashFunc() == crypto.SHA1 {
-		return nil, fmt.Errorf("for SHA1 usae ESSCertID instead of ESSCertIDv2")
+		return nil, fmt.Errorf("for SHA1 use ESSCertID instead of ESSCertIDv2")
 	}
 
 	h := t.HashAlgorithm.HashFunc().New()
@@ -596,7 +596,13 @@ func (t *Timestamp) generateSignedData(tstInfo []byte, signer crypto.Signer, cer
 	if err != nil {
 		return nil, err
 	}
-	signedData.SetDigestAlgorithm(pkcs7.OIDDigestAlgorithmSHA256)
+
+	digestAlgOID, err := pkcs7.GetDigestOIDForSignatureAlgorithm(certificate.SignatureAlgorithm)
+	if err != nil {
+		return nil, err
+	}
+
+	signedData.SetDigestAlgorithm(digestAlgOID)
 	signedData.SetContentType(asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 16, 1, 4})
 
 	signingCertV2Bytes, err := t.populateSigningCertificateV2Ext(certificate)
@@ -616,7 +622,11 @@ func (t *Timestamp) generateSignedData(tstInfo []byte, signer crypto.Signer, cer
 		signerInfoConfig.SkipCertificates = true
 	}
 
-	err = signedData.AddSigner(certificate, signer, signerInfoConfig)
+	if len(t.Certificates) > 0 {
+		err = signedData.AddSignerChain(certificate, signer, t.Certificates, signerInfoConfig)
+	} else {
+		err = signedData.AddSigner(certificate, signer, signerInfoConfig)
+	}
 	if err != nil {
 		return nil, err
 	}
@@ -628,7 +638,7 @@ func (t *Timestamp) generateSignedData(tstInfo []byte, signer crypto.Signer, cer
 	return signature, nil
 }
 
-// copied from cryto/x509 package
+// copied from crypto/x509 package
 // oidNotInExtensions reports whether an extension with the given oid exists in
 // extensions.
 func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool {
diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml
new file mode 100644
index 0000000000..ac12e485a1
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/.travis.yml
@@ -0,0 +1,21 @@
+sudo: false
+language: go
+go_import_path: github.com/dustin/go-humanize
+go:
+  - 1.13.x
+  - 1.14.x
+  - 1.15.x
+  - 1.16.x
+  - stable
+  - master
+matrix:
+  allow_failures:
+    - go: master
+  fast_finish: true
+install:
+  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+  - diff -u <(echo -n) <(gofmt -d -s .)
+  - go vet .
+  - go install -v -race ./...
+  - go test -v -race ./...
diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE
new file mode 100644
index 0000000000..8d9a94a906
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) 2005-2008  Dustin Sallings <dustin@spy.net>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+<http://www.opensource.org/licenses/mit-license.php>
diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown
new file mode 100644
index 0000000000..7d0b16b34f
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/README.markdown
@@ -0,0 +1,124 @@
+# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
+
+Just a few functions for helping humanize times and sizes.
+
+`go get` it as `github.com/dustin/go-humanize`, import it as
+`"github.com/dustin/go-humanize"`, use it as `humanize`.
+
+See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for
+complete documentation.
+
+## Sizes
+
+This lets you take numbers like `82854982` and convert them to useful
+strings like `83 MB` or `79 MiB` (whichever you prefer).
+
+Example:
+
+```go
+fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
+```
+
+## Times
+
+This lets you take a `time.Time` and spit it out in relative terms.
+For example, `12 seconds ago` or `3 days from now`.
+
+Example:
+
+```go
+fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
+```
+
+Thanks to Kyle Lemons for the time implementation from an IRC
+conversation one day. It's pretty neat.
+
+## Ordinals
+
+From a [mailing list discussion][odisc] where a user wanted to be able
+to label ordinals.
+
+    0 -> 0th
+    1 -> 1st
+    2 -> 2nd
+    3 -> 3rd
+    4 -> 4th
+    [...]
+
+Example:
+
+```go
+fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
+```
+
+## Commas
+
+Want to shove commas into numbers? Be my guest.
+
+    0 -> 0
+    100 -> 100
+    1000 -> 1,000
+    1000000000 -> 1,000,000,000
+    -100000 -> -100,000
+
+Example:
+
+```go
+fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
+```
+
+## Ftoa
+
+Nicer float64 formatter that removes trailing zeros.
+
+```go
+fmt.Printf("%f", 2.24)                // 2.240000
+fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
+fmt.Printf("%f", 2.0)                 // 2.000000
+fmt.Printf("%s", humanize.Ftoa(2.0))  // 2
+```
+
+## SI notation
+
+Format numbers with [SI notation][sinotation].
+
+Example:
+
+```go
+humanize.SI(0.00000000223, "M") // 2.23 nM
+```
+
+## English-specific functions
+
+The following functions are in the `humanize/english` subpackage.
+
+### Plurals
+
+Simple English pluralization
+
+```go
+english.PluralWord(1, "object", "") // object
+english.PluralWord(42, "object", "") // objects
+english.PluralWord(2, "bus", "") // buses
+english.PluralWord(99, "locus", "loci") // loci
+
+english.Plural(1, "object", "") // 1 object
+english.Plural(42, "object", "") // 42 objects
+english.Plural(2, "bus", "") // 2 buses
+english.Plural(99, "locus", "loci") // 99 loci
+```
+
+### Word series
+
+Format comma-separated word lists with conjunctions:
+
+```go
+english.WordSeries([]string{"foo"}, "and") // foo
+english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
+english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
+
+english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
+```
+
+[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
+[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go
new file mode 100644
index 0000000000..f49dc337dc
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/big.go
@@ -0,0 +1,31 @@
+package humanize
+
+import (
+	"math/big"
+)
+
+// order of magnitude (to a max order)
+func oomm(n, b *big.Int, maxmag int) (float64, int) {
+	mag := 0
+	m := &big.Int{}
+	for n.Cmp(b) >= 0 {
+		n.DivMod(n, b, m)
+		mag++
+		if mag == maxmag && maxmag >= 0 {
+			break
+		}
+	}
+	return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
+
+// total order of magnitude
+// (same as above, but with no upper limit)
+func oom(n, b *big.Int) (float64, int) {
+	mag := 0
+	m := &big.Int{}
+	for n.Cmp(b) >= 0 {
+		n.DivMod(n, b, m)
+		mag++
+	}
+	return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go
new file mode 100644
index 0000000000..3b015fd59e
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bigbytes.go
@@ -0,0 +1,189 @@
+package humanize
+
+import (
+	"fmt"
+	"math/big"
+	"strings"
+	"unicode"
+)
+
+var (
+	bigIECExp = big.NewInt(1024)
+
+	// BigByte is one byte in big.Ints
+	BigByte = big.NewInt(1)
+	// BigKiByte is 1,024 bytes in big.Ints
+	BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
+	// BigMiByte is 1,024 k bytes in big.Ints
+	BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
+	// BigGiByte is 1,024 m bytes in big.Ints
+	BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
+	// BigTiByte is 1,024 g bytes in big.Ints
+	BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
+	// BigPiByte is 1,024 t bytes in big.Ints
+	BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
+	// BigEiByte is 1,024 p bytes in big.Ints
+	BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
+	// BigZiByte is 1,024 e bytes in big.Ints
+	BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
+	// BigYiByte is 1,024 z bytes in big.Ints
+	BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
+	// BigRiByte is 1,024 y bytes in big.Ints
+	BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp)
+	// BigQiByte is 1,024 r bytes in big.Ints
+	BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp)
+)
+
+var (
+	bigSIExp = big.NewInt(1000)
+
+	// BigSIByte is one SI byte in big.Ints
+	BigSIByte = big.NewInt(1)
+	// BigKByte is 1,000 SI bytes in big.Ints
+	BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
+	// BigMByte is 1,000 SI k bytes in big.Ints
+	BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
+	// BigGByte is 1,000 SI m bytes in big.Ints
+	BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
+	// BigTByte is 1,000 SI g bytes in big.Ints
+	BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
+	// BigPByte is 1,000 SI t bytes in big.Ints
+	BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
+	// BigEByte is 1,000 SI p bytes in big.Ints
+	BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
+	// BigZByte is 1,000 SI e bytes in big.Ints
+	BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
+	// BigYByte is 1,000 SI z bytes in big.Ints
+	BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
+	// BigRByte is 1,000 SI y bytes in big.Ints
+	BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp)
+	// BigQByte is 1,000 SI r bytes in big.Ints
+	BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp)
+)
+
+var bigBytesSizeTable = map[string]*big.Int{
+	"b":   BigByte,
+	"kib": BigKiByte,
+	"kb":  BigKByte,
+	"mib": BigMiByte,
+	"mb":  BigMByte,
+	"gib": BigGiByte,
+	"gb":  BigGByte,
+	"tib": BigTiByte,
+	"tb":  BigTByte,
+	"pib": BigPiByte,
+	"pb":  BigPByte,
+	"eib": BigEiByte,
+	"eb":  BigEByte,
+	"zib": BigZiByte,
+	"zb":  BigZByte,
+	"yib": BigYiByte,
+	"yb":  BigYByte,
+	"rib": BigRiByte,
+	"rb":  BigRByte,
+	"qib": BigQiByte,
+	"qb":  BigQByte,
+	// Without suffix
+	"":   BigByte,
+	"ki": BigKiByte,
+	"k":  BigKByte,
+	"mi": BigMiByte,
+	"m":  BigMByte,
+	"gi": BigGiByte,
+	"g":  BigGByte,
+	"ti": BigTiByte,
+	"t":  BigTByte,
+	"pi": BigPiByte,
+	"p":  BigPByte,
+	"ei": BigEiByte,
+	"e":  BigEByte,
+	"z":  BigZByte,
+	"zi": BigZiByte,
+	"y":  BigYByte,
+	"yi": BigYiByte,
+	"r":  BigRByte,
+	"ri": BigRiByte,
+	"q":  BigQByte,
+	"qi": BigQiByte,
+}
+
+var ten = big.NewInt(10)
+
+func humanateBigBytes(s, base *big.Int, sizes []string) string {
+	if s.Cmp(ten) < 0 {
+		return fmt.Sprintf("%d B", s)
+	}
+	c := (&big.Int{}).Set(s)
+	val, mag := oomm(c, base, len(sizes)-1)
+	suffix := sizes[mag]
+	f := "%.0f %s"
+	if val < 10 {
+		f = "%.1f %s"
+	}
+
+	return fmt.Sprintf(f, val, suffix)
+
+}
+
+// BigBytes produces a human readable representation of an SI size.
+//
+// See also: ParseBigBytes.
+//
+// BigBytes(82854982) -> 83 MB
+func BigBytes(s *big.Int) string {
+	sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"}
+	return humanateBigBytes(s, bigSIExp, sizes)
+}
+
+// BigIBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBigBytes.
+//
+// BigIBytes(82854982) -> 79 MiB
+func BigIBytes(s *big.Int) string {
+	sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"}
+	return humanateBigBytes(s, bigIECExp, sizes)
+}
+
+// ParseBigBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See also: BigBytes, BigIBytes.
+//
+// ParseBigBytes("42 MB") -> 42000000, nil
+// ParseBigBytes("42 mib") -> 44040192, nil
+func ParseBigBytes(s string) (*big.Int, error) {
+	lastDigit := 0
+	hasComma := false
+	for _, r := range s {
+		if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+			break
+		}
+		if r == ',' {
+			hasComma = true
+		}
+		lastDigit++
+	}
+
+	num := s[:lastDigit]
+	if hasComma {
+		num = strings.Replace(num, ",", "", -1)
+	}
+
+	val := &big.Rat{}
+	_, err := fmt.Sscanf(num, "%f", val)
+	if err != nil {
+		return nil, err
+	}
+
+	extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+	if m, ok := bigBytesSizeTable[extra]; ok {
+		mv := (&big.Rat{}).SetInt(m)
+		val.Mul(val, mv)
+		rv := &big.Int{}
+		rv.Div(val.Num(), val.Denom())
+		return rv, nil
+	}
+
+	return nil, fmt.Errorf("unhandled size name: %v", extra)
+}
diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go
new file mode 100644
index 0000000000..0b498f4885
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bytes.go
@@ -0,0 +1,143 @@
+package humanize
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+// IEC Sizes.
+// kibis of bits
+const (
+	Byte = 1 << (iota * 10)
+	KiByte
+	MiByte
+	GiByte
+	TiByte
+	PiByte
+	EiByte
+)
+
+// SI Sizes.
+const (
+	IByte = 1
+	KByte = IByte * 1000
+	MByte = KByte * 1000
+	GByte = MByte * 1000
+	TByte = GByte * 1000
+	PByte = TByte * 1000
+	EByte = PByte * 1000
+)
+
+var bytesSizeTable = map[string]uint64{
+	"b":   Byte,
+	"kib": KiByte,
+	"kb":  KByte,
+	"mib": MiByte,
+	"mb":  MByte,
+	"gib": GiByte,
+	"gb":  GByte,
+	"tib": TiByte,
+	"tb":  TByte,
+	"pib": PiByte,
+	"pb":  PByte,
+	"eib": EiByte,
+	"eb":  EByte,
+	// Without suffix
+	"":   Byte,
+	"ki": KiByte,
+	"k":  KByte,
+	"mi": MiByte,
+	"m":  MByte,
+	"gi": GiByte,
+	"g":  GByte,
+	"ti": TiByte,
+	"t":  TByte,
+	"pi": PiByte,
+	"p":  PByte,
+	"ei": EiByte,
+	"e":  EByte,
+}
+
+func logn(n, b float64) float64 {
+	return math.Log(n) / math.Log(b)
+}
+
+func humanateBytes(s uint64, base float64, sizes []string) string {
+	if s < 10 {
+		return fmt.Sprintf("%d B", s)
+	}
+	e := math.Floor(logn(float64(s), base))
+	suffix := sizes[int(e)]
+	val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
+	f := "%.0f %s"
+	if val < 10 {
+		f = "%.1f %s"
+	}
+
+	return fmt.Sprintf(f, val, suffix)
+}
+
+// Bytes produces a human readable representation of an SI size.
+//
+// See also: ParseBytes.
+//
+// Bytes(82854982) -> 83 MB
+func Bytes(s uint64) string {
+	sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
+	return humanateBytes(s, 1000, sizes)
+}
+
+// IBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBytes.
+//
+// IBytes(82854982) -> 79 MiB
+func IBytes(s uint64) string {
+	sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
+	return humanateBytes(s, 1024, sizes)
+}
+
+// ParseBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See Also: Bytes, IBytes.
+//
+// ParseBytes("42 MB") -> 42000000, nil
+// ParseBytes("42 mib") -> 44040192, nil
+func ParseBytes(s string) (uint64, error) {
+	lastDigit := 0
+	hasComma := false
+	for _, r := range s {
+		if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+			break
+		}
+		if r == ',' {
+			hasComma = true
+		}
+		lastDigit++
+	}
+
+	num := s[:lastDigit]
+	if hasComma {
+		num = strings.Replace(num, ",", "", -1)
+	}
+
+	f, err := strconv.ParseFloat(num, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+	if m, ok := bytesSizeTable[extra]; ok {
+		f *= float64(m)
+		if f >= math.MaxUint64 {
+			return 0, fmt.Errorf("too large: %v", s)
+		}
+		return uint64(f), nil
+	}
+
+	return 0, fmt.Errorf("unhandled size name: %v", extra)
+}
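
For quick reference, the byte-size helpers above behave as follows (values taken from the doc comments; illustrative only):

```go
package main

import (
	"fmt"
	"log"

	"github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.Bytes(82854982))  // 83 MB  (SI, base 1000)
	fmt.Println(humanize.IBytes(82854982)) // 79 MiB (IEC, base 1024)

	// Parsing accepts SI and IEC suffixes, case-insensitively.
	n, err := humanize.ParseBytes("42 mib")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n) // 44040192
}
```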
diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go
new file mode 100644
index 0000000000..520ae3e57d
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/comma.go
@@ -0,0 +1,116 @@
+package humanize
+
+import (
+	"bytes"
+	"math"
+	"math/big"
+	"strconv"
+	"strings"
+)
+
+// Comma produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Comma(834142) -> 834,142
+func Comma(v int64) string {
+	sign := ""
+
+	// Min int64 can't be negated to a usable value, so it has to be special cased.
+	if v == math.MinInt64 {
+		return "-9,223,372,036,854,775,808"
+	}
+
+	if v < 0 {
+		sign = "-"
+		v = 0 - v
+	}
+
+	parts := []string{"", "", "", "", "", "", ""}
+	j := len(parts) - 1
+
+	for v > 999 {
+		parts[j] = strconv.FormatInt(v%1000, 10)
+		switch len(parts[j]) {
+		case 2:
+			parts[j] = "0" + parts[j]
+		case 1:
+			parts[j] = "00" + parts[j]
+		}
+		v = v / 1000
+		j--
+	}
+	parts[j] = strconv.Itoa(int(v))
+	return sign + strings.Join(parts[j:], ",")
+}
+
+// Commaf produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Commaf(834142.32) -> 834,142.32
+func Commaf(v float64) string {
+	buf := &bytes.Buffer{}
+	if v < 0 {
+		buf.Write([]byte{'-'})
+		v = 0 - v
+	}
+
+	comma := []byte{','}
+
+	parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
+	pos := 0
+	if len(parts[0])%3 != 0 {
+		pos += len(parts[0]) % 3
+		buf.WriteString(parts[0][:pos])
+		buf.Write(comma)
+	}
+	for ; pos < len(parts[0]); pos += 3 {
+		buf.WriteString(parts[0][pos : pos+3])
+		buf.Write(comma)
+	}
+	buf.Truncate(buf.Len() - 1)
+
+	if len(parts) > 1 {
+		buf.Write([]byte{'.'})
+		buf.WriteString(parts[1])
+	}
+	return buf.String()
+}
+
+// CommafWithDigits works like Commaf but limits the resulting
+// string to the given number of decimal places.
+//
+// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
+func CommafWithDigits(f float64, decimals int) string {
+	return stripTrailingDigits(Commaf(f), decimals)
+}
+
+// BigComma produces a string form of the given big.Int in base 10
+// with commas after every three orders of magnitude.
+func BigComma(b *big.Int) string {
+	sign := ""
+	if b.Sign() < 0 {
+		sign = "-"
+		b.Abs(b)
+	}
+
+	athousand := big.NewInt(1000)
+	c := (&big.Int{}).Set(b)
+	_, m := oom(c, athousand)
+	parts := make([]string, m+1)
+	j := len(parts) - 1
+
+	mod := &big.Int{}
+	for b.Cmp(athousand) >= 0 {
+		b.DivMod(b, athousand, mod)
+		parts[j] = strconv.FormatInt(mod.Int64(), 10)
+		switch len(parts[j]) {
+		case 2:
+			parts[j] = "0" + parts[j]
+		case 1:
+			parts[j] = "00" + parts[j]
+		}
+		j--
+	}
+	parts[j] = strconv.Itoa(int(b.Int64()))
+	return sign + strings.Join(parts[j:], ",")
+}
diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go
new file mode 100644
index 0000000000..2bc83a03cf
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/commaf.go
@@ -0,0 +1,41 @@
+//go:build go1.6
+// +build go1.6
+
+package humanize
+
+import (
+	"bytes"
+	"math/big"
+	"strings"
+)
+
+// BigCommaf produces a string form of the given big.Float in base 10
+// with commas after every three orders of magnitude.
+func BigCommaf(v *big.Float) string {
+	buf := &bytes.Buffer{}
+	if v.Sign() < 0 {
+		buf.Write([]byte{'-'})
+		v.Abs(v)
+	}
+
+	comma := []byte{','}
+
+	parts := strings.Split(v.Text('f', -1), ".")
+	pos := 0
+	if len(parts[0])%3 != 0 {
+		pos += len(parts[0]) % 3
+		buf.WriteString(parts[0][:pos])
+		buf.Write(comma)
+	}
+	for ; pos < len(parts[0]); pos += 3 {
+		buf.WriteString(parts[0][pos : pos+3])
+		buf.Write(comma)
+	}
+	buf.Truncate(buf.Len() - 1)
+
+	if len(parts) > 1 {
+		buf.Write([]byte{'.'})
+		buf.WriteString(parts[1])
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go
new file mode 100644
index 0000000000..bce923f371
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ftoa.go
@@ -0,0 +1,49 @@
+package humanize
+
+import (
+	"strconv"
+	"strings"
+)
+
+func stripTrailingZeros(s string) string {
+	if !strings.ContainsRune(s, '.') {
+		return s
+	}
+	offset := len(s) - 1
+	for offset > 0 {
+		if s[offset] == '.' {
+			offset--
+			break
+		}
+		if s[offset] != '0' {
+			break
+		}
+		offset--
+	}
+	return s[:offset+1]
+}
+
+func stripTrailingDigits(s string, digits int) string {
+	if i := strings.Index(s, "."); i >= 0 {
+		if digits <= 0 {
+			return s[:i]
+		}
+		i++
+		if i+digits >= len(s) {
+			return s
+		}
+		return s[:i+digits]
+	}
+	return s
+}
+
+// Ftoa converts a float to a string with no trailing zeros.
+func Ftoa(num float64) string {
+	return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
+}
+
+// FtoaWithDigits converts a float to a string but limits the resulting string
+// to the given number of decimal places, and no trailing zeros.
+func FtoaWithDigits(num float64, digits int) string {
+	return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
+}
diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go
new file mode 100644
index 0000000000..a2c2da31ef
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/humanize.go
@@ -0,0 +1,8 @@
+/*
+Package humanize converts boring ugly numbers to human-friendly strings and back.
+
+Durations can be turned into strings such as "3 days ago", and numbers
+representing sizes like 82854982 into useful strings like "83 MB" or
+"79 MiB" (whichever you prefer).
+*/
+package humanize
diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go
new file mode 100644
index 0000000000..6470d0d47a
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/number.go
@@ -0,0 +1,192 @@
+package humanize
+
+/*
+Slightly adapted from the source to fit go-humanize.
+
+Author: https://github.com/gorhill
+Source: https://gist.github.com/gorhill/5285193
+
+*/
+
+import (
+	"math"
+	"strconv"
+)
+
+var (
+	renderFloatPrecisionMultipliers = [...]float64{
+		1,
+		10,
+		100,
+		1000,
+		10000,
+		100000,
+		1000000,
+		10000000,
+		100000000,
+		1000000000,
+	}
+
+	renderFloatPrecisionRounders = [...]float64{
+		0.5,
+		0.05,
+		0.005,
+		0.0005,
+		0.00005,
+		0.000005,
+		0.0000005,
+		0.00000005,
+		0.000000005,
+		0.0000000005,
+	}
+)
+
+// FormatFloat produces a formatted number as string based on the following user-specified criteria:
+// * thousands separator
+// * decimal separator
+// * decimal precision
+//
+// Usage: s := FormatFloat(format, n)
+// The format parameter tells how to render the number n.
+//
+// See examples: http://play.golang.org/p/LXc1Ddm1lJ
+//
+// Examples of format strings, given n = 12345.6789:
+// "#,###.##" => "12,345.67"
+// "#,###." => "12,345"
+// "#,###" => "12345,678"
+// "#\u202F###,##" => "12 345,68"
+// "#.###,###### => 12.345,678900
+// "" (aka default format) => 12,345.67
+//
+// The highest precision allowed is 9 digits after the decimal symbol.
+// There is also a version for integer numbers, FormatInteger(),
+// which is convenient for calls within a template.
+func FormatFloat(format string, n float64) string {
+	// Special cases:
+	//   NaN = "NaN"
+	//   +Inf = "Infinity"
+	//   -Inf = "-Infinity"
+	if math.IsNaN(n) {
+		return "NaN"
+	}
+	if n > math.MaxFloat64 {
+		return "Infinity"
+	}
+	if n < (0.0 - math.MaxFloat64) {
+		return "-Infinity"
+	}
+
+	// default format
+	precision := 2
+	decimalStr := "."
+	thousandStr := ","
+	positiveStr := ""
+	negativeStr := "-"
+
+	if len(format) > 0 {
+		format := []rune(format)
+
+		// If there is an explicit format directive,
+		// then default values are these:
+		precision = 9
+		thousandStr = ""
+
+		// collect indices of meaningful formatting directives
+		formatIndx := []int{}
+		for i, char := range format {
+			if char != '#' && char != '0' {
+				formatIndx = append(formatIndx, i)
+			}
+		}
+
+		if len(formatIndx) > 0 {
+			// Directive at index 0:
+			//   Must be a '+'
+			//   Raise an error if not the case
+			// index: 0123456789
+			//        +0.000,000
+			//        +000,000.0
+			//        +0000.00
+			//        +0000
+			if formatIndx[0] == 0 {
+				if format[formatIndx[0]] != '+' {
+					panic("RenderFloat(): invalid positive sign directive")
+				}
+				positiveStr = "+"
+				formatIndx = formatIndx[1:]
+			}
+
+			// Two directives:
+			//   First is thousands separator
+			//   Raise an error if not followed by 3-digit
+			// 0123456789
+			// 0.000,000
+			// 000,000.00
+			if len(formatIndx) == 2 {
+				if (formatIndx[1] - formatIndx[0]) != 4 {
+					panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
+				}
+				thousandStr = string(format[formatIndx[0]])
+				formatIndx = formatIndx[1:]
+			}
+
+			// One directive:
+			//   Directive is decimal separator
+			//   The number of digit-specifiers following the separator indicates the wanted precision
+			// 0123456789
+			// 0.00
+			// 000,0000
+			if len(formatIndx) == 1 {
+				decimalStr = string(format[formatIndx[0]])
+				precision = len(format) - formatIndx[0] - 1
+			}
+		}
+	}
+
+	// generate sign part
+	var signStr string
+	if n >= 0.000000001 {
+		signStr = positiveStr
+	} else if n <= -0.000000001 {
+		signStr = negativeStr
+		n = -n
+	} else {
+		signStr = ""
+		n = 0.0
+	}
+
+	// split number into integer and fractional parts
+	intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
+
+	// generate integer part string
+	intStr := strconv.FormatInt(int64(intf), 10)
+
+	// add thousand separator if required
+	if len(thousandStr) > 0 {
+		for i := len(intStr); i > 3; {
+			i -= 3
+			intStr = intStr[:i] + thousandStr + intStr[i:]
+		}
+	}
+
+	// no fractional part, we can leave now
+	if precision == 0 {
+		return signStr + intStr
+	}
+
+	// generate fractional part
+	fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
+	// may need padding
+	if len(fracStr) < precision {
+		fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
+	}
+
+	return signStr + intStr + decimalStr + fracStr
+}
+
+// FormatInteger produces a formatted number as string.
+// See FormatFloat.
+func FormatInteger(format string, n int) string {
+	return FormatFloat(format, float64(n))
+}
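
To make the format-string rules above concrete, a small illustrative sketch (exact last-digit rounding follows the rounder table in the implementation):

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// "," as the thousands separator and two decimal places.
	fmt.Println(humanize.FormatFloat("#,###.##", 12345.6789))

	// A trailing "." selects zero decimal places.
	fmt.Println(humanize.FormatInteger("#,###.", 1234567)) // 1,234,567

	// The empty format falls back to the defaults: comma separator, two decimals.
	fmt.Println(humanize.FormatFloat("", 12345.6789))
}
```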
diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go
new file mode 100644
index 0000000000..43d88a8619
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ordinals.go
@@ -0,0 +1,25 @@
+package humanize
+
+import "strconv"
+
+// Ordinal gives you the input number in a rank/ordinal format.
+//
+// Ordinal(3) -> 3rd
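+//
+// A couple more illustrative cases (following the rules implemented below):
+//
+// Ordinal(21) -> 21st
+// Ordinal(11) -> 11th (11, 12, and 13 always take "th")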
+func Ordinal(x int) string {
+	suffix := "th"
+	switch x % 10 {
+	case 1:
+		if x%100 != 11 {
+			suffix = "st"
+		}
+	case 2:
+		if x%100 != 12 {
+			suffix = "nd"
+		}
+	case 3:
+		if x%100 != 13 {
+			suffix = "rd"
+		}
+	}
+	return strconv.Itoa(x) + suffix
+}
diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go
new file mode 100644
index 0000000000..8b85019849
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/si.go
@@ -0,0 +1,127 @@
+package humanize
+
+import (
+	"errors"
+	"math"
+	"regexp"
+	"strconv"
+)
+
+var siPrefixTable = map[float64]string{
+	-30: "q", // quecto
+	-27: "r", // ronto
+	-24: "y", // yocto
+	-21: "z", // zepto
+	-18: "a", // atto
+	-15: "f", // femto
+	-12: "p", // pico
+	-9:  "n", // nano
+	-6:  "µ", // micro
+	-3:  "m", // milli
+	0:   "",
+	3:   "k", // kilo
+	6:   "M", // mega
+	9:   "G", // giga
+	12:  "T", // tera
+	15:  "P", // peta
+	18:  "E", // exa
+	21:  "Z", // zetta
+	24:  "Y", // yotta
+	27:  "R", // ronna
+	30:  "Q", // quetta
+}
+
+var revSIPrefixTable = revfmap(siPrefixTable)
+
+// revfmap reverses the map and precomputes the power multiplier
+func revfmap(in map[float64]string) map[string]float64 {
+	rv := map[string]float64{}
+	for k, v := range in {
+		rv[v] = math.Pow(10, k)
+	}
+	return rv
+}
+
+var riParseRegex *regexp.Regexp
+
+func init() {
+	ri := `^([\-0-9.]+)\s?([`
+	for _, v := range siPrefixTable {
+		ri += v
+	}
+	ri += `]?)(.*)`
+
+	riParseRegex = regexp.MustCompile(ri)
+}
+
+// ComputeSI finds the most appropriate SI prefix for the given number
+// and returns the prefix along with the value adjusted to be within
+// that prefix.
+//
+// See also: SI, ParseSI.
+//
+// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
+func ComputeSI(input float64) (float64, string) {
+	if input == 0 {
+		return 0, ""
+	}
+	mag := math.Abs(input)
+	exponent := math.Floor(logn(mag, 10))
+	exponent = math.Floor(exponent/3) * 3
+
+	value := mag / math.Pow(10, exponent)
+
+	// Handle special case where value is exactly 1000.0
+	// Should return 1 M instead of 1000 k
+	if value == 1000.0 {
+		exponent += 3
+		value = mag / math.Pow(10, exponent)
+	}
+
+	value = math.Copysign(value, input)
+
+	prefix := siPrefixTable[exponent]
+	return value, prefix
+}
+
+// SI returns a string with default formatting.
+//
+// SI uses Ftoa to format float value, removing trailing zeros.
+//
+// See also: ComputeSI, ParseSI.
+//
+// e.g. SI(1000000, "B") -> 1 MB
+// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
+func SI(input float64, unit string) string {
+	value, prefix := ComputeSI(input)
+	return Ftoa(value) + " " + prefix + unit
+}
+
+// SIWithDigits works like SI but limits the resulting string to the
+// given number of decimal places.
+//
+// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
+// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
+func SIWithDigits(input float64, decimals int, unit string) string {
+	value, prefix := ComputeSI(input)
+	return FtoaWithDigits(value, decimals) + " " + prefix + unit
+}
+
+var errInvalid = errors.New("invalid input")
+
+// ParseSI parses an SI string back into the number and unit.
+//
+// See also: SI, ComputeSI.
+//
+// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
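+//
+// An illustrative round trip, building on the SI example above:
+//
+//	s := SI(2.2345e-12, "F") // "2.2345 pF"
+//	v, u, err := ParseSI(s)  // 2.2345e-12, "F", nil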
+func ParseSI(input string) (float64, string, error) {
+	found := riParseRegex.FindStringSubmatch(input)
+	if len(found) != 4 {
+		return 0, "", errInvalid
+	}
+	mag := revSIPrefixTable[found[2]]
+	unit := found[3]
+
+	base, err := strconv.ParseFloat(found[1], 64)
+	return base * mag, unit, err
+}
diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go
new file mode 100644
index 0000000000..dd3fbf5efc
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/times.go
@@ -0,0 +1,117 @@
+package humanize
+
+import (
+	"fmt"
+	"math"
+	"sort"
+	"time"
+)
+
+// Seconds-based time units
+const (
+	Day      = 24 * time.Hour
+	Week     = 7 * Day
+	Month    = 30 * Day
+	Year     = 12 * Month
+	LongTime = 37 * Year
+)
+
+// Time formats a time into a relative string.
+//
+// Time(someT) -> "3 weeks ago"
+func Time(then time.Time) string {
+	return RelTime(then, time.Now(), "ago", "from now")
+}
+
+// A RelTimeMagnitude struct contains a relative time point at which
+// the relative format of time will switch to a new format string.  A
+// slice of these in ascending order by their "D" field is passed to
+// CustomRelTime to format durations.
+//
+// The Format field is a string that may contain a "%s" which will be
+// replaced with the appropriate signed label (e.g. "ago" or "from
+// now") and a "%d" that will be replaced by the quantity.
+//
+// The DivBy field is the amount of time the time difference must be
+// divided by in order to display correctly.
+//
+// e.g. if D is 2*time.Minute and you want to display "%d minutes %s",
+// DivBy should be time.Minute so that whatever the duration is will be
+// expressed in minutes.
+type RelTimeMagnitude struct {
+	D      time.Duration
+	Format string
+	DivBy  time.Duration
+}
+
+var defaultMagnitudes = []RelTimeMagnitude{
+	{time.Second, "now", time.Second},
+	{2 * time.Second, "1 second %s", 1},
+	{time.Minute, "%d seconds %s", time.Second},
+	{2 * time.Minute, "1 minute %s", 1},
+	{time.Hour, "%d minutes %s", time.Minute},
+	{2 * time.Hour, "1 hour %s", 1},
+	{Day, "%d hours %s", time.Hour},
+	{2 * Day, "1 day %s", 1},
+	{Week, "%d days %s", Day},
+	{2 * Week, "1 week %s", 1},
+	{Month, "%d weeks %s", Week},
+	{2 * Month, "1 month %s", 1},
+	{Year, "%d months %s", Month},
+	{18 * Month, "1 year %s", 1},
+	{2 * Year, "2 years %s", 1},
+	{LongTime, "%d years %s", Year},
+	{math.MaxInt64, "a long while %s", 1},
+}
+
+// RelTime formats a time into a relative string.
+//
+// It takes two times and two labels.  In addition to the generic time
+// delta string (e.g. 5 minutes), the labels are applied so that
+// the label corresponding to the smaller time is used.
+//
+// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
+func RelTime(a, b time.Time, albl, blbl string) string {
+	return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
+}
+
+// CustomRelTime formats a time into a relative string.
+//
+// It takes two times, two labels, and a table of relative time formats.
+// In addition to the generic time delta string (e.g. 5 minutes), the
+// labels are applied so that the label corresponding to the
+// smaller time is used.
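+//
+// A minimal illustrative sketch with a custom table (the names are arbitrary):
+//
+//	mags := []RelTimeMagnitude{
+//		{time.Minute, "moments %s", time.Second},
+//		{math.MaxInt64, "%d minutes %s", time.Minute},
+//	}
+//	t := time.Now()
+//	CustomRelTime(t, t.Add(5*time.Minute), "ago", "later", mags) // "5 minutes ago"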
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
+	lbl := albl
+	diff := b.Sub(a)
+
+	if a.After(b) {
+		lbl = blbl
+		diff = a.Sub(b)
+	}
+
+	n := sort.Search(len(magnitudes), func(i int) bool {
+		return magnitudes[i].D > diff
+	})
+
+	if n >= len(magnitudes) {
+		n = len(magnitudes) - 1
+	}
+	mag := magnitudes[n]
+	args := []interface{}{}
+	escaped := false
+	for _, ch := range mag.Format {
+		if escaped {
+			switch ch {
+			case 's':
+				args = append(args, lbl)
+			case 'd':
+				args = append(args, diff/mag.DivBy)
+			}
+			escaped = false
+		} else {
+			escaped = ch == '%'
+		}
+	}
+	return fmt.Sprintf(mag.Format, args...)
+}
diff --git a/vendor/github.com/ebitengine/purego/.gitignore b/vendor/github.com/ebitengine/purego/.gitignore
new file mode 100644
index 0000000000..b25c15b81f
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/.gitignore
@@ -0,0 +1 @@
+*~
diff --git a/vendor/github.com/ebitengine/purego/LICENSE b/vendor/github.com/ebitengine/purego/LICENSE
new file mode 100644
index 0000000000..8dada3edaf
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/ebitengine/purego/README.md b/vendor/github.com/ebitengine/purego/README.md
new file mode 100644
index 0000000000..b8480f4567
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/README.md
@@ -0,0 +1,70 @@
+# purego
+[![Go Reference](https://pkg.go.dev/badge/github.com/ebitengine/purego?GOOS=darwin.svg)](https://pkg.go.dev/github.com/ebitengine/purego?GOOS=darwin)
+
+A library for calling C functions from Go without Cgo.
+
+## Motivation
+
+The [Ebitengine](https://github.com/hajimehoshi/ebiten) game engine was ported to use only Go on Windows. This enabled
+cross-compiling to Windows from any other operating system simply by setting `GOOS=windows`. The purego project was
+born to bring that same vision to the other platforms supported by Ebitengine.
+
+## Benefits
+
+- **Simple Cross-Compilation**: No C means you can build for other platforms easily without a C compiler.
+- **Faster Compilation**: Efficiently cache your entirely-Go builds.
+- **Smaller Binaries**: Using Cgo generates a C wrapper function for each C function called. Purego doesn't!
+- **Dynamic Linking**: Load symbols at runtime and use this as a plugin system.
+- **Foreign Function Interface**: Call into other languages that are compiled into shared objects.
+
+## Example
+
+This example only works on macOS and Linux. For a complete example look at [libc](https://github.com/ebitengine/purego/tree/main/examples/libc) which supports Windows.
+
+```go
+package main
+
+import (
+	"fmt"
+	"runtime"
+
+	"github.com/ebitengine/purego"
+)
+
+func getSystemLibrary() string {
+	switch runtime.GOOS {
+	case "darwin":
+		return "/usr/lib/libSystem.B.dylib"
+	case "linux":
+		return "libc.so.6"
+	default:
+		panic(fmt.Errorf("GOOS=%s is not supported", runtime.GOOS))
+	}
+}
+
+func main() {
+	libc, err := purego.Dlopen(getSystemLibrary(), purego.RTLD_NOW|purego.RTLD_GLOBAL)
+	if err != nil {
+		panic(err)
+	}
+	var puts func(string)
+	purego.RegisterLibFunc(&puts, libc, "puts")
+	puts("Calling C from Go without Cgo!")
+}
+```
+
+Then to run: `CGO_ENABLED=0 go run main.go`
+
+### External Code
+
+Purego uses code that originates from the Go runtime. These files are under the BSD-3
+License that can be found [in the Go Source](https://github.com/golang/go/blob/master/LICENSE).
+This is a list of the copied files:
+
+* `zcallback_darwin_*.s` from package `runtime`
+* `internal/abi/abi_*.h` from package `runtime/cgo`
+* `internal/fakecgo/asm_GOARCH.s` from package `runtime/cgo`
+* `internal/fakecgo/callbacks.go` from package `runtime/cgo`
+* `internal/fakecgo/go_GOOS_GOARCH.go` from package `runtime/cgo`
+* `internal/fakecgo/iscgo.go` from package `runtime/cgo`
+* `internal/fakecgo/setenv.go` from package `runtime/cgo`
diff --git a/vendor/github.com/ebitengine/purego/cgo.go b/vendor/github.com/ebitengine/purego/cgo.go
new file mode 100644
index 0000000000..ebe6912416
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/cgo.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build cgo && (darwin || linux)
+
+package purego
+
+// If CGO_ENABLED=1, import the Cgo runtime to ensure that it is set up properly.
+// This is required since some frameworks need TLS set up the C way, which Go doesn't do.
+// We currently don't support iOS in fakecgo mode, so force Cgo or fail.
+// Even if CGO_ENABLED=1, the Cgo runtime is not imported unless `import "C"` is used,
+// which would import this package automatically. Normally this isn't an issue since it
+// usually isn't possible to call into C without using that import. However, with purego
+// it is, since we don't use `import "C"`!
+import _ "runtime/cgo"
diff --git a/vendor/github.com/ebitengine/purego/dlerror.go b/vendor/github.com/ebitengine/purego/dlerror.go
new file mode 100644
index 0000000000..e5ce708932
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlerror.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 The Ebitengine Authors
+
+//go:build darwin || linux
+
+package purego
+
+// Dlerror represents an error value returned from Dlopen, Dlsym, or Dlclose.
+type Dlerror struct {
+	s string
+}
+
+func (e Dlerror) Error() string {
+	return e.s
+}
diff --git a/vendor/github.com/ebitengine/purego/dlfcn.go b/vendor/github.com/ebitengine/purego/dlfcn.go
new file mode 100644
index 0000000000..5b535600bf
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlfcn.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || linux
+
+package purego
+
+import (
+	"unsafe"
+)
+
+// Unix Specification for dlfcn.h: https://pubs.opengroup.org/onlinepubs/7908799/xsh/dlfcn.h.html
+
+var (
+	fnDlopen  func(path string, mode int) uintptr
+	fnDlsym   func(handle uintptr, name string) uintptr
+	fnDlerror func() string
+	fnDlclose func(handle uintptr) bool
+)
+
+func init() {
+	RegisterFunc(&fnDlopen, dlopenABI0)
+	RegisterFunc(&fnDlsym, dlsymABI0)
+	RegisterFunc(&fnDlerror, dlerrorABI0)
+	RegisterFunc(&fnDlclose, dlcloseABI0)
+}
+
+// Dlopen examines the dynamic library or bundle file specified by path. If the file is compatible
+// with the current process and has not already been loaded into the
+// current process, it is loaded and linked. After being linked, if it contains
+// any initializer functions, they are called before Dlopen
+// returns. It returns a handle that can be used with Dlsym and Dlclose.
+// A second call to Dlopen with the same path will return the same handle, but the internal
+// reference count for the handle will be incremented. Therefore, all
+// Dlopen calls should be balanced with a Dlclose call.
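+//
+// An illustrative use (the library path is an assumption and is platform-specific):
+//
+//	handle, err := Dlopen("libc.so.6", RTLD_NOW|RTLD_GLOBAL)
+//	if err != nil { /* handle the error */ }
+//	defer Dlclose(handle)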
+func Dlopen(path string, mode int) (uintptr, error) {
+	u := fnDlopen(path, mode)
+	if u == 0 {
+		return 0, Dlerror{fnDlerror()}
+	}
+	return u, nil
+}
+
+// Dlsym takes a "handle" of a dynamic library returned by Dlopen and the symbol name.
+// It returns the address where that symbol is loaded into memory. If the symbol is not found
+// in the specified library or any of the libraries that were automatically loaded by Dlopen
+// when that library was loaded, Dlsym returns zero.
+func Dlsym(handle uintptr, name string) (uintptr, error) {
+	u := fnDlsym(handle, name)
+	if u == 0 {
+		return 0, Dlerror{fnDlerror()}
+	}
+	return u, nil
+}
+
+// Dlclose decrements the reference count on the dynamic library handle.
+// If the reference count drops to zero and no other loaded libraries
+// use symbols in it, then the dynamic library is unloaded.
+func Dlclose(handle uintptr) error {
+	if fnDlclose(handle) {
+		return Dlerror{fnDlerror()}
+	}
+	return nil
+}
+
+//go:linkname openLibrary openLibrary
+func openLibrary(name string) (uintptr, error) {
+	return Dlopen(name, RTLD_NOW|RTLD_GLOBAL)
+}
+
+func loadSymbol(handle uintptr, name string) (uintptr, error) {
+	return Dlsym(handle, name)
+}
+
+// These functions exist in dlfcn_stubs.s and call the C functions linked to in dlfcn_GOOS.go.
+// The indirection is necessary because a function is actually a pointer to the pointer to the code.
+// Sadly, I do not know of any way to remove the assembly stubs entirely because //go:linkname doesn't
+// appear to work if you link directly to the C function on darwin arm64.
+
+//go:linkname dlopen dlopen
+var dlopen uintptr
+var dlopenABI0 = uintptr(unsafe.Pointer(&dlopen))
+
+//go:linkname dlsym dlsym
+var dlsym uintptr
+var dlsymABI0 = uintptr(unsafe.Pointer(&dlsym))
+
+//go:linkname dlclose dlclose
+var dlclose uintptr
+var dlcloseABI0 = uintptr(unsafe.Pointer(&dlclose))
+
+//go:linkname dlerror dlerror
+var dlerror uintptr
+var dlerrorABI0 = uintptr(unsafe.Pointer(&dlerror))
diff --git a/vendor/github.com/ebitengine/purego/dlfcn_darwin.go b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go
new file mode 100644
index 0000000000..66ccf16d9a
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package purego
+
+// Source for constants: https://opensource.apple.com/source/dyld/dyld-360.14/include/dlfcn.h.auto.html
+
+const (
+	RTLD_DEFAULT = ^uintptr(0) - 1 // Pseudo-handle for dlsym to search for any loaded symbol
+	RTLD_LAZY    = 0x1             // Relocations are performed at an implementation-dependent time.
+	RTLD_NOW     = 0x2             // Relocations are performed when the object is loaded.
+	RTLD_LOCAL   = 0x4             // All symbols are not made available for relocation processing by other modules.
+	RTLD_GLOBAL  = 0x8             // All symbols are available for relocation processing of other modules.
+)
+
+//go:cgo_import_dynamic purego_dlopen dlopen "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlsym dlsym "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlerror dlerror "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlclose dlclose "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/github.com/ebitengine/purego/dlfcn_linux.go b/vendor/github.com/ebitengine/purego/dlfcn_linux.go
new file mode 100644
index 0000000000..301127538c
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlfcn_linux.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package purego
+
+// Source for constants: https://codebrowser.dev/glibc/glibc/bits/dlfcn.h.html
+
+const (
+	RTLD_DEFAULT = 0x00000 // Pseudo-handle for dlsym to search for any loaded symbol
+	RTLD_LAZY    = 0x00001 // Relocations are performed at an implementation-dependent time.
+	RTLD_NOW     = 0x00002 // Relocations are performed when the object is loaded.
+	RTLD_LOCAL   = 0x00000 // All symbols are not made available for relocation processing by other modules.
+	RTLD_GLOBAL  = 0x00100 // All symbols are available for relocation processing of other modules.
+)
diff --git a/vendor/github.com/ebitengine/purego/dlfcn_nocgo_linux.go b/vendor/github.com/ebitengine/purego/dlfcn_nocgo_linux.go
new file mode 100644
index 0000000000..2c8009fdb9
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlfcn_nocgo_linux.go
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo
+
+package purego
+
+// if there is no Cgo we must link to each of the functions from dlfcn.h
+// then the functions are called inside dlfcn_stubs.s
+
+//go:cgo_import_dynamic purego_dlopen dlopen "libdl.so.2"
+//go:cgo_import_dynamic purego_dlsym dlsym "libdl.so.2"
+//go:cgo_import_dynamic purego_dlerror dlerror "libdl.so.2"
+//go:cgo_import_dynamic purego_dlclose dlclose "libdl.so.2"
+
+// On amd64 we don't need the following line, but on 386 we do.
+// Either way, with it the output is cleaner (though it doesn't matter): without it on amd64 we get multiple DT_NEEDED entries for "libc.so.6" etc.
+
+//go:cgo_import_dynamic _ _ "libdl.so.2"
diff --git a/vendor/github.com/ebitengine/purego/dlfcn_stubs.s b/vendor/github.com/ebitengine/purego/dlfcn_stubs.s
new file mode 100644
index 0000000000..c80116d7de
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlfcn_stubs.s
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || (linux && !cgo)
+
+#include "textflag.h"
+
+// func dlopen(path *byte, mode int) (ret uintptr)
+TEXT dlopen(SB), NOSPLIT, $0-0
+	JMP purego_dlopen(SB)
+	RET
+
+// func dlsym(handle uintptr, symbol *byte) (ret uintptr)
+TEXT dlsym(SB), NOSPLIT, $0-0
+	JMP purego_dlsym(SB)
+	RET
+
+// func dlerror() (ret *byte)
+TEXT dlerror(SB), NOSPLIT, $0-0
+	JMP purego_dlerror(SB)
+	RET
+
+// func dlclose(handle uintptr) (ret int)
+TEXT dlclose(SB), NOSPLIT, $0-0
+	JMP purego_dlclose(SB)
+	RET
diff --git a/vendor/github.com/ebitengine/purego/dummy.go b/vendor/github.com/ebitengine/purego/dummy.go
new file mode 100644
index 0000000000..8ac52fc7f7
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dummy.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build dummy
+
+// This file exists purely to prevent the Go toolchain from stripping
+// away the C source directories and files when `go mod vendor` is used
+// to populate a `vendor/` directory of a project depending on this package.
+
+package purego
+
+import (
+	_ "github.com/ebitengine/purego/internal/abi"
+)
diff --git a/vendor/github.com/ebitengine/purego/func.go b/vendor/github.com/ebitengine/purego/func.go
new file mode 100644
index 0000000000..8ddf76d6d9
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/func.go
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || linux || windows
+
+package purego
+
+import (
+	"math"
+	"reflect"
+	"runtime"
+	"unsafe"
+
+	"github.com/ebitengine/purego/internal/strings"
+)
+
+// RegisterLibFunc is a wrapper around RegisterFunc that uses the C function returned from Dlsym(handle, name).
+// It panics if it can't find the named symbol.
+func RegisterLibFunc(fptr interface{}, handle uintptr, name string) {
+	sym, err := loadSymbol(handle, name)
+	if err != nil {
+		panic(err)
+	}
+	RegisterFunc(fptr, sym)
+}
+
+// RegisterFunc takes a pointer to a Go function representing the calling convention of the C function.
+// fptr will be set to a function that when called will call the C function given by cfn with the
+// parameters passed in the correct registers and stack.
+//
+// A panic is produced if the type is not a function pointer or if the function returns more than 1 value.
+//
+// These conversions describe how a Go type in the fptr will be used to call
+// the C function. It is important to note that there is no way to verify that fptr
+// matches the C function. This also holds true for struct types where the padding
+// needs to be ensured to match that of C; RegisterFunc does not verify this.
+//
+// Type Conversions (Go => C)
+//
+//	string <=> char*
+//	bool <=> _Bool
+//	uintptr <=> uintptr_t
+//	uint <=> uint32_t or uint64_t
+//	uint8 <=> uint8_t
+//	uint16 <=> uint16_t
+//	uint32 <=> uint32_t
+//	uint64 <=> uint64_t
+//	int <=> int32_t or int64_t
+//	int8 <=> int8_t
+//	int16 <=> int16_t
+//	int32 <=> int32_t
+//	int64 <=> int64_t
+//	float32 <=> float (WIP)
+//	float64 <=> double (WIP)
+//	struct <=> struct (WIP)
+//	func <=> C function
+//	unsafe.Pointer, *T <=> void*
+//	[]T => void*
+//
+// There is a special case when the last argument of fptr is a variadic interface (or []interface{}):
+// it will be expanded into a call to the C function as if it had the arguments in that slice.
+// This means that using arg ...interface{} is like a cast to the function with the arguments inside arg.
+// This is not the same as a C variadic function.
+//
+// There are some limitations when using RegisterFunc on Linux. First, there is no support for function arguments.
+// Second, float32 and float64 arguments and return values do not work when CGO_ENABLED=1. Otherwise, Linux
+// has the same feature parity as Darwin.
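+//
+// An illustrative sketch (assumes cfnPuts was obtained via Dlsym(handle, "puts")):
+//
+//	var puts func(string)
+//	RegisterFunc(&puts, cfnPuts)
+//	puts("hello")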
+func RegisterFunc(fptr interface{}, cfn uintptr) {
+	fn := reflect.ValueOf(fptr).Elem()
+	ty := fn.Type()
+	if ty.Kind() != reflect.Func {
+		panic("purego: fptr must be a function pointer")
+	}
+	if ty.NumOut() > 1 {
+		panic("purego: function can only return zero or one values")
+	}
+	if cfn == 0 {
+		panic("purego: cfn is nil")
+	}
+	{
+		// this code checks how many registers and stack this function will use
+		// to avoid crashing with too many arguments
+		var ints int
+		var floats int
+		var stack int
+		for i := 0; i < ty.NumIn(); i++ {
+			arg := ty.In(i)
+			switch arg.Kind() {
+			case reflect.String, reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+				reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Ptr, reflect.UnsafePointer, reflect.Slice,
+				reflect.Func, reflect.Bool:
+				if ints < numOfIntegerRegisters() {
+					ints++
+				} else {
+					stack++
+				}
+			case reflect.Float32, reflect.Float64:
+				if floats < numOfFloats {
+					floats++
+				} else {
+					stack++
+				}
+			default:
+				panic("purego: unsupported kind " + arg.Kind().String())
+			}
+		}
+		sizeOfStack := maxArgs - numOfIntegerRegisters()
+		if stack > sizeOfStack {
+			panic("purego: too many arguments")
+		}
+	}
+	v := reflect.MakeFunc(ty, func(args []reflect.Value) (results []reflect.Value) {
+		if len(args) > 0 {
+			if variadic, ok := args[len(args)-1].Interface().([]interface{}); ok {
+				// subtract one from args because the last argument in args is []interface{},
+				// which we are currently expanding
+				tmp := make([]reflect.Value, len(args)-1+len(variadic))
+				n := copy(tmp, args[:len(args)-1])
+				for i, v := range variadic {
+					tmp[n+i] = reflect.ValueOf(v)
+				}
+				args = tmp
+			}
+		}
+		var sysargs [maxArgs]uintptr
+		stack := sysargs[numOfIntegerRegisters():]
+		var floats [numOfFloats]uintptr
+		var numInts int
+		var numFloats int
+		var numStack int
+		var addStack, addInt, addFloat func(x uintptr)
+		if runtime.GOARCH == "arm64" || runtime.GOOS != "windows" {
+			// Windows arm64 uses the same calling convention as macOS and Linux
+			addStack = func(x uintptr) {
+				stack[numStack] = x
+				numStack++
+			}
+			addInt = func(x uintptr) {
+				if numInts >= numOfIntegerRegisters() {
+					addStack(x)
+				} else {
+					sysargs[numInts] = x
+					numInts++
+				}
+			}
+			addFloat = func(x uintptr) {
+				if numFloats < len(floats) {
+					floats[numFloats] = x
+					numFloats++
+				} else {
+					addStack(x)
+				}
+			}
+		} else {
+			// On Windows amd64 the arguments are passed in the numbered registers.
+			// So the first int is in the first integer register and the first float
+			// is in the second floating register if there is already a first int.
+			// This is in contrast to how macOS and Linux pass arguments, which
+			// try to use as many registers as possible in the calling convention.
+			addStack = func(x uintptr) {
+				sysargs[numStack] = x
+				numStack++
+			}
+			addInt = addStack
+			addFloat = addStack
+		}
+
+		var keepAlive []interface{}
+		defer func() {
+			runtime.KeepAlive(keepAlive)
+			runtime.KeepAlive(args)
+		}()
+		for _, v := range args {
+			switch v.Kind() {
+			case reflect.String:
+				ptr := strings.CString(v.String())
+				keepAlive = append(keepAlive, ptr)
+				addInt(uintptr(unsafe.Pointer(ptr)))
+			case reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+				addInt(uintptr(v.Uint()))
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				addInt(uintptr(v.Int()))
+			case reflect.Ptr, reflect.UnsafePointer, reflect.Slice:
+				// There is no need to keepAlive this pointer separately because it is kept alive in the args variable
+				addInt(v.Pointer())
+			case reflect.Func:
+				addInt(NewCallback(v.Interface()))
+			case reflect.Bool:
+				if v.Bool() {
+					addInt(1)
+				} else {
+					addInt(0)
+				}
+			case reflect.Float32:
+				addFloat(uintptr(math.Float32bits(float32(v.Float()))))
+			case reflect.Float64:
+				addFloat(uintptr(math.Float64bits(v.Float())))
+			default:
+				panic("purego: unsupported kind: " + v.Kind().String())
+			}
+		}
+		// TODO: support structs
+		var r1, r2 uintptr
+		if runtime.GOARCH == "arm64" || runtime.GOOS != "windows" {
+			// Use the normal arm64 calling convention even on Windows
+			syscall := syscall9Args{
+				cfn,
+				sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4], sysargs[5], sysargs[6], sysargs[7], sysargs[8],
+				floats[0], floats[1], floats[2], floats[3], floats[4], floats[5], floats[6], floats[7],
+				0, 0, 0,
+			}
+			runtime_cgocall(syscall9XABI0, unsafe.Pointer(&syscall))
+			r1, r2 = syscall.r1, syscall.r2
+		} else {
+			// This is a fallback for amd64, 386, and arm. Note this may not support floats
+			r1, r2, _ = syscall_syscall9X(cfn, sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4], sysargs[5], sysargs[6], sysargs[7], sysargs[8])
+		}
+		if ty.NumOut() == 0 {
+			return nil
+		}
+		outType := ty.Out(0)
+		v := reflect.New(outType).Elem()
+		switch outType.Kind() {
+		case reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			v.SetUint(uint64(r1))
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			v.SetInt(int64(r1))
+		case reflect.Bool:
+			v.SetBool(r1 != 0)
+		case reflect.UnsafePointer:
+			// We take the address and then dereference it to keep go vet from flagging a possible misuse of unsafe.Pointer
+			v.SetPointer(*(*unsafe.Pointer)(unsafe.Pointer(&r1)))
+		case reflect.Ptr:
+			// It is safe to have the address of r1 not escape because it is immediately dereferenced with .Elem()
+			v = reflect.NewAt(outType, runtime_noescape(unsafe.Pointer(&r1))).Elem()
+		case reflect.Func:
+			// wrap this C function in a nicely typed Go function
+			v = reflect.New(outType)
+			RegisterFunc(v.Interface(), r1)
+		case reflect.String:
+			v.SetString(strings.GoString(r1))
+		case reflect.Float32, reflect.Float64:
+			// NOTE: r2 is only the floating return value on 64bit platforms.
+			// On 32bit platforms r2 is the upper part of a 64bit return.
+			v.SetFloat(math.Float64frombits(uint64(r2)))
+		default:
+			panic("purego: unsupported return kind: " + outType.Kind().String())
+		}
+		return []reflect.Value{v}
+	})
+	fn.Set(v)
+}
+
+func numOfIntegerRegisters() int {
+	switch runtime.GOARCH {
+	case "arm64":
+		return 8
+	case "amd64":
+		return 6
+	// TODO: figure out why 386 tests are not working
+	/*case "386":
+		return 0
+	case "arm":
+		return 4*/
+	default:
+		panic("purego: unknown GOARCH (" + runtime.GOARCH + ")")
+	}
+}
diff --git a/vendor/github.com/ebitengine/purego/go_runtime.go b/vendor/github.com/ebitengine/purego/go_runtime.go
new file mode 100644
index 0000000000..d4347402df
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/go_runtime.go
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || linux || windows
+
+package purego
+
+import (
+	"unsafe"
+)
+
+//go:linkname runtime_cgocall runtime.cgocall
+func runtime_cgocall(fn uintptr, arg unsafe.Pointer) int32 // from runtime/sys_libc.go
+
+//go:linkname runtime_noescape runtime.noescape
+//go:noescape
+func runtime_noescape(p unsafe.Pointer) unsafe.Pointer // from runtime/stubs.go
diff --git a/vendor/github.com/ebitengine/purego/internal/abi/abi_amd64.h b/vendor/github.com/ebitengine/purego/internal/abi/abi_amd64.h
new file mode 100644
index 0000000000..9949435fe9
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/abi/abi_amd64.h
@@ -0,0 +1,99 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Macros for transitioning from the host ABI to Go ABI0.
+//
+// These save the frame pointer, so in general, functions that use
+// these should have zero frame size to suppress the automatic frame
+// pointer, though it's harmless to not do this.
+
+#ifdef GOOS_windows
+
+// REGS_HOST_TO_ABI0_STACK is the stack bytes used by
+// PUSH_REGS_HOST_TO_ABI0.
+#define REGS_HOST_TO_ABI0_STACK (28*8 + 8)
+
+// PUSH_REGS_HOST_TO_ABI0 prepares for transitioning from
+// the host ABI to Go ABI0 code. It saves all registers that are
+// callee-save in the host ABI and caller-save in Go ABI0 and prepares
+// for entry to Go.
+//
+// Save DI SI BP BX R12 R13 R14 R15 X6-X15 registers and the DF flag.
+// Clear the DF flag for the Go ABI.
+// MXCSR matches the Go ABI, so we don't have to set that,
+// and Go doesn't modify it, so we don't have to save it.
+#define PUSH_REGS_HOST_TO_ABI0()	\
+	PUSHFQ			\
+	CLD			\
+	ADJSP	$(REGS_HOST_TO_ABI0_STACK - 8)	\
+	MOVQ	DI, (0*0)(SP)	\
+	MOVQ	SI, (1*8)(SP)	\
+	MOVQ	BP, (2*8)(SP)	\
+	MOVQ	BX, (3*8)(SP)	\
+	MOVQ	R12, (4*8)(SP)	\
+	MOVQ	R13, (5*8)(SP)	\
+	MOVQ	R14, (6*8)(SP)	\
+	MOVQ	R15, (7*8)(SP)	\
+	MOVUPS	X6, (8*8)(SP)	\
+	MOVUPS	X7, (10*8)(SP)	\
+	MOVUPS	X8, (12*8)(SP)	\
+	MOVUPS	X9, (14*8)(SP)	\
+	MOVUPS	X10, (16*8)(SP)	\
+	MOVUPS	X11, (18*8)(SP)	\
+	MOVUPS	X12, (20*8)(SP)	\
+	MOVUPS	X13, (22*8)(SP)	\
+	MOVUPS	X14, (24*8)(SP)	\
+	MOVUPS	X15, (26*8)(SP)
+
+#define POP_REGS_HOST_TO_ABI0()	\
+	MOVQ	(0*0)(SP), DI	\
+	MOVQ	(1*8)(SP), SI	\
+	MOVQ	(2*8)(SP), BP	\
+	MOVQ	(3*8)(SP), BX	\
+	MOVQ	(4*8)(SP), R12	\
+	MOVQ	(5*8)(SP), R13	\
+	MOVQ	(6*8)(SP), R14	\
+	MOVQ	(7*8)(SP), R15	\
+	MOVUPS	(8*8)(SP), X6	\
+	MOVUPS	(10*8)(SP), X7	\
+	MOVUPS	(12*8)(SP), X8	\
+	MOVUPS	(14*8)(SP), X9	\
+	MOVUPS	(16*8)(SP), X10	\
+	MOVUPS	(18*8)(SP), X11	\
+	MOVUPS	(20*8)(SP), X12	\
+	MOVUPS	(22*8)(SP), X13	\
+	MOVUPS	(24*8)(SP), X14	\
+	MOVUPS	(26*8)(SP), X15	\
+	ADJSP	$-(REGS_HOST_TO_ABI0_STACK - 8)	\
+	POPFQ
+
+#else
+// SysV ABI
+
+#define REGS_HOST_TO_ABI0_STACK (6*8)
+
+// SysV MXCSR matches the Go ABI, so we don't have to set that,
+// and Go doesn't modify it, so we don't have to save it.
+// Both SysV and Go require DF to be cleared, so that's already clear.
+// The SysV and Go frame pointer conventions are compatible.
+#define PUSH_REGS_HOST_TO_ABI0()	\
+	ADJSP	$(REGS_HOST_TO_ABI0_STACK)	\
+	MOVQ	BP, (5*8)(SP)	\
+	LEAQ	(5*8)(SP), BP	\
+	MOVQ	BX, (0*8)(SP)	\
+	MOVQ	R12, (1*8)(SP)	\
+	MOVQ	R13, (2*8)(SP)	\
+	MOVQ	R14, (3*8)(SP)	\
+	MOVQ	R15, (4*8)(SP)
+
+#define POP_REGS_HOST_TO_ABI0()	\
+	MOVQ	(0*8)(SP), BX	\
+	MOVQ	(1*8)(SP), R12	\
+	MOVQ	(2*8)(SP), R13	\
+	MOVQ	(3*8)(SP), R14	\
+	MOVQ	(4*8)(SP), R15	\
+	MOVQ	(5*8)(SP), BP	\
+	ADJSP	$-(REGS_HOST_TO_ABI0_STACK)
+
+#endif
diff --git a/vendor/github.com/ebitengine/purego/internal/abi/abi_arm64.h b/vendor/github.com/ebitengine/purego/internal/abi/abi_arm64.h
new file mode 100644
index 0000000000..5d5061ec1d
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/abi/abi_arm64.h
@@ -0,0 +1,39 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Macros for transitioning from the host ABI to Go ABI0.
+//
+// These macros save and restore the callee-saved registers
+// from the stack, but they don't adjust stack pointer, so
+// the user should prepare stack space in advance.
+// SAVE_R19_TO_R28(offset) saves R19 ~ R28 to the stack space
+// of ((offset)+0*8)(RSP) ~ ((offset)+9*8)(RSP).
+//
+// SAVE_F8_TO_F15(offset) saves F8 ~ F15 to the stack space
+// of ((offset)+0*8)(RSP) ~ ((offset)+7*8)(RSP).
+//
+// R29 is not saved because Go will save and restore it.
+
+#define SAVE_R19_TO_R28(offset) \
+	STP	(R19, R20), ((offset)+0*8)(RSP) \
+	STP	(R21, R22), ((offset)+2*8)(RSP) \
+	STP	(R23, R24), ((offset)+4*8)(RSP) \
+	STP	(R25, R26), ((offset)+6*8)(RSP) \
+	STP	(R27, g), ((offset)+8*8)(RSP)
+#define RESTORE_R19_TO_R28(offset) \
+	LDP	((offset)+0*8)(RSP), (R19, R20) \
+	LDP	((offset)+2*8)(RSP), (R21, R22) \
+	LDP	((offset)+4*8)(RSP), (R23, R24) \
+	LDP	((offset)+6*8)(RSP), (R25, R26) \
+	LDP	((offset)+8*8)(RSP), (R27, g) /* R28 */
+#define SAVE_F8_TO_F15(offset) \
+	FSTPD	(F8, F9), ((offset)+0*8)(RSP) \
+	FSTPD	(F10, F11), ((offset)+2*8)(RSP) \
+	FSTPD	(F12, F13), ((offset)+4*8)(RSP) \
+	FSTPD	(F14, F15), ((offset)+6*8)(RSP)
+#define RESTORE_F8_TO_F15(offset) \
+	FLDPD	((offset)+0*8)(RSP), (F8, F9) \
+	FLDPD	((offset)+2*8)(RSP), (F10, F11) \
+	FLDPD	((offset)+4*8)(RSP), (F12, F13) \
+	FLDPD	((offset)+6*8)(RSP), (F14, F15)
diff --git a/vendor/github.com/ebitengine/purego/internal/abi/dummy.go b/vendor/github.com/ebitengine/purego/internal/abi/dummy.go
new file mode 100644
index 0000000000..0f218f3a38
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/abi/dummy.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build dummy
+
+// Package abi is a dummy package that prevents go tooling from stripping the C dependencies.
+package abi
diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_linux.go b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_linux.go
new file mode 100644
index 0000000000..89b9fca6c8
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_linux.go
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package cgo
+
+// this file is placed inside internal/cgo and not package purego
+// because Cgo and assembly files can't be in the same package.
+
+/*
+ #cgo LDFLAGS: -ldl
+
+#include <stdint.h>
+#include <dlfcn.h>
+#include <errno.h>
+#include <assert.h>
+
+typedef struct syscall9Args {
+	uintptr_t fn;
+	uintptr_t a1, a2, a3, a4, a5, a6, a7, a8, a9;
+	uintptr_t f1, f2, f3, f4, f5, f6, f7, f8;
+	uintptr_t r1, r2, err;
+} syscall9Args;
+
+void syscall9(struct syscall9Args *args) {
+	assert((args->f1|args->f2|args->f3|args->f4|args->f5|args->f6|args->f7|args->f8) == 0);
+	uintptr_t (*func_name)(uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9);
+	*(void**)(&func_name) = (void*)(args->fn);
+	uintptr_t r1 =  func_name(args->a1,args->a2,args->a3,args->a4,args->a5,args->a6,args->a7,args->a8,args->a9);
+	args->r1 = r1;
+	args->err = errno;
+}
+
+*/
+import "C"
+import "unsafe"
+
+// assign purego.syscall9XABI0 to the C version of this function.
+var Syscall9XABI0 = unsafe.Pointer(C.syscall9)
+
+// All that is needed is to assign each dl function, because then its
+// symbol will be made available to the linker and linked to inside dlfcn.go
+var (
+	_ = C.dlopen
+	_ = C.dlsym
+	_ = C.dlerror
+	_ = C.dlclose
+)
+
+//go:nosplit
+func Syscall9X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
+	args := C.syscall9Args{C.uintptr_t(fn), C.uintptr_t(a1), C.uintptr_t(a2), C.uintptr_t(a3),
+		C.uintptr_t(a4), C.uintptr_t(a5), C.uintptr_t(a6),
+		C.uintptr_t(a7), C.uintptr_t(a8), C.uintptr_t(a9), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+	C.syscall9(&args)
+	return uintptr(args.r1), uintptr(args.r2), uintptr(args.err)
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s
new file mode 100644
index 0000000000..4f02a5d62a
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s
@@ -0,0 +1,76 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// Called by C code generated by cmd/cgo.
+// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr)
+// Saves C callee-saved registers and calls cgocallback with three arguments.
+// fn is the PC of a func(a unsafe.Pointer) function.
+// This signature is known to SWIG, so we can't change it.
+#ifndef GOOS_windows
+TEXT crosscall2(SB), NOSPLIT, $0x50-0 // keeps stack pointer 32-byte aligned
+#else
+TEXT crosscall2(SB), NOSPLIT, $0x110-0 // also need to save xmm6 - xmm15
+#endif
+	MOVQ BX, 0x18(SP)
+	MOVQ R12, 0x28(SP)
+	MOVQ R13, 0x30(SP)
+	MOVQ R14, 0x38(SP)
+	MOVQ R15, 0x40(SP)
+
+#ifdef GOOS_windows
+	// Win64 save RBX, RBP, RDI, RSI, RSP, R12, R13, R14, R15 and XMM6 -- XMM15.
+	MOVQ   DI, 0x48(SP)
+	MOVQ   SI, 0x50(SP)
+	MOVUPS X6, 0x60(SP)
+	MOVUPS X7, 0x70(SP)
+	MOVUPS X8, 0x80(SP)
+	MOVUPS X9, 0x90(SP)
+	MOVUPS X10, 0xa0(SP)
+	MOVUPS X11, 0xb0(SP)
+	MOVUPS X12, 0xc0(SP)
+	MOVUPS X13, 0xd0(SP)
+	MOVUPS X14, 0xe0(SP)
+	MOVUPS X15, 0xf0(SP)
+
+	MOVQ CX, 0x0(SP) // fn
+	MOVQ DX, 0x8(SP) // arg
+
+	// Skip n in R8.
+	MOVQ R9, 0x10(SP) // ctxt
+
+	CALL runtime·cgocallback(SB)
+
+	MOVQ   0x48(SP), DI
+	MOVQ   0x50(SP), SI
+	MOVUPS 0x60(SP), X6
+	MOVUPS 0x70(SP), X7
+	MOVUPS 0x80(SP), X8
+	MOVUPS 0x90(SP), X9
+	MOVUPS 0xa0(SP), X10
+	MOVUPS 0xb0(SP), X11
+	MOVUPS 0xc0(SP), X12
+	MOVUPS 0xd0(SP), X13
+	MOVUPS 0xe0(SP), X14
+	MOVUPS 0xf0(SP), X15
+
+#else
+	MOVQ DI, 0x0(SP) // fn
+	MOVQ SI, 0x8(SP) // arg
+
+	// Skip n in DX.
+	MOVQ CX, 0x10(SP) // ctxt
+
+	CALL runtime·cgocallback(SB)
+
+#endif
+
+	MOVQ 0x18(SP), BX
+	MOVQ 0x28(SP), R12
+	MOVQ 0x30(SP), R13
+	MOVQ 0x38(SP), R14
+	MOVQ 0x40(SP), R15
+
+	RET
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s
new file mode 100644
index 0000000000..3083b36bf0
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s
@@ -0,0 +1,36 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "../abi/abi_arm64.h"
+
+// Called by C code generated by cmd/cgo.
+// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr)
+// Saves C callee-saved registers and calls cgocallback with three arguments.
+// fn is the PC of a func(a unsafe.Pointer) function.
+TEXT crosscall2(SB), NOSPLIT|NOFRAME, $0
+/*
+ * We still need to save all callee save register as before, and then
+ *  push 3 args for fn (R0, R1, R3), skipping R2.
+ * Also note that at procedure entry in gc world, 8(RSP) will be the
+ *  first arg.
+ */
+	SUB  $(8*24), RSP
+	STP  (R0, R1), (8*1)(RSP)
+	MOVD R3, (8*3)(RSP)
+
+	SAVE_R19_TO_R28(8*4)
+	SAVE_F8_TO_F15(8*14)
+	STP (R29, R30), (8*22)(RSP)
+
+	// Initialize Go ABI environment
+	BL runtime·load_g(SB)
+	BL runtime·cgocallback(SB)
+
+	RESTORE_R19_TO_R28(8*4)
+	RESTORE_F8_TO_F15(8*14)
+	LDP (8*22)(RSP), (R29, R30)
+
+	ADD $(8*24), RSP
+	RET
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go
new file mode 100644
index 0000000000..84ee25b3e2
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go
@@ -0,0 +1,64 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || linux
+
+package fakecgo
+
+import _ "unsafe"
+
+// TODO: decide if we need _runtime_cgo_panic_internal
+
+//go:linkname x_cgo_init_trampoline x_cgo_init_trampoline
+//go:linkname _cgo_init _cgo_init
+var x_cgo_init_trampoline byte
+var _cgo_init = &x_cgo_init_trampoline
+
+// Creates a new system thread without updating any Go state.
+//
+// This method is invoked during shared library loading to create a new OS
+// thread to perform the runtime initialization. This method is similar to
+// _cgo_sys_thread_start except that it doesn't update any Go state.
+
+//go:linkname x_cgo_thread_start_trampoline x_cgo_thread_start_trampoline
+//go:linkname _cgo_thread_start _cgo_thread_start
+var x_cgo_thread_start_trampoline byte
+var _cgo_thread_start = &x_cgo_thread_start_trampoline
+
+// Notifies that the runtime has been initialized.
+//
+// We currently block at every CGO entry point (via _cgo_wait_runtime_init_done)
+// to ensure that the runtime has been initialized before the CGO call is
+// executed. This is necessary for shared libraries where we kick off runtime
+// initialization in a separate thread and return without waiting for this
+// thread to complete the init.
+
+//go:linkname x_cgo_notify_runtime_init_done_trampoline x_cgo_notify_runtime_init_done_trampoline
+//go:linkname _cgo_notify_runtime_init_done _cgo_notify_runtime_init_done
+var x_cgo_notify_runtime_init_done_trampoline byte
+var _cgo_notify_runtime_init_done = &x_cgo_notify_runtime_init_done_trampoline
+
+// TODO: decide if we need x_cgo_set_context_function
+// TODO: decide if we need _cgo_yield
+
+var (
+	// In Go 1.20 the race detector was rewritten in pure Go
+	// on darwin. This means that when CGO_ENABLED=0 is set,
+	// fakecgo is built with race detector code. This is not
+	// good since this code is pretending to be C. The go:norace
+	// pragma is not enough, since it only applies to the native
+	// ABIInternal function. The ABI0 wrapper (which is necessary,
+	// since all references to text symbols from assembly will use it)
+	// does not inherit the go:norace pragma, so it will still be
+	// instrumented by the race detector.
+	//
+	// To circumvent this issue, we use closure calls in the
+	// assembly, which forces the compiler to use the ABIInternal
+	// native implementation (which has go:norace) instead.
+	threadentry_call        = threadentry
+	x_cgo_init_call         = x_cgo_init
+	x_cgo_setenv_call       = x_cgo_setenv
+	x_cgo_unsetenv_call     = x_cgo_unsetenv
+	x_cgo_thread_start_call = x_cgo_thread_start
+)
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go
new file mode 100644
index 0000000000..7309b86ce8
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin
+
+// Package fakecgo implements the Cgo runtime (runtime/cgo) entirely in Go.
+// This allows code that calls into C to function properly when CGO_ENABLED=0.
+//
+// # Goals
+//
+// fakecgo attempts to replicate the same naming structure as in the runtime.
+// For example, functions that have the prefix "gcc_*" are named "go_*".
+// This makes it easier to port to other GOOS and GOARCH combinations as well as to keep
+// it in sync with runtime/cgo.
+//
+// # Support
+//
+// Currently, fakecgo only supports macOS on amd64 & arm64. It also cannot
+// be used with -buildmode=c-archive because that requires special initialization
+// that fakecgo does not implement at the moment.
+//
+// # Usage
+//
+// Using fakecgo is easy: just import _ "github.com/ebitengine/purego" and then
+// set the environment variable CGO_ENABLED=0.
+// The recommended usage is to prefer runtime/cgo if possible,
+// but if cross-compiling or fast build times are important, fakecgo is available.
+// Purego will pick whichever Cgo runtime is available, preferring the one that
+// comes with Go (runtime/cgo).
+package fakecgo
+
+//go:generate go run gen.go
+//go:generate gofmt -s -w symbols.go
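To make the usage note in doc.go concrete, here is a minimal sketch (not part of the vendored files) of a program that relies on fakecgo. It uses only the blank import documented above and assumes the build is done with CGO_ENABLED=0; any real calls into C would go through purego's exported helpers, which are outside this file.

	// Build with: CGO_ENABLED=0 go build .
	package main

	// The blank import installs fakecgo's _cgo_init/_cgo_thread_start hooks
	// when Cgo is disabled, so C libraries loaded later get proper TLS.
	import _ "github.com/ebitengine/purego"

	func main() {
		// From here, library loading and function registration would use
		// purego's exported API (assumed, not shown in this vendored file).
	}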
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go
new file mode 100644
index 0000000000..fb3a3f7f0f
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go
@@ -0,0 +1,71 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fakecgo
+
+import "unsafe"
+
+//go:nosplit
+//go:norace
+func _cgo_sys_thread_start(ts *ThreadStart) {
+	var attr pthread_attr_t
+	var ign, oset sigset_t
+	var p pthread_t
+	var size size_t
+	var err int
+
+	sigfillset(&ign)
+	pthread_sigmask(SIG_SETMASK, &ign, &oset)
+
+	size = pthread_get_stacksize_np(pthread_self())
+	pthread_attr_init(&attr)
+	pthread_attr_setstacksize(&attr, size)
+	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+	ts.g.stackhi = uintptr(size)
+
+	err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+	pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+	if err != 0 {
+		print("fakecgo: pthread_create failed: ")
+		println(err)
+		abort()
+	}
+}
+
+// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+//go:norace
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+	ts := *(*ThreadStart)(v)
+	free(v)
+
+	setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+	// faking funcs in Go is a bit involved - but the following works :)
+	fn := uintptr(unsafe.Pointer(&ts.fn))
+	(*(*func())(unsafe.Pointer(&fn)))()
+
+	return nil
+}
+
+// here we will store a pointer to the provided setg func
+var setg_func uintptr
+
+//go:nosplit
+//go:norace
+func x_cgo_init(g *G, setg uintptr) {
+	var size size_t
+
+	setg_func = setg
+
+	size = pthread_get_stacksize_np(pthread_self())
+	g.stacklo = uintptr(unsafe.Add(unsafe.Pointer(&size), -size+4096))
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go
new file mode 100644
index 0000000000..b000b3fbf7
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go
@@ -0,0 +1,86 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fakecgo
+
+import "unsafe"
+
+//go:nosplit
+//go:norace
+func _cgo_sys_thread_start(ts *ThreadStart) {
+	var attr pthread_attr_t
+	var ign, oset sigset_t
+	var p pthread_t
+	var size size_t
+	var err int
+
+	sigfillset(&ign)
+	pthread_sigmask(SIG_SETMASK, &ign, &oset)
+
+	size = pthread_get_stacksize_np(pthread_self())
+	pthread_attr_init(&attr)
+	pthread_attr_setstacksize(&attr, size)
+	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+	ts.g.stackhi = uintptr(size)
+
+	err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+	pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+	if err != 0 {
+		print("fakecgo: pthread_create failed: ")
+		println(err)
+		abort()
+	}
+}
+
+// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+//go:norace
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+	ts := *(*ThreadStart)(v)
+	free(v)
+
+	// TODO: support ios
+	//#if TARGET_OS_IPHONE
+	//	darwin_arm_init_thread_exception_port();
+	//#endif
+	setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+	// faking funcs in Go is a bit involved - but the following works :)
+	fn := uintptr(unsafe.Pointer(&ts.fn))
+	(*(*func())(unsafe.Pointer(&fn)))()
+
+	return nil
+}
+
+// here we will store a pointer to the provided setg func
+var setg_func uintptr
+
+// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c)
+// This gets called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us.
+// Additionally, if we set _cgo_init to non-null, Go won't do its own TLS setup.
+// This function can't be go:systemstack since Go is not in a state where the system check would work.
+//
+//go:nosplit
+//go:norace
+func x_cgo_init(g *G, setg uintptr) {
+	var size size_t
+
+	setg_func = setg
+	size = pthread_get_stacksize_np(pthread_self())
+	g.stacklo = uintptr(unsafe.Add(unsafe.Pointer(&size), -size+4096))
+
+	//TODO: support ios
+	//#if TARGET_OS_IPHONE
+	//	darwin_arm_init_mach_exception_handler();
+	//	darwin_arm_init_thread_exception_port();
+	//	init_working_dir();
+	//#endif
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go
new file mode 100644
index 0000000000..16b0aac56f
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || linux
+
+package fakecgo
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+//go:nosplit
+func x_cgo_notify_runtime_init_done() {
+	// we don't support being called as a library
+}
+
+// _cgo_try_pthread_create retries pthread_create if it fails with
+// EAGAIN.
+//
+//go:nosplit
+//go:norace
+func _cgo_try_pthread_create(thread *pthread_t, attr *pthread_attr_t, pfn unsafe.Pointer, arg *ThreadStart) int {
+	var ts syscall.Timespec
+	// tries needs to be the same type as syscall.Timespec.Nsec
+	// but the fields are int32 on 32bit and int64 on 64bit.
+	// tries is assigned to syscall.Timespec.Nsec in order to match its type.
+	var tries = ts.Nsec
+	var err int
+
+	for tries = 0; tries < 20; tries++ {
+		err = int(pthread_create(thread, attr, pfn, unsafe.Pointer(arg)))
+		if err == 0 {
+			pthread_detach(*thread)
+			return 0
+		}
+		if err != int(syscall.EAGAIN) {
+			return err
+		}
+		ts.Sec = 0
+		ts.Nsec = (tries + 1) * 1000 * 1000 // Milliseconds.
+		nanosleep(&ts, nil)
+	}
+	return int(syscall.EAGAIN)
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go
new file mode 100644
index 0000000000..9aa57ef663
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go
@@ -0,0 +1,93 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fakecgo
+
+import "unsafe"
+
+//go:nosplit
+func _cgo_sys_thread_start(ts *ThreadStart) {
+	var attr pthread_attr_t
+	var ign, oset sigset_t
+	var p pthread_t
+	var size size_t
+	var err int
+
+	//fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug
+	sigfillset(&ign)
+	pthread_sigmask(SIG_SETMASK, &ign, &oset)
+
+	pthread_attr_init(&attr)
+	pthread_attr_getstacksize(&attr, &size)
+	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+	ts.g.stackhi = uintptr(size)
+
+	err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+	pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+	if err != 0 {
+		print("fakecgo: pthread_create failed: ")
+		println(err)
+		abort()
+	}
+}
+
+// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+	ts := *(*ThreadStart)(v)
+	free(v)
+
+	setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+	// faking funcs in Go is a bit involved - but the following works :)
+	fn := uintptr(unsafe.Pointer(&ts.fn))
+	(*(*func())(unsafe.Pointer(&fn)))()
+
+	return nil
+}
+
+// here we will store a pointer to the provided setg func
+var setg_func uintptr
+
+//go:nosplit
+func x_cgo_init(g *G, setg uintptr) {
+	var size size_t
+	var attr *pthread_attr_t
+
+	/* The memory sanitizer distributed with versions of clang
+	   before 3.8 has a bug: if you call mmap before malloc, mmap
+	   may return an address that is later overwritten by the msan
+	   library.  Avoid this problem by forcing a call to malloc
+	   here, before we ever call malloc.
+
+	   This is only required for the memory sanitizer, so it's
+	   unfortunate that we always run it.  It should be possible
+	   to remove this when we no longer care about versions of
+	   clang before 3.8.  The test for this is
+	   misc/cgo/testsanitizers.
+
+	   GCC works hard to eliminate a seemingly unnecessary call to
+	   malloc, so we actually use the memory we allocate.  */
+
+	setg_func = setg
+	attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr)))
+	if attr == nil {
+		println("fakecgo: malloc failed")
+		abort()
+	}
+	pthread_attr_init(attr)
+	pthread_attr_getstacksize(attr, &size)
+	// runtime/cgo uses __builtin_frame_address(0) instead of `uintptr(unsafe.Pointer(&size))`
+	// but this should be OK since we are taking the address of the first variable in this function.
+	g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096
+	pthread_attr_destroy(attr)
+	free(unsafe.Pointer(attr))
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go
new file mode 100644
index 0000000000..1db518e3a6
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go
@@ -0,0 +1,96 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fakecgo
+
+import "unsafe"
+
+//go:nosplit
+func _cgo_sys_thread_start(ts *ThreadStart) {
+	var attr pthread_attr_t
+	var ign, oset sigset_t
+	var p pthread_t
+	var size size_t
+	var err int
+
+	//fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug
+	sigfillset(&ign)
+	pthread_sigmask(SIG_SETMASK, &ign, &oset)
+
+	pthread_attr_init(&attr)
+	pthread_attr_getstacksize(&attr, &size)
+	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+	ts.g.stackhi = uintptr(size)
+
+	err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+	pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+	if err != 0 {
+		print("fakecgo: pthread_create failed: ")
+		println(err)
+		abort()
+	}
+}
+
+// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+	ts := *(*ThreadStart)(v)
+	free(v)
+
+	setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+	// faking funcs in Go is a bit involved - but the following works :)
+	fn := uintptr(unsafe.Pointer(&ts.fn))
+	(*(*func())(unsafe.Pointer(&fn)))()
+
+	return nil
+}
+
+// here we will store a pointer to the provided setg func
+var setg_func uintptr
+
+// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c)
+// This gets called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us.
+// Additionally, if we set _cgo_init to non-null, Go won't do its own TLS setup.
+// This function can't be go:systemstack since Go is not in a state where the system check would work.
+//
+//go:nosplit
+func x_cgo_init(g *G, setg uintptr) {
+	var size size_t
+	var attr *pthread_attr_t
+
+	/* The memory sanitizer distributed with versions of clang
+	   before 3.8 has a bug: if you call mmap before malloc, mmap
+	   may return an address that is later overwritten by the msan
+	   library.  Avoid this problem by forcing a call to malloc
+	   here, before we ever call malloc.
+
+	   This is only required for the memory sanitizer, so it's
+	   unfortunate that we always run it.  It should be possible
+	   to remove this when we no longer care about versions of
+	   clang before 3.8.  The test for this is
+	   misc/cgo/testsanitizers.
+
+	   GCC works hard to eliminate a seemingly unnecessary call to
+	   malloc, so we actually use the memory we allocate.  */
+
+	setg_func = setg
+	attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr)))
+	if attr == nil {
+		println("fakecgo: malloc failed")
+		abort()
+	}
+	pthread_attr_init(attr)
+	pthread_attr_getstacksize(attr, &size)
+	g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096
+	pthread_attr_destroy(attr)
+	free(unsafe.Pointer(attr))
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go
new file mode 100644
index 0000000000..6ab42dbcfe
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || linux
+
+package fakecgo
+
+//go:nosplit
+//go:norace
+func x_cgo_setenv(arg *[2]*byte) {
+	setenv(arg[0], arg[1], 1)
+}
+
+//go:nosplit
+//go:norace
+func x_cgo_unsetenv(arg *[1]*byte) {
+	unsetenv(arg[0])
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go
new file mode 100644
index 0000000000..daf12035f5
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || linux
+
+package fakecgo
+
+import "unsafe"
+
+// _cgo_thread_start is split into three parts in cgo since only one part is system dependent (keep it here for easier handling)
+
+// _cgo_thread_start(ThreadStart *arg) (runtime/cgo/gcc_util.c)
+// This gets called instead of the Go code for creating new threads
+// -> pthread_* stuff is used, so threads are set up correctly for C
+// If this is missing, TLS is only set up correctly on thread 1!
+// This function should be go:systemstack instead of go:nosplit (but that requires runtime)
+//
+//go:nosplit
+//go:norace
+func x_cgo_thread_start(arg *ThreadStart) {
+	var ts *ThreadStart
+	// Make our own copy that can persist after we return.
+	//	_cgo_tsan_acquire();
+	ts = (*ThreadStart)(malloc(unsafe.Sizeof(*ts)))
+	//	_cgo_tsan_release();
+	if ts == nil {
+		println("fakecgo: out of memory in thread_start")
+		abort()
+	}
+	// *ts = *arg would cause a write barrier, so use memmove instead
+	memmove(unsafe.Pointer(ts), unsafe.Pointer(arg), unsafe.Sizeof(*ts))
+	_cgo_sys_thread_start(ts) // OS-dependent half
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go
new file mode 100644
index 0000000000..d8493e1991
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go
@@ -0,0 +1,19 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || linux
+
+// The runtime package contains an uninitialized definition
+// for runtime·iscgo. Override it to tell the runtime we're here.
+// There are various function pointers that should be set too,
+// but those depend on dynamic linker magic to get initialized
+// correctly, and sometimes they break. This variable is a
+// backup: it depends only on old C style static linking rules.
+
+package fakecgo
+
+import _ "unsafe" // for go:linkname
+
+//go:linkname _iscgo runtime.iscgo
+var _iscgo bool = true
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go
new file mode 100644
index 0000000000..0414b27580
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || linux
+
+package fakecgo
+
+type size_t uintptr
+type sigset_t [128]byte
+type pthread_attr_t [64]byte
+type pthread_t int
+
+// for pthread_sigmask:
+
+type sighow int32
+
+const (
+	SIG_BLOCK   sighow = 0
+	SIG_UNBLOCK sighow = 1
+	SIG_SETMASK sighow = 2
+)
+
+type G struct {
+	stacklo uintptr
+	stackhi uintptr
+}
+
+type ThreadStart struct {
+	g   *G
+	tls *uintptr
+	fn  uintptr
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go
new file mode 100644
index 0000000000..8e01ee524e
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go
@@ -0,0 +1,19 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || linux
+
+package fakecgo
+
+import _ "unsafe" // for go:linkname
+
+//go:linkname x_cgo_setenv_trampoline x_cgo_setenv_trampoline
+//go:linkname _cgo_setenv runtime._cgo_setenv
+var x_cgo_setenv_trampoline byte
+var _cgo_setenv = &x_cgo_setenv_trampoline
+
+//go:linkname x_cgo_unsetenv_trampoline x_cgo_unsetenv_trampoline
+//go:linkname _cgo_unsetenv runtime._cgo_unsetenv
+var x_cgo_unsetenv_trampoline byte
+var _cgo_unsetenv = &x_cgo_unsetenv_trampoline
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go
new file mode 100644
index 0000000000..9dea763d0f
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go
@@ -0,0 +1,152 @@
+// Code generated by 'go generate' with gen.go. DO NOT EDIT.
+
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || linux
+
+package fakecgo
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// setg_trampoline calls setg with the G provided
+func setg_trampoline(setg uintptr, G uintptr)
+
+//go:linkname memmove runtime.memmove
+func memmove(to, from unsafe.Pointer, n uintptr)
+
+// call5 takes fn the C function and 5 arguments and calls the function with those arguments
+func call5(fn, a1, a2, a3, a4, a5 uintptr) uintptr
+
+func malloc(size uintptr) unsafe.Pointer {
+	ret := call5(mallocABI0, uintptr(size), 0, 0, 0, 0)
+	// this indirection is to avoid go vet complaining about possible misuse of unsafe.Pointer
+	return *(*unsafe.Pointer)(unsafe.Pointer(&ret))
+}
+
+func free(ptr unsafe.Pointer) {
+	call5(freeABI0, uintptr(ptr), 0, 0, 0, 0)
+}
+
+func setenv(name *byte, value *byte, overwrite int32) int32 {
+	return int32(call5(setenvABI0, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), uintptr(overwrite), 0, 0))
+}
+
+func unsetenv(name *byte) int32 {
+	return int32(call5(unsetenvABI0, uintptr(unsafe.Pointer(name)), 0, 0, 0, 0))
+}
+
+func pthread_attr_init(attr *pthread_attr_t) int32 {
+	return int32(call5(pthread_attr_initABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0))
+}
+
+func pthread_create(thread *pthread_t, attr *pthread_attr_t, start unsafe.Pointer, arg unsafe.Pointer) int32 {
+	return int32(call5(pthread_createABI0, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(start), uintptr(arg), 0))
+}
+
+func pthread_detach(thread pthread_t) int32 {
+	return int32(call5(pthread_detachABI0, uintptr(thread), 0, 0, 0, 0))
+}
+
+func pthread_sigmask(how sighow, ign *sigset_t, oset *sigset_t) int32 {
+	return int32(call5(pthread_sigmaskABI0, uintptr(how), uintptr(unsafe.Pointer(ign)), uintptr(unsafe.Pointer(oset)), 0, 0))
+}
+
+func pthread_self() pthread_t {
+	return pthread_t(call5(pthread_selfABI0, 0, 0, 0, 0, 0))
+}
+
+func pthread_get_stacksize_np(thread pthread_t) size_t {
+	return size_t(call5(pthread_get_stacksize_npABI0, uintptr(thread), 0, 0, 0, 0))
+}
+
+func pthread_attr_getstacksize(attr *pthread_attr_t, stacksize *size_t) int32 {
+	return int32(call5(pthread_attr_getstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(stacksize)), 0, 0, 0))
+}
+
+func pthread_attr_setstacksize(attr *pthread_attr_t, size size_t) int32 {
+	return int32(call5(pthread_attr_setstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(size), 0, 0, 0))
+}
+
+func pthread_attr_destroy(attr *pthread_attr_t) int32 {
+	return int32(call5(pthread_attr_destroyABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0))
+}
+
+func sigfillset(set *sigset_t) int32 {
+	return int32(call5(sigfillsetABI0, uintptr(unsafe.Pointer(set)), 0, 0, 0, 0))
+}
+
+func nanosleep(ts *syscall.Timespec, rem *syscall.Timespec) int32 {
+	return int32(call5(nanosleepABI0, uintptr(unsafe.Pointer(ts)), uintptr(unsafe.Pointer(rem)), 0, 0, 0))
+}
+
+func abort() {
+	call5(abortABI0, 0, 0, 0, 0, 0)
+}
+
+//go:linkname _malloc _malloc
+var _malloc uintptr
+var mallocABI0 = uintptr(unsafe.Pointer(&_malloc))
+
+//go:linkname _free _free
+var _free uintptr
+var freeABI0 = uintptr(unsafe.Pointer(&_free))
+
+//go:linkname _setenv _setenv
+var _setenv uintptr
+var setenvABI0 = uintptr(unsafe.Pointer(&_setenv))
+
+//go:linkname _unsetenv _unsetenv
+var _unsetenv uintptr
+var unsetenvABI0 = uintptr(unsafe.Pointer(&_unsetenv))
+
+//go:linkname _pthread_attr_init _pthread_attr_init
+var _pthread_attr_init uintptr
+var pthread_attr_initABI0 = uintptr(unsafe.Pointer(&_pthread_attr_init))
+
+//go:linkname _pthread_create _pthread_create
+var _pthread_create uintptr
+var pthread_createABI0 = uintptr(unsafe.Pointer(&_pthread_create))
+
+//go:linkname _pthread_detach _pthread_detach
+var _pthread_detach uintptr
+var pthread_detachABI0 = uintptr(unsafe.Pointer(&_pthread_detach))
+
+//go:linkname _pthread_sigmask _pthread_sigmask
+var _pthread_sigmask uintptr
+var pthread_sigmaskABI0 = uintptr(unsafe.Pointer(&_pthread_sigmask))
+
+//go:linkname _pthread_self _pthread_self
+var _pthread_self uintptr
+var pthread_selfABI0 = uintptr(unsafe.Pointer(&_pthread_self))
+
+//go:linkname _pthread_get_stacksize_np _pthread_get_stacksize_np
+var _pthread_get_stacksize_np uintptr
+var pthread_get_stacksize_npABI0 = uintptr(unsafe.Pointer(&_pthread_get_stacksize_np))
+
+//go:linkname _pthread_attr_getstacksize _pthread_attr_getstacksize
+var _pthread_attr_getstacksize uintptr
+var pthread_attr_getstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_getstacksize))
+
+//go:linkname _pthread_attr_setstacksize _pthread_attr_setstacksize
+var _pthread_attr_setstacksize uintptr
+var pthread_attr_setstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_setstacksize))
+
+//go:linkname _pthread_attr_destroy _pthread_attr_destroy
+var _pthread_attr_destroy uintptr
+var pthread_attr_destroyABI0 = uintptr(unsafe.Pointer(&_pthread_attr_destroy))
+
+//go:linkname _sigfillset _sigfillset
+var _sigfillset uintptr
+var sigfillsetABI0 = uintptr(unsafe.Pointer(&_sigfillset))
+
+//go:linkname _nanosleep _nanosleep
+var _nanosleep uintptr
+var nanosleepABI0 = uintptr(unsafe.Pointer(&_nanosleep))
+
+//go:linkname _abort _abort
+var _abort uintptr
+var abortABI0 = uintptr(unsafe.Pointer(&_abort))
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go
new file mode 100644
index 0000000000..d580873cbf
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package fakecgo
+
+//go:cgo_import_dynamic purego_malloc malloc "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_free free "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_setenv setenv "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_unsetenv unsetenv "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_self pthread_self "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_create pthread_create "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_detach pthread_detach "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_abort abort "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_sigfillset sigfillset "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_nanosleep nanosleep "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go
new file mode 100644
index 0000000000..8dbe8cf24d
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package fakecgo
+
+// pthread_attr_init will get us the wrong version on glibc - but this doesn't matter, since the memory we
+// provide is zeroed - which will lead to the correct result again
+
+//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_create pthread_create "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_detach pthread_detach "libpthread.so.0"
+//go:cgo_import_dynamic purego_setenv setenv "libc.so.6"
+//go:cgo_import_dynamic purego_unsetenv unsetenv "libc.so.6"
+//go:cgo_import_dynamic purego_malloc malloc "libc.so.6"
+//go:cgo_import_dynamic purego_free free "libc.so.6"
+//go:cgo_import_dynamic purego_nanosleep nanosleep "libc.so.6"
+//go:cgo_import_dynamic purego_sigfillset sigfillset "libc.so.6"
+//go:cgo_import_dynamic purego_abort abort "libc.so.6"
+
+// on amd64 we don't need the following lines - on 386 we do...
+// anyway - with those lines the output is better (but doesn't matter) - without them on amd64 we get multiple DT_NEEDED entries for "libc.so.6" etc.
+
+//go:cgo_import_dynamic _ _ "libpthread.so.0"
+//go:cgo_import_dynamic _ _ "libc.so.6"
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s
new file mode 100644
index 0000000000..cfd8c441f9
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || linux
+
+/*
+trampoline for emulating required C functions for cgo in go (see cgo.go)
+(we convert cdecl calling convention to go and vice-versa)
+
+Since we're called from go and call into C we can cheat a bit with the calling conventions:
+ - in go all the registers are caller saved
+ - in C we have a couple of callee saved registers
+
+=> we can use BX, R12, R13, R14, R15 instead of the stack
+
+C Calling convention cdecl used here (we only need integer args):
+1. arg: DI
+2. arg: SI
+3. arg: DX
+4. arg: CX
+5. arg: R8
+6. arg: R9
+We don't need floats with these functions -> AX=0
+return value will be in AX
+*/
+#include "textflag.h"
+#include "go_asm.h"
+
+// these trampolines map the gcc ABI to the Go ABI and then call into the Go equivalent functions.
+
+TEXT x_cgo_init_trampoline(SB), NOSPLIT, $16
+	MOVQ DI, AX
+	MOVQ SI, BX
+	MOVQ ·x_cgo_init_call(SB), DX
+	MOVQ (DX), CX
+	CALL CX
+	RET
+
+TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $8
+	MOVQ DI, AX
+	MOVQ ·x_cgo_thread_start_call(SB), DX
+	MOVQ (DX), CX
+	CALL CX
+	RET
+
+TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $8
+	MOVQ DI, AX
+	MOVQ ·x_cgo_setenv_call(SB), DX
+	MOVQ (DX), CX
+	CALL CX
+	RET
+
+TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $8
+	MOVQ DI, AX
+	MOVQ ·x_cgo_unsetenv_call(SB), DX
+	MOVQ (DX), CX
+	CALL CX
+	RET
+
+TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0
+	CALL ·x_cgo_notify_runtime_init_done(SB)
+	RET
+
+// func setg_trampoline(setg uintptr, g uintptr)
+TEXT ·setg_trampoline(SB), NOSPLIT, $0-16
+	MOVQ G+8(FP), DI
+	MOVQ setg+0(FP), BX
+	XORL AX, AX
+	CALL BX
+	RET
+
+TEXT threadentry_trampoline(SB), NOSPLIT, $16
+	MOVQ DI, AX
+	MOVQ ·threadentry_call(SB), DX
+	MOVQ (DX), CX
+	CALL CX
+	RET
+
+TEXT ·call5(SB), NOSPLIT, $0-56
+	MOVQ fn+0(FP), BX
+	MOVQ a1+8(FP), DI
+	MOVQ a2+16(FP), SI
+	MOVQ a3+24(FP), DX
+	MOVQ a4+32(FP), CX
+	MOVQ a5+40(FP), R8
+
+	XORL AX, AX // no floats
+
+	PUSHQ BP       // save BP
+	MOVQ  SP, BP   // save SP inside BP bc BP is callee-saved
+	SUBQ  $16, SP  // allocate space for alignment
+	ANDQ  $-16, SP // align on 16 bytes for SSE
+
+	CALL BX
+
+	MOVQ BP, SP // get SP back
+	POPQ BP     // restore BP
+
+	MOVQ AX, ret+48(FP)
+	RET
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s
new file mode 100644
index 0000000000..54f31983a1
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || linux
+
+#include "textflag.h"
+#include "go_asm.h"
+
+// these trampolines map the gcc ABI to the Go ABI and then call into the Go equivalent functions.
+
+TEXT x_cgo_init_trampoline(SB), NOSPLIT, $0-0
+	MOVD R0, 8(RSP)
+	MOVD R1, 16(RSP)
+	MOVD ·x_cgo_init_call(SB), R26
+	MOVD (R26), R2
+	CALL (R2)
+	RET
+
+TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $0-0
+	MOVD R0, 8(RSP)
+	MOVD ·x_cgo_thread_start_call(SB), R26
+	MOVD (R26), R2
+	CALL (R2)
+	RET
+
+TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $0-0
+	MOVD R0, 8(RSP)
+	MOVD ·x_cgo_setenv_call(SB), R26
+	MOVD (R26), R2
+	CALL (R2)
+	RET
+
+TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $0-0
+	MOVD R0, 8(RSP)
+	MOVD ·x_cgo_unsetenv_call(SB), R26
+	MOVD (R26), R2
+	CALL (R2)
+	RET
+
+TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0-0
+	CALL ·x_cgo_notify_runtime_init_done(SB)
+	RET
+
+// func setg_trampoline(setg uintptr, g uintptr)
+TEXT ·setg_trampoline(SB), NOSPLIT, $0-16
+	MOVD G+8(FP), R0
+	MOVD setg+0(FP), R1
+	CALL R1
+	RET
+
+TEXT threadentry_trampoline(SB), NOSPLIT, $0-0
+	MOVD R0, 8(RSP)
+	MOVD ·threadentry_call(SB), R26
+	MOVD (R26), R2
+	CALL (R2)
+	MOVD $0, R0                     // TODO: get the return value from threadentry
+	RET
+
+TEXT ·call5(SB), NOSPLIT, $0-0
+	MOVD fn+0(FP), R6
+	MOVD a1+8(FP), R0
+	MOVD a2+16(FP), R1
+	MOVD a3+24(FP), R2
+	MOVD a4+32(FP), R3
+	MOVD a5+40(FP), R4
+	CALL R6
+	MOVD R0, ret+48(FP)
+	RET
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s
new file mode 100644
index 0000000000..ada31ea83e
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s
@@ -0,0 +1,74 @@
+// Code generated by 'go generate' with gen.go. DO NOT EDIT.
+
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || linux
+
+#include "textflag.h"
+
+// these stubs are here because it is not possible to go:linkname the C functions directly on darwin arm64
+
+TEXT _malloc(SB), NOSPLIT, $0-0
+	JMP purego_malloc(SB)
+	RET
+
+TEXT _free(SB), NOSPLIT, $0-0
+	JMP purego_free(SB)
+	RET
+
+TEXT _setenv(SB), NOSPLIT, $0-0
+	JMP purego_setenv(SB)
+	RET
+
+TEXT _unsetenv(SB), NOSPLIT, $0-0
+	JMP purego_unsetenv(SB)
+	RET
+
+TEXT _pthread_attr_init(SB), NOSPLIT, $0-0
+	JMP purego_pthread_attr_init(SB)
+	RET
+
+TEXT _pthread_create(SB), NOSPLIT, $0-0
+	JMP purego_pthread_create(SB)
+	RET
+
+TEXT _pthread_detach(SB), NOSPLIT, $0-0
+	JMP purego_pthread_detach(SB)
+	RET
+
+TEXT _pthread_sigmask(SB), NOSPLIT, $0-0
+	JMP purego_pthread_sigmask(SB)
+	RET
+
+TEXT _pthread_self(SB), NOSPLIT, $0-0
+	JMP purego_pthread_self(SB)
+	RET
+
+TEXT _pthread_get_stacksize_np(SB), NOSPLIT, $0-0
+	JMP purego_pthread_get_stacksize_np(SB)
+	RET
+
+TEXT _pthread_attr_getstacksize(SB), NOSPLIT, $0-0
+	JMP purego_pthread_attr_getstacksize(SB)
+	RET
+
+TEXT _pthread_attr_setstacksize(SB), NOSPLIT, $0-0
+	JMP purego_pthread_attr_setstacksize(SB)
+	RET
+
+TEXT _pthread_attr_destroy(SB), NOSPLIT, $0-0
+	JMP purego_pthread_attr_destroy(SB)
+	RET
+
+TEXT _sigfillset(SB), NOSPLIT, $0-0
+	JMP purego_sigfillset(SB)
+	RET
+
+TEXT _nanosleep(SB), NOSPLIT, $0-0
+	JMP purego_nanosleep(SB)
+	RET
+
+TEXT _abort(SB), NOSPLIT, $0-0
+	JMP purego_abort(SB)
+	RET
diff --git a/vendor/github.com/ebitengine/purego/internal/strings/strings.go b/vendor/github.com/ebitengine/purego/internal/strings/strings.go
new file mode 100644
index 0000000000..e34b5a1e05
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/strings/strings.go
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package strings
+
+import (
+	"unsafe"
+)
+
+// hasSuffix tests whether the string s ends with suffix.
+func hasSuffix(s, suffix string) bool {
+	return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
+}
+
+// CString converts a go string to *byte that can be passed to C code.
+func CString(name string) *byte {
+	if hasSuffix(name, "\x00") {
+		return &(*(*[]byte)(unsafe.Pointer(&name)))[0]
+	}
+	var b = make([]byte, len(name)+1)
+	copy(b, name)
+	return &b[0]
+}
+
+// GoString copies a char* to a Go string.
+func GoString(c uintptr) string {
+	// We take the address and then dereference it to keep go vet from flagging a possible misuse of unsafe.Pointer
+	ptr := *(*unsafe.Pointer)(unsafe.Pointer(&c))
+	if ptr == nil {
+		return ""
+	}
+	var length int
+	for {
+		if *(*byte)(unsafe.Add(ptr, uintptr(length))) == '\x00' {
+			break
+		}
+		length++
+	}
+	return string(unsafe.Slice((*byte)(ptr), length))
+}
diff --git a/vendor/github.com/ebitengine/purego/is_ios.go b/vendor/github.com/ebitengine/purego/is_ios.go
new file mode 100644
index 0000000000..ed31da9782
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/is_ios.go
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo
+
+package purego
+
+// if you are getting this error it means that you have
+// CGO_ENABLED=0 while trying to build for ios.
+// purego does not support this mode yet.
+// the fix is to set CGO_ENABLED=1 which will require
+// a C compiler.
+var _ = _PUREGO_REQUIRES_CGO_ON_IOS
diff --git a/vendor/github.com/ebitengine/purego/nocgo.go b/vendor/github.com/ebitengine/purego/nocgo.go
new file mode 100644
index 0000000000..c7ee865ab8
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/nocgo.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || linux)
+
+package purego
+
+// if CGO_ENABLED=0, import fakecgo to set up the Cgo runtime correctly.
+// This is required since some frameworks need TLS set up the C way, which Go doesn't do.
+// We currently don't support ios in fakecgo mode, so we force Cgo or fail.
+//
+// The way that the Cgo runtime (runtime/cgo) works is by setting some variables found
+// in runtime with non-null GCC compiled functions. The variables that are replaced are
+// var (
+//		iscgo             bool 							// in runtime/cgo.go
+//		_cgo_init         unsafe.Pointer 				// in runtime/cgo.go
+//		_cgo_thread_start unsafe.Pointer				// in runtime/cgo.go
+//		_cgo_notify_runtime_init_done unsafe.Pointer 	// in runtime/cgo.go
+//		_cgo_setenv unsafe.Pointer  					// in runtime/env_posix.go
+//		_cgo_unsetenv unsafe.Pointer					// in runtime/env_posix.go
+// )
+// importing fakecgo will set these (using //go:linkname) with functions written
+// entirely in Go (except for some assembly trampolines to change GCC ABI to Go ABI).
+// Doing so makes it possible to build applications that call into C without CGO_ENABLED=1.
+import _ "github.com/ebitengine/purego/internal/fakecgo"
diff --git a/vendor/github.com/ebitengine/purego/sys_amd64.s b/vendor/github.com/ebitengine/purego/sys_amd64.s
new file mode 100644
index 0000000000..87e4f0f332
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/sys_amd64.s
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || (!cgo && linux)
+
+#include "textflag.h"
+#include "internal/abi/abi_amd64.h"
+#include "go_asm.h"
+#include "funcdata.h"
+
+// syscall9X calls a function in libc on behalf of the syscall package.
+// syscall9X takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	a4    uintptr
+//	a5    uintptr
+//	a6    uintptr
+//	a7    uintptr
+//	a8    uintptr
+//	a9    uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscall9X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+GLOBL ·syscall9XABI0(SB), NOPTR|RODATA, $8
+DATA ·syscall9XABI0(SB)/8, $syscall9X(SB)
+TEXT syscall9X(SB), NOSPLIT, $0
+	PUSHQ BP
+	MOVQ  SP, BP
+	SUBQ  $32, SP
+	MOVQ  DI, 24(BP) // save the pointer
+
+	MOVQ syscall9Args_f1(DI), X0 // f1
+	MOVQ syscall9Args_f2(DI), X1 // f2
+	MOVQ syscall9Args_f3(DI), X2 // f3
+	MOVQ syscall9Args_f4(DI), X3 // f4
+	MOVQ syscall9Args_f5(DI), X4 // f5
+	MOVQ syscall9Args_f6(DI), X5 // f6
+	MOVQ syscall9Args_f7(DI), X6 // f7
+	MOVQ syscall9Args_f8(DI), X7 // f8
+
+	MOVQ syscall9Args_fn(DI), R10 // fn
+	MOVQ syscall9Args_a2(DI), SI  // a2
+	MOVQ syscall9Args_a3(DI), DX  // a3
+	MOVQ syscall9Args_a4(DI), CX  // a4
+	MOVQ syscall9Args_a5(DI), R8  // a5
+	MOVQ syscall9Args_a6(DI), R9  // a6
+	MOVQ syscall9Args_a7(DI), R11 // a7
+	MOVQ syscall9Args_a8(DI), R12 // a8
+	MOVQ syscall9Args_a9(DI), R13 // a9
+	MOVQ syscall9Args_a1(DI), DI  // a1
+
+	// push the remaining parameters onto the stack
+	MOVQ R11, 0(SP)  // push a7
+	MOVQ R12, 8(SP)  // push a8
+	MOVQ R13, 16(SP) // push a9
+	XORL AX, AX      // vararg: say "no float args"
+
+	CALL R10
+
+	MOVQ 24(BP), DI              // get the pointer back
+	MOVQ AX, syscall9Args_r1(DI) // r1
+	MOVQ X0, syscall9Args_r2(DI) // r2
+
+	XORL AX, AX  // no error (it's ignored anyway)
+	ADDQ $32, SP
+	MOVQ BP, SP
+	POPQ BP
+	RET
+
+TEXT callbackasm1(SB), NOSPLIT, $0
+	// remove return address from stack, we are not returning to callbackasm, but to its caller.
+	MOVQ 0(SP), AX
+	ADDQ $8, SP
+
+	MOVQ 0(SP), R10 // get the return SP so that we can align register args with stack args
+
+	// make space for first six int and 8 float arguments below the frame
+	ADJSP $14*8, SP
+	MOVSD X0, (1*8)(SP)
+	MOVSD X1, (2*8)(SP)
+	MOVSD X2, (3*8)(SP)
+	MOVSD X3, (4*8)(SP)
+	MOVSD X4, (5*8)(SP)
+	MOVSD X5, (6*8)(SP)
+	MOVSD X6, (7*8)(SP)
+	MOVSD X7, (8*8)(SP)
+	MOVQ  DI, (9*8)(SP)
+	MOVQ  SI, (10*8)(SP)
+	MOVQ  DX, (11*8)(SP)
+	MOVQ  CX, (12*8)(SP)
+	MOVQ  R8, (13*8)(SP)
+	MOVQ  R9, (14*8)(SP)
+	LEAQ  8(SP), R8      // R8 = address of args vector
+
+	MOVQ R10, 0(SP) // push the stack pointer below registers
+
+	// determine index into runtime·cbs table
+	MOVQ $callbackasm(SB), DX
+	SUBQ DX, AX
+	MOVQ $0, DX
+	MOVQ $5, CX               // divide by 5 because each call instruction in ·callbacks is 5 bytes long
+	DIVL CX
+	SUBQ $1, AX               // subtract 1 because return PC is to the next slot
+
+	// Switch from the host ABI to the Go ABI.
+	PUSH_REGS_HOST_TO_ABI0()
+
+	// Create a struct callbackArgs on our stack to be passed as
+	// the "frame" to cgocallback and on to callbackWrap.
+	// $24 to make enough room for the arguments to runtime.cgocallback
+	SUBQ $(24+callbackArgs__size), SP
+	MOVQ AX, (24+callbackArgs_index)(SP)  // callback index
+	MOVQ R8, (24+callbackArgs_args)(SP)   // address of args vector
+	MOVQ $0, (24+callbackArgs_result)(SP) // result
+	LEAQ 24(SP), AX                       // take the address of callbackArgs
+
+	// Call cgocallback, which will call callbackWrap(frame).
+	MOVQ ·callbackWrap_call(SB), DI // Get the ABIInternal function pointer
+	MOVQ (DI), DI                   // without <ABIInternal> by using a closure.
+	MOVQ AX, SI                     // frame (address of callbackArgs)
+	MOVQ $0, CX                     // context
+
+	CALL crosscall2(SB) // runtime.cgocallback(fn, frame, ctxt uintptr)
+
+	// Get callback result.
+	MOVQ (24+callbackArgs_result)(SP), AX
+	ADDQ $(24+callbackArgs__size), SP     // remove callbackArgs struct
+
+	POP_REGS_HOST_TO_ABI0()
+
+	MOVQ 0(SP), R10 // get the SP back
+
+	ADJSP $-14*8, SP // remove arguments
+
+	MOVQ R10, 0(SP)
+
+	RET
diff --git a/vendor/github.com/ebitengine/purego/sys_arm64.s b/vendor/github.com/ebitengine/purego/sys_arm64.s
new file mode 100644
index 0000000000..942fd56135
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/sys_arm64.s
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || (!cgo && linux) || windows
+
+#include "textflag.h"
+#include "go_asm.h"
+#include "funcdata.h"
+#include "internal/abi/abi_arm64.h"
+
+// syscall9X calls a function in libc on behalf of the syscall package.
+// syscall9X takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	a4    uintptr
+//	a5    uintptr
+//	a6    uintptr
+//	a7    uintptr
+//	a8    uintptr
+//	a9    uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscall9X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+GLOBL ·syscall9XABI0(SB), NOPTR|RODATA, $8
+DATA ·syscall9XABI0(SB)/8, $syscall9X(SB)
+TEXT syscall9X(SB), NOSPLIT, $0
+	SUB  $16, RSP   // push structure pointer
+	MOVD R0, 8(RSP)
+
+	FMOVD syscall9Args_f1(R0), F0 // f1
+	FMOVD syscall9Args_f2(R0), F1 // f2
+	FMOVD syscall9Args_f3(R0), F2 // f3
+	FMOVD syscall9Args_f4(R0), F3 // f4
+	FMOVD syscall9Args_f5(R0), F4 // f5
+	FMOVD syscall9Args_f6(R0), F5 // f6
+	FMOVD syscall9Args_f7(R0), F6 // f7
+	FMOVD syscall9Args_f8(R0), F7 // f8
+
+	MOVD syscall9Args_fn(R0), R12 // fn
+	MOVD syscall9Args_a2(R0), R1  // a2
+	MOVD syscall9Args_a3(R0), R2  // a3
+	MOVD syscall9Args_a4(R0), R3  // a4
+	MOVD syscall9Args_a5(R0), R4  // a5
+	MOVD syscall9Args_a6(R0), R5  // a6
+	MOVD syscall9Args_a7(R0), R6  // a7
+	MOVD syscall9Args_a8(R0), R7  // a8
+	MOVD syscall9Args_a9(R0), R8  // a9
+	MOVD syscall9Args_a1(R0), R0  // a1
+
+	MOVD R8, (RSP) // push a9 onto stack
+
+	BL (R12)
+
+	MOVD  8(RSP), R2              // pop structure pointer
+	ADD   $16, RSP
+	MOVD  R0, syscall9Args_r1(R2) // save r1
+	FMOVD F0, syscall9Args_r2(R2) // save r2
+	RET
diff --git a/vendor/github.com/ebitengine/purego/sys_unix_arm64.s b/vendor/github.com/ebitengine/purego/sys_unix_arm64.s
new file mode 100644
index 0000000000..d4281f7b56
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/sys_unix_arm64.s
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 The Ebitengine Authors
+
+//go:build darwin || (!cgo && linux)
+
+#include "textflag.h"
+#include "go_asm.h"
+#include "funcdata.h"
+#include "internal/abi/abi_arm64.h"
+
+TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0
+	NO_LOCAL_POINTERS
+
+	// On entry, the trampoline in zcallback_darwin_arm64.s left
+	// the callback index in R12 (which is volatile in the C ABI).
+
+	// Save callback register arguments R0-R7 and F0-F7.
+	// We do this at the top of the frame so they're contiguous with stack arguments.
+	SUB   $(16*8), RSP, R14
+	FMOVD F0, (0*8)(R14)
+	FMOVD F1, (1*8)(R14)
+	FMOVD F2, (2*8)(R14)
+	FMOVD F3, (3*8)(R14)
+	FMOVD F4, (4*8)(R14)
+	FMOVD F5, (5*8)(R14)
+	FMOVD F6, (6*8)(R14)
+	FMOVD F7, (7*8)(R14)
+	STP   (R0, R1), (8*8)(R14)
+	STP   (R2, R3), (10*8)(R14)
+	STP   (R4, R5), (12*8)(R14)
+	STP   (R6, R7), (14*8)(R14)
+
+	// Adjust SP by frame size.
+	// crosscall2 clobbers FP in the frame record so only save/restore SP.
+	SUB  $(28*8), RSP
+	MOVD R30, (RSP)
+
+	// Create a struct callbackArgs on our stack.
+	ADD  $(callbackArgs__size + 3*8), RSP, R13
+	MOVD R12, callbackArgs_index(R13)          // callback index
+	MOVD R14, R0
+	MOVD R0, callbackArgs_args(R13)            // address of args vector
+	MOVD $0, R0
+	MOVD R0, callbackArgs_result(R13)          // result
+
+	// Move parameters into registers
+	// Get the ABIInternal function pointer
+	// without <ABIInternal> by using a closure.
+	MOVD ·callbackWrap_call(SB), R0
+	MOVD (R0), R0                   // fn unsafe.Pointer
+	MOVD R13, R1                    // frame (&callbackArgs{...})
+	MOVD $0, R3                     // ctxt uintptr
+
+	BL crosscall2(SB)
+
+	// Get callback result.
+	ADD  $(callbackArgs__size + 3*8), RSP, R13
+	MOVD callbackArgs_result(R13), R0
+
+	// Restore SP
+	MOVD (RSP), R30
+	ADD  $(28*8), RSP
+
+	RET
diff --git a/vendor/github.com/ebitengine/purego/syscall.go b/vendor/github.com/ebitengine/purego/syscall.go
new file mode 100644
index 0000000000..d14fdb0d3a
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/syscall.go
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || windows || linux
+
+package purego
+
+const (
+	maxArgs     = 9
+	numOfFloats = 8 // arm64 and amd64 both have 8 float registers
+)
+
+// SyscallN takes fn, a C function pointer and a list of arguments as uintptr.
+// There is an internal maximum number of arguments that SyscallN can take. It panics
+// when the maximum is exceeded. It returns the result and the libc error code if there is one.
+//
+// NOTE: SyscallN does not properly call functions that have both integer and float parameters.
+// See discussion comment https://github.com/ebiten/purego/pull/1#issuecomment-1128057607
+// for an explanation of why that is.
+//
+// On amd64, if there are more than 8 floats the 9th and so on will be placed incorrectly on the
+// stack.
+//
+// The pragma go:nosplit is not needed at this function declaration because it uses go:uintptrescapes
+// which forces all the objects that the uintptrs point to onto the heap where a stack split won't affect
+// their memory location.
+//
+//go:uintptrescapes
+func SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) {
+	if fn == 0 {
+		panic("purego: fn is nil")
+	}
+	if len(args) > maxArgs {
+		panic("purego: too many arguments to SyscallN")
+	}
+	// add padding so there is no out-of-bounds slicing
+	var tmp [maxArgs]uintptr
+	copy(tmp[:], args)
+	return syscall_syscall9X(fn, tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], tmp[7], tmp[8])
+}
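As a concrete illustration of the SyscallN contract documented above, here is a short sketch that is not part of the vendored code. It assumes fn is a valid pointer to a C function taking no arguments and returning an integer, obtained elsewhere (for example through purego's dynamic-loading helpers, which do not appear in this file).

	package main

	import "github.com/ebitengine/purego"

	// callNoArgInt invokes a C function of type `long f(void)` through SyscallN.
	// At most 9 uintptr arguments may be passed, and functions mixing integer
	// and float parameters are not handled correctly, per the note above.
	func callNoArgInt(fn uintptr) uintptr {
		r1, _, _ := purego.SyscallN(fn)
		return r1
	}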
diff --git a/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go
new file mode 100644
index 0000000000..59c9a00bb0
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build cgo
+
+package purego
+
+import (
+	_ "unsafe" // for go:linkname
+
+	"github.com/ebitengine/purego/internal/cgo"
+)
+
+var syscall9XABI0 = uintptr(cgo.Syscall9XABI0)
+
+// this is only here to make the assembly files happy :)
+type syscall9Args struct {
+	fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr
+	f1, f2, f3, f4, f5, f6, f7, f8         uintptr
+	r1, r2, err                            uintptr
+}
+
+//go:nosplit
+func syscall_syscall9X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
+	return cgo.Syscall9X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+}
+
+func NewCallback(_ interface{}) uintptr {
+	panic("purego: NewCallback not supported")
+}
diff --git a/vendor/github.com/ebitengine/purego/syscall_sysv.go b/vendor/github.com/ebitengine/purego/syscall_sysv.go
new file mode 100644
index 0000000000..695dda3328
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/syscall_sysv.go
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || (!cgo && linux && (amd64 || arm64))
+
+package purego
+
+import (
+	"reflect"
+	"runtime"
+	"sync"
+	"unsafe"
+)
+
+var syscall9XABI0 uintptr
+
+type syscall9Args struct {
+	fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr
+	f1, f2, f3, f4, f5, f6, f7, f8         uintptr
+	r1, r2, err                            uintptr
+}
+
+//go:nosplit
+func syscall_syscall9X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
+	args := syscall9Args{
+		fn, a1, a2, a3, a4, a5, a6, a7, a8, a9,
+		a1, a2, a3, a4, a5, a6, a7, a8,
+		r1, r2, err,
+	}
+	runtime_cgocall(syscall9XABI0, unsafe.Pointer(&args))
+	return args.r1, args.r2, args.err
+}
+
+// NewCallback converts a Go function to a function pointer conforming to the C calling convention.
+// This is useful when interoperating with C code requiring callbacks. The argument is expected to be a
+// function with zero or one uintptr-sized result. The function must not have arguments with size larger than the size
+// of uintptr. Only a limited number of callbacks may be created in a single Go process, and any memory allocated
+// for these callbacks is never released. At least 2000 callbacks can always be created. Although this function
+// provides similar functionality to windows.NewCallback it is distinct.
+//
+// NOTE: Linux is currently not supported and will panic if called.
+func NewCallback(fn interface{}) uintptr {
+	if runtime.GOOS == "linux" {
+		panic("purego: NewCallback not supported")
+	}
+	return compileCallback(fn)
+}
+
+// maxCB is the maximum number of callbacks.
+// Only increase this if you have added more to the callbackasm function.
+const maxCB = 2000
+
+var cbs struct {
+	lock  sync.Mutex
+	numFn int                  // the number of functions currently in cbs.funcs
+	funcs [maxCB]reflect.Value // the saved callbacks
+}
+
+type callbackArgs struct {
+	index uintptr
+	// args points to the argument block.
+	//
+	// The structure of the arguments goes
+	// float registers followed by the
+	// integer registers followed by the stack.
+	//
+	// This variable is treated as a continuous
+	// block of memory containing all of the arguments
+	// for this callback.
+	args unsafe.Pointer
+	// Below are out-args from callbackWrap
+	result uintptr
+}
+
+func compileCallback(fn interface{}) uintptr {
+	val := reflect.ValueOf(fn)
+	if val.Kind() != reflect.Func {
+		panic("purego: the type must be a function but was not")
+	}
+	if val.IsNil() {
+		panic("purego: function must not be nil")
+	}
+	ty := val.Type()
+	for i := 0; i < ty.NumIn(); i++ {
+		in := ty.In(i)
+		switch in.Kind() {
+		case reflect.Struct, reflect.Interface, reflect.Func, reflect.Slice,
+			reflect.Chan, reflect.Complex64, reflect.Complex128,
+			reflect.String, reflect.Map, reflect.Invalid:
+			panic("purego: unsupported argument type: " + in.Kind().String())
+		}
+	}
+output:
+	switch {
+	case ty.NumOut() == 1:
+		switch ty.Out(0).Kind() {
+		case reflect.Pointer, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+			reflect.Bool, reflect.UnsafePointer:
+			break output
+		}
+		panic("purego: unsupported return type: " + ty.String())
+	case ty.NumOut() > 1:
+		panic("purego: callbacks can only have one return")
+	}
+	cbs.lock.Lock()
+	defer cbs.lock.Unlock()
+	if cbs.numFn >= maxCB {
+		panic("purego: the maximum number of callbacks has been reached")
+	}
+	cbs.funcs[cbs.numFn] = val
+	cbs.numFn++
+	return callbackasmAddr(cbs.numFn - 1)
+}
+
+const ptrSize = unsafe.Sizeof((*int)(nil))
+
+const callbackMaxFrame = 64 * ptrSize
+
+// callbackasmABI0 is implemented in zcallback_GOOS_GOARCH.s
+var callbackasmABI0 uintptr
+
+// callbackWrap_call allows the calling of the ABIInternal wrapper
+// which is required for runtime.cgocallback without the
+// <ABIInternal> tag which is only allowed in the runtime.
+// This closure is used inside sys_darwin_GOARCH.s
+var callbackWrap_call = callbackWrap
+
+// callbackWrap is called by assembly code which determines which Go function to call.
+// This function takes the arguments and passes them to the Go function and returns the result.
+func callbackWrap(a *callbackArgs) {
+	cbs.lock.Lock()
+	fn := cbs.funcs[a.index]
+	cbs.lock.Unlock()
+	fnType := fn.Type()
+	args := make([]reflect.Value, fnType.NumIn())
+	frame := (*[callbackMaxFrame]uintptr)(a.args)
+	var floatsN int // floatsN represents the number of float arguments processed
+	var intsN int   // intsN represents the number of integer arguments processed
+	// stack points to the index into frame of the current stack element.
+	// The stack begins after the float and integer registers.
+	stack := numOfIntegerRegisters() + numOfFloats
+	for i := range args {
+		var pos int
+		switch fnType.In(i).Kind() {
+		case reflect.Float32, reflect.Float64:
+			if floatsN >= numOfFloats {
+				pos = stack
+				stack++
+			} else {
+				pos = floatsN
+			}
+			floatsN++
+		default:
+			if intsN >= numOfIntegerRegisters() {
+				pos = stack
+				stack++
+			} else {
+				// the integers begin after the floats in frame
+				pos = intsN + numOfFloats
+			}
+			intsN++
+		}
+		args[i] = reflect.NewAt(fnType.In(i), unsafe.Pointer(&frame[pos])).Elem()
+	}
+	ret := fn.Call(args)
+	if len(ret) > 0 {
+		switch k := ret[0].Kind(); k {
+		case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uintptr:
+			a.result = uintptr(ret[0].Uint())
+		case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8:
+			a.result = uintptr(ret[0].Int())
+		case reflect.Bool:
+			if ret[0].Bool() {
+				a.result = 1
+			} else {
+				a.result = 0
+			}
+		case reflect.Pointer:
+			a.result = ret[0].Pointer()
+		case reflect.UnsafePointer:
+			a.result = ret[0].Pointer()
+		default:
+			panic("purego: unsupported kind: " + k.String())
+		}
+	}
+}
+
+// callbackasmAddr returns the address of the runtime.callbackasm
+// function adjusted by i.
+// On x86 and amd64, runtime.callbackasm is a series of CALL instructions,
+// and we want the callback to arrive at
+// the corresponding call instruction instead of the start of
+// runtime.callbackasm.
+// On ARM, runtime.callbackasm is a series of mov and branch instructions.
+// R12 is loaded with the callback index. Each entry is two instructions,
+// hence 8 bytes.
+func callbackasmAddr(i int) uintptr {
+	var entrySize int
+	switch runtime.GOARCH {
+	default:
+		panic("purego: unsupported architecture")
+	case "386", "amd64":
+		entrySize = 5
+	case "arm", "arm64":
+		// On ARM and ARM64, each entry is a MOV instruction
+		// followed by a branch instruction
+		entrySize = 8
+	}
+	return callbackasmABI0 + uintptr(i*entrySize)
+}
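To show how the NewCallback path above is meant to be used, here is a hedged sketch, not part of the vendored code. Per the documentation above it only works on darwin in this build, and the C API that would consume the returned pointer is assumed rather than shown.

	package main

	import "github.com/ebitengine/purego"

	// progressCallback has only uintptr-sized arguments and a single
	// integer-kind result, as NewCallback requires.
	func progressCallback(done, total int32) int32 {
		println("progress:", done, "/", total)
		return 0 // C-style "keep going" status
	}

	func registerProgress() uintptr {
		// The returned value can be passed to a C function expecting a
		// callback pointer (the C consumer is assumed, not shown here).
		return purego.NewCallback(progressCallback)
	}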
diff --git a/vendor/github.com/ebitengine/purego/syscall_windows.go b/vendor/github.com/ebitengine/purego/syscall_windows.go
new file mode 100644
index 0000000000..a4db9f1316
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/syscall_windows.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package purego
+
+import (
+	"syscall"
+	_ "unsafe" // only for go:linkname
+
+	"golang.org/x/sys/windows"
+)
+
+var syscall9XABI0 uintptr
+
+type syscall9Args struct {
+	fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr
+	f1, f2, f3, f4, f5, f6, f7, f8         uintptr
+	r1, r2, err                            uintptr
+}
+
+func syscall_syscall9X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
+	r1, r2, errno := syscall.Syscall9(fn, 9, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+	return r1, r2, uintptr(errno)
+}
+
+// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
+// This is useful when interoperating with Windows code requiring callbacks. The argument is expected to be a
+// function with one uintptr-sized result. The function must not have arguments with size larger than the
+// size of uintptr. Only a limited number of callbacks may be created in a single Go process, and any memory
+// allocated for these callbacks is never released. Between NewCallback and NewCallbackCDecl, at least 1024
+// callbacks can always be created. Although this function is similar to the darwin version, it may act
+// differently.
+func NewCallback(fn interface{}) uintptr {
+	return syscall.NewCallback(fn)
+}
+
+//go:linkname openLibrary openLibrary
+func openLibrary(name string) (uintptr, error) {
+	handle, err := windows.LoadLibrary(name)
+	return uintptr(handle), err
+}
+
+func loadSymbol(handle uintptr, name string) (uintptr, error) {
+	return windows.GetProcAddress(windows.Handle(handle), name)
+}
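
Editorial note (not part of the vendored file): for context, this hedged sketch shows how the NewCallback entry point defined above is typically used from application code: a Go function is converted into a C-callable pointer that can then be handed to a foreign function. It assumes github.com/ebitengine/purego is available in the build; the printed address is only illustrative.

package main

import (
	"fmt"

	"github.com/ebitengine/purego"
)

func main() {
	// Convert a Go function into a function pointer that C code can call.
	// Per the doc comment above, the callback must return a single
	// uintptr-sized result and take arguments no larger than a uintptr.
	cb := purego.NewCallback(func(a, b uintptr) uintptr {
		return a + b
	})

	// In real use, cb would be passed to a foreign function that expects a callback.
	fmt.Printf("callback pointer: %#x\n", cb)
}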
diff --git a/vendor/github.com/ebitengine/purego/zcallback_amd64.s b/vendor/github.com/ebitengine/purego/zcallback_amd64.s
new file mode 100644
index 0000000000..0624563131
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/zcallback_amd64.s
@@ -0,0 +1,2016 @@
+// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
+
+//go:build darwin || (!cgo && linux)
+
+// runtime·callbackasm is called by external code to
+// execute Go implemented callback function. It is not
+// called from the start, instead runtime·compilecallback
+// always returns address into runtime·callbackasm offset
+// appropriately so different callbacks start with different
+// CALL instruction in runtime·callbackasm. This determines
+// which Go callback function is executed later on.
+#include "textflag.h"
+
+GLOBL ·callbackasmABI0(SB), NOPTR|RODATA, $8
+DATA ·callbackasmABI0(SB)/8, $callbackasm(SB)
+TEXT callbackasm(SB), 7, $0
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
diff --git a/vendor/github.com/ebitengine/purego/zcallback_arm64.s b/vendor/github.com/ebitengine/purego/zcallback_arm64.s
new file mode 100644
index 0000000000..25e0bf79f1
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/zcallback_arm64.s
@@ -0,0 +1,4016 @@
+// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
+
+//go:build darwin || (!cgo && linux)
+
+// External code calls into callbackasm at an offset corresponding
+// to the callback index. Callbackasm is a table of MOV and B instructions.
+// The MOV instruction loads R12 with the callback index, and the
+// B instruction branches to callbackasm1.
+// callbackasm1 takes the callback index from R12 and
+// indexes into an array that stores information about each callback.
+// It then calls the Go implementation for that callback.
+#include "textflag.h"
+
+GLOBL ·callbackasmABI0(SB), NOPTR|RODATA, $8
+DATA ·callbackasmABI0(SB)/8, $callbackasm(SB)
+TEXT callbackasm(SB), NOSPLIT|NOFRAME, $0
+	MOVD $0, R12
+	B    callbackasm1(SB)
+	MOVD $1, R12
+	B    callbackasm1(SB)
+	MOVD $2, R12
+	B    callbackasm1(SB)
+	MOVD $3, R12
+	B    callbackasm1(SB)
+	MOVD $4, R12
+	B    callbackasm1(SB)
+	MOVD $5, R12
+	B    callbackasm1(SB)
+	MOVD $6, R12
+	B    callbackasm1(SB)
+	MOVD $7, R12
+	B    callbackasm1(SB)
+	MOVD $8, R12
+	B    callbackasm1(SB)
+	MOVD $9, R12
+	B    callbackasm1(SB)
+	MOVD $10, R12
+	B    callbackasm1(SB)
+	MOVD $11, R12
+	B    callbackasm1(SB)
+	MOVD $12, R12
+	B    callbackasm1(SB)
+	MOVD $13, R12
+	B    callbackasm1(SB)
+	MOVD $14, R12
+	B    callbackasm1(SB)
+	MOVD $15, R12
+	B    callbackasm1(SB)
+	MOVD $16, R12
+	B    callbackasm1(SB)
+	MOVD $17, R12
+	B    callbackasm1(SB)
+	MOVD $18, R12
+	B    callbackasm1(SB)
+	MOVD $19, R12
+	B    callbackasm1(SB)
+	MOVD $20, R12
+	B    callbackasm1(SB)
+	MOVD $21, R12
+	B    callbackasm1(SB)
+	MOVD $22, R12
+	B    callbackasm1(SB)
+	MOVD $23, R12
+	B    callbackasm1(SB)
+	MOVD $24, R12
+	B    callbackasm1(SB)
+	MOVD $25, R12
+	B    callbackasm1(SB)
+	MOVD $26, R12
+	B    callbackasm1(SB)
+	MOVD $27, R12
+	B    callbackasm1(SB)
+	MOVD $28, R12
+	B    callbackasm1(SB)
+	MOVD $29, R12
+	B    callbackasm1(SB)
+	MOVD $30, R12
+	B    callbackasm1(SB)
+	MOVD $31, R12
+	B    callbackasm1(SB)
+	MOVD $32, R12
+	B    callbackasm1(SB)
+	MOVD $33, R12
+	B    callbackasm1(SB)
+	MOVD $34, R12
+	B    callbackasm1(SB)
+	MOVD $35, R12
+	B    callbackasm1(SB)
+	MOVD $36, R12
+	B    callbackasm1(SB)
+	MOVD $37, R12
+	B    callbackasm1(SB)
+	MOVD $38, R12
+	B    callbackasm1(SB)
+	MOVD $39, R12
+	B    callbackasm1(SB)
+	MOVD $40, R12
+	B    callbackasm1(SB)
+	MOVD $41, R12
+	B    callbackasm1(SB)
+	MOVD $42, R12
+	B    callbackasm1(SB)
+	MOVD $43, R12
+	B    callbackasm1(SB)
+	MOVD $44, R12
+	B    callbackasm1(SB)
+	MOVD $45, R12
+	B    callbackasm1(SB)
+	MOVD $46, R12
+	B    callbackasm1(SB)
+	MOVD $47, R12
+	B    callbackasm1(SB)
+	MOVD $48, R12
+	B    callbackasm1(SB)
+	MOVD $49, R12
+	B    callbackasm1(SB)
+	MOVD $50, R12
+	B    callbackasm1(SB)
+	MOVD $51, R12
+	B    callbackasm1(SB)
+	MOVD $52, R12
+	B    callbackasm1(SB)
+	MOVD $53, R12
+	B    callbackasm1(SB)
+	MOVD $54, R12
+	B    callbackasm1(SB)
+	MOVD $55, R12
+	B    callbackasm1(SB)
+	MOVD $56, R12
+	B    callbackasm1(SB)
+	MOVD $57, R12
+	B    callbackasm1(SB)
+	MOVD $58, R12
+	B    callbackasm1(SB)
+	MOVD $59, R12
+	B    callbackasm1(SB)
+	MOVD $60, R12
+	B    callbackasm1(SB)
+	MOVD $61, R12
+	B    callbackasm1(SB)
+	MOVD $62, R12
+	B    callbackasm1(SB)
+	MOVD $63, R12
+	B    callbackasm1(SB)
+	MOVD $64, R12
+	B    callbackasm1(SB)
+	MOVD $65, R12
+	B    callbackasm1(SB)
+	MOVD $66, R12
+	B    callbackasm1(SB)
+	MOVD $67, R12
+	B    callbackasm1(SB)
+	MOVD $68, R12
+	B    callbackasm1(SB)
+	MOVD $69, R12
+	B    callbackasm1(SB)
+	MOVD $70, R12
+	B    callbackasm1(SB)
+	MOVD $71, R12
+	B    callbackasm1(SB)
+	MOVD $72, R12
+	B    callbackasm1(SB)
+	MOVD $73, R12
+	B    callbackasm1(SB)
+	MOVD $74, R12
+	B    callbackasm1(SB)
+	MOVD $75, R12
+	B    callbackasm1(SB)
+	MOVD $76, R12
+	B    callbackasm1(SB)
+	MOVD $77, R12
+	B    callbackasm1(SB)
+	MOVD $78, R12
+	B    callbackasm1(SB)
+	MOVD $79, R12
+	B    callbackasm1(SB)
+	MOVD $80, R12
+	B    callbackasm1(SB)
+	MOVD $81, R12
+	B    callbackasm1(SB)
+	MOVD $82, R12
+	B    callbackasm1(SB)
+	MOVD $83, R12
+	B    callbackasm1(SB)
+	MOVD $84, R12
+	B    callbackasm1(SB)
+	MOVD $85, R12
+	B    callbackasm1(SB)
+	MOVD $86, R12
+	B    callbackasm1(SB)
+	MOVD $87, R12
+	B    callbackasm1(SB)
+	MOVD $88, R12
+	B    callbackasm1(SB)
+	MOVD $89, R12
+	B    callbackasm1(SB)
+	MOVD $90, R12
+	B    callbackasm1(SB)
+	MOVD $91, R12
+	B    callbackasm1(SB)
+	MOVD $92, R12
+	B    callbackasm1(SB)
+	MOVD $93, R12
+	B    callbackasm1(SB)
+	MOVD $94, R12
+	B    callbackasm1(SB)
+	MOVD $95, R12
+	B    callbackasm1(SB)
+	MOVD $96, R12
+	B    callbackasm1(SB)
+	MOVD $97, R12
+	B    callbackasm1(SB)
+	MOVD $98, R12
+	B    callbackasm1(SB)
+	MOVD $99, R12
+	B    callbackasm1(SB)
+	MOVD $100, R12
+	B    callbackasm1(SB)
+	MOVD $101, R12
+	B    callbackasm1(SB)
+	MOVD $102, R12
+	B    callbackasm1(SB)
+	MOVD $103, R12
+	B    callbackasm1(SB)
+	MOVD $104, R12
+	B    callbackasm1(SB)
+	MOVD $105, R12
+	B    callbackasm1(SB)
+	MOVD $106, R12
+	B    callbackasm1(SB)
+	MOVD $107, R12
+	B    callbackasm1(SB)
+	MOVD $108, R12
+	B    callbackasm1(SB)
+	MOVD $109, R12
+	B    callbackasm1(SB)
+	MOVD $110, R12
+	B    callbackasm1(SB)
+	MOVD $111, R12
+	B    callbackasm1(SB)
+	MOVD $112, R12
+	B    callbackasm1(SB)
+	MOVD $113, R12
+	B    callbackasm1(SB)
+	MOVD $114, R12
+	B    callbackasm1(SB)
+	MOVD $115, R12
+	B    callbackasm1(SB)
+	MOVD $116, R12
+	B    callbackasm1(SB)
+	MOVD $117, R12
+	B    callbackasm1(SB)
+	MOVD $118, R12
+	B    callbackasm1(SB)
+	MOVD $119, R12
+	B    callbackasm1(SB)
+	MOVD $120, R12
+	B    callbackasm1(SB)
+	MOVD $121, R12
+	B    callbackasm1(SB)
+	MOVD $122, R12
+	B    callbackasm1(SB)
+	MOVD $123, R12
+	B    callbackasm1(SB)
+	MOVD $124, R12
+	B    callbackasm1(SB)
+	MOVD $125, R12
+	B    callbackasm1(SB)
+	MOVD $126, R12
+	B    callbackasm1(SB)
+	MOVD $127, R12
+	B    callbackasm1(SB)
+	MOVD $128, R12
+	B    callbackasm1(SB)
+	MOVD $129, R12
+	B    callbackasm1(SB)
+	MOVD $130, R12
+	B    callbackasm1(SB)
+	MOVD $131, R12
+	B    callbackasm1(SB)
+	MOVD $132, R12
+	B    callbackasm1(SB)
+	MOVD $133, R12
+	B    callbackasm1(SB)
+	MOVD $134, R12
+	B    callbackasm1(SB)
+	MOVD $135, R12
+	B    callbackasm1(SB)
+	MOVD $136, R12
+	B    callbackasm1(SB)
+	MOVD $137, R12
+	B    callbackasm1(SB)
+	MOVD $138, R12
+	B    callbackasm1(SB)
+	MOVD $139, R12
+	B    callbackasm1(SB)
+	MOVD $140, R12
+	B    callbackasm1(SB)
+	MOVD $141, R12
+	B    callbackasm1(SB)
+	MOVD $142, R12
+	B    callbackasm1(SB)
+	MOVD $143, R12
+	B    callbackasm1(SB)
+	MOVD $144, R12
+	B    callbackasm1(SB)
+	MOVD $145, R12
+	B    callbackasm1(SB)
+	MOVD $146, R12
+	B    callbackasm1(SB)
+	MOVD $147, R12
+	B    callbackasm1(SB)
+	MOVD $148, R12
+	B    callbackasm1(SB)
+	MOVD $149, R12
+	B    callbackasm1(SB)
+	MOVD $150, R12
+	B    callbackasm1(SB)
+	MOVD $151, R12
+	B    callbackasm1(SB)
+	MOVD $152, R12
+	B    callbackasm1(SB)
+	MOVD $153, R12
+	B    callbackasm1(SB)
+	MOVD $154, R12
+	B    callbackasm1(SB)
+	MOVD $155, R12
+	B    callbackasm1(SB)
+	MOVD $156, R12
+	B    callbackasm1(SB)
+	MOVD $157, R12
+	B    callbackasm1(SB)
+	MOVD $158, R12
+	B    callbackasm1(SB)
+	MOVD $159, R12
+	B    callbackasm1(SB)
+	MOVD $160, R12
+	B    callbackasm1(SB)
+	MOVD $161, R12
+	B    callbackasm1(SB)
+	MOVD $162, R12
+	B    callbackasm1(SB)
+	MOVD $163, R12
+	B    callbackasm1(SB)
+	MOVD $164, R12
+	B    callbackasm1(SB)
+	MOVD $165, R12
+	B    callbackasm1(SB)
+	MOVD $166, R12
+	B    callbackasm1(SB)
+	MOVD $167, R12
+	B    callbackasm1(SB)
+	MOVD $168, R12
+	B    callbackasm1(SB)
+	MOVD $169, R12
+	B    callbackasm1(SB)
+	MOVD $170, R12
+	B    callbackasm1(SB)
+	MOVD $171, R12
+	B    callbackasm1(SB)
+	MOVD $172, R12
+	B    callbackasm1(SB)
+	MOVD $173, R12
+	B    callbackasm1(SB)
+	MOVD $174, R12
+	B    callbackasm1(SB)
+	MOVD $175, R12
+	B    callbackasm1(SB)
+	MOVD $176, R12
+	B    callbackasm1(SB)
+	MOVD $177, R12
+	B    callbackasm1(SB)
+	MOVD $178, R12
+	B    callbackasm1(SB)
+	MOVD $179, R12
+	B    callbackasm1(SB)
+	MOVD $180, R12
+	B    callbackasm1(SB)
+	MOVD $181, R12
+	B    callbackasm1(SB)
+	MOVD $182, R12
+	B    callbackasm1(SB)
+	MOVD $183, R12
+	B    callbackasm1(SB)
+	MOVD $184, R12
+	B    callbackasm1(SB)
+	MOVD $185, R12
+	B    callbackasm1(SB)
+	MOVD $186, R12
+	B    callbackasm1(SB)
+	MOVD $187, R12
+	B    callbackasm1(SB)
+	MOVD $188, R12
+	B    callbackasm1(SB)
+	MOVD $189, R12
+	B    callbackasm1(SB)
+	MOVD $190, R12
+	B    callbackasm1(SB)
+	MOVD $191, R12
+	B    callbackasm1(SB)
+	MOVD $192, R12
+	B    callbackasm1(SB)
+	MOVD $193, R12
+	B    callbackasm1(SB)
+	MOVD $194, R12
+	B    callbackasm1(SB)
+	MOVD $195, R12
+	B    callbackasm1(SB)
+	MOVD $196, R12
+	B    callbackasm1(SB)
+	MOVD $197, R12
+	B    callbackasm1(SB)
+	MOVD $198, R12
+	B    callbackasm1(SB)
+	MOVD $199, R12
+	B    callbackasm1(SB)
+	MOVD $200, R12
+	B    callbackasm1(SB)
+	MOVD $201, R12
+	B    callbackasm1(SB)
+	MOVD $202, R12
+	B    callbackasm1(SB)
+	MOVD $203, R12
+	B    callbackasm1(SB)
+	MOVD $204, R12
+	B    callbackasm1(SB)
+	MOVD $205, R12
+	B    callbackasm1(SB)
+	MOVD $206, R12
+	B    callbackasm1(SB)
+	MOVD $207, R12
+	B    callbackasm1(SB)
+	MOVD $208, R12
+	B    callbackasm1(SB)
+	MOVD $209, R12
+	B    callbackasm1(SB)
+	MOVD $210, R12
+	B    callbackasm1(SB)
+	MOVD $211, R12
+	B    callbackasm1(SB)
+	MOVD $212, R12
+	B    callbackasm1(SB)
+	MOVD $213, R12
+	B    callbackasm1(SB)
+	MOVD $214, R12
+	B    callbackasm1(SB)
+	MOVD $215, R12
+	B    callbackasm1(SB)
+	MOVD $216, R12
+	B    callbackasm1(SB)
+	MOVD $217, R12
+	B    callbackasm1(SB)
+	MOVD $218, R12
+	B    callbackasm1(SB)
+	MOVD $219, R12
+	B    callbackasm1(SB)
+	MOVD $220, R12
+	B    callbackasm1(SB)
+	MOVD $221, R12
+	B    callbackasm1(SB)
+	MOVD $222, R12
+	B    callbackasm1(SB)
+	MOVD $223, R12
+	B    callbackasm1(SB)
+	MOVD $224, R12
+	B    callbackasm1(SB)
+	MOVD $225, R12
+	B    callbackasm1(SB)
+	MOVD $226, R12
+	B    callbackasm1(SB)
+	MOVD $227, R12
+	B    callbackasm1(SB)
+	MOVD $228, R12
+	B    callbackasm1(SB)
+	MOVD $229, R12
+	B    callbackasm1(SB)
+	MOVD $230, R12
+	B    callbackasm1(SB)
+	MOVD $231, R12
+	B    callbackasm1(SB)
+	MOVD $232, R12
+	B    callbackasm1(SB)
+	MOVD $233, R12
+	B    callbackasm1(SB)
+	MOVD $234, R12
+	B    callbackasm1(SB)
+	MOVD $235, R12
+	B    callbackasm1(SB)
+	MOVD $236, R12
+	B    callbackasm1(SB)
+	MOVD $237, R12
+	B    callbackasm1(SB)
+	MOVD $238, R12
+	B    callbackasm1(SB)
+	MOVD $239, R12
+	B    callbackasm1(SB)
+	MOVD $240, R12
+	B    callbackasm1(SB)
+	MOVD $241, R12
+	B    callbackasm1(SB)
+	MOVD $242, R12
+	B    callbackasm1(SB)
+	MOVD $243, R12
+	B    callbackasm1(SB)
+	MOVD $244, R12
+	B    callbackasm1(SB)
+	MOVD $245, R12
+	B    callbackasm1(SB)
+	MOVD $246, R12
+	B    callbackasm1(SB)
+	MOVD $247, R12
+	B    callbackasm1(SB)
+	MOVD $248, R12
+	B    callbackasm1(SB)
+	MOVD $249, R12
+	B    callbackasm1(SB)
+	MOVD $250, R12
+	B    callbackasm1(SB)
+	MOVD $251, R12
+	B    callbackasm1(SB)
+	MOVD $252, R12
+	B    callbackasm1(SB)
+	MOVD $253, R12
+	B    callbackasm1(SB)
+	MOVD $254, R12
+	B    callbackasm1(SB)
+	MOVD $255, R12
+	B    callbackasm1(SB)
+	MOVD $256, R12
+	B    callbackasm1(SB)
+	MOVD $257, R12
+	B    callbackasm1(SB)
+	MOVD $258, R12
+	B    callbackasm1(SB)
+	MOVD $259, R12
+	B    callbackasm1(SB)
+	MOVD $260, R12
+	B    callbackasm1(SB)
+	MOVD $261, R12
+	B    callbackasm1(SB)
+	MOVD $262, R12
+	B    callbackasm1(SB)
+	MOVD $263, R12
+	B    callbackasm1(SB)
+	MOVD $264, R12
+	B    callbackasm1(SB)
+	MOVD $265, R12
+	B    callbackasm1(SB)
+	MOVD $266, R12
+	B    callbackasm1(SB)
+	MOVD $267, R12
+	B    callbackasm1(SB)
+	MOVD $268, R12
+	B    callbackasm1(SB)
+	MOVD $269, R12
+	B    callbackasm1(SB)
+	MOVD $270, R12
+	B    callbackasm1(SB)
+	MOVD $271, R12
+	B    callbackasm1(SB)
+	MOVD $272, R12
+	B    callbackasm1(SB)
+	MOVD $273, R12
+	B    callbackasm1(SB)
+	MOVD $274, R12
+	B    callbackasm1(SB)
+	MOVD $275, R12
+	B    callbackasm1(SB)
+	MOVD $276, R12
+	B    callbackasm1(SB)
+	MOVD $277, R12
+	B    callbackasm1(SB)
+	MOVD $278, R12
+	B    callbackasm1(SB)
+	MOVD $279, R12
+	B    callbackasm1(SB)
+	MOVD $280, R12
+	B    callbackasm1(SB)
+	MOVD $281, R12
+	B    callbackasm1(SB)
+	MOVD $282, R12
+	B    callbackasm1(SB)
+	MOVD $283, R12
+	B    callbackasm1(SB)
+	MOVD $284, R12
+	B    callbackasm1(SB)
+	MOVD $285, R12
+	B    callbackasm1(SB)
+	MOVD $286, R12
+	B    callbackasm1(SB)
+	MOVD $287, R12
+	B    callbackasm1(SB)
+	MOVD $288, R12
+	B    callbackasm1(SB)
+	MOVD $289, R12
+	B    callbackasm1(SB)
+	MOVD $290, R12
+	B    callbackasm1(SB)
+	MOVD $291, R12
+	B    callbackasm1(SB)
+	MOVD $292, R12
+	B    callbackasm1(SB)
+	MOVD $293, R12
+	B    callbackasm1(SB)
+	MOVD $294, R12
+	B    callbackasm1(SB)
+	MOVD $295, R12
+	B    callbackasm1(SB)
+	MOVD $296, R12
+	B    callbackasm1(SB)
+	MOVD $297, R12
+	B    callbackasm1(SB)
+	MOVD $298, R12
+	B    callbackasm1(SB)
+	MOVD $299, R12
+	B    callbackasm1(SB)
+	MOVD $300, R12
+	B    callbackasm1(SB)
+	MOVD $301, R12
+	B    callbackasm1(SB)
+	MOVD $302, R12
+	B    callbackasm1(SB)
+	MOVD $303, R12
+	B    callbackasm1(SB)
+	MOVD $304, R12
+	B    callbackasm1(SB)
+	MOVD $305, R12
+	B    callbackasm1(SB)
+	MOVD $306, R12
+	B    callbackasm1(SB)
+	MOVD $307, R12
+	B    callbackasm1(SB)
+	MOVD $308, R12
+	B    callbackasm1(SB)
+	MOVD $309, R12
+	B    callbackasm1(SB)
+	MOVD $310, R12
+	B    callbackasm1(SB)
+	MOVD $311, R12
+	B    callbackasm1(SB)
+	MOVD $312, R12
+	B    callbackasm1(SB)
+	MOVD $313, R12
+	B    callbackasm1(SB)
+	MOVD $314, R12
+	B    callbackasm1(SB)
+	MOVD $315, R12
+	B    callbackasm1(SB)
+	MOVD $316, R12
+	B    callbackasm1(SB)
+	MOVD $317, R12
+	B    callbackasm1(SB)
+	MOVD $318, R12
+	B    callbackasm1(SB)
+	MOVD $319, R12
+	B    callbackasm1(SB)
+	MOVD $320, R12
+	B    callbackasm1(SB)
+	MOVD $321, R12
+	B    callbackasm1(SB)
+	MOVD $322, R12
+	B    callbackasm1(SB)
+	MOVD $323, R12
+	B    callbackasm1(SB)
+	MOVD $324, R12
+	B    callbackasm1(SB)
+	MOVD $325, R12
+	B    callbackasm1(SB)
+	MOVD $326, R12
+	B    callbackasm1(SB)
+	MOVD $327, R12
+	B    callbackasm1(SB)
+	MOVD $328, R12
+	B    callbackasm1(SB)
+	MOVD $329, R12
+	B    callbackasm1(SB)
+	MOVD $330, R12
+	B    callbackasm1(SB)
+	MOVD $331, R12
+	B    callbackasm1(SB)
+	MOVD $332, R12
+	B    callbackasm1(SB)
+	MOVD $333, R12
+	B    callbackasm1(SB)
+	MOVD $334, R12
+	B    callbackasm1(SB)
+	MOVD $335, R12
+	B    callbackasm1(SB)
+	MOVD $336, R12
+	B    callbackasm1(SB)
+	MOVD $337, R12
+	B    callbackasm1(SB)
+	MOVD $338, R12
+	B    callbackasm1(SB)
+	MOVD $339, R12
+	B    callbackasm1(SB)
+	MOVD $340, R12
+	B    callbackasm1(SB)
+	MOVD $341, R12
+	B    callbackasm1(SB)
+	MOVD $342, R12
+	B    callbackasm1(SB)
+	MOVD $343, R12
+	B    callbackasm1(SB)
+	MOVD $344, R12
+	B    callbackasm1(SB)
+	MOVD $345, R12
+	B    callbackasm1(SB)
+	MOVD $346, R12
+	B    callbackasm1(SB)
+	MOVD $347, R12
+	B    callbackasm1(SB)
+	MOVD $348, R12
+	B    callbackasm1(SB)
+	MOVD $349, R12
+	B    callbackasm1(SB)
+	MOVD $350, R12
+	B    callbackasm1(SB)
+	MOVD $351, R12
+	B    callbackasm1(SB)
+	MOVD $352, R12
+	B    callbackasm1(SB)
+	MOVD $353, R12
+	B    callbackasm1(SB)
+	MOVD $354, R12
+	B    callbackasm1(SB)
+	MOVD $355, R12
+	B    callbackasm1(SB)
+	MOVD $356, R12
+	B    callbackasm1(SB)
+	MOVD $357, R12
+	B    callbackasm1(SB)
+	MOVD $358, R12
+	B    callbackasm1(SB)
+	MOVD $359, R12
+	B    callbackasm1(SB)
+	MOVD $360, R12
+	B    callbackasm1(SB)
+	MOVD $361, R12
+	B    callbackasm1(SB)
+	MOVD $362, R12
+	B    callbackasm1(SB)
+	MOVD $363, R12
+	B    callbackasm1(SB)
+	MOVD $364, R12
+	B    callbackasm1(SB)
+	MOVD $365, R12
+	B    callbackasm1(SB)
+	MOVD $366, R12
+	B    callbackasm1(SB)
+	MOVD $367, R12
+	B    callbackasm1(SB)
+	MOVD $368, R12
+	B    callbackasm1(SB)
+	MOVD $369, R12
+	B    callbackasm1(SB)
+	MOVD $370, R12
+	B    callbackasm1(SB)
+	MOVD $371, R12
+	B    callbackasm1(SB)
+	MOVD $372, R12
+	B    callbackasm1(SB)
+	MOVD $373, R12
+	B    callbackasm1(SB)
+	MOVD $374, R12
+	B    callbackasm1(SB)
+	MOVD $375, R12
+	B    callbackasm1(SB)
+	MOVD $376, R12
+	B    callbackasm1(SB)
+	MOVD $377, R12
+	B    callbackasm1(SB)
+	MOVD $378, R12
+	B    callbackasm1(SB)
+	MOVD $379, R12
+	B    callbackasm1(SB)
+	MOVD $380, R12
+	B    callbackasm1(SB)
+	MOVD $381, R12
+	B    callbackasm1(SB)
+	MOVD $382, R12
+	B    callbackasm1(SB)
+	MOVD $383, R12
+	B    callbackasm1(SB)
+	MOVD $384, R12
+	B    callbackasm1(SB)
+	MOVD $385, R12
+	B    callbackasm1(SB)
+	MOVD $386, R12
+	B    callbackasm1(SB)
+	MOVD $387, R12
+	B    callbackasm1(SB)
+	MOVD $388, R12
+	B    callbackasm1(SB)
+	MOVD $389, R12
+	B    callbackasm1(SB)
+	MOVD $390, R12
+	B    callbackasm1(SB)
+	MOVD $391, R12
+	B    callbackasm1(SB)
+	MOVD $392, R12
+	B    callbackasm1(SB)
+	MOVD $393, R12
+	B    callbackasm1(SB)
+	MOVD $394, R12
+	B    callbackasm1(SB)
+	MOVD $395, R12
+	B    callbackasm1(SB)
+	MOVD $396, R12
+	B    callbackasm1(SB)
+	MOVD $397, R12
+	B    callbackasm1(SB)
+	MOVD $398, R12
+	B    callbackasm1(SB)
+	MOVD $399, R12
+	B    callbackasm1(SB)
+	MOVD $400, R12
+	B    callbackasm1(SB)
+	MOVD $401, R12
+	B    callbackasm1(SB)
+	MOVD $402, R12
+	B    callbackasm1(SB)
+	MOVD $403, R12
+	B    callbackasm1(SB)
+	MOVD $404, R12
+	B    callbackasm1(SB)
+	MOVD $405, R12
+	B    callbackasm1(SB)
+	MOVD $406, R12
+	B    callbackasm1(SB)
+	MOVD $407, R12
+	B    callbackasm1(SB)
+	MOVD $408, R12
+	B    callbackasm1(SB)
+	MOVD $409, R12
+	B    callbackasm1(SB)
+	MOVD $410, R12
+	B    callbackasm1(SB)
+	MOVD $411, R12
+	B    callbackasm1(SB)
+	MOVD $412, R12
+	B    callbackasm1(SB)
+	MOVD $413, R12
+	B    callbackasm1(SB)
+	MOVD $414, R12
+	B    callbackasm1(SB)
+	MOVD $415, R12
+	B    callbackasm1(SB)
+	MOVD $416, R12
+	B    callbackasm1(SB)
+	MOVD $417, R12
+	B    callbackasm1(SB)
+	MOVD $418, R12
+	B    callbackasm1(SB)
+	MOVD $419, R12
+	B    callbackasm1(SB)
+	MOVD $420, R12
+	B    callbackasm1(SB)
+	MOVD $421, R12
+	B    callbackasm1(SB)
+	MOVD $422, R12
+	B    callbackasm1(SB)
+	MOVD $423, R12
+	B    callbackasm1(SB)
+	MOVD $424, R12
+	B    callbackasm1(SB)
+	MOVD $425, R12
+	B    callbackasm1(SB)
+	MOVD $426, R12
+	B    callbackasm1(SB)
+	MOVD $427, R12
+	B    callbackasm1(SB)
+	MOVD $428, R12
+	B    callbackasm1(SB)
+	MOVD $429, R12
+	B    callbackasm1(SB)
+	MOVD $430, R12
+	B    callbackasm1(SB)
+	MOVD $431, R12
+	B    callbackasm1(SB)
+	MOVD $432, R12
+	B    callbackasm1(SB)
+	MOVD $433, R12
+	B    callbackasm1(SB)
+	MOVD $434, R12
+	B    callbackasm1(SB)
+	MOVD $435, R12
+	B    callbackasm1(SB)
+	MOVD $436, R12
+	B    callbackasm1(SB)
+	MOVD $437, R12
+	B    callbackasm1(SB)
+	MOVD $438, R12
+	B    callbackasm1(SB)
+	MOVD $439, R12
+	B    callbackasm1(SB)
+	MOVD $440, R12
+	B    callbackasm1(SB)
+	MOVD $441, R12
+	B    callbackasm1(SB)
+	MOVD $442, R12
+	B    callbackasm1(SB)
+	MOVD $443, R12
+	B    callbackasm1(SB)
+	MOVD $444, R12
+	B    callbackasm1(SB)
+	MOVD $445, R12
+	B    callbackasm1(SB)
+	MOVD $446, R12
+	B    callbackasm1(SB)
+	MOVD $447, R12
+	B    callbackasm1(SB)
+	MOVD $448, R12
+	B    callbackasm1(SB)
+	MOVD $449, R12
+	B    callbackasm1(SB)
+	MOVD $450, R12
+	B    callbackasm1(SB)
+	MOVD $451, R12
+	B    callbackasm1(SB)
+	MOVD $452, R12
+	B    callbackasm1(SB)
+	MOVD $453, R12
+	B    callbackasm1(SB)
+	MOVD $454, R12
+	B    callbackasm1(SB)
+	MOVD $455, R12
+	B    callbackasm1(SB)
+	MOVD $456, R12
+	B    callbackasm1(SB)
+	MOVD $457, R12
+	B    callbackasm1(SB)
+	MOVD $458, R12
+	B    callbackasm1(SB)
+	MOVD $459, R12
+	B    callbackasm1(SB)
+	MOVD $460, R12
+	B    callbackasm1(SB)
+	MOVD $461, R12
+	B    callbackasm1(SB)
+	MOVD $462, R12
+	B    callbackasm1(SB)
+	MOVD $463, R12
+	B    callbackasm1(SB)
+	MOVD $464, R12
+	B    callbackasm1(SB)
+	MOVD $465, R12
+	B    callbackasm1(SB)
+	MOVD $466, R12
+	B    callbackasm1(SB)
+	MOVD $467, R12
+	B    callbackasm1(SB)
+	MOVD $468, R12
+	B    callbackasm1(SB)
+	MOVD $469, R12
+	B    callbackasm1(SB)
+	MOVD $470, R12
+	B    callbackasm1(SB)
+	MOVD $471, R12
+	B    callbackasm1(SB)
+	MOVD $472, R12
+	B    callbackasm1(SB)
+	MOVD $473, R12
+	B    callbackasm1(SB)
+	MOVD $474, R12
+	B    callbackasm1(SB)
+	MOVD $475, R12
+	B    callbackasm1(SB)
+	MOVD $476, R12
+	B    callbackasm1(SB)
+	MOVD $477, R12
+	B    callbackasm1(SB)
+	MOVD $478, R12
+	B    callbackasm1(SB)
+	MOVD $479, R12
+	B    callbackasm1(SB)
+	MOVD $480, R12
+	B    callbackasm1(SB)
+	MOVD $481, R12
+	B    callbackasm1(SB)
+	MOVD $482, R12
+	B    callbackasm1(SB)
+	MOVD $483, R12
+	B    callbackasm1(SB)
+	MOVD $484, R12
+	B    callbackasm1(SB)
+	MOVD $485, R12
+	B    callbackasm1(SB)
+	MOVD $486, R12
+	B    callbackasm1(SB)
+	MOVD $487, R12
+	B    callbackasm1(SB)
+	MOVD $488, R12
+	B    callbackasm1(SB)
+	MOVD $489, R12
+	B    callbackasm1(SB)
+	MOVD $490, R12
+	B    callbackasm1(SB)
+	MOVD $491, R12
+	B    callbackasm1(SB)
+	MOVD $492, R12
+	B    callbackasm1(SB)
+	MOVD $493, R12
+	B    callbackasm1(SB)
+	MOVD $494, R12
+	B    callbackasm1(SB)
+	MOVD $495, R12
+	B    callbackasm1(SB)
+	MOVD $496, R12
+	B    callbackasm1(SB)
+	MOVD $497, R12
+	B    callbackasm1(SB)
+	MOVD $498, R12
+	B    callbackasm1(SB)
+	MOVD $499, R12
+	B    callbackasm1(SB)
+	MOVD $500, R12
+	B    callbackasm1(SB)
+	MOVD $501, R12
+	B    callbackasm1(SB)
+	MOVD $502, R12
+	B    callbackasm1(SB)
+	MOVD $503, R12
+	B    callbackasm1(SB)
+	MOVD $504, R12
+	B    callbackasm1(SB)
+	MOVD $505, R12
+	B    callbackasm1(SB)
+	MOVD $506, R12
+	B    callbackasm1(SB)
+	MOVD $507, R12
+	B    callbackasm1(SB)
+	MOVD $508, R12
+	B    callbackasm1(SB)
+	MOVD $509, R12
+	B    callbackasm1(SB)
+	MOVD $510, R12
+	B    callbackasm1(SB)
+	MOVD $511, R12
+	B    callbackasm1(SB)
+	MOVD $512, R12
+	B    callbackasm1(SB)
+	MOVD $513, R12
+	B    callbackasm1(SB)
+	MOVD $514, R12
+	B    callbackasm1(SB)
+	MOVD $515, R12
+	B    callbackasm1(SB)
+	MOVD $516, R12
+	B    callbackasm1(SB)
+	MOVD $517, R12
+	B    callbackasm1(SB)
+	MOVD $518, R12
+	B    callbackasm1(SB)
+	MOVD $519, R12
+	B    callbackasm1(SB)
+	MOVD $520, R12
+	B    callbackasm1(SB)
+	MOVD $521, R12
+	B    callbackasm1(SB)
+	MOVD $522, R12
+	B    callbackasm1(SB)
+	MOVD $523, R12
+	B    callbackasm1(SB)
+	MOVD $524, R12
+	B    callbackasm1(SB)
+	MOVD $525, R12
+	B    callbackasm1(SB)
+	MOVD $526, R12
+	B    callbackasm1(SB)
+	MOVD $527, R12
+	B    callbackasm1(SB)
+	MOVD $528, R12
+	B    callbackasm1(SB)
+	MOVD $529, R12
+	B    callbackasm1(SB)
+	MOVD $530, R12
+	B    callbackasm1(SB)
+	MOVD $531, R12
+	B    callbackasm1(SB)
+	MOVD $532, R12
+	B    callbackasm1(SB)
+	MOVD $533, R12
+	B    callbackasm1(SB)
+	MOVD $534, R12
+	B    callbackasm1(SB)
+	MOVD $535, R12
+	B    callbackasm1(SB)
+	MOVD $536, R12
+	B    callbackasm1(SB)
+	MOVD $537, R12
+	B    callbackasm1(SB)
+	MOVD $538, R12
+	B    callbackasm1(SB)
+	MOVD $539, R12
+	B    callbackasm1(SB)
+	MOVD $540, R12
+	B    callbackasm1(SB)
+	MOVD $541, R12
+	B    callbackasm1(SB)
+	MOVD $542, R12
+	B    callbackasm1(SB)
+	MOVD $543, R12
+	B    callbackasm1(SB)
+	MOVD $544, R12
+	B    callbackasm1(SB)
+	MOVD $545, R12
+	B    callbackasm1(SB)
+	MOVD $546, R12
+	B    callbackasm1(SB)
+	MOVD $547, R12
+	B    callbackasm1(SB)
+	MOVD $548, R12
+	B    callbackasm1(SB)
+	MOVD $549, R12
+	B    callbackasm1(SB)
+	MOVD $550, R12
+	B    callbackasm1(SB)
+	MOVD $551, R12
+	B    callbackasm1(SB)
+	MOVD $552, R12
+	B    callbackasm1(SB)
+	MOVD $553, R12
+	B    callbackasm1(SB)
+	MOVD $554, R12
+	B    callbackasm1(SB)
+	MOVD $555, R12
+	B    callbackasm1(SB)
+	MOVD $556, R12
+	B    callbackasm1(SB)
+	MOVD $557, R12
+	B    callbackasm1(SB)
+	MOVD $558, R12
+	B    callbackasm1(SB)
+	MOVD $559, R12
+	B    callbackasm1(SB)
+	MOVD $560, R12
+	B    callbackasm1(SB)
+	MOVD $561, R12
+	B    callbackasm1(SB)
+	MOVD $562, R12
+	B    callbackasm1(SB)
+	MOVD $563, R12
+	B    callbackasm1(SB)
+	MOVD $564, R12
+	B    callbackasm1(SB)
+	MOVD $565, R12
+	B    callbackasm1(SB)
+	MOVD $566, R12
+	B    callbackasm1(SB)
+	MOVD $567, R12
+	B    callbackasm1(SB)
+	MOVD $568, R12
+	B    callbackasm1(SB)
+	MOVD $569, R12
+	B    callbackasm1(SB)
+	MOVD $570, R12
+	B    callbackasm1(SB)
+	MOVD $571, R12
+	B    callbackasm1(SB)
+	MOVD $572, R12
+	B    callbackasm1(SB)
+	MOVD $573, R12
+	B    callbackasm1(SB)
+	MOVD $574, R12
+	B    callbackasm1(SB)
+	MOVD $575, R12
+	B    callbackasm1(SB)
+	MOVD $576, R12
+	B    callbackasm1(SB)
+	MOVD $577, R12
+	B    callbackasm1(SB)
+	MOVD $578, R12
+	B    callbackasm1(SB)
+	MOVD $579, R12
+	B    callbackasm1(SB)
+	MOVD $580, R12
+	B    callbackasm1(SB)
+	MOVD $581, R12
+	B    callbackasm1(SB)
+	MOVD $582, R12
+	B    callbackasm1(SB)
+	MOVD $583, R12
+	B    callbackasm1(SB)
+	MOVD $584, R12
+	B    callbackasm1(SB)
+	MOVD $585, R12
+	B    callbackasm1(SB)
+	MOVD $586, R12
+	B    callbackasm1(SB)
+	MOVD $587, R12
+	B    callbackasm1(SB)
+	MOVD $588, R12
+	B    callbackasm1(SB)
+	MOVD $589, R12
+	B    callbackasm1(SB)
+	MOVD $590, R12
+	B    callbackasm1(SB)
+	MOVD $591, R12
+	B    callbackasm1(SB)
+	MOVD $592, R12
+	B    callbackasm1(SB)
+	MOVD $593, R12
+	B    callbackasm1(SB)
+	MOVD $594, R12
+	B    callbackasm1(SB)
+	MOVD $595, R12
+	B    callbackasm1(SB)
+	MOVD $596, R12
+	B    callbackasm1(SB)
+	MOVD $597, R12
+	B    callbackasm1(SB)
+	MOVD $598, R12
+	B    callbackasm1(SB)
+	MOVD $599, R12
+	B    callbackasm1(SB)
+	MOVD $600, R12
+	B    callbackasm1(SB)
+	MOVD $601, R12
+	B    callbackasm1(SB)
+	MOVD $602, R12
+	B    callbackasm1(SB)
+	MOVD $603, R12
+	B    callbackasm1(SB)
+	MOVD $604, R12
+	B    callbackasm1(SB)
+	MOVD $605, R12
+	B    callbackasm1(SB)
+	MOVD $606, R12
+	B    callbackasm1(SB)
+	MOVD $607, R12
+	B    callbackasm1(SB)
+	MOVD $608, R12
+	B    callbackasm1(SB)
+	MOVD $609, R12
+	B    callbackasm1(SB)
+	MOVD $610, R12
+	B    callbackasm1(SB)
+	MOVD $611, R12
+	B    callbackasm1(SB)
+	MOVD $612, R12
+	B    callbackasm1(SB)
+	MOVD $613, R12
+	B    callbackasm1(SB)
+	MOVD $614, R12
+	B    callbackasm1(SB)
+	MOVD $615, R12
+	B    callbackasm1(SB)
+	MOVD $616, R12
+	B    callbackasm1(SB)
+	MOVD $617, R12
+	B    callbackasm1(SB)
+	MOVD $618, R12
+	B    callbackasm1(SB)
+	MOVD $619, R12
+	B    callbackasm1(SB)
+	MOVD $620, R12
+	B    callbackasm1(SB)
+	MOVD $621, R12
+	B    callbackasm1(SB)
+	MOVD $622, R12
+	B    callbackasm1(SB)
+	MOVD $623, R12
+	B    callbackasm1(SB)
+	MOVD $624, R12
+	B    callbackasm1(SB)
+	MOVD $625, R12
+	B    callbackasm1(SB)
+	MOVD $626, R12
+	B    callbackasm1(SB)
+	MOVD $627, R12
+	B    callbackasm1(SB)
+	MOVD $628, R12
+	B    callbackasm1(SB)
+	MOVD $629, R12
+	B    callbackasm1(SB)
+	MOVD $630, R12
+	B    callbackasm1(SB)
+	MOVD $631, R12
+	B    callbackasm1(SB)
+	MOVD $632, R12
+	B    callbackasm1(SB)
+	MOVD $633, R12
+	B    callbackasm1(SB)
+	MOVD $634, R12
+	B    callbackasm1(SB)
+	MOVD $635, R12
+	B    callbackasm1(SB)
+	MOVD $636, R12
+	B    callbackasm1(SB)
+	MOVD $637, R12
+	B    callbackasm1(SB)
+	MOVD $638, R12
+	B    callbackasm1(SB)
+	MOVD $639, R12
+	B    callbackasm1(SB)
+	MOVD $640, R12
+	B    callbackasm1(SB)
+	MOVD $641, R12
+	B    callbackasm1(SB)
+	MOVD $642, R12
+	B    callbackasm1(SB)
+	MOVD $643, R12
+	B    callbackasm1(SB)
+	MOVD $644, R12
+	B    callbackasm1(SB)
+	MOVD $645, R12
+	B    callbackasm1(SB)
+	MOVD $646, R12
+	B    callbackasm1(SB)
+	MOVD $647, R12
+	B    callbackasm1(SB)
+	MOVD $648, R12
+	B    callbackasm1(SB)
+	MOVD $649, R12
+	B    callbackasm1(SB)
+	MOVD $650, R12
+	B    callbackasm1(SB)
+	MOVD $651, R12
+	B    callbackasm1(SB)
+	MOVD $652, R12
+	B    callbackasm1(SB)
+	MOVD $653, R12
+	B    callbackasm1(SB)
+	MOVD $654, R12
+	B    callbackasm1(SB)
+	MOVD $655, R12
+	B    callbackasm1(SB)
+	MOVD $656, R12
+	B    callbackasm1(SB)
+	MOVD $657, R12
+	B    callbackasm1(SB)
+	MOVD $658, R12
+	B    callbackasm1(SB)
+	MOVD $659, R12
+	B    callbackasm1(SB)
+	MOVD $660, R12
+	B    callbackasm1(SB)
+	MOVD $661, R12
+	B    callbackasm1(SB)
+	MOVD $662, R12
+	B    callbackasm1(SB)
+	MOVD $663, R12
+	B    callbackasm1(SB)
+	MOVD $664, R12
+	B    callbackasm1(SB)
+	MOVD $665, R12
+	B    callbackasm1(SB)
+	MOVD $666, R12
+	B    callbackasm1(SB)
+	MOVD $667, R12
+	B    callbackasm1(SB)
+	MOVD $668, R12
+	B    callbackasm1(SB)
+	MOVD $669, R12
+	B    callbackasm1(SB)
+	MOVD $670, R12
+	B    callbackasm1(SB)
+	MOVD $671, R12
+	B    callbackasm1(SB)
+	MOVD $672, R12
+	B    callbackasm1(SB)
+	MOVD $673, R12
+	B    callbackasm1(SB)
+	MOVD $674, R12
+	B    callbackasm1(SB)
+	MOVD $675, R12
+	B    callbackasm1(SB)
+	MOVD $676, R12
+	B    callbackasm1(SB)
+	MOVD $677, R12
+	B    callbackasm1(SB)
+	MOVD $678, R12
+	B    callbackasm1(SB)
+	MOVD $679, R12
+	B    callbackasm1(SB)
+	MOVD $680, R12
+	B    callbackasm1(SB)
+	MOVD $681, R12
+	B    callbackasm1(SB)
+	MOVD $682, R12
+	B    callbackasm1(SB)
+	MOVD $683, R12
+	B    callbackasm1(SB)
+	MOVD $684, R12
+	B    callbackasm1(SB)
+	MOVD $685, R12
+	B    callbackasm1(SB)
+	MOVD $686, R12
+	B    callbackasm1(SB)
+	MOVD $687, R12
+	B    callbackasm1(SB)
+	MOVD $688, R12
+	B    callbackasm1(SB)
+	MOVD $689, R12
+	B    callbackasm1(SB)
+	MOVD $690, R12
+	B    callbackasm1(SB)
+	MOVD $691, R12
+	B    callbackasm1(SB)
+	MOVD $692, R12
+	B    callbackasm1(SB)
+	MOVD $693, R12
+	B    callbackasm1(SB)
+	MOVD $694, R12
+	B    callbackasm1(SB)
+	MOVD $695, R12
+	B    callbackasm1(SB)
+	MOVD $696, R12
+	B    callbackasm1(SB)
+	MOVD $697, R12
+	B    callbackasm1(SB)
+	MOVD $698, R12
+	B    callbackasm1(SB)
+	MOVD $699, R12
+	B    callbackasm1(SB)
+	MOVD $700, R12
+	B    callbackasm1(SB)
+	MOVD $701, R12
+	B    callbackasm1(SB)
+	MOVD $702, R12
+	B    callbackasm1(SB)
+	MOVD $703, R12
+	B    callbackasm1(SB)
+	MOVD $704, R12
+	B    callbackasm1(SB)
+	MOVD $705, R12
+	B    callbackasm1(SB)
+	MOVD $706, R12
+	B    callbackasm1(SB)
+	MOVD $707, R12
+	B    callbackasm1(SB)
+	MOVD $708, R12
+	B    callbackasm1(SB)
+	MOVD $709, R12
+	B    callbackasm1(SB)
+	MOVD $710, R12
+	B    callbackasm1(SB)
+	MOVD $711, R12
+	B    callbackasm1(SB)
+	MOVD $712, R12
+	B    callbackasm1(SB)
+	MOVD $713, R12
+	B    callbackasm1(SB)
+	MOVD $714, R12
+	B    callbackasm1(SB)
+	MOVD $715, R12
+	B    callbackasm1(SB)
+	MOVD $716, R12
+	B    callbackasm1(SB)
+	MOVD $717, R12
+	B    callbackasm1(SB)
+	MOVD $718, R12
+	B    callbackasm1(SB)
+	MOVD $719, R12
+	B    callbackasm1(SB)
+	MOVD $720, R12
+	B    callbackasm1(SB)
+	MOVD $721, R12
+	B    callbackasm1(SB)
+	MOVD $722, R12
+	B    callbackasm1(SB)
+	MOVD $723, R12
+	B    callbackasm1(SB)
+	MOVD $724, R12
+	B    callbackasm1(SB)
+	MOVD $725, R12
+	B    callbackasm1(SB)
+	MOVD $726, R12
+	B    callbackasm1(SB)
+	MOVD $727, R12
+	B    callbackasm1(SB)
+	MOVD $728, R12
+	B    callbackasm1(SB)
+	MOVD $729, R12
+	B    callbackasm1(SB)
+	MOVD $730, R12
+	B    callbackasm1(SB)
+	MOVD $731, R12
+	B    callbackasm1(SB)
+	MOVD $732, R12
+	B    callbackasm1(SB)
+	MOVD $733, R12
+	B    callbackasm1(SB)
+	MOVD $734, R12
+	B    callbackasm1(SB)
+	MOVD $735, R12
+	B    callbackasm1(SB)
+	MOVD $736, R12
+	B    callbackasm1(SB)
+	MOVD $737, R12
+	B    callbackasm1(SB)
+	MOVD $738, R12
+	B    callbackasm1(SB)
+	MOVD $739, R12
+	B    callbackasm1(SB)
+	MOVD $740, R12
+	B    callbackasm1(SB)
+	MOVD $741, R12
+	B    callbackasm1(SB)
+	MOVD $742, R12
+	B    callbackasm1(SB)
+	MOVD $743, R12
+	B    callbackasm1(SB)
+	MOVD $744, R12
+	B    callbackasm1(SB)
+	MOVD $745, R12
+	B    callbackasm1(SB)
+	MOVD $746, R12
+	B    callbackasm1(SB)
+	MOVD $747, R12
+	B    callbackasm1(SB)
+	MOVD $748, R12
+	B    callbackasm1(SB)
+	MOVD $749, R12
+	B    callbackasm1(SB)
+	MOVD $750, R12
+	B    callbackasm1(SB)
+	MOVD $751, R12
+	B    callbackasm1(SB)
+	MOVD $752, R12
+	B    callbackasm1(SB)
+	MOVD $753, R12
+	B    callbackasm1(SB)
+	MOVD $754, R12
+	B    callbackasm1(SB)
+	MOVD $755, R12
+	B    callbackasm1(SB)
+	MOVD $756, R12
+	B    callbackasm1(SB)
+	MOVD $757, R12
+	B    callbackasm1(SB)
+	MOVD $758, R12
+	B    callbackasm1(SB)
+	MOVD $759, R12
+	B    callbackasm1(SB)
+	MOVD $760, R12
+	B    callbackasm1(SB)
+	MOVD $761, R12
+	B    callbackasm1(SB)
+	MOVD $762, R12
+	B    callbackasm1(SB)
+	MOVD $763, R12
+	B    callbackasm1(SB)
+	MOVD $764, R12
+	B    callbackasm1(SB)
+	MOVD $765, R12
+	B    callbackasm1(SB)
+	MOVD $766, R12
+	B    callbackasm1(SB)
+	MOVD $767, R12
+	B    callbackasm1(SB)
+	MOVD $768, R12
+	B    callbackasm1(SB)
+	MOVD $769, R12
+	B    callbackasm1(SB)
+	MOVD $770, R12
+	B    callbackasm1(SB)
+	MOVD $771, R12
+	B    callbackasm1(SB)
+	MOVD $772, R12
+	B    callbackasm1(SB)
+	MOVD $773, R12
+	B    callbackasm1(SB)
+	MOVD $774, R12
+	B    callbackasm1(SB)
+	MOVD $775, R12
+	B    callbackasm1(SB)
+	MOVD $776, R12
+	B    callbackasm1(SB)
+	MOVD $777, R12
+	B    callbackasm1(SB)
+	MOVD $778, R12
+	B    callbackasm1(SB)
+	MOVD $779, R12
+	B    callbackasm1(SB)
+	MOVD $780, R12
+	B    callbackasm1(SB)
+	MOVD $781, R12
+	B    callbackasm1(SB)
+	MOVD $782, R12
+	B    callbackasm1(SB)
+	MOVD $783, R12
+	B    callbackasm1(SB)
+	MOVD $784, R12
+	B    callbackasm1(SB)
+	MOVD $785, R12
+	B    callbackasm1(SB)
+	MOVD $786, R12
+	B    callbackasm1(SB)
+	MOVD $787, R12
+	B    callbackasm1(SB)
+	MOVD $788, R12
+	B    callbackasm1(SB)
+	MOVD $789, R12
+	B    callbackasm1(SB)
+	MOVD $790, R12
+	B    callbackasm1(SB)
+	MOVD $791, R12
+	B    callbackasm1(SB)
+	MOVD $792, R12
+	B    callbackasm1(SB)
+	MOVD $793, R12
+	B    callbackasm1(SB)
+	MOVD $794, R12
+	B    callbackasm1(SB)
+	MOVD $795, R12
+	B    callbackasm1(SB)
+	MOVD $796, R12
+	B    callbackasm1(SB)
+	MOVD $797, R12
+	B    callbackasm1(SB)
+	MOVD $798, R12
+	B    callbackasm1(SB)
+	MOVD $799, R12
+	B    callbackasm1(SB)
+	MOVD $800, R12
+	B    callbackasm1(SB)
+	MOVD $801, R12
+	B    callbackasm1(SB)
+	MOVD $802, R12
+	B    callbackasm1(SB)
+	MOVD $803, R12
+	B    callbackasm1(SB)
+	MOVD $804, R12
+	B    callbackasm1(SB)
+	MOVD $805, R12
+	B    callbackasm1(SB)
+	MOVD $806, R12
+	B    callbackasm1(SB)
+	MOVD $807, R12
+	B    callbackasm1(SB)
+	MOVD $808, R12
+	B    callbackasm1(SB)
+	MOVD $809, R12
+	B    callbackasm1(SB)
+	MOVD $810, R12
+	B    callbackasm1(SB)
+	MOVD $811, R12
+	B    callbackasm1(SB)
+	MOVD $812, R12
+	B    callbackasm1(SB)
+	MOVD $813, R12
+	B    callbackasm1(SB)
+	MOVD $814, R12
+	B    callbackasm1(SB)
+	MOVD $815, R12
+	B    callbackasm1(SB)
+	MOVD $816, R12
+	B    callbackasm1(SB)
+	MOVD $817, R12
+	B    callbackasm1(SB)
+	MOVD $818, R12
+	B    callbackasm1(SB)
+	MOVD $819, R12
+	B    callbackasm1(SB)
+	MOVD $820, R12
+	B    callbackasm1(SB)
+	MOVD $821, R12
+	B    callbackasm1(SB)
+	MOVD $822, R12
+	B    callbackasm1(SB)
+	MOVD $823, R12
+	B    callbackasm1(SB)
+	MOVD $824, R12
+	B    callbackasm1(SB)
+	MOVD $825, R12
+	B    callbackasm1(SB)
+	MOVD $826, R12
+	B    callbackasm1(SB)
+	MOVD $827, R12
+	B    callbackasm1(SB)
+	MOVD $828, R12
+	B    callbackasm1(SB)
+	MOVD $829, R12
+	B    callbackasm1(SB)
+	MOVD $830, R12
+	B    callbackasm1(SB)
+	MOVD $831, R12
+	B    callbackasm1(SB)
+	MOVD $832, R12
+	B    callbackasm1(SB)
+	MOVD $833, R12
+	B    callbackasm1(SB)
+	MOVD $834, R12
+	B    callbackasm1(SB)
+	MOVD $835, R12
+	B    callbackasm1(SB)
+	MOVD $836, R12
+	B    callbackasm1(SB)
+	MOVD $837, R12
+	B    callbackasm1(SB)
+	MOVD $838, R12
+	B    callbackasm1(SB)
+	MOVD $839, R12
+	B    callbackasm1(SB)
+	MOVD $840, R12
+	B    callbackasm1(SB)
+	MOVD $841, R12
+	B    callbackasm1(SB)
+	MOVD $842, R12
+	B    callbackasm1(SB)
+	MOVD $843, R12
+	B    callbackasm1(SB)
+	MOVD $844, R12
+	B    callbackasm1(SB)
+	MOVD $845, R12
+	B    callbackasm1(SB)
+	MOVD $846, R12
+	B    callbackasm1(SB)
+	MOVD $847, R12
+	B    callbackasm1(SB)
+	MOVD $848, R12
+	B    callbackasm1(SB)
+	MOVD $849, R12
+	B    callbackasm1(SB)
+	MOVD $850, R12
+	B    callbackasm1(SB)
+	MOVD $851, R12
+	B    callbackasm1(SB)
+	MOVD $852, R12
+	B    callbackasm1(SB)
+	MOVD $853, R12
+	B    callbackasm1(SB)
+	MOVD $854, R12
+	B    callbackasm1(SB)
+	MOVD $855, R12
+	B    callbackasm1(SB)
+	MOVD $856, R12
+	B    callbackasm1(SB)
+	MOVD $857, R12
+	B    callbackasm1(SB)
+	MOVD $858, R12
+	B    callbackasm1(SB)
+	MOVD $859, R12
+	B    callbackasm1(SB)
+	MOVD $860, R12
+	B    callbackasm1(SB)
+	MOVD $861, R12
+	B    callbackasm1(SB)
+	MOVD $862, R12
+	B    callbackasm1(SB)
+	MOVD $863, R12
+	B    callbackasm1(SB)
+	MOVD $864, R12
+	B    callbackasm1(SB)
+	MOVD $865, R12
+	B    callbackasm1(SB)
+	MOVD $866, R12
+	B    callbackasm1(SB)
+	MOVD $867, R12
+	B    callbackasm1(SB)
+	MOVD $868, R12
+	B    callbackasm1(SB)
+	MOVD $869, R12
+	B    callbackasm1(SB)
+	MOVD $870, R12
+	B    callbackasm1(SB)
+	MOVD $871, R12
+	B    callbackasm1(SB)
+	MOVD $872, R12
+	B    callbackasm1(SB)
+	MOVD $873, R12
+	B    callbackasm1(SB)
+	MOVD $874, R12
+	B    callbackasm1(SB)
+	MOVD $875, R12
+	B    callbackasm1(SB)
+	MOVD $876, R12
+	B    callbackasm1(SB)
+	MOVD $877, R12
+	B    callbackasm1(SB)
+	MOVD $878, R12
+	B    callbackasm1(SB)
+	MOVD $879, R12
+	B    callbackasm1(SB)
+	MOVD $880, R12
+	B    callbackasm1(SB)
+	MOVD $881, R12
+	B    callbackasm1(SB)
+	MOVD $882, R12
+	B    callbackasm1(SB)
+	MOVD $883, R12
+	B    callbackasm1(SB)
+	MOVD $884, R12
+	B    callbackasm1(SB)
+	MOVD $885, R12
+	B    callbackasm1(SB)
+	MOVD $886, R12
+	B    callbackasm1(SB)
+	MOVD $887, R12
+	B    callbackasm1(SB)
+	MOVD $888, R12
+	B    callbackasm1(SB)
+	MOVD $889, R12
+	B    callbackasm1(SB)
+	MOVD $890, R12
+	B    callbackasm1(SB)
+	MOVD $891, R12
+	B    callbackasm1(SB)
+	MOVD $892, R12
+	B    callbackasm1(SB)
+	MOVD $893, R12
+	B    callbackasm1(SB)
+	MOVD $894, R12
+	B    callbackasm1(SB)
+	MOVD $895, R12
+	B    callbackasm1(SB)
+	MOVD $896, R12
+	B    callbackasm1(SB)
+	MOVD $897, R12
+	B    callbackasm1(SB)
+	MOVD $898, R12
+	B    callbackasm1(SB)
+	MOVD $899, R12
+	B    callbackasm1(SB)
+	MOVD $900, R12
+	B    callbackasm1(SB)
+	MOVD $901, R12
+	B    callbackasm1(SB)
+	MOVD $902, R12
+	B    callbackasm1(SB)
+	MOVD $903, R12
+	B    callbackasm1(SB)
+	MOVD $904, R12
+	B    callbackasm1(SB)
+	MOVD $905, R12
+	B    callbackasm1(SB)
+	MOVD $906, R12
+	B    callbackasm1(SB)
+	MOVD $907, R12
+	B    callbackasm1(SB)
+	MOVD $908, R12
+	B    callbackasm1(SB)
+	MOVD $909, R12
+	B    callbackasm1(SB)
+	MOVD $910, R12
+	B    callbackasm1(SB)
+	MOVD $911, R12
+	B    callbackasm1(SB)
+	MOVD $912, R12
+	B    callbackasm1(SB)
+	MOVD $913, R12
+	B    callbackasm1(SB)
+	MOVD $914, R12
+	B    callbackasm1(SB)
+	MOVD $915, R12
+	B    callbackasm1(SB)
+	MOVD $916, R12
+	B    callbackasm1(SB)
+	MOVD $917, R12
+	B    callbackasm1(SB)
+	MOVD $918, R12
+	B    callbackasm1(SB)
+	MOVD $919, R12
+	B    callbackasm1(SB)
+	MOVD $920, R12
+	B    callbackasm1(SB)
+	MOVD $921, R12
+	B    callbackasm1(SB)
+	MOVD $922, R12
+	B    callbackasm1(SB)
+	MOVD $923, R12
+	B    callbackasm1(SB)
+	MOVD $924, R12
+	B    callbackasm1(SB)
+	MOVD $925, R12
+	B    callbackasm1(SB)
+	MOVD $926, R12
+	B    callbackasm1(SB)
+	MOVD $927, R12
+	B    callbackasm1(SB)
+	MOVD $928, R12
+	B    callbackasm1(SB)
+	MOVD $929, R12
+	B    callbackasm1(SB)
+	MOVD $930, R12
+	B    callbackasm1(SB)
+	MOVD $931, R12
+	B    callbackasm1(SB)
+	MOVD $932, R12
+	B    callbackasm1(SB)
+	MOVD $933, R12
+	B    callbackasm1(SB)
+	MOVD $934, R12
+	B    callbackasm1(SB)
+	MOVD $935, R12
+	B    callbackasm1(SB)
+	MOVD $936, R12
+	B    callbackasm1(SB)
+	MOVD $937, R12
+	B    callbackasm1(SB)
+	MOVD $938, R12
+	B    callbackasm1(SB)
+	MOVD $939, R12
+	B    callbackasm1(SB)
+	MOVD $940, R12
+	B    callbackasm1(SB)
+	MOVD $941, R12
+	B    callbackasm1(SB)
+	MOVD $942, R12
+	B    callbackasm1(SB)
+	MOVD $943, R12
+	B    callbackasm1(SB)
+	MOVD $944, R12
+	B    callbackasm1(SB)
+	MOVD $945, R12
+	B    callbackasm1(SB)
+	MOVD $946, R12
+	B    callbackasm1(SB)
+	MOVD $947, R12
+	B    callbackasm1(SB)
+	MOVD $948, R12
+	B    callbackasm1(SB)
+	MOVD $949, R12
+	B    callbackasm1(SB)
+	MOVD $950, R12
+	B    callbackasm1(SB)
+	MOVD $951, R12
+	B    callbackasm1(SB)
+	MOVD $952, R12
+	B    callbackasm1(SB)
+	MOVD $953, R12
+	B    callbackasm1(SB)
+	MOVD $954, R12
+	B    callbackasm1(SB)
+	MOVD $955, R12
+	B    callbackasm1(SB)
+	MOVD $956, R12
+	B    callbackasm1(SB)
+	MOVD $957, R12
+	B    callbackasm1(SB)
+	MOVD $958, R12
+	B    callbackasm1(SB)
+	MOVD $959, R12
+	B    callbackasm1(SB)
+	MOVD $960, R12
+	B    callbackasm1(SB)
+	MOVD $961, R12
+	B    callbackasm1(SB)
+	MOVD $962, R12
+	B    callbackasm1(SB)
+	MOVD $963, R12
+	B    callbackasm1(SB)
+	MOVD $964, R12
+	B    callbackasm1(SB)
+	MOVD $965, R12
+	B    callbackasm1(SB)
+	MOVD $966, R12
+	B    callbackasm1(SB)
+	MOVD $967, R12
+	B    callbackasm1(SB)
+	MOVD $968, R12
+	B    callbackasm1(SB)
+	MOVD $969, R12
+	B    callbackasm1(SB)
+	MOVD $970, R12
+	B    callbackasm1(SB)
+	MOVD $971, R12
+	B    callbackasm1(SB)
+	MOVD $972, R12
+	B    callbackasm1(SB)
+	MOVD $973, R12
+	B    callbackasm1(SB)
+	MOVD $974, R12
+	B    callbackasm1(SB)
+	MOVD $975, R12
+	B    callbackasm1(SB)
+	MOVD $976, R12
+	B    callbackasm1(SB)
+	MOVD $977, R12
+	B    callbackasm1(SB)
+	MOVD $978, R12
+	B    callbackasm1(SB)
+	MOVD $979, R12
+	B    callbackasm1(SB)
+	MOVD $980, R12
+	B    callbackasm1(SB)
+	MOVD $981, R12
+	B    callbackasm1(SB)
+	MOVD $982, R12
+	B    callbackasm1(SB)
+	MOVD $983, R12
+	B    callbackasm1(SB)
+	MOVD $984, R12
+	B    callbackasm1(SB)
+	MOVD $985, R12
+	B    callbackasm1(SB)
+	MOVD $986, R12
+	B    callbackasm1(SB)
+	MOVD $987, R12
+	B    callbackasm1(SB)
+	MOVD $988, R12
+	B    callbackasm1(SB)
+	MOVD $989, R12
+	B    callbackasm1(SB)
+	MOVD $990, R12
+	B    callbackasm1(SB)
+	MOVD $991, R12
+	B    callbackasm1(SB)
+	MOVD $992, R12
+	B    callbackasm1(SB)
+	MOVD $993, R12
+	B    callbackasm1(SB)
+	MOVD $994, R12
+	B    callbackasm1(SB)
+	MOVD $995, R12
+	B    callbackasm1(SB)
+	MOVD $996, R12
+	B    callbackasm1(SB)
+	MOVD $997, R12
+	B    callbackasm1(SB)
+	MOVD $998, R12
+	B    callbackasm1(SB)
+	MOVD $999, R12
+	B    callbackasm1(SB)
+	MOVD $1000, R12
+	B    callbackasm1(SB)
+	MOVD $1001, R12
+	B    callbackasm1(SB)
+	MOVD $1002, R12
+	B    callbackasm1(SB)
+	MOVD $1003, R12
+	B    callbackasm1(SB)
+	MOVD $1004, R12
+	B    callbackasm1(SB)
+	MOVD $1005, R12
+	B    callbackasm1(SB)
+	MOVD $1006, R12
+	B    callbackasm1(SB)
+	MOVD $1007, R12
+	B    callbackasm1(SB)
+	MOVD $1008, R12
+	B    callbackasm1(SB)
+	MOVD $1009, R12
+	B    callbackasm1(SB)
+	MOVD $1010, R12
+	B    callbackasm1(SB)
+	MOVD $1011, R12
+	B    callbackasm1(SB)
+	MOVD $1012, R12
+	B    callbackasm1(SB)
+	MOVD $1013, R12
+	B    callbackasm1(SB)
+	MOVD $1014, R12
+	B    callbackasm1(SB)
+	MOVD $1015, R12
+	B    callbackasm1(SB)
+	MOVD $1016, R12
+	B    callbackasm1(SB)
+	MOVD $1017, R12
+	B    callbackasm1(SB)
+	MOVD $1018, R12
+	B    callbackasm1(SB)
+	MOVD $1019, R12
+	B    callbackasm1(SB)
+	MOVD $1020, R12
+	B    callbackasm1(SB)
+	MOVD $1021, R12
+	B    callbackasm1(SB)
+	MOVD $1022, R12
+	B    callbackasm1(SB)
+	MOVD $1023, R12
+	B    callbackasm1(SB)
+	MOVD $1024, R12
+	B    callbackasm1(SB)
+	MOVD $1025, R12
+	B    callbackasm1(SB)
+	MOVD $1026, R12
+	B    callbackasm1(SB)
+	MOVD $1027, R12
+	B    callbackasm1(SB)
+	MOVD $1028, R12
+	B    callbackasm1(SB)
+	MOVD $1029, R12
+	B    callbackasm1(SB)
+	MOVD $1030, R12
+	B    callbackasm1(SB)
+	MOVD $1031, R12
+	B    callbackasm1(SB)
+	MOVD $1032, R12
+	B    callbackasm1(SB)
+	MOVD $1033, R12
+	B    callbackasm1(SB)
+	MOVD $1034, R12
+	B    callbackasm1(SB)
+	MOVD $1035, R12
+	B    callbackasm1(SB)
+	MOVD $1036, R12
+	B    callbackasm1(SB)
+	MOVD $1037, R12
+	B    callbackasm1(SB)
+	MOVD $1038, R12
+	B    callbackasm1(SB)
+	MOVD $1039, R12
+	B    callbackasm1(SB)
+	MOVD $1040, R12
+	B    callbackasm1(SB)
+	MOVD $1041, R12
+	B    callbackasm1(SB)
+	MOVD $1042, R12
+	B    callbackasm1(SB)
+	MOVD $1043, R12
+	B    callbackasm1(SB)
+	MOVD $1044, R12
+	B    callbackasm1(SB)
+	MOVD $1045, R12
+	B    callbackasm1(SB)
+	MOVD $1046, R12
+	B    callbackasm1(SB)
+	MOVD $1047, R12
+	B    callbackasm1(SB)
+	MOVD $1048, R12
+	B    callbackasm1(SB)
+	MOVD $1049, R12
+	B    callbackasm1(SB)
+	MOVD $1050, R12
+	B    callbackasm1(SB)
+	MOVD $1051, R12
+	B    callbackasm1(SB)
+	MOVD $1052, R12
+	B    callbackasm1(SB)
+	MOVD $1053, R12
+	B    callbackasm1(SB)
+	MOVD $1054, R12
+	B    callbackasm1(SB)
+	MOVD $1055, R12
+	B    callbackasm1(SB)
+	MOVD $1056, R12
+	B    callbackasm1(SB)
+	MOVD $1057, R12
+	B    callbackasm1(SB)
+	MOVD $1058, R12
+	B    callbackasm1(SB)
+	MOVD $1059, R12
+	B    callbackasm1(SB)
+	MOVD $1060, R12
+	B    callbackasm1(SB)
+	MOVD $1061, R12
+	B    callbackasm1(SB)
+	MOVD $1062, R12
+	B    callbackasm1(SB)
+	MOVD $1063, R12
+	B    callbackasm1(SB)
+	MOVD $1064, R12
+	B    callbackasm1(SB)
+	MOVD $1065, R12
+	B    callbackasm1(SB)
+	MOVD $1066, R12
+	B    callbackasm1(SB)
+	MOVD $1067, R12
+	B    callbackasm1(SB)
+	MOVD $1068, R12
+	B    callbackasm1(SB)
+	MOVD $1069, R12
+	B    callbackasm1(SB)
+	MOVD $1070, R12
+	B    callbackasm1(SB)
+	MOVD $1071, R12
+	B    callbackasm1(SB)
+	MOVD $1072, R12
+	B    callbackasm1(SB)
+	MOVD $1073, R12
+	B    callbackasm1(SB)
+	MOVD $1074, R12
+	B    callbackasm1(SB)
+	MOVD $1075, R12
+	B    callbackasm1(SB)
+	MOVD $1076, R12
+	B    callbackasm1(SB)
+	MOVD $1077, R12
+	B    callbackasm1(SB)
+	MOVD $1078, R12
+	B    callbackasm1(SB)
+	MOVD $1079, R12
+	B    callbackasm1(SB)
+	MOVD $1080, R12
+	B    callbackasm1(SB)
+	MOVD $1081, R12
+	B    callbackasm1(SB)
+	MOVD $1082, R12
+	B    callbackasm1(SB)
+	MOVD $1083, R12
+	B    callbackasm1(SB)
+	MOVD $1084, R12
+	B    callbackasm1(SB)
+	MOVD $1085, R12
+	B    callbackasm1(SB)
+	MOVD $1086, R12
+	B    callbackasm1(SB)
+	MOVD $1087, R12
+	B    callbackasm1(SB)
+	MOVD $1088, R12
+	B    callbackasm1(SB)
+	MOVD $1089, R12
+	B    callbackasm1(SB)
+	MOVD $1090, R12
+	B    callbackasm1(SB)
+	MOVD $1091, R12
+	B    callbackasm1(SB)
+	MOVD $1092, R12
+	B    callbackasm1(SB)
+	MOVD $1093, R12
+	B    callbackasm1(SB)
+	MOVD $1094, R12
+	B    callbackasm1(SB)
+	MOVD $1095, R12
+	B    callbackasm1(SB)
+	MOVD $1096, R12
+	B    callbackasm1(SB)
+	MOVD $1097, R12
+	B    callbackasm1(SB)
+	MOVD $1098, R12
+	B    callbackasm1(SB)
+	MOVD $1099, R12
+	B    callbackasm1(SB)
+	MOVD $1100, R12
+	B    callbackasm1(SB)
+	MOVD $1101, R12
+	B    callbackasm1(SB)
+	MOVD $1102, R12
+	B    callbackasm1(SB)
+	MOVD $1103, R12
+	B    callbackasm1(SB)
+	MOVD $1104, R12
+	B    callbackasm1(SB)
+	MOVD $1105, R12
+	B    callbackasm1(SB)
+	MOVD $1106, R12
+	B    callbackasm1(SB)
+	MOVD $1107, R12
+	B    callbackasm1(SB)
+	MOVD $1108, R12
+	B    callbackasm1(SB)
+	MOVD $1109, R12
+	B    callbackasm1(SB)
+	MOVD $1110, R12
+	B    callbackasm1(SB)
+	MOVD $1111, R12
+	B    callbackasm1(SB)
+	MOVD $1112, R12
+	B    callbackasm1(SB)
+	MOVD $1113, R12
+	B    callbackasm1(SB)
+	MOVD $1114, R12
+	B    callbackasm1(SB)
+	MOVD $1115, R12
+	B    callbackasm1(SB)
+	MOVD $1116, R12
+	B    callbackasm1(SB)
+	MOVD $1117, R12
+	B    callbackasm1(SB)
+	MOVD $1118, R12
+	B    callbackasm1(SB)
+	MOVD $1119, R12
+	B    callbackasm1(SB)
+	MOVD $1120, R12
+	B    callbackasm1(SB)
+	MOVD $1121, R12
+	B    callbackasm1(SB)
+	MOVD $1122, R12
+	B    callbackasm1(SB)
+	MOVD $1123, R12
+	B    callbackasm1(SB)
+	MOVD $1124, R12
+	B    callbackasm1(SB)
+	MOVD $1125, R12
+	B    callbackasm1(SB)
+	MOVD $1126, R12
+	B    callbackasm1(SB)
+	MOVD $1127, R12
+	B    callbackasm1(SB)
+	MOVD $1128, R12
+	B    callbackasm1(SB)
+	MOVD $1129, R12
+	B    callbackasm1(SB)
+	MOVD $1130, R12
+	B    callbackasm1(SB)
+	MOVD $1131, R12
+	B    callbackasm1(SB)
+	MOVD $1132, R12
+	B    callbackasm1(SB)
+	MOVD $1133, R12
+	B    callbackasm1(SB)
+	MOVD $1134, R12
+	B    callbackasm1(SB)
+	MOVD $1135, R12
+	B    callbackasm1(SB)
+	MOVD $1136, R12
+	B    callbackasm1(SB)
+	MOVD $1137, R12
+	B    callbackasm1(SB)
+	MOVD $1138, R12
+	B    callbackasm1(SB)
+	MOVD $1139, R12
+	B    callbackasm1(SB)
+	MOVD $1140, R12
+	B    callbackasm1(SB)
+	MOVD $1141, R12
+	B    callbackasm1(SB)
+	MOVD $1142, R12
+	B    callbackasm1(SB)
+	MOVD $1143, R12
+	B    callbackasm1(SB)
+	MOVD $1144, R12
+	B    callbackasm1(SB)
+	MOVD $1145, R12
+	B    callbackasm1(SB)
+	MOVD $1146, R12
+	B    callbackasm1(SB)
+	MOVD $1147, R12
+	B    callbackasm1(SB)
+	MOVD $1148, R12
+	B    callbackasm1(SB)
+	MOVD $1149, R12
+	B    callbackasm1(SB)
+	MOVD $1150, R12
+	B    callbackasm1(SB)
+	MOVD $1151, R12
+	B    callbackasm1(SB)
+	MOVD $1152, R12
+	B    callbackasm1(SB)
+	MOVD $1153, R12
+	B    callbackasm1(SB)
+	MOVD $1154, R12
+	B    callbackasm1(SB)
+	MOVD $1155, R12
+	B    callbackasm1(SB)
+	MOVD $1156, R12
+	B    callbackasm1(SB)
+	MOVD $1157, R12
+	B    callbackasm1(SB)
+	MOVD $1158, R12
+	B    callbackasm1(SB)
+	MOVD $1159, R12
+	B    callbackasm1(SB)
+	MOVD $1160, R12
+	B    callbackasm1(SB)
+	MOVD $1161, R12
+	B    callbackasm1(SB)
+	MOVD $1162, R12
+	B    callbackasm1(SB)
+	MOVD $1163, R12
+	B    callbackasm1(SB)
+	MOVD $1164, R12
+	B    callbackasm1(SB)
+	MOVD $1165, R12
+	B    callbackasm1(SB)
+	MOVD $1166, R12
+	B    callbackasm1(SB)
+	MOVD $1167, R12
+	B    callbackasm1(SB)
+	MOVD $1168, R12
+	B    callbackasm1(SB)
+	MOVD $1169, R12
+	B    callbackasm1(SB)
+	MOVD $1170, R12
+	B    callbackasm1(SB)
+	MOVD $1171, R12
+	B    callbackasm1(SB)
+	MOVD $1172, R12
+	B    callbackasm1(SB)
+	MOVD $1173, R12
+	B    callbackasm1(SB)
+	MOVD $1174, R12
+	B    callbackasm1(SB)
+	MOVD $1175, R12
+	B    callbackasm1(SB)
+	MOVD $1176, R12
+	B    callbackasm1(SB)
+	MOVD $1177, R12
+	B    callbackasm1(SB)
+	MOVD $1178, R12
+	B    callbackasm1(SB)
+	MOVD $1179, R12
+	B    callbackasm1(SB)
+	MOVD $1180, R12
+	B    callbackasm1(SB)
+	MOVD $1181, R12
+	B    callbackasm1(SB)
+	MOVD $1182, R12
+	B    callbackasm1(SB)
+	MOVD $1183, R12
+	B    callbackasm1(SB)
+	MOVD $1184, R12
+	B    callbackasm1(SB)
+	MOVD $1185, R12
+	B    callbackasm1(SB)
+	MOVD $1186, R12
+	B    callbackasm1(SB)
+	MOVD $1187, R12
+	B    callbackasm1(SB)
+	MOVD $1188, R12
+	B    callbackasm1(SB)
+	MOVD $1189, R12
+	B    callbackasm1(SB)
+	MOVD $1190, R12
+	B    callbackasm1(SB)
+	MOVD $1191, R12
+	B    callbackasm1(SB)
+	MOVD $1192, R12
+	B    callbackasm1(SB)
+	MOVD $1193, R12
+	B    callbackasm1(SB)
+	MOVD $1194, R12
+	B    callbackasm1(SB)
+	MOVD $1195, R12
+	B    callbackasm1(SB)
+	MOVD $1196, R12
+	B    callbackasm1(SB)
+	MOVD $1197, R12
+	B    callbackasm1(SB)
+	MOVD $1198, R12
+	B    callbackasm1(SB)
+	MOVD $1199, R12
+	B    callbackasm1(SB)
+	MOVD $1200, R12
+	B    callbackasm1(SB)
+	MOVD $1201, R12
+	B    callbackasm1(SB)
+	MOVD $1202, R12
+	B    callbackasm1(SB)
+	MOVD $1203, R12
+	B    callbackasm1(SB)
+	MOVD $1204, R12
+	B    callbackasm1(SB)
+	MOVD $1205, R12
+	B    callbackasm1(SB)
+	MOVD $1206, R12
+	B    callbackasm1(SB)
+	MOVD $1207, R12
+	B    callbackasm1(SB)
+	MOVD $1208, R12
+	B    callbackasm1(SB)
+	MOVD $1209, R12
+	B    callbackasm1(SB)
+	MOVD $1210, R12
+	B    callbackasm1(SB)
+	MOVD $1211, R12
+	B    callbackasm1(SB)
+	MOVD $1212, R12
+	B    callbackasm1(SB)
+	MOVD $1213, R12
+	B    callbackasm1(SB)
+	MOVD $1214, R12
+	B    callbackasm1(SB)
+	MOVD $1215, R12
+	B    callbackasm1(SB)
+	MOVD $1216, R12
+	B    callbackasm1(SB)
+	MOVD $1217, R12
+	B    callbackasm1(SB)
+	MOVD $1218, R12
+	B    callbackasm1(SB)
+	MOVD $1219, R12
+	B    callbackasm1(SB)
+	MOVD $1220, R12
+	B    callbackasm1(SB)
+	MOVD $1221, R12
+	B    callbackasm1(SB)
+	MOVD $1222, R12
+	B    callbackasm1(SB)
+	MOVD $1223, R12
+	B    callbackasm1(SB)
+	MOVD $1224, R12
+	B    callbackasm1(SB)
+	MOVD $1225, R12
+	B    callbackasm1(SB)
+	MOVD $1226, R12
+	B    callbackasm1(SB)
+	MOVD $1227, R12
+	B    callbackasm1(SB)
+	MOVD $1228, R12
+	B    callbackasm1(SB)
+	MOVD $1229, R12
+	B    callbackasm1(SB)
+	MOVD $1230, R12
+	B    callbackasm1(SB)
+	MOVD $1231, R12
+	B    callbackasm1(SB)
+	MOVD $1232, R12
+	B    callbackasm1(SB)
+	MOVD $1233, R12
+	B    callbackasm1(SB)
+	MOVD $1234, R12
+	B    callbackasm1(SB)
+	MOVD $1235, R12
+	B    callbackasm1(SB)
+	MOVD $1236, R12
+	B    callbackasm1(SB)
+	MOVD $1237, R12
+	B    callbackasm1(SB)
+	MOVD $1238, R12
+	B    callbackasm1(SB)
+	MOVD $1239, R12
+	B    callbackasm1(SB)
+	MOVD $1240, R12
+	B    callbackasm1(SB)
+	MOVD $1241, R12
+	B    callbackasm1(SB)
+	MOVD $1242, R12
+	B    callbackasm1(SB)
+	MOVD $1243, R12
+	B    callbackasm1(SB)
+	MOVD $1244, R12
+	B    callbackasm1(SB)
+	MOVD $1245, R12
+	B    callbackasm1(SB)
+	MOVD $1246, R12
+	B    callbackasm1(SB)
+	MOVD $1247, R12
+	B    callbackasm1(SB)
+	MOVD $1248, R12
+	B    callbackasm1(SB)
+	MOVD $1249, R12
+	B    callbackasm1(SB)
+	MOVD $1250, R12
+	B    callbackasm1(SB)
+	MOVD $1251, R12
+	B    callbackasm1(SB)
+	MOVD $1252, R12
+	B    callbackasm1(SB)
+	MOVD $1253, R12
+	B    callbackasm1(SB)
+	MOVD $1254, R12
+	B    callbackasm1(SB)
+	MOVD $1255, R12
+	B    callbackasm1(SB)
+	MOVD $1256, R12
+	B    callbackasm1(SB)
+	MOVD $1257, R12
+	B    callbackasm1(SB)
+	MOVD $1258, R12
+	B    callbackasm1(SB)
+	MOVD $1259, R12
+	B    callbackasm1(SB)
+	MOVD $1260, R12
+	B    callbackasm1(SB)
+	MOVD $1261, R12
+	B    callbackasm1(SB)
+	MOVD $1262, R12
+	B    callbackasm1(SB)
+	MOVD $1263, R12
+	B    callbackasm1(SB)
+	MOVD $1264, R12
+	B    callbackasm1(SB)
+	MOVD $1265, R12
+	B    callbackasm1(SB)
+	MOVD $1266, R12
+	B    callbackasm1(SB)
+	MOVD $1267, R12
+	B    callbackasm1(SB)
+	MOVD $1268, R12
+	B    callbackasm1(SB)
+	MOVD $1269, R12
+	B    callbackasm1(SB)
+	MOVD $1270, R12
+	B    callbackasm1(SB)
+	MOVD $1271, R12
+	B    callbackasm1(SB)
+	MOVD $1272, R12
+	B    callbackasm1(SB)
+	MOVD $1273, R12
+	B    callbackasm1(SB)
+	MOVD $1274, R12
+	B    callbackasm1(SB)
+	MOVD $1275, R12
+	B    callbackasm1(SB)
+	MOVD $1276, R12
+	B    callbackasm1(SB)
+	MOVD $1277, R12
+	B    callbackasm1(SB)
+	MOVD $1278, R12
+	B    callbackasm1(SB)
+	MOVD $1279, R12
+	B    callbackasm1(SB)
+	MOVD $1280, R12
+	B    callbackasm1(SB)
+	MOVD $1281, R12
+	B    callbackasm1(SB)
+	MOVD $1282, R12
+	B    callbackasm1(SB)
+	MOVD $1283, R12
+	B    callbackasm1(SB)
+	MOVD $1284, R12
+	B    callbackasm1(SB)
+	MOVD $1285, R12
+	B    callbackasm1(SB)
+	MOVD $1286, R12
+	B    callbackasm1(SB)
+	MOVD $1287, R12
+	B    callbackasm1(SB)
+	MOVD $1288, R12
+	B    callbackasm1(SB)
+	MOVD $1289, R12
+	B    callbackasm1(SB)
+	MOVD $1290, R12
+	B    callbackasm1(SB)
+	MOVD $1291, R12
+	B    callbackasm1(SB)
+	MOVD $1292, R12
+	B    callbackasm1(SB)
+	MOVD $1293, R12
+	B    callbackasm1(SB)
+	MOVD $1294, R12
+	B    callbackasm1(SB)
+	MOVD $1295, R12
+	B    callbackasm1(SB)
+	MOVD $1296, R12
+	B    callbackasm1(SB)
+	MOVD $1297, R12
+	B    callbackasm1(SB)
+	MOVD $1298, R12
+	B    callbackasm1(SB)
+	MOVD $1299, R12
+	B    callbackasm1(SB)
+	MOVD $1300, R12
+	B    callbackasm1(SB)
+	MOVD $1301, R12
+	B    callbackasm1(SB)
+	MOVD $1302, R12
+	B    callbackasm1(SB)
+	MOVD $1303, R12
+	B    callbackasm1(SB)
+	MOVD $1304, R12
+	B    callbackasm1(SB)
+	MOVD $1305, R12
+	B    callbackasm1(SB)
+	MOVD $1306, R12
+	B    callbackasm1(SB)
+	MOVD $1307, R12
+	B    callbackasm1(SB)
+	MOVD $1308, R12
+	B    callbackasm1(SB)
+	MOVD $1309, R12
+	B    callbackasm1(SB)
+	MOVD $1310, R12
+	B    callbackasm1(SB)
+	MOVD $1311, R12
+	B    callbackasm1(SB)
+	MOVD $1312, R12
+	B    callbackasm1(SB)
+	MOVD $1313, R12
+	B    callbackasm1(SB)
+	MOVD $1314, R12
+	B    callbackasm1(SB)
+	MOVD $1315, R12
+	B    callbackasm1(SB)
+	MOVD $1316, R12
+	B    callbackasm1(SB)
+	MOVD $1317, R12
+	B    callbackasm1(SB)
+	MOVD $1318, R12
+	B    callbackasm1(SB)
+	MOVD $1319, R12
+	B    callbackasm1(SB)
+	MOVD $1320, R12
+	B    callbackasm1(SB)
+	MOVD $1321, R12
+	B    callbackasm1(SB)
+	MOVD $1322, R12
+	B    callbackasm1(SB)
+	MOVD $1323, R12
+	B    callbackasm1(SB)
+	MOVD $1324, R12
+	B    callbackasm1(SB)
+	MOVD $1325, R12
+	B    callbackasm1(SB)
+	MOVD $1326, R12
+	B    callbackasm1(SB)
+	MOVD $1327, R12
+	B    callbackasm1(SB)
+	MOVD $1328, R12
+	B    callbackasm1(SB)
+	MOVD $1329, R12
+	B    callbackasm1(SB)
+	MOVD $1330, R12
+	B    callbackasm1(SB)
+	MOVD $1331, R12
+	B    callbackasm1(SB)
+	MOVD $1332, R12
+	B    callbackasm1(SB)
+	MOVD $1333, R12
+	B    callbackasm1(SB)
+	MOVD $1334, R12
+	B    callbackasm1(SB)
+	MOVD $1335, R12
+	B    callbackasm1(SB)
+	MOVD $1336, R12
+	B    callbackasm1(SB)
+	MOVD $1337, R12
+	B    callbackasm1(SB)
+	MOVD $1338, R12
+	B    callbackasm1(SB)
+	MOVD $1339, R12
+	B    callbackasm1(SB)
+	MOVD $1340, R12
+	B    callbackasm1(SB)
+	MOVD $1341, R12
+	B    callbackasm1(SB)
+	MOVD $1342, R12
+	B    callbackasm1(SB)
+	MOVD $1343, R12
+	B    callbackasm1(SB)
+	MOVD $1344, R12
+	B    callbackasm1(SB)
+	MOVD $1345, R12
+	B    callbackasm1(SB)
+	MOVD $1346, R12
+	B    callbackasm1(SB)
+	MOVD $1347, R12
+	B    callbackasm1(SB)
+	MOVD $1348, R12
+	B    callbackasm1(SB)
+	MOVD $1349, R12
+	B    callbackasm1(SB)
+	MOVD $1350, R12
+	B    callbackasm1(SB)
+	MOVD $1351, R12
+	B    callbackasm1(SB)
+	MOVD $1352, R12
+	B    callbackasm1(SB)
+	MOVD $1353, R12
+	B    callbackasm1(SB)
+	MOVD $1354, R12
+	B    callbackasm1(SB)
+	MOVD $1355, R12
+	B    callbackasm1(SB)
+	MOVD $1356, R12
+	B    callbackasm1(SB)
+	MOVD $1357, R12
+	B    callbackasm1(SB)
+	MOVD $1358, R12
+	B    callbackasm1(SB)
+	MOVD $1359, R12
+	B    callbackasm1(SB)
+	MOVD $1360, R12
+	B    callbackasm1(SB)
+	MOVD $1361, R12
+	B    callbackasm1(SB)
+	MOVD $1362, R12
+	B    callbackasm1(SB)
+	MOVD $1363, R12
+	B    callbackasm1(SB)
+	MOVD $1364, R12
+	B    callbackasm1(SB)
+	MOVD $1365, R12
+	B    callbackasm1(SB)
+	MOVD $1366, R12
+	B    callbackasm1(SB)
+	MOVD $1367, R12
+	B    callbackasm1(SB)
+	MOVD $1368, R12
+	B    callbackasm1(SB)
+	MOVD $1369, R12
+	B    callbackasm1(SB)
+	MOVD $1370, R12
+	B    callbackasm1(SB)
+	MOVD $1371, R12
+	B    callbackasm1(SB)
+	MOVD $1372, R12
+	B    callbackasm1(SB)
+	MOVD $1373, R12
+	B    callbackasm1(SB)
+	MOVD $1374, R12
+	B    callbackasm1(SB)
+	MOVD $1375, R12
+	B    callbackasm1(SB)
+	MOVD $1376, R12
+	B    callbackasm1(SB)
+	MOVD $1377, R12
+	B    callbackasm1(SB)
+	MOVD $1378, R12
+	B    callbackasm1(SB)
+	MOVD $1379, R12
+	B    callbackasm1(SB)
+	MOVD $1380, R12
+	B    callbackasm1(SB)
+	MOVD $1381, R12
+	B    callbackasm1(SB)
+	MOVD $1382, R12
+	B    callbackasm1(SB)
+	MOVD $1383, R12
+	B    callbackasm1(SB)
+	MOVD $1384, R12
+	B    callbackasm1(SB)
+	MOVD $1385, R12
+	B    callbackasm1(SB)
+	MOVD $1386, R12
+	B    callbackasm1(SB)
+	MOVD $1387, R12
+	B    callbackasm1(SB)
+	MOVD $1388, R12
+	B    callbackasm1(SB)
+	MOVD $1389, R12
+	B    callbackasm1(SB)
+	MOVD $1390, R12
+	B    callbackasm1(SB)
+	MOVD $1391, R12
+	B    callbackasm1(SB)
+	MOVD $1392, R12
+	B    callbackasm1(SB)
+	MOVD $1393, R12
+	B    callbackasm1(SB)
+	MOVD $1394, R12
+	B    callbackasm1(SB)
+	MOVD $1395, R12
+	B    callbackasm1(SB)
+	MOVD $1396, R12
+	B    callbackasm1(SB)
+	MOVD $1397, R12
+	B    callbackasm1(SB)
+	MOVD $1398, R12
+	B    callbackasm1(SB)
+	MOVD $1399, R12
+	B    callbackasm1(SB)
+	MOVD $1400, R12
+	B    callbackasm1(SB)
+	MOVD $1401, R12
+	B    callbackasm1(SB)
+	MOVD $1402, R12
+	B    callbackasm1(SB)
+	MOVD $1403, R12
+	B    callbackasm1(SB)
+	MOVD $1404, R12
+	B    callbackasm1(SB)
+	MOVD $1405, R12
+	B    callbackasm1(SB)
+	MOVD $1406, R12
+	B    callbackasm1(SB)
+	MOVD $1407, R12
+	B    callbackasm1(SB)
+	MOVD $1408, R12
+	B    callbackasm1(SB)
+	MOVD $1409, R12
+	B    callbackasm1(SB)
+	MOVD $1410, R12
+	B    callbackasm1(SB)
+	MOVD $1411, R12
+	B    callbackasm1(SB)
+	MOVD $1412, R12
+	B    callbackasm1(SB)
+	MOVD $1413, R12
+	B    callbackasm1(SB)
+	MOVD $1414, R12
+	B    callbackasm1(SB)
+	MOVD $1415, R12
+	B    callbackasm1(SB)
+	MOVD $1416, R12
+	B    callbackasm1(SB)
+	MOVD $1417, R12
+	B    callbackasm1(SB)
+	MOVD $1418, R12
+	B    callbackasm1(SB)
+	MOVD $1419, R12
+	B    callbackasm1(SB)
+	MOVD $1420, R12
+	B    callbackasm1(SB)
+	MOVD $1421, R12
+	B    callbackasm1(SB)
+	MOVD $1422, R12
+	B    callbackasm1(SB)
+	MOVD $1423, R12
+	B    callbackasm1(SB)
+	MOVD $1424, R12
+	B    callbackasm1(SB)
+	MOVD $1425, R12
+	B    callbackasm1(SB)
+	MOVD $1426, R12
+	B    callbackasm1(SB)
+	MOVD $1427, R12
+	B    callbackasm1(SB)
+	MOVD $1428, R12
+	B    callbackasm1(SB)
+	MOVD $1429, R12
+	B    callbackasm1(SB)
+	MOVD $1430, R12
+	B    callbackasm1(SB)
+	MOVD $1431, R12
+	B    callbackasm1(SB)
+	MOVD $1432, R12
+	B    callbackasm1(SB)
+	MOVD $1433, R12
+	B    callbackasm1(SB)
+	MOVD $1434, R12
+	B    callbackasm1(SB)
+	MOVD $1435, R12
+	B    callbackasm1(SB)
+	MOVD $1436, R12
+	B    callbackasm1(SB)
+	MOVD $1437, R12
+	B    callbackasm1(SB)
+	MOVD $1438, R12
+	B    callbackasm1(SB)
+	MOVD $1439, R12
+	B    callbackasm1(SB)
+	MOVD $1440, R12
+	B    callbackasm1(SB)
+	MOVD $1441, R12
+	B    callbackasm1(SB)
+	MOVD $1442, R12
+	B    callbackasm1(SB)
+	MOVD $1443, R12
+	B    callbackasm1(SB)
+	MOVD $1444, R12
+	B    callbackasm1(SB)
+	MOVD $1445, R12
+	B    callbackasm1(SB)
+	MOVD $1446, R12
+	B    callbackasm1(SB)
+	MOVD $1447, R12
+	B    callbackasm1(SB)
+	MOVD $1448, R12
+	B    callbackasm1(SB)
+	MOVD $1449, R12
+	B    callbackasm1(SB)
+	MOVD $1450, R12
+	B    callbackasm1(SB)
+	MOVD $1451, R12
+	B    callbackasm1(SB)
+	MOVD $1452, R12
+	B    callbackasm1(SB)
+	MOVD $1453, R12
+	B    callbackasm1(SB)
+	MOVD $1454, R12
+	B    callbackasm1(SB)
+	MOVD $1455, R12
+	B    callbackasm1(SB)
+	MOVD $1456, R12
+	B    callbackasm1(SB)
+	MOVD $1457, R12
+	B    callbackasm1(SB)
+	MOVD $1458, R12
+	B    callbackasm1(SB)
+	MOVD $1459, R12
+	B    callbackasm1(SB)
+	MOVD $1460, R12
+	B    callbackasm1(SB)
+	MOVD $1461, R12
+	B    callbackasm1(SB)
+	MOVD $1462, R12
+	B    callbackasm1(SB)
+	MOVD $1463, R12
+	B    callbackasm1(SB)
+	MOVD $1464, R12
+	B    callbackasm1(SB)
+	MOVD $1465, R12
+	B    callbackasm1(SB)
+	MOVD $1466, R12
+	B    callbackasm1(SB)
+	MOVD $1467, R12
+	B    callbackasm1(SB)
+	MOVD $1468, R12
+	B    callbackasm1(SB)
+	MOVD $1469, R12
+	B    callbackasm1(SB)
+	MOVD $1470, R12
+	B    callbackasm1(SB)
+	MOVD $1471, R12
+	B    callbackasm1(SB)
+	MOVD $1472, R12
+	B    callbackasm1(SB)
+	MOVD $1473, R12
+	B    callbackasm1(SB)
+	MOVD $1474, R12
+	B    callbackasm1(SB)
+	MOVD $1475, R12
+	B    callbackasm1(SB)
+	MOVD $1476, R12
+	B    callbackasm1(SB)
+	MOVD $1477, R12
+	B    callbackasm1(SB)
+	MOVD $1478, R12
+	B    callbackasm1(SB)
+	MOVD $1479, R12
+	B    callbackasm1(SB)
+	MOVD $1480, R12
+	B    callbackasm1(SB)
+	MOVD $1481, R12
+	B    callbackasm1(SB)
+	MOVD $1482, R12
+	B    callbackasm1(SB)
+	MOVD $1483, R12
+	B    callbackasm1(SB)
+	MOVD $1484, R12
+	B    callbackasm1(SB)
+	MOVD $1485, R12
+	B    callbackasm1(SB)
+	MOVD $1486, R12
+	B    callbackasm1(SB)
+	MOVD $1487, R12
+	B    callbackasm1(SB)
+	MOVD $1488, R12
+	B    callbackasm1(SB)
+	MOVD $1489, R12
+	B    callbackasm1(SB)
+	MOVD $1490, R12
+	B    callbackasm1(SB)
+	MOVD $1491, R12
+	B    callbackasm1(SB)
+	MOVD $1492, R12
+	B    callbackasm1(SB)
+	MOVD $1493, R12
+	B    callbackasm1(SB)
+	MOVD $1494, R12
+	B    callbackasm1(SB)
+	MOVD $1495, R12
+	B    callbackasm1(SB)
+	MOVD $1496, R12
+	B    callbackasm1(SB)
+	MOVD $1497, R12
+	B    callbackasm1(SB)
+	MOVD $1498, R12
+	B    callbackasm1(SB)
+	MOVD $1499, R12
+	B    callbackasm1(SB)
+	MOVD $1500, R12
+	B    callbackasm1(SB)
+	MOVD $1501, R12
+	B    callbackasm1(SB)
+	MOVD $1502, R12
+	B    callbackasm1(SB)
+	MOVD $1503, R12
+	B    callbackasm1(SB)
+	MOVD $1504, R12
+	B    callbackasm1(SB)
+	MOVD $1505, R12
+	B    callbackasm1(SB)
+	MOVD $1506, R12
+	B    callbackasm1(SB)
+	MOVD $1507, R12
+	B    callbackasm1(SB)
+	MOVD $1508, R12
+	B    callbackasm1(SB)
+	MOVD $1509, R12
+	B    callbackasm1(SB)
+	MOVD $1510, R12
+	B    callbackasm1(SB)
+	MOVD $1511, R12
+	B    callbackasm1(SB)
+	MOVD $1512, R12
+	B    callbackasm1(SB)
+	MOVD $1513, R12
+	B    callbackasm1(SB)
+	MOVD $1514, R12
+	B    callbackasm1(SB)
+	MOVD $1515, R12
+	B    callbackasm1(SB)
+	MOVD $1516, R12
+	B    callbackasm1(SB)
+	MOVD $1517, R12
+	B    callbackasm1(SB)
+	MOVD $1518, R12
+	B    callbackasm1(SB)
+	MOVD $1519, R12
+	B    callbackasm1(SB)
+	MOVD $1520, R12
+	B    callbackasm1(SB)
+	MOVD $1521, R12
+	B    callbackasm1(SB)
+	MOVD $1522, R12
+	B    callbackasm1(SB)
+	MOVD $1523, R12
+	B    callbackasm1(SB)
+	MOVD $1524, R12
+	B    callbackasm1(SB)
+	MOVD $1525, R12
+	B    callbackasm1(SB)
+	MOVD $1526, R12
+	B    callbackasm1(SB)
+	MOVD $1527, R12
+	B    callbackasm1(SB)
+	MOVD $1528, R12
+	B    callbackasm1(SB)
+	MOVD $1529, R12
+	B    callbackasm1(SB)
+	MOVD $1530, R12
+	B    callbackasm1(SB)
+	MOVD $1531, R12
+	B    callbackasm1(SB)
+	MOVD $1532, R12
+	B    callbackasm1(SB)
+	MOVD $1533, R12
+	B    callbackasm1(SB)
+	MOVD $1534, R12
+	B    callbackasm1(SB)
+	MOVD $1535, R12
+	B    callbackasm1(SB)
+	MOVD $1536, R12
+	B    callbackasm1(SB)
+	MOVD $1537, R12
+	B    callbackasm1(SB)
+	MOVD $1538, R12
+	B    callbackasm1(SB)
+	MOVD $1539, R12
+	B    callbackasm1(SB)
+	MOVD $1540, R12
+	B    callbackasm1(SB)
+	MOVD $1541, R12
+	B    callbackasm1(SB)
+	MOVD $1542, R12
+	B    callbackasm1(SB)
+	MOVD $1543, R12
+	B    callbackasm1(SB)
+	MOVD $1544, R12
+	B    callbackasm1(SB)
+	MOVD $1545, R12
+	B    callbackasm1(SB)
+	MOVD $1546, R12
+	B    callbackasm1(SB)
+	MOVD $1547, R12
+	B    callbackasm1(SB)
+	MOVD $1548, R12
+	B    callbackasm1(SB)
+	MOVD $1549, R12
+	B    callbackasm1(SB)
+	MOVD $1550, R12
+	B    callbackasm1(SB)
+	MOVD $1551, R12
+	B    callbackasm1(SB)
+	MOVD $1552, R12
+	B    callbackasm1(SB)
+	MOVD $1553, R12
+	B    callbackasm1(SB)
+	MOVD $1554, R12
+	B    callbackasm1(SB)
+	MOVD $1555, R12
+	B    callbackasm1(SB)
+	MOVD $1556, R12
+	B    callbackasm1(SB)
+	MOVD $1557, R12
+	B    callbackasm1(SB)
+	MOVD $1558, R12
+	B    callbackasm1(SB)
+	MOVD $1559, R12
+	B    callbackasm1(SB)
+	MOVD $1560, R12
+	B    callbackasm1(SB)
+	MOVD $1561, R12
+	B    callbackasm1(SB)
+	MOVD $1562, R12
+	B    callbackasm1(SB)
+	MOVD $1563, R12
+	B    callbackasm1(SB)
+	MOVD $1564, R12
+	B    callbackasm1(SB)
+	MOVD $1565, R12
+	B    callbackasm1(SB)
+	MOVD $1566, R12
+	B    callbackasm1(SB)
+	MOVD $1567, R12
+	B    callbackasm1(SB)
+	MOVD $1568, R12
+	B    callbackasm1(SB)
+	MOVD $1569, R12
+	B    callbackasm1(SB)
+	MOVD $1570, R12
+	B    callbackasm1(SB)
+	MOVD $1571, R12
+	B    callbackasm1(SB)
+	MOVD $1572, R12
+	B    callbackasm1(SB)
+	MOVD $1573, R12
+	B    callbackasm1(SB)
+	MOVD $1574, R12
+	B    callbackasm1(SB)
+	MOVD $1575, R12
+	B    callbackasm1(SB)
+	MOVD $1576, R12
+	B    callbackasm1(SB)
+	MOVD $1577, R12
+	B    callbackasm1(SB)
+	MOVD $1578, R12
+	B    callbackasm1(SB)
+	MOVD $1579, R12
+	B    callbackasm1(SB)
+	MOVD $1580, R12
+	B    callbackasm1(SB)
+	MOVD $1581, R12
+	B    callbackasm1(SB)
+	MOVD $1582, R12
+	B    callbackasm1(SB)
+	MOVD $1583, R12
+	B    callbackasm1(SB)
+	MOVD $1584, R12
+	B    callbackasm1(SB)
+	MOVD $1585, R12
+	B    callbackasm1(SB)
+	MOVD $1586, R12
+	B    callbackasm1(SB)
+	MOVD $1587, R12
+	B    callbackasm1(SB)
+	MOVD $1588, R12
+	B    callbackasm1(SB)
+	MOVD $1589, R12
+	B    callbackasm1(SB)
+	MOVD $1590, R12
+	B    callbackasm1(SB)
+	MOVD $1591, R12
+	B    callbackasm1(SB)
+	MOVD $1592, R12
+	B    callbackasm1(SB)
+	MOVD $1593, R12
+	B    callbackasm1(SB)
+	MOVD $1594, R12
+	B    callbackasm1(SB)
+	MOVD $1595, R12
+	B    callbackasm1(SB)
+	MOVD $1596, R12
+	B    callbackasm1(SB)
+	MOVD $1597, R12
+	B    callbackasm1(SB)
+	MOVD $1598, R12
+	B    callbackasm1(SB)
+	MOVD $1599, R12
+	B    callbackasm1(SB)
+	MOVD $1600, R12
+	B    callbackasm1(SB)
+	MOVD $1601, R12
+	B    callbackasm1(SB)
+	MOVD $1602, R12
+	B    callbackasm1(SB)
+	MOVD $1603, R12
+	B    callbackasm1(SB)
+	MOVD $1604, R12
+	B    callbackasm1(SB)
+	MOVD $1605, R12
+	B    callbackasm1(SB)
+	MOVD $1606, R12
+	B    callbackasm1(SB)
+	MOVD $1607, R12
+	B    callbackasm1(SB)
+	MOVD $1608, R12
+	B    callbackasm1(SB)
+	MOVD $1609, R12
+	B    callbackasm1(SB)
+	MOVD $1610, R12
+	B    callbackasm1(SB)
+	MOVD $1611, R12
+	B    callbackasm1(SB)
+	MOVD $1612, R12
+	B    callbackasm1(SB)
+	MOVD $1613, R12
+	B    callbackasm1(SB)
+	MOVD $1614, R12
+	B    callbackasm1(SB)
+	MOVD $1615, R12
+	B    callbackasm1(SB)
+	MOVD $1616, R12
+	B    callbackasm1(SB)
+	MOVD $1617, R12
+	B    callbackasm1(SB)
+	MOVD $1618, R12
+	B    callbackasm1(SB)
+	MOVD $1619, R12
+	B    callbackasm1(SB)
+	MOVD $1620, R12
+	B    callbackasm1(SB)
+	MOVD $1621, R12
+	B    callbackasm1(SB)
+	MOVD $1622, R12
+	B    callbackasm1(SB)
+	MOVD $1623, R12
+	B    callbackasm1(SB)
+	MOVD $1624, R12
+	B    callbackasm1(SB)
+	MOVD $1625, R12
+	B    callbackasm1(SB)
+	MOVD $1626, R12
+	B    callbackasm1(SB)
+	MOVD $1627, R12
+	B    callbackasm1(SB)
+	MOVD $1628, R12
+	B    callbackasm1(SB)
+	MOVD $1629, R12
+	B    callbackasm1(SB)
+	MOVD $1630, R12
+	B    callbackasm1(SB)
+	MOVD $1631, R12
+	B    callbackasm1(SB)
+	MOVD $1632, R12
+	B    callbackasm1(SB)
+	MOVD $1633, R12
+	B    callbackasm1(SB)
+	MOVD $1634, R12
+	B    callbackasm1(SB)
+	MOVD $1635, R12
+	B    callbackasm1(SB)
+	MOVD $1636, R12
+	B    callbackasm1(SB)
+	MOVD $1637, R12
+	B    callbackasm1(SB)
+	MOVD $1638, R12
+	B    callbackasm1(SB)
+	MOVD $1639, R12
+	B    callbackasm1(SB)
+	MOVD $1640, R12
+	B    callbackasm1(SB)
+	MOVD $1641, R12
+	B    callbackasm1(SB)
+	MOVD $1642, R12
+	B    callbackasm1(SB)
+	MOVD $1643, R12
+	B    callbackasm1(SB)
+	MOVD $1644, R12
+	B    callbackasm1(SB)
+	MOVD $1645, R12
+	B    callbackasm1(SB)
+	MOVD $1646, R12
+	B    callbackasm1(SB)
+	MOVD $1647, R12
+	B    callbackasm1(SB)
+	MOVD $1648, R12
+	B    callbackasm1(SB)
+	MOVD $1649, R12
+	B    callbackasm1(SB)
+	MOVD $1650, R12
+	B    callbackasm1(SB)
+	MOVD $1651, R12
+	B    callbackasm1(SB)
+	MOVD $1652, R12
+	B    callbackasm1(SB)
+	MOVD $1653, R12
+	B    callbackasm1(SB)
+	MOVD $1654, R12
+	B    callbackasm1(SB)
+	MOVD $1655, R12
+	B    callbackasm1(SB)
+	MOVD $1656, R12
+	B    callbackasm1(SB)
+	MOVD $1657, R12
+	B    callbackasm1(SB)
+	MOVD $1658, R12
+	B    callbackasm1(SB)
+	MOVD $1659, R12
+	B    callbackasm1(SB)
+	MOVD $1660, R12
+	B    callbackasm1(SB)
+	MOVD $1661, R12
+	B    callbackasm1(SB)
+	MOVD $1662, R12
+	B    callbackasm1(SB)
+	MOVD $1663, R12
+	B    callbackasm1(SB)
+	MOVD $1664, R12
+	B    callbackasm1(SB)
+	MOVD $1665, R12
+	B    callbackasm1(SB)
+	MOVD $1666, R12
+	B    callbackasm1(SB)
+	MOVD $1667, R12
+	B    callbackasm1(SB)
+	MOVD $1668, R12
+	B    callbackasm1(SB)
+	MOVD $1669, R12
+	B    callbackasm1(SB)
+	MOVD $1670, R12
+	B    callbackasm1(SB)
+	MOVD $1671, R12
+	B    callbackasm1(SB)
+	MOVD $1672, R12
+	B    callbackasm1(SB)
+	MOVD $1673, R12
+	B    callbackasm1(SB)
+	MOVD $1674, R12
+	B    callbackasm1(SB)
+	MOVD $1675, R12
+	B    callbackasm1(SB)
+	MOVD $1676, R12
+	B    callbackasm1(SB)
+	MOVD $1677, R12
+	B    callbackasm1(SB)
+	MOVD $1678, R12
+	B    callbackasm1(SB)
+	MOVD $1679, R12
+	B    callbackasm1(SB)
+	MOVD $1680, R12
+	B    callbackasm1(SB)
+	MOVD $1681, R12
+	B    callbackasm1(SB)
+	MOVD $1682, R12
+	B    callbackasm1(SB)
+	MOVD $1683, R12
+	B    callbackasm1(SB)
+	MOVD $1684, R12
+	B    callbackasm1(SB)
+	MOVD $1685, R12
+	B    callbackasm1(SB)
+	MOVD $1686, R12
+	B    callbackasm1(SB)
+	MOVD $1687, R12
+	B    callbackasm1(SB)
+	MOVD $1688, R12
+	B    callbackasm1(SB)
+	MOVD $1689, R12
+	B    callbackasm1(SB)
+	MOVD $1690, R12
+	B    callbackasm1(SB)
+	MOVD $1691, R12
+	B    callbackasm1(SB)
+	MOVD $1692, R12
+	B    callbackasm1(SB)
+	MOVD $1693, R12
+	B    callbackasm1(SB)
+	MOVD $1694, R12
+	B    callbackasm1(SB)
+	MOVD $1695, R12
+	B    callbackasm1(SB)
+	MOVD $1696, R12
+	B    callbackasm1(SB)
+	MOVD $1697, R12
+	B    callbackasm1(SB)
+	MOVD $1698, R12
+	B    callbackasm1(SB)
+	MOVD $1699, R12
+	B    callbackasm1(SB)
+	MOVD $1700, R12
+	B    callbackasm1(SB)
+	MOVD $1701, R12
+	B    callbackasm1(SB)
+	MOVD $1702, R12
+	B    callbackasm1(SB)
+	MOVD $1703, R12
+	B    callbackasm1(SB)
+	MOVD $1704, R12
+	B    callbackasm1(SB)
+	MOVD $1705, R12
+	B    callbackasm1(SB)
+	MOVD $1706, R12
+	B    callbackasm1(SB)
+	MOVD $1707, R12
+	B    callbackasm1(SB)
+	MOVD $1708, R12
+	B    callbackasm1(SB)
+	MOVD $1709, R12
+	B    callbackasm1(SB)
+	MOVD $1710, R12
+	B    callbackasm1(SB)
+	MOVD $1711, R12
+	B    callbackasm1(SB)
+	MOVD $1712, R12
+	B    callbackasm1(SB)
+	MOVD $1713, R12
+	B    callbackasm1(SB)
+	MOVD $1714, R12
+	B    callbackasm1(SB)
+	MOVD $1715, R12
+	B    callbackasm1(SB)
+	MOVD $1716, R12
+	B    callbackasm1(SB)
+	MOVD $1717, R12
+	B    callbackasm1(SB)
+	MOVD $1718, R12
+	B    callbackasm1(SB)
+	MOVD $1719, R12
+	B    callbackasm1(SB)
+	MOVD $1720, R12
+	B    callbackasm1(SB)
+	MOVD $1721, R12
+	B    callbackasm1(SB)
+	MOVD $1722, R12
+	B    callbackasm1(SB)
+	MOVD $1723, R12
+	B    callbackasm1(SB)
+	MOVD $1724, R12
+	B    callbackasm1(SB)
+	MOVD $1725, R12
+	B    callbackasm1(SB)
+	MOVD $1726, R12
+	B    callbackasm1(SB)
+	MOVD $1727, R12
+	B    callbackasm1(SB)
+	MOVD $1728, R12
+	B    callbackasm1(SB)
+	MOVD $1729, R12
+	B    callbackasm1(SB)
+	MOVD $1730, R12
+	B    callbackasm1(SB)
+	MOVD $1731, R12
+	B    callbackasm1(SB)
+	MOVD $1732, R12
+	B    callbackasm1(SB)
+	MOVD $1733, R12
+	B    callbackasm1(SB)
+	MOVD $1734, R12
+	B    callbackasm1(SB)
+	MOVD $1735, R12
+	B    callbackasm1(SB)
+	MOVD $1736, R12
+	B    callbackasm1(SB)
+	MOVD $1737, R12
+	B    callbackasm1(SB)
+	MOVD $1738, R12
+	B    callbackasm1(SB)
+	MOVD $1739, R12
+	B    callbackasm1(SB)
+	MOVD $1740, R12
+	B    callbackasm1(SB)
+	MOVD $1741, R12
+	B    callbackasm1(SB)
+	MOVD $1742, R12
+	B    callbackasm1(SB)
+	MOVD $1743, R12
+	B    callbackasm1(SB)
+	MOVD $1744, R12
+	B    callbackasm1(SB)
+	MOVD $1745, R12
+	B    callbackasm1(SB)
+	MOVD $1746, R12
+	B    callbackasm1(SB)
+	MOVD $1747, R12
+	B    callbackasm1(SB)
+	MOVD $1748, R12
+	B    callbackasm1(SB)
+	MOVD $1749, R12
+	B    callbackasm1(SB)
+	MOVD $1750, R12
+	B    callbackasm1(SB)
+	MOVD $1751, R12
+	B    callbackasm1(SB)
+	MOVD $1752, R12
+	B    callbackasm1(SB)
+	MOVD $1753, R12
+	B    callbackasm1(SB)
+	MOVD $1754, R12
+	B    callbackasm1(SB)
+	MOVD $1755, R12
+	B    callbackasm1(SB)
+	MOVD $1756, R12
+	B    callbackasm1(SB)
+	MOVD $1757, R12
+	B    callbackasm1(SB)
+	MOVD $1758, R12
+	B    callbackasm1(SB)
+	MOVD $1759, R12
+	B    callbackasm1(SB)
+	MOVD $1760, R12
+	B    callbackasm1(SB)
+	MOVD $1761, R12
+	B    callbackasm1(SB)
+	MOVD $1762, R12
+	B    callbackasm1(SB)
+	MOVD $1763, R12
+	B    callbackasm1(SB)
+	MOVD $1764, R12
+	B    callbackasm1(SB)
+	MOVD $1765, R12
+	B    callbackasm1(SB)
+	MOVD $1766, R12
+	B    callbackasm1(SB)
+	MOVD $1767, R12
+	B    callbackasm1(SB)
+	MOVD $1768, R12
+	B    callbackasm1(SB)
+	MOVD $1769, R12
+	B    callbackasm1(SB)
+	MOVD $1770, R12
+	B    callbackasm1(SB)
+	MOVD $1771, R12
+	B    callbackasm1(SB)
+	MOVD $1772, R12
+	B    callbackasm1(SB)
+	MOVD $1773, R12
+	B    callbackasm1(SB)
+	MOVD $1774, R12
+	B    callbackasm1(SB)
+	MOVD $1775, R12
+	B    callbackasm1(SB)
+	MOVD $1776, R12
+	B    callbackasm1(SB)
+	MOVD $1777, R12
+	B    callbackasm1(SB)
+	MOVD $1778, R12
+	B    callbackasm1(SB)
+	MOVD $1779, R12
+	B    callbackasm1(SB)
+	MOVD $1780, R12
+	B    callbackasm1(SB)
+	MOVD $1781, R12
+	B    callbackasm1(SB)
+	MOVD $1782, R12
+	B    callbackasm1(SB)
+	MOVD $1783, R12
+	B    callbackasm1(SB)
+	MOVD $1784, R12
+	B    callbackasm1(SB)
+	MOVD $1785, R12
+	B    callbackasm1(SB)
+	MOVD $1786, R12
+	B    callbackasm1(SB)
+	MOVD $1787, R12
+	B    callbackasm1(SB)
+	MOVD $1788, R12
+	B    callbackasm1(SB)
+	MOVD $1789, R12
+	B    callbackasm1(SB)
+	MOVD $1790, R12
+	B    callbackasm1(SB)
+	MOVD $1791, R12
+	B    callbackasm1(SB)
+	MOVD $1792, R12
+	B    callbackasm1(SB)
+	MOVD $1793, R12
+	B    callbackasm1(SB)
+	MOVD $1794, R12
+	B    callbackasm1(SB)
+	MOVD $1795, R12
+	B    callbackasm1(SB)
+	MOVD $1796, R12
+	B    callbackasm1(SB)
+	MOVD $1797, R12
+	B    callbackasm1(SB)
+	MOVD $1798, R12
+	B    callbackasm1(SB)
+	MOVD $1799, R12
+	B    callbackasm1(SB)
+	MOVD $1800, R12
+	B    callbackasm1(SB)
+	MOVD $1801, R12
+	B    callbackasm1(SB)
+	MOVD $1802, R12
+	B    callbackasm1(SB)
+	MOVD $1803, R12
+	B    callbackasm1(SB)
+	MOVD $1804, R12
+	B    callbackasm1(SB)
+	MOVD $1805, R12
+	B    callbackasm1(SB)
+	MOVD $1806, R12
+	B    callbackasm1(SB)
+	MOVD $1807, R12
+	B    callbackasm1(SB)
+	MOVD $1808, R12
+	B    callbackasm1(SB)
+	MOVD $1809, R12
+	B    callbackasm1(SB)
+	MOVD $1810, R12
+	B    callbackasm1(SB)
+	MOVD $1811, R12
+	B    callbackasm1(SB)
+	MOVD $1812, R12
+	B    callbackasm1(SB)
+	MOVD $1813, R12
+	B    callbackasm1(SB)
+	MOVD $1814, R12
+	B    callbackasm1(SB)
+	MOVD $1815, R12
+	B    callbackasm1(SB)
+	MOVD $1816, R12
+	B    callbackasm1(SB)
+	MOVD $1817, R12
+	B    callbackasm1(SB)
+	MOVD $1818, R12
+	B    callbackasm1(SB)
+	MOVD $1819, R12
+	B    callbackasm1(SB)
+	MOVD $1820, R12
+	B    callbackasm1(SB)
+	MOVD $1821, R12
+	B    callbackasm1(SB)
+	MOVD $1822, R12
+	B    callbackasm1(SB)
+	MOVD $1823, R12
+	B    callbackasm1(SB)
+	MOVD $1824, R12
+	B    callbackasm1(SB)
+	MOVD $1825, R12
+	B    callbackasm1(SB)
+	MOVD $1826, R12
+	B    callbackasm1(SB)
+	MOVD $1827, R12
+	B    callbackasm1(SB)
+	MOVD $1828, R12
+	B    callbackasm1(SB)
+	MOVD $1829, R12
+	B    callbackasm1(SB)
+	MOVD $1830, R12
+	B    callbackasm1(SB)
+	MOVD $1831, R12
+	B    callbackasm1(SB)
+	MOVD $1832, R12
+	B    callbackasm1(SB)
+	MOVD $1833, R12
+	B    callbackasm1(SB)
+	MOVD $1834, R12
+	B    callbackasm1(SB)
+	MOVD $1835, R12
+	B    callbackasm1(SB)
+	MOVD $1836, R12
+	B    callbackasm1(SB)
+	MOVD $1837, R12
+	B    callbackasm1(SB)
+	MOVD $1838, R12
+	B    callbackasm1(SB)
+	MOVD $1839, R12
+	B    callbackasm1(SB)
+	MOVD $1840, R12
+	B    callbackasm1(SB)
+	MOVD $1841, R12
+	B    callbackasm1(SB)
+	MOVD $1842, R12
+	B    callbackasm1(SB)
+	MOVD $1843, R12
+	B    callbackasm1(SB)
+	MOVD $1844, R12
+	B    callbackasm1(SB)
+	MOVD $1845, R12
+	B    callbackasm1(SB)
+	MOVD $1846, R12
+	B    callbackasm1(SB)
+	MOVD $1847, R12
+	B    callbackasm1(SB)
+	MOVD $1848, R12
+	B    callbackasm1(SB)
+	MOVD $1849, R12
+	B    callbackasm1(SB)
+	MOVD $1850, R12
+	B    callbackasm1(SB)
+	MOVD $1851, R12
+	B    callbackasm1(SB)
+	MOVD $1852, R12
+	B    callbackasm1(SB)
+	MOVD $1853, R12
+	B    callbackasm1(SB)
+	MOVD $1854, R12
+	B    callbackasm1(SB)
+	MOVD $1855, R12
+	B    callbackasm1(SB)
+	MOVD $1856, R12
+	B    callbackasm1(SB)
+	MOVD $1857, R12
+	B    callbackasm1(SB)
+	MOVD $1858, R12
+	B    callbackasm1(SB)
+	MOVD $1859, R12
+	B    callbackasm1(SB)
+	MOVD $1860, R12
+	B    callbackasm1(SB)
+	MOVD $1861, R12
+	B    callbackasm1(SB)
+	MOVD $1862, R12
+	B    callbackasm1(SB)
+	MOVD $1863, R12
+	B    callbackasm1(SB)
+	MOVD $1864, R12
+	B    callbackasm1(SB)
+	MOVD $1865, R12
+	B    callbackasm1(SB)
+	MOVD $1866, R12
+	B    callbackasm1(SB)
+	MOVD $1867, R12
+	B    callbackasm1(SB)
+	MOVD $1868, R12
+	B    callbackasm1(SB)
+	MOVD $1869, R12
+	B    callbackasm1(SB)
+	MOVD $1870, R12
+	B    callbackasm1(SB)
+	MOVD $1871, R12
+	B    callbackasm1(SB)
+	MOVD $1872, R12
+	B    callbackasm1(SB)
+	MOVD $1873, R12
+	B    callbackasm1(SB)
+	MOVD $1874, R12
+	B    callbackasm1(SB)
+	MOVD $1875, R12
+	B    callbackasm1(SB)
+	MOVD $1876, R12
+	B    callbackasm1(SB)
+	MOVD $1877, R12
+	B    callbackasm1(SB)
+	MOVD $1878, R12
+	B    callbackasm1(SB)
+	MOVD $1879, R12
+	B    callbackasm1(SB)
+	MOVD $1880, R12
+	B    callbackasm1(SB)
+	MOVD $1881, R12
+	B    callbackasm1(SB)
+	MOVD $1882, R12
+	B    callbackasm1(SB)
+	MOVD $1883, R12
+	B    callbackasm1(SB)
+	MOVD $1884, R12
+	B    callbackasm1(SB)
+	MOVD $1885, R12
+	B    callbackasm1(SB)
+	MOVD $1886, R12
+	B    callbackasm1(SB)
+	MOVD $1887, R12
+	B    callbackasm1(SB)
+	MOVD $1888, R12
+	B    callbackasm1(SB)
+	MOVD $1889, R12
+	B    callbackasm1(SB)
+	MOVD $1890, R12
+	B    callbackasm1(SB)
+	MOVD $1891, R12
+	B    callbackasm1(SB)
+	MOVD $1892, R12
+	B    callbackasm1(SB)
+	MOVD $1893, R12
+	B    callbackasm1(SB)
+	MOVD $1894, R12
+	B    callbackasm1(SB)
+	MOVD $1895, R12
+	B    callbackasm1(SB)
+	MOVD $1896, R12
+	B    callbackasm1(SB)
+	MOVD $1897, R12
+	B    callbackasm1(SB)
+	MOVD $1898, R12
+	B    callbackasm1(SB)
+	MOVD $1899, R12
+	B    callbackasm1(SB)
+	MOVD $1900, R12
+	B    callbackasm1(SB)
+	MOVD $1901, R12
+	B    callbackasm1(SB)
+	MOVD $1902, R12
+	B    callbackasm1(SB)
+	MOVD $1903, R12
+	B    callbackasm1(SB)
+	MOVD $1904, R12
+	B    callbackasm1(SB)
+	MOVD $1905, R12
+	B    callbackasm1(SB)
+	MOVD $1906, R12
+	B    callbackasm1(SB)
+	MOVD $1907, R12
+	B    callbackasm1(SB)
+	MOVD $1908, R12
+	B    callbackasm1(SB)
+	MOVD $1909, R12
+	B    callbackasm1(SB)
+	MOVD $1910, R12
+	B    callbackasm1(SB)
+	MOVD $1911, R12
+	B    callbackasm1(SB)
+	MOVD $1912, R12
+	B    callbackasm1(SB)
+	MOVD $1913, R12
+	B    callbackasm1(SB)
+	MOVD $1914, R12
+	B    callbackasm1(SB)
+	MOVD $1915, R12
+	B    callbackasm1(SB)
+	MOVD $1916, R12
+	B    callbackasm1(SB)
+	MOVD $1917, R12
+	B    callbackasm1(SB)
+	MOVD $1918, R12
+	B    callbackasm1(SB)
+	MOVD $1919, R12
+	B    callbackasm1(SB)
+	MOVD $1920, R12
+	B    callbackasm1(SB)
+	MOVD $1921, R12
+	B    callbackasm1(SB)
+	MOVD $1922, R12
+	B    callbackasm1(SB)
+	MOVD $1923, R12
+	B    callbackasm1(SB)
+	MOVD $1924, R12
+	B    callbackasm1(SB)
+	MOVD $1925, R12
+	B    callbackasm1(SB)
+	MOVD $1926, R12
+	B    callbackasm1(SB)
+	MOVD $1927, R12
+	B    callbackasm1(SB)
+	MOVD $1928, R12
+	B    callbackasm1(SB)
+	MOVD $1929, R12
+	B    callbackasm1(SB)
+	MOVD $1930, R12
+	B    callbackasm1(SB)
+	MOVD $1931, R12
+	B    callbackasm1(SB)
+	MOVD $1932, R12
+	B    callbackasm1(SB)
+	MOVD $1933, R12
+	B    callbackasm1(SB)
+	MOVD $1934, R12
+	B    callbackasm1(SB)
+	MOVD $1935, R12
+	B    callbackasm1(SB)
+	MOVD $1936, R12
+	B    callbackasm1(SB)
+	MOVD $1937, R12
+	B    callbackasm1(SB)
+	MOVD $1938, R12
+	B    callbackasm1(SB)
+	MOVD $1939, R12
+	B    callbackasm1(SB)
+	MOVD $1940, R12
+	B    callbackasm1(SB)
+	MOVD $1941, R12
+	B    callbackasm1(SB)
+	MOVD $1942, R12
+	B    callbackasm1(SB)
+	MOVD $1943, R12
+	B    callbackasm1(SB)
+	MOVD $1944, R12
+	B    callbackasm1(SB)
+	MOVD $1945, R12
+	B    callbackasm1(SB)
+	MOVD $1946, R12
+	B    callbackasm1(SB)
+	MOVD $1947, R12
+	B    callbackasm1(SB)
+	MOVD $1948, R12
+	B    callbackasm1(SB)
+	MOVD $1949, R12
+	B    callbackasm1(SB)
+	MOVD $1950, R12
+	B    callbackasm1(SB)
+	MOVD $1951, R12
+	B    callbackasm1(SB)
+	MOVD $1952, R12
+	B    callbackasm1(SB)
+	MOVD $1953, R12
+	B    callbackasm1(SB)
+	MOVD $1954, R12
+	B    callbackasm1(SB)
+	MOVD $1955, R12
+	B    callbackasm1(SB)
+	MOVD $1956, R12
+	B    callbackasm1(SB)
+	MOVD $1957, R12
+	B    callbackasm1(SB)
+	MOVD $1958, R12
+	B    callbackasm1(SB)
+	MOVD $1959, R12
+	B    callbackasm1(SB)
+	MOVD $1960, R12
+	B    callbackasm1(SB)
+	MOVD $1961, R12
+	B    callbackasm1(SB)
+	MOVD $1962, R12
+	B    callbackasm1(SB)
+	MOVD $1963, R12
+	B    callbackasm1(SB)
+	MOVD $1964, R12
+	B    callbackasm1(SB)
+	MOVD $1965, R12
+	B    callbackasm1(SB)
+	MOVD $1966, R12
+	B    callbackasm1(SB)
+	MOVD $1967, R12
+	B    callbackasm1(SB)
+	MOVD $1968, R12
+	B    callbackasm1(SB)
+	MOVD $1969, R12
+	B    callbackasm1(SB)
+	MOVD $1970, R12
+	B    callbackasm1(SB)
+	MOVD $1971, R12
+	B    callbackasm1(SB)
+	MOVD $1972, R12
+	B    callbackasm1(SB)
+	MOVD $1973, R12
+	B    callbackasm1(SB)
+	MOVD $1974, R12
+	B    callbackasm1(SB)
+	MOVD $1975, R12
+	B    callbackasm1(SB)
+	MOVD $1976, R12
+	B    callbackasm1(SB)
+	MOVD $1977, R12
+	B    callbackasm1(SB)
+	MOVD $1978, R12
+	B    callbackasm1(SB)
+	MOVD $1979, R12
+	B    callbackasm1(SB)
+	MOVD $1980, R12
+	B    callbackasm1(SB)
+	MOVD $1981, R12
+	B    callbackasm1(SB)
+	MOVD $1982, R12
+	B    callbackasm1(SB)
+	MOVD $1983, R12
+	B    callbackasm1(SB)
+	MOVD $1984, R12
+	B    callbackasm1(SB)
+	MOVD $1985, R12
+	B    callbackasm1(SB)
+	MOVD $1986, R12
+	B    callbackasm1(SB)
+	MOVD $1987, R12
+	B    callbackasm1(SB)
+	MOVD $1988, R12
+	B    callbackasm1(SB)
+	MOVD $1989, R12
+	B    callbackasm1(SB)
+	MOVD $1990, R12
+	B    callbackasm1(SB)
+	MOVD $1991, R12
+	B    callbackasm1(SB)
+	MOVD $1992, R12
+	B    callbackasm1(SB)
+	MOVD $1993, R12
+	B    callbackasm1(SB)
+	MOVD $1994, R12
+	B    callbackasm1(SB)
+	MOVD $1995, R12
+	B    callbackasm1(SB)
+	MOVD $1996, R12
+	B    callbackasm1(SB)
+	MOVD $1997, R12
+	B    callbackasm1(SB)
+	MOVD $1998, R12
+	B    callbackasm1(SB)
+	MOVD $1999, R12
+	B    callbackasm1(SB)
diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md
index 520661db62..b2e0e2d9f7 100644
--- a/vendor/github.com/go-playground/validator/v10/README.md
+++ b/vendor/github.com/go-playground/validator/v10/README.md
@@ -1,7 +1,7 @@
 Package validator
 =================
-<img align="right" src="https://raw.githubusercontent.com/go-playground/validator/v10/logo.png">[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-![Project status](https://img.shields.io/badge/version-10.14.1-green.svg)
+<img align="right" src="logo.png">[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+![Project status](https://img.shields.io/badge/version-10.15.0-green.svg)
 [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
 [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
 [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)
@@ -158,6 +158,7 @@ Baked-in Validations
 | credit_card | Credit Card Number |
 | mongodb | MongoDB ObjectID |
 | cron | Cron |
+| spicedb | SpiceDb ObjectID/Permission/Type |
 | datetime | Datetime |
 | e164 | e164 formatted phone number |
 | email | E-mail String
diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go
index e676f1d164..ca9eeb1dd5 100644
--- a/vendor/github.com/go-playground/validator/v10/baked_in.go
+++ b/vendor/github.com/go-playground/validator/v10/baked_in.go
@@ -230,6 +230,7 @@ var (
 		"luhn_checksum":                 hasLuhnChecksum,
 		"mongodb":                       isMongoDB,
 		"cron":                          isCron,
+		"spicedb":                       isSpiceDB,
 	}
 )
 
@@ -1294,8 +1295,13 @@ func isEq(fl FieldLevel) bool {
 
 		return field.Uint() == p
 
-	case reflect.Float32, reflect.Float64:
-		p := asFloat(param)
+	case reflect.Float32:
+		p := asFloat32(param)
+
+		return field.Float() == p
+
+	case reflect.Float64:
+		p := asFloat64(param)
 
 		return field.Float() == p
 
@@ -1561,6 +1567,10 @@ func isFilePath(fl FieldLevel) bool {
 
 	field := fl.Field()
 
+	// Not valid if it is a directory.
+	if isDir(fl) {
+		return false
+	}
 	// If it exists, it obviously is valid.
 	// This is done first to avoid code duplication and unnecessary additional logic.
 	if exists = isFile(fl); exists {
@@ -1710,7 +1720,7 @@ func hasValue(fl FieldLevel) bool {
 		if fl.(*validate).fldIsPointer && field.Interface() != nil {
 			return true
 		}
-		return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface()
+		return field.IsValid() && !field.IsZero()
 	}
 }
 
@@ -1734,7 +1744,7 @@ func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue boo
 		if nullable && field.Interface() != nil {
 			return false
 		}
-		return field.IsValid() && field.Interface() == reflect.Zero(field.Type()).Interface()
+		return field.IsValid() && field.IsZero()
 	}
 }
 
@@ -1755,8 +1765,11 @@ func requireCheckFieldValue(
 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
 		return field.Uint() == asUint(value)
 
-	case reflect.Float32, reflect.Float64:
-		return field.Float() == asFloat(value)
+	case reflect.Float32:
+		return field.Float() == asFloat32(value)
+
+	case reflect.Float64:
+		return field.Float() == asFloat64(value)
 
 	case reflect.Slice, reflect.Map, reflect.Array:
 		return int64(field.Len()) == asInt(value)
@@ -2055,8 +2068,13 @@ func isGte(fl FieldLevel) bool {
 
 		return field.Uint() >= p
 
-	case reflect.Float32, reflect.Float64:
-		p := asFloat(param)
+	case reflect.Float32:
+		p := asFloat32(param)
+
+		return field.Float() >= p
+
+	case reflect.Float64:
+		p := asFloat64(param)
 
 		return field.Float() >= p
 
@@ -2101,10 +2119,16 @@ func isGt(fl FieldLevel) bool {
 
 		return field.Uint() > p
 
-	case reflect.Float32, reflect.Float64:
-		p := asFloat(param)
+	case reflect.Float32:
+		p := asFloat32(param)
+
+		return field.Float() > p
+
+	case reflect.Float64:
+		p := asFloat64(param)
 
 		return field.Float() > p
+
 	case reflect.Struct:
 
 		if field.Type().ConvertibleTo(timeType) {
@@ -2143,8 +2167,13 @@ func hasLengthOf(fl FieldLevel) bool {
 
 		return field.Uint() == p
 
-	case reflect.Float32, reflect.Float64:
-		p := asFloat(param)
+	case reflect.Float32:
+		p := asFloat32(param)
+
+		return field.Float() == p
+
+	case reflect.Float64:
+		p := asFloat64(param)
 
 		return field.Float() == p
 	}
@@ -2276,8 +2305,13 @@ func isLte(fl FieldLevel) bool {
 
 		return field.Uint() <= p
 
-	case reflect.Float32, reflect.Float64:
-		p := asFloat(param)
+	case reflect.Float32:
+		p := asFloat32(param)
+
+		return field.Float() <= p
+
+	case reflect.Float64:
+		p := asFloat64(param)
 
 		return field.Float() <= p
 
@@ -2322,8 +2356,13 @@ func isLt(fl FieldLevel) bool {
 
 		return field.Uint() < p
 
-	case reflect.Float32, reflect.Float64:
-		p := asFloat(param)
+	case reflect.Float32:
+		p := asFloat32(param)
+
+		return field.Float() < p
+
+	case reflect.Float64:
+		p := asFloat64(param)
 
 		return field.Float() < p
 
@@ -2808,6 +2847,23 @@ func isMongoDB(fl FieldLevel) bool {
 	return mongodbRegex.MatchString(val)
 }
 
+// isSpiceDB is the validation function for validating if the current field's value is valid for use with Authzed SpiceDB in the indicated way
+func isSpiceDB(fl FieldLevel) bool {
+	val := fl.Field().String()
+	param := fl.Param()
+
+	switch param {
+	case "permission":
+		return spicedbPermissionRegex.MatchString(val)
+	case "type":
+		return spicedbTypeRegex.MatchString(val)
+	case "id", "":
+		return spicedbIDRegex.MatchString(val)
+	}
+
+	panic("Unrecognized parameter: " + param)
+}
+
 // isCreditCard is the validation function for validating if the current field's value is a valid credit card number
 func isCreditCard(fl FieldLevel) bool {
 	val := fl.Field().String()
diff --git a/vendor/github.com/go-playground/validator/v10/cache.go b/vendor/github.com/go-playground/validator/v10/cache.go
index bbfd2a4af1..ddd37b8329 100644
--- a/vendor/github.com/go-playground/validator/v10/cache.go
+++ b/vendor/github.com/go-playground/validator/v10/cache.go
@@ -20,6 +20,7 @@ const (
 	typeOr
 	typeKeys
 	typeEndKeys
+	typeNestedStructLevel
 )
 
 const (
@@ -152,7 +153,7 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr
 		// and so only struct level caching can be used instead of combined with Field tag caching
 
 		if len(tag) > 0 {
-			ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false)
+			ctag, _ = v.parseFieldTagsRecursive(tag, fld, "", false)
 		} else {
 			// even if field doesn't have validations need cTag for traversing to potential inner/nested
 			// elements of the field.
@@ -171,7 +172,7 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr
 	return cs
 }
 
-func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) {
+func (v *Validate) parseFieldTagsRecursive(tag string, field reflect.StructField, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) {
 	var t string
 	noAlias := len(alias) == 0
 	tags := strings.Split(tag, tagSeparator)
@@ -185,9 +186,9 @@ func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias s
 		// check map for alias and process new tags, otherwise process as usual
 		if tagsVal, found := v.aliases[t]; found {
 			if i == 0 {
-				firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
+				firstCtag, current = v.parseFieldTagsRecursive(tagsVal, field, t, true)
 			} else {
-				next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
+				next, curr := v.parseFieldTagsRecursive(tagsVal, field, t, true)
 				current.next, current = next, curr
 
 			}
@@ -235,7 +236,7 @@ func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias s
 				}
 			}
 
-			current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false)
+			current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), field, "", false)
 			continue
 
 		case endKeysTag:
@@ -284,14 +285,18 @@ func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias s
 
 				current.tag = vals[0]
 				if len(current.tag) == 0 {
-					panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName)))
+					panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, field.Name)))
 				}
 
 				if wrapper, ok := v.validations[current.tag]; ok {
 					current.fn = wrapper.fn
 					current.runValidationWhenNil = wrapper.runValidatinOnNil
 				} else {
-					panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName)))
+					panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, field.Name)))
+				}
+
+				if current.typeof == typeDefault && isNestedStructOrStructPtr(field) {
+					current.typeof = typeNestedStructLevel
 				}
 
 				if len(orVals) > 1 {
@@ -319,7 +324,7 @@ func (v *Validate) fetchCacheTag(tag string) *cTag {
 		// isn't parsed again.
 		ctag, found = v.tagCache.Get(tag)
 		if !found {
-			ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false)
+			ctag, _ = v.parseFieldTagsRecursive(tag, reflect.StructField{}, "", false)
 			v.tagCache.Set(tag, ctag)
 		}
 	}
diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go
index f5aa9e5230..d1eff50fa7 100644
--- a/vendor/github.com/go-playground/validator/v10/doc.go
+++ b/vendor/github.com/go-playground/validator/v10/doc.go
@@ -247,7 +247,7 @@ Example #2
 This validates that the value is not the data types default zero value.
 For numbers ensures value is not zero. For strings ensures value is
 not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
+ensures the value is not nil. For structs ensures value is not the zero value.
 
 	Usage: required
 
@@ -256,7 +256,7 @@ ensures the value is not nil.
 The field under validation must be present and not empty only if all
 the other specified fields are equal to the value following the specified
 field. For strings ensures value is not "". For slices, maps, pointers,
-interfaces, channels and functions ensures the value is not nil.
+interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value.
 
 	Usage: required_if
 
@@ -273,7 +273,7 @@ Examples:
 The field under validation must be present and not empty unless all
 the other specified fields are equal to the value following the specified
 field. For strings ensures value is not "". For slices, maps, pointers,
-interfaces, channels and functions ensures the value is not nil.
+interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value.
 
 	Usage: required_unless
 
@@ -290,7 +290,7 @@ Examples:
 The field under validation must be present and not empty only if any
 of the other specified fields are present. For strings ensures value is
 not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
+ensures the value is not nil. For structs ensures value is not the zero value.
 
 	Usage: required_with
 
@@ -307,7 +307,7 @@ Examples:
 The field under validation must be present and not empty only if all
 of the other specified fields are present. For strings ensures value is
 not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
+ensures the value is not nil. For structs ensures value is not the zero value.
 
 	Usage: required_with_all
 
@@ -321,7 +321,7 @@ Example:
 The field under validation must be present and not empty only when any
 of the other specified fields are not present. For strings ensures value is
 not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
+ensures the value is not nil. For structs ensures value is not the zero value.
 
 	Usage: required_without
 
@@ -338,7 +338,7 @@ Examples:
 The field under validation must be present and not empty only when all
 of the other specified fields are not present. For strings ensures value is
 not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
+ensures the value is not nil. For structs ensures value is not the zero value.
 
 	Usage: required_without_all
 
@@ -352,7 +352,7 @@ Example:
 The field under validation must not be present or not empty only if all
 the other specified fields are equal to the value following the specified
 field. For strings ensures value is not "". For slices, maps, pointers,
-interfaces, channels and functions ensures the value is not nil.
+interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value.
 
 	Usage: excluded_if
 
@@ -369,7 +369,7 @@ Examples:
 The field under validation must not be present or empty unless all
 the other specified fields are equal to the value following the specified
 field. For strings ensures value is not "". For slices, maps, pointers,
-interfaces, channels and functions ensures the value is not nil.
+interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value.
 
 	Usage: excluded_unless
 
@@ -879,8 +879,6 @@ This is done using os.Stat and github.com/gabriel-vasile/mimetype
 
 	Usage: image
 
-# URL String
-
 # File Path
 
 This validates that a string value contains a valid file path but does not
@@ -1384,6 +1382,12 @@ This validates that a string value contains a valid cron expression.
 
 	Usage: cron
 
+# SpiceDb ObjectID/Permission/Object Type
+
+This validates that a string is valid for use with SpiceDb for the indicated purpose. If no purpose is given, a purpose of 'id' is assumed.
+
+	Usage: spicedb=id|permission|type
+
 # Alias Validators and Tags
 
 Alias Validators and Tags
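
For reference, a minimal sketch of how the new `spicedb` tag could be used through struct tags; the `Relationship` type and its field values below are illustrative only, not part of the library:

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// Relationship is an illustrative type; only the `spicedb` tags come from the library.
type Relationship struct {
	ResourceID string `validate:"spicedb=id"`         // SpiceDB object ID, e.g. "doc_123"
	Permission string `validate:"spicedb=permission"` // permission name, e.g. "view"
	Subject    string `validate:"spicedb=type"`       // namespaced object type, e.g. "org/user"
}

func main() {
	v := validator.New()
	r := Relationship{ResourceID: "doc_123", Permission: "view", Subject: "org/user"}
	if err := v.Struct(r); err != nil {
		fmt.Println("validation failed:", err)
		return
	}
	fmt.Println("all SpiceDB fields are valid")
}
```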
diff --git a/vendor/github.com/go-playground/validator/v10/regexes.go b/vendor/github.com/go-playground/validator/v10/regexes.go
index ba450b3d05..6c8f985607 100644
--- a/vendor/github.com/go-playground/validator/v10/regexes.go
+++ b/vendor/github.com/go-playground/validator/v10/regexes.go
@@ -68,6 +68,9 @@ const (
 	cveRegexString                   = `^CVE-(1999|2\d{3})-(0[^0]\d{2}|0\d[^0]\d{1}|0\d{2}[^0]|[1-9]{1}\d{3,})$` // CVE Format Id https://cve.mitre.org/cve/identifiers/syntaxchange.html
 	mongodbRegexString               = "^[a-f\\d]{24}$"
 	cronRegexString                  = `(@(annually|yearly|monthly|weekly|daily|hourly|reboot))|(@every (\d+(ns|us|µs|ms|s|m|h))+)|((((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|\*) ?){5,7})`
+	spicedbIDRegexString             = `^(([a-zA-Z0-9/_|\-=+]{1,})|\*)$`
+	spicedbPermissionRegexString     = "^([a-z][a-z0-9_]{1,62}[a-z0-9])?$"
+	spicedbTypeRegexString           = "^([a-z][a-z0-9_]{1,61}[a-z0-9]/)?[a-z][a-z0-9_]{1,62}[a-z0-9]$"
 )
 
 var (
@@ -134,4 +137,7 @@ var (
 	cveRegex                   = regexp.MustCompile(cveRegexString)
 	mongodbRegex               = regexp.MustCompile(mongodbRegexString)
 	cronRegex                  = regexp.MustCompile(cronRegexString)
+	spicedbIDRegex             = regexp.MustCompile(spicedbIDRegexString)
+	spicedbPermissionRegex     = regexp.MustCompile(spicedbPermissionRegexString)
+	spicedbTypeRegex           = regexp.MustCompile(spicedbTypeRegexString)
 )
diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go
index 3925cfe1cd..084d46173d 100644
--- a/vendor/github.com/go-playground/validator/v10/util.go
+++ b/vendor/github.com/go-playground/validator/v10/util.go
@@ -261,13 +261,19 @@ func asUint(param string) uint64 {
 	return i
 }
 
-// asFloat returns the parameter as a float64
+// asFloat64 returns the parameter as a float64
 // or panics if it can't convert
-func asFloat(param string) float64 {
-
+func asFloat64(param string) float64 {
 	i, err := strconv.ParseFloat(param, 64)
 	panicIf(err)
+	return i
+}
 
+// asFloat32 returns the parameter as a float64, parsed with 32-bit (float32) precision,
+// or panics if it can't convert
+func asFloat32(param string) float64 {
+	i, err := strconv.ParseFloat(param, 32)
+	panicIf(err)
 	return i
 }
 
@@ -286,3 +292,11 @@ func panicIf(err error) {
 		panic(err.Error())
 	}
 }
+
+func isNestedStructOrStructPtr(v reflect.StructField) bool {
+	if v.Type == nil {
+		return false
+	}
+	kind := v.Type.Kind()
+	return kind == reflect.Struct || kind == reflect.Ptr && v.Type.Elem().Kind() == reflect.Struct
+}
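
Why the `asFloat32`/`asFloat64` split matters: a `float32` struct field read back through `reflect.Value.Float()` is widened to `float64` but keeps only 32-bit precision, so comparing it against a parameter parsed at 64-bit precision can fail for values like `0.3`. A small standalone sketch of the effect (standard library only, not part of the validator):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// A float32 field value as reflect's Float() reports it: widened to float64.
	field := float64(float32(0.3))

	p64, _ := strconv.ParseFloat("0.3", 64) // old asFloat behaviour: full float64 precision
	p32, _ := strconv.ParseFloat("0.3", 32) // new asFloat32 behaviour: float32 precision

	fmt.Println(field == p64) // false: 0.3 rounded to float32 differs from the float64 value
	fmt.Println(field == p32) // true: both sides carry float32 precision
}
```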
diff --git a/vendor/github.com/go-playground/validator/v10/validator.go b/vendor/github.com/go-playground/validator/v10/validator.go
index 6f6d53ada7..a6fa1f5d53 100644
--- a/vendor/github.com/go-playground/validator/v10/validator.go
+++ b/vendor/github.com/go-playground/validator/v10/validator.go
@@ -170,7 +170,7 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr
 
 				if ct.typeof == typeStructOnly {
 					goto CONTINUE
-				} else if ct.typeof == typeIsDefault {
+				} else if ct.typeof == typeIsDefault || ct.typeof == typeNestedStructLevel {
 					// set Field Level fields
 					v.slflParent = parent
 					v.flField = current
diff --git a/vendor/github.com/goccy/go-json/.codecov.yml b/vendor/github.com/goccy/go-json/.codecov.yml
new file mode 100644
index 0000000000..e98134570c
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/.codecov.yml
@@ -0,0 +1,32 @@
+codecov:
+  require_ci_to_pass: yes
+
+coverage:
+  precision: 2
+  round: down
+  range: "70...100"
+
+  status:
+    project:
+      default:
+        target: 70%
+        threshold: 2%
+    patch: off
+    changes: no
+
+parsers:
+  gcov:
+    branch_detection:
+      conditional: yes
+      loop: yes
+      method: no
+      macro: no
+
+comment:
+  layout: "header,diff"
+  behavior: default
+  require_changes: no
+
+ignore:
+  - internal/encoder/vm_color
+  - internal/encoder/vm_color_indent
diff --git a/vendor/github.com/goccy/go-json/.gitignore b/vendor/github.com/goccy/go-json/.gitignore
new file mode 100644
index 0000000000..378283829c
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/.gitignore
@@ -0,0 +1,2 @@
+cover.html
+cover.out
diff --git a/vendor/github.com/goccy/go-json/.golangci.yml b/vendor/github.com/goccy/go-json/.golangci.yml
new file mode 100644
index 0000000000..57ae5a528f
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/.golangci.yml
@@ -0,0 +1,83 @@
+run:
+  skip-files:
+    - encode_optype.go
+    - ".*_test\\.go$"
+
+linters-settings:
+  govet:
+    enable-all: true
+    disable:
+      - shadow
+
+linters:
+  enable-all: true
+  disable:
+    - dogsled
+    - dupl
+    - exhaustive
+    - exhaustivestruct
+    - errorlint
+    - forbidigo
+    - funlen
+    - gci
+    - gochecknoglobals
+    - gochecknoinits
+    - gocognit
+    - gocritic
+    - gocyclo
+    - godot
+    - godox
+    - goerr113
+    - gofumpt
+    - gomnd
+    - gosec
+    - ifshort
+    - lll
+    - makezero
+    - nakedret
+    - nestif
+    - nlreturn
+    - paralleltest
+    - testpackage
+    - thelper
+    - wrapcheck
+    - interfacer
+    - lll
+    - nakedret
+    - nestif
+    - nlreturn
+    - testpackage
+    - wsl
+    - varnamelen
+    - nilnil
+    - ireturn
+    - govet
+    - forcetypeassert
+    - cyclop
+    - containedctx
+    - revive
+
+issues:
+  exclude-rules:
+    # not needed
+    - path: /*.go
+      text: "ST1003: should not use underscores in package names"
+      linters:
+        - stylecheck
+    - path: /*.go
+      text: "don't use an underscore in package name"
+      linters:
+        - golint
+    - path: rtype.go
+      linters:
+        - golint
+        - stylecheck
+    - path: error.go
+      linters:
+        - staticcheck
+
+  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
+  max-issues-per-linter: 0
+
+  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
+  max-same-issues: 0
diff --git a/vendor/github.com/goccy/go-json/CHANGELOG.md b/vendor/github.com/goccy/go-json/CHANGELOG.md
new file mode 100644
index 0000000000..d09bb89c31
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/CHANGELOG.md
@@ -0,0 +1,425 @@
+# v0.10.2 - 2023/03/20
+
+### New features
+
+* Support DebugDOT option for debugging encoder ( #440 )
+
+### Fix bugs
+
+* Fix combination of embedding structure and omitempty option ( #442 )
+
+# v0.10.1 - 2023/03/13
+
+### Fix bugs
+
+* Fix checkptr error for array decoder ( #415 )
+* Fix added buffer size check when decoding key ( #430 )
+* Fix handling of anonymous fields other than struct ( #431 )
+* Fix to not optimize when lower conversion can't handle byte-by-byte ( #432 )
+* Fix a problem that MarshalIndent does not work when UnorderedMap is specified ( #435 )
+* Fix mapDecoder.DecodeStream() for empty objects containing whitespace ( #425 )
+* Fix an issue that could not set the correct NextField for fields in the embedded structure ( #438 )
+
+# v0.10.0 - 2022/11/29
+
+### New features
+
+* Support JSON Path ( #250 )
+
+### Fix bugs
+
+* Fix marshaler for map's key ( #409 )
+
+# v0.9.11 - 2022/08/18
+
+### Fix bugs
+
+* Fix unexpected behavior when buffer ends with backslash ( #383 )
+* Fix stream decoding of escaped character ( #387 )
+
+# v0.9.10 - 2022/07/15
+
+### Fix bugs
+
+* Fix boundary exception of type caching ( #382 )
+
+# v0.9.9 - 2022/07/15
+
+### Fix bugs
+
+* Fix encoding of directed interface with typed nil ( #377 )
+* Fix embedded primitive type encoding using alias ( #378 )
+* Fix slice/array type encoding with types implementing MarshalJSON ( #379 )
+* Fix unicode decoding when the expected buffer state is not met after reading ( #380 )
+
+# v0.9.8 - 2022/06/30
+
+### Fix bugs
+
+* Fix decoding of surrogate-pair ( #365 )
+* Fix handling of embedded primitive type ( #366 )
+* Add validation of escape sequence for decoder ( #367 )
+* Fix stream tokenizing respecting UseNumber ( #369 )
+* Fix encoding when struct pointer type that implements Marshal JSON is embedded ( #375 )
+
+### Improve performance
+
+* Improve performance of linkRecursiveCode ( #368 )
+
+# v0.9.7 - 2022/04/22
+
+### Fix bugs
+
+#### Encoder
+
+* Add filtering process for encoding on slow path ( #355 )
+* Fix encoding of interface{} with pointer type ( #363 )
+
+#### Decoder
+
+* Fix map key decoder that implements UnmarshalJSON ( #353 )
+* Fix decoding of []uint8 type ( #361 )
+
+### New features
+
+* Add DebugWith option for encoder ( #356 )
+
+# v0.9.6 - 2022/03/22
+
+### Fix bugs
+
+* Correct the handling of the minimum value of int type for decoder ( #344 )
+* Fix bugs of stream decoder's bufferSize ( #349 )
+* Add a guard to use typeptr more safely ( #351 )
+
+### Improve decoder performance
+
+* Improve escapeString's performance ( #345 )
+
+### Others
+
+* Update go version for CI ( #347 )
+
+# v0.9.5 - 2022/03/04
+
+### Fix bugs
+
+* Fix panic when decoding time.Time with context ( #328 )
+* Fix reading the next character in buffer to nul consideration ( #338 )
+* Fix incorrect handling on skipValue ( #341 )
+
+### Improve decoder performance
+
+* Improve performance when a payload contains escape sequence ( #334 )
+
+# v0.9.4 - 2022/01/21
+
+* Fix IsNilForMarshaler for string type with omitempty ( #323 )
+* Fix the case where the embedded field is at the end ( #326 )
+
+# v0.9.3 - 2022/01/14
+
+* Fix logic of removing struct field for decoder ( #322 )
+
+# v0.9.2 - 2022/01/14
+
+* Add invalid decoder to delay type error judgment at decode ( #321 )
+
+# v0.9.1 - 2022/01/11
+
+* Fix encoding of MarshalText/MarshalJSON operation with head offset ( #319 )
+
+# v0.9.0 - 2022/01/05
+
+### New feature
+
+* Supports dynamic filtering of struct fields ( #314 )
+
+### Improve encoding performance
+
+* Improve map encoding performance ( #310 )
+* Optimize encoding path for escaped string ( #311 )
+* Add encoding option for performance ( #312 )
+
+### Fix bugs
+
+* Fix panic at encoding map value on 1.18 ( #310 )
+* Fix MarshalIndent for interface type ( #317 )
+
+# v0.8.1 - 2021/12/05
+
+* Fix operation conversion from PtrHead to Head in Recursive type ( #305 )
+
+# v0.8.0 - 2021/12/02
+
+* Fix embedded field conflict behavior ( #300 )
+* Refactor compiler for encoder ( #301 #302 )
+
+# v0.7.10 - 2021/10/16
+
+* Fix conversion from pointer to uint64  ( #294 )
+
+# v0.7.9 - 2021/09/28
+
+* Fix encoding of nil value about interface type that has method ( #291 )
+
+# v0.7.8 - 2021/09/01
+
+* Fix mapassign_faststr for indirect struct type ( #283 )
+* Fix encoding of not empty interface type ( #284 )
+* Fix encoding of empty struct interface type ( #286 )
+
+# v0.7.7 - 2021/08/25
+
+* Fix invalid utf8 on stream decoder ( #279 )
+* Fix buffer length bug on string stream decoder ( #280 )
+
+Thank you @orisano !!
+
+# v0.7.6 - 2021/08/13
+
+* Fix nil slice assignment ( #276 )
+* Improve error message ( #277 )
+
+# v0.7.5 - 2021/08/12
+
+* Fix encoding of embedded struct with tags ( #265 )
+* Fix encoding of embedded struct that isn't first field ( #272 )
+* Fix decoding of binary type with escaped char ( #273 )
+
+# v0.7.4 - 2021/07/06
+
+* Fix encoding of indirect layout structure ( #264 )
+
+# v0.7.3 - 2021/06/29
+
+* Fix encoding of pointer type in empty interface ( #262 )
+
+# v0.7.2 - 2021/06/26
+
+### Fix decoder
+
+* Add decoder for func type to fix decoding of nil function value ( #257 )
+* Fix stream decoding of []byte type ( #258 )
+
+### Performance
+
+* Improve decoding performance of map[string]interface{} type ( use `mapassign_faststr` ) ( #256 )
+* Improve encoding performance of empty interface type ( remove recursive calling of `vm.Run` ) ( #259 )
+
+### Benchmark
+
+* Add bytedance/sonic as benchmark target ( #254 )
+
+# v0.7.1 - 2021/06/18
+
+### Fix decoder
+
+* Fix error when unmarshal empty array ( #253 )
+
+# v0.7.0 - 2021/06/12
+
+### Support context for MarshalJSON and UnmarshalJSON ( #248 )
+
+* json.MarshalContext(context.Context, interface{}, ...json.EncodeOption) ([]byte, error)
+* json.NewEncoder(io.Writer).EncodeContext(context.Context, interface{}, ...json.EncodeOption) error
+* json.UnmarshalContext(context.Context, []byte, interface{}, ...json.DecodeOption) error
+* json.NewDecoder(io.Reader).DecodeContext(context.Context, interface{}) error
+
+```go
+type MarshalerContext interface {
+  MarshalJSON(context.Context) ([]byte, error)
+}
+
+type UnmarshalerContext interface {
+  UnmarshalJSON(context.Context, []byte) error
+}
+```
+
+### Add DecodeFieldPriorityFirstWin option ( #242 )
+
+In the default behavior, go-json, like encoding/json, will reflect the result of the last evaluation when a field with the same name exists. I've added new options to allow you to change this behavior. `json.DecodeFieldPriorityFirstWin` option reflects the result of the first evaluation if a field with the same name exists. This behavior has a performance advantage as it allows the subsequent strings to be skipped if all fields have been evaluated.
+
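As a rough illustration of the difference, assuming go-json's `UnmarshalWithOption` entry point and the `DecodeFieldPriorityFirstWin()` option constructor (the exact option plumbing should be checked against the package docs):

```go
package main

import (
	"fmt"

	"github.com/goccy/go-json"
)

func main() {
	// JSON with a duplicated key: encoding/json (and go-json by default) keeps the last value.
	data := []byte(`{"name":"first","name":"second"}`)

	var v struct {
		Name string `json:"name"`
	}

	_ = json.Unmarshal(data, &v)
	fmt.Println(v.Name) // "second" (last evaluation wins)

	// With DecodeFieldPriorityFirstWin the first evaluation wins and later duplicates are skipped.
	_ = json.UnmarshalWithOption(data, &v, json.DecodeFieldPriorityFirstWin())
	fmt.Println(v.Name) // "first"
}
```
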
+### Fix encoder
+
+* Fix indent number contains recursive type ( #249 )
+* Fix encoding of using empty interface as map key ( #244 )
+
+### Fix decoder
+
+* Fix decoding fields containing escaped characters ( #237 )
+
+### Refactor
+
+* Move some tests to subdirectory ( #243 )
+* Refactor package layout for decoder ( #238 )
+
+# v0.6.1 - 2021/06/02
+
+### Fix encoder
+
+* Fix value of totalLength for encoding ( #236 )
+
+# v0.6.0 - 2021/06/01
+
+### Support Colorize option for encoding (#233)
+
+```go
+b, err := json.MarshalWithOption(v, json.Colorize(json.DefaultColorScheme))
+if err != nil {
+  ...
+}
+fmt.Println(string(b)) // print colored json
+```
+
+### Refactor
+
+* Fix opcode layout - Adjust memory layout of the opcode to 128 bytes in a 64-bit environment ( #230 )
+* Refactor encode option ( #231 )
+* Refactor escape string ( #232 )
+
+# v0.5.1 - 2021/5/20
+
+### Optimization
+
+* Add type addrShift to enable bigger encoder/decoder cache ( #213 )
+
+### Fix decoder
+
+* Keep original reference of slice element ( #229 )
+
+### Refactor
+
+* Refactor Debug mode for encoding ( #226 )
+* Generate VM sources for encoding ( #227 )
+* Refactor validator for null/true/false for decoding ( #221 )
+
+# v0.5.0 - 2021/5/9
+
+### Supports using omitempty and string tags at the same time ( #216 )
+
+### Fix decoder
+
+* Fix stream decoder for unicode char ( #215 )
+* Fix decoding of slice element ( #219 )
+* Fix calculating of buffer length for stream decoder ( #220 )
+
+### Refactor
+
+* replace skipWhiteSpace goto by loop ( #212 )
+
+# v0.4.14 - 2021/5/4
+
+### Benchmark
+
+* Add valyala/fastjson to benchmark ( #193 )
+* Add benchmark task for CI ( #211 )
+
+### Fix decoder
+
+* Fix decoding of slice with unmarshal json type ( #198 )
+* Fix decoding of null value for interface type that does not implement Unmarshaler ( #205 )
+* Fix decoding of null value to []byte by json.Unmarshal ( #206 )
+* Fix decoding of backslash char at the end of string ( #207 )
+* Fix stream decoder for null/true/false value ( #208 )
+* Fix stream decoder for slow reader ( #211 )
+
+### Performance
+
+* If cap of slice is enough, reuse slice data for compatibility with encoding/json ( #200 )
+
+# v0.4.13 - 2021/4/20
+
+### Fix json.Compact and json.Indent
+
+* Support validation the input buffer for json.Compact and json.Indent ( #189 )
+* Optimize json.Compact and json.Indent ( improve memory footprint ) ( #190 )
+
+# v0.4.12 - 2021/4/15
+
+### Fix encoder
+
+* Fix unnecessary indent for empty slice type ( #181 )
+* Fix encoding of omitempty feature for the slice or interface type ( #183 )
+* Fix encoding custom types zero values with omitempty when marshaller exists ( #187 )
+
+### Fix decoder
+
+* Fix decoder for invalid top level value ( #184 )
+* Fix decoder for invalid number value ( #185 )
+
+# v0.4.11 - 2021/4/3
+
+* Improve decoder performance for interface type
+
+# v0.4.10 - 2021/4/2
+
+### Fix encoder
+
+* Fixed a bug when encoding slice and map containing recursive structures
+* Fixed a logic to determine if indirect reference
+
+# v0.4.9 - 2021/3/29
+
+### Add debug mode
+
+If you use `json.MarshalWithOption(v, json.Debug())` and a `panic` occurs inside `go-json`, debug information is printed to the console.
+
+### Support a new feature to compatible with encoding/json
+
+- invalid UTF-8 is coerced to valid UTF-8 ( without a performance penalty )
+
+### Fix encoder
+
+- Fixed handling of MarshalJSON of function type
+
+### Fix decoding of slice of pointer type
+
+If a pointer value already exists, go-json uses it (this behavior is needed to support prioritizing pre-filled values). However, because slices are reused internally, there was a bug where an element referred to the pointer value left over from a previous use. Since pre-existing pointer values do not need to be honored for slice elements, each slice element is now explicitly initialized to `nil`.
+
+# v0.4.8 - 2021/3/21
+
+### Reduce memory usage at compile time
+
+* go-json used to need about 2GB of memory at compile time; it now compiles with less than about 550MB.
+
+### Fix any encoder's bug
+
+* Add many test cases for encoder
+* Fix composite type ( slice/array/map )
+* Fix pointer types
+* Fix encoding of MarshalJSON or MarshalText or json.Number type
+
+### Refactor encoder
+
+* Change package layout for reducing memory usage at compile
+* Remove anonymous and only operation
+* Remove root property from encodeCompileContext and opcode
+
+### Fix CI
+
+* Add Go 1.16
+* Remove Go 1.13
+* Fix `make cover` task
+
+### Number/Delim/Token/RawMessage use the types defined in encoding/json by type alias
+
+# v0.4.7 - 2021/02/22
+
+### Fix decoder
+
+* Fix decoding of deep recursive structure
+* Fix decoding of embedded unexported pointer field
+* Fix invalid test case
+* Fix decoding of invalid value
+* Fix decoding of prefilled value
+* Fix not being able to return UnmarshalTypeError when it should be returned
+* Fix decoding of null value
+* Fix decoding of type of null string
+* Use pre allocated pointer if exists it at decoding
+
+### Reduce memory usage at compile
+
+* Integrate int/int8/int16/int32/int64 and uint/uint8/uint16/uint32/uint64 operation to reduce memory usage at compile
+
+### Remove unnecessary optype
diff --git a/vendor/github.com/goccy/go-json/LICENSE b/vendor/github.com/goccy/go-json/LICENSE
new file mode 100644
index 0000000000..6449c8bff6
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Masaaki Goshima
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/goccy/go-json/Makefile b/vendor/github.com/goccy/go-json/Makefile
new file mode 100644
index 0000000000..5bbfc4c9a2
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/Makefile
@@ -0,0 +1,39 @@
+PKG := github.com/goccy/go-json
+
+BIN_DIR := $(CURDIR)/bin
+PKGS := $(shell go list ./... | grep -v internal/cmd|grep -v test)
+COVER_PKGS := $(foreach pkg,$(PKGS),$(subst $(PKG),.,$(pkg)))
+
+COMMA := ,
+EMPTY :=
+SPACE := $(EMPTY) $(EMPTY)
+COVERPKG_OPT := $(subst $(SPACE),$(COMMA),$(COVER_PKGS))
+
+$(BIN_DIR):
+	@mkdir -p $(BIN_DIR)
+
+.PHONY: cover
+cover:
+	go test -coverpkg=$(COVERPKG_OPT) -coverprofile=cover.out ./...
+
+.PHONY: cover-html
+cover-html: cover
+	go tool cover -html=cover.out
+
+.PHONY: lint
+lint: golangci-lint
+	$(BIN_DIR)/golangci-lint run
+
+golangci-lint: | $(BIN_DIR)
+	@{ \
+		set -e; \
+		GOLANGCI_LINT_TMP_DIR=$$(mktemp -d); \
+		cd $$GOLANGCI_LINT_TMP_DIR; \
+		go mod init tmp; \
+		GOBIN=$(BIN_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.48.0; \
+		rm -rf $$GOLANGCI_LINT_TMP_DIR; \
+	}
+
+.PHONY: generate
+generate:
+	go generate ./internal/...
diff --git a/vendor/github.com/goccy/go-json/README.md b/vendor/github.com/goccy/go-json/README.md
new file mode 100644
index 0000000000..7bacc54f9c
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/README.md
@@ -0,0 +1,529 @@
+# go-json
+
+![Go](https://github.com/goccy/go-json/workflows/Go/badge.svg)
+[![GoDoc](https://godoc.org/github.com/goccy/go-json?status.svg)](https://pkg.go.dev/github.com/goccy/go-json?tab=doc)
+[![codecov](https://codecov.io/gh/goccy/go-json/branch/master/graph/badge.svg)](https://codecov.io/gh/goccy/go-json)
+
+Fast JSON encoder/decoder compatible with encoding/json for Go
+
+<img width="400px" src="https://user-images.githubusercontent.com/209884/92572337-42b42900-f2bf-11ea-973a-c74a359553a5.png"></img>
+
+# Roadmap
+
+```
+* version ( expected release date )
+
+* v0.9.0
+ |
+ | while maintaining compatibility with encoding/json, we will add convenient APIs
+ |
+ v
+* v1.0.0
+```
+
+We are accepting requests for features that will be implemented between v0.9.0 and v1.0.0.
+If you have the API you need, please submit your issue [here](https://github.com/goccy/go-json/issues).
+
+# Features
+
+- Drop-in replacement of `encoding/json`
+- Fast ( See [Benchmark section](https://github.com/goccy/go-json#benchmarks) )
+- Flexible customization with options
+- Coloring the encoded string
+- Can propagate context.Context to `MarshalJSON` or `UnmarshalJSON`
+- Can dynamically filter the fields of the structure type-safely
+
+# Installation
+
+```
+go get github.com/goccy/go-json
+```
+
+# How to use
+
+Replace the import statement `encoding/json` with `github.com/goccy/go-json`:
+
+```
+-import "encoding/json"
++import "github.com/goccy/go-json"
+```
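+
+Since the API is a drop-in replacement, existing `encoding/json` call sites keep working unchanged. A minimal sketch (the struct and values are illustrative):
+
+```go
+package main
+
+import (
+    "fmt"
+
+    json "github.com/goccy/go-json"
+)
+
+type User struct {
+    Name string `json:"name"`
+    Age  int    `json:"age"`
+}
+
+func main() {
+    // Marshal and Unmarshal have the same signatures as encoding/json.
+    b, err := json.Marshal(User{Name: "alice", Age: 20})
+    if err != nil {
+        panic(err)
+    }
+    fmt.Println(string(b)) // {"name":"alice","age":20}
+
+    var u User
+    if err := json.Unmarshal(b, &u); err != nil {
+        panic(err)
+    }
+    fmt.Println(u.Name, u.Age) // alice 20
+}
+```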
+
+# JSON library comparison
+
+|  name  |  encoder | decoder | compatible with `encoding/json` |
+| :----: | :------: | :-----: | :-----------------------------: |
+| encoding/json |  yes | yes | N/A |
+| [json-iterator/go](https://github.com/json-iterator/go) | yes | yes | partial |
+| [easyjson](https://github.com/mailru/easyjson) | yes | yes |  no |
+| [gojay](https://github.com/francoispqt/gojay) | yes | yes |  no |
+| [segmentio/encoding/json](https://github.com/segmentio/encoding/tree/master/json) | yes | yes | partial |
+| [jettison](https://github.com/wI2L/jettison) | yes | no | no |
+| [simdjson-go](https://github.com/minio/simdjson-go) | no | yes | no |
+| goccy/go-json | yes | yes | yes |
+
+- `json-iterator/go` isn't compatible with `encoding/json` in many ways (e.g. https://github.com/json-iterator/go/issues/229 ), and it hasn't been actively maintained for a long time.
+- `segmentio/encoding/json` has good encoder support, but some decoder APIs such as `Token` ( streaming decode ) are not supported
+
+## Other libraries
+
+- [jingo](https://github.com/bet365/jingo)
+
+I tried to benchmark it, but it didn't work.
+It also seems to panic when it receives an unexpected value, because there is no error handling...
+
+- [ffjson](https://github.com/pquerna/ffjson)
+
+Benchmarking gave very slow results.
+It seems to assume that the user manages the buffer pool properly.
+Also, development seems to have already stopped.
+
+# Benchmarks
+
+```
+$ cd benchmarks
+$ go test -bench .
+```
+
+## Encode
+
+<img width="700px" src="https://user-images.githubusercontent.com/209884/107126758-0845cb00-68f5-11eb-8db7-086fcf9bcfaa.png"></img>
+<img width="700px" src="https://user-images.githubusercontent.com/209884/107126757-07ad3480-68f5-11eb-87aa-858cc5eacfcb.png"></img>
+
+## Decode
+
+<img width="700" alt="" src="https://user-images.githubusercontent.com/209884/107979944-bd1d6d80-7002-11eb-944b-9d17b6674e3f.png">
+<img width="700" alt="" src="https://user-images.githubusercontent.com/209884/107979931-b989e680-7002-11eb-87a0-66fc22d90dd4.png">
+<img width="700" alt="" src="https://user-images.githubusercontent.com/209884/107979940-bc84d700-7002-11eb-9647-869bbc25c9d9.png">
+
+
+# Fuzzing
+
+[go-json-fuzz](https://github.com/goccy/go-json-fuzz) is the repository for fuzzing tests.
+If you run the test in this repository and find a bug, please commit the corpus to go-json-fuzz and report the issue to [go-json](https://github.com/goccy/go-json/issues).
+
+# How it works
+
+`go-json` is very fast in both encoding and decoding compared to other libraries.
+It would be easier to achieve performance by using automatic code generation or a dedicated interface, but `go-json` deliberately sticks to compatibility with `encoding/json` and its simple interface. Despite this, we are developing it with the aim of being the fastest library.
+
+Here, we explain the various speed-up techniques implemented by `go-json`.
+
+## Basic technique
+
+The techniques listed here are the ones used by most of the libraries listed above.
+
+### Buffer reuse
+
+Since the only value required for the result of `json.Marshal(interface{}) ([]byte, error)` is `[]byte`, the only value that must be allocated during encoding is the return value `[]byte`.
+
+Also, as the number of allocations increases, the performance will be affected, so the number of allocations should be kept as low as possible when creating `[]byte`.
+
+Therefore, there is a technique to reduce the number of times a new buffer must be allocated by reusing the buffer used for the previous encoding by using `sync.Pool`.
+
+Finally, by allocating a buffer as long as the resulting output and copying the contents into it, in theory you only need to allocate the return buffer once.
+
+```go
+type buffer struct {
+    data []byte
+}
+
+var bufPool = sync.Pool{
+    New: func() interface{} {
+        return &buffer{data: make([]byte, 0, 1024)}
+    },
+}
+
+buf := bufPool.Get().(*buffer)
+data := encode(buf.data) // reuse buf.data
+
+newBuf := make([]byte, len(data))
+copy(newBuf, data) // copy the encoded result into a right-sized buffer to return
+
+buf.data = data
+bufPool.Put(buf) // return the reusable working buffer to the pool
+```
+
+### Elimination of reflection
+
+As you know, the reflection operation is very slow.
+
+Therefore, using the fact that the address position where the type information is stored is fixed for each binary ( we call this `typeptr` ),
+we can use the address in the type information to call a pre-built optimized process.
+
+For example, you can get the address of the type information from `interface{}` as follows, and use that address to call a process that does not rely on reflection.
+
+To process without reflection, pass a pointer (`unsafe.Pointer`) to where the value is stored.
+
+```go
+
+type emptyInterface struct {
+    typ unsafe.Pointer
+    ptr unsafe.Pointer
+}
+
+var typeToEncoder = map[uintptr]func(unsafe.Pointer)([]byte, error){}
+
+func Marshal(v interface{}) ([]byte, error) {
+    iface := (*emptyInterface)(unsafe.Pointer(&v))
+    typeptr := uintptr(iface.typ)
+    if enc, exists := typeToEncoder[typeptr]; exists {
+        return enc(iface.ptr)
+    }
+    ...
+}
+```
+
+※ In reality, `typeToEncoder` can be referenced by multiple goroutines, so exclusive control is required.
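+
+A minimal sketch of that exclusive control using `sync.Map` from the standard library ( assuming `sync` and `unsafe` are imported; the helper name is illustrative ):
+
+```go
+var typeToEncoderMap sync.Map // map[uintptr]func(unsafe.Pointer) ([]byte, error)
+
+func encoderFor(typeptr uintptr) (func(unsafe.Pointer) ([]byte, error), bool) {
+    // Load is safe for concurrent readers; Store happens once per type when it is first compiled.
+    enc, ok := typeToEncoderMap.Load(typeptr)
+    if !ok {
+        return nil, false
+    }
+    return enc.(func(unsafe.Pointer) ([]byte, error)), true
+}
+```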
+
+## Unique speed-up technique
+
+## Encoder
+
+### Do not escape arguments of `Marshal`
+
+`json.Marshal` and `json.Unmarshal` receive an `interface{}` value and determine the type dynamically in order to process it.
+Normally, you need the `reflect` library to determine the type dynamically, but since `reflect.Type` is defined as an `interface`, calling a method on a `reflect.Type` causes the value behind it to escape.
+
+Therefore, the arguments of `Marshal` and `Unmarshal` always escape to the heap.
+However, `go-json` can use the functionality of `reflect.Type` while avoiding that escape.
+
+`reflect.Type` is defined as an `interface`, but in reality `reflect.Type` is implemented only by the struct `rtype` defined in the `reflect` package.
+For this reason, in practice `reflect.Type` is always a `*reflect.rtype`.
+
+Therefore, by handling `*reflect.rtype`, the concrete implementation of `reflect.Type`, directly, it is possible to avoid the escape because the value is used as a `struct` instead of an `interface`.
+
+The technique for working with `*reflect.rtype` directly from `go-json` is implemented at [rtype.go](https://github.com/goccy/go-json/blob/master/internal/runtime/rtype.go)
+
+The same technique has also been extracted as a separate library ( https://github.com/goccy/go-reflect )
+
+Initially this feature was the default behavior of `go-json`.
+But after careful testing, I found that when a large value is passed to `json.Marshal()` and the argument cannot be kept on the stack, it is not properly escaped to the heap (a bug in the Go compiler).
+
+Therefore, this feature is provided as an **optional feature** until this issue is resolved.
+
+To use it, add the `NoEscape` suffix, as in `MarshalNoEscape()`.
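+
+For example ( a minimal usage sketch ):
+
+```go
+v := struct {
+    Name string `json:"name"`
+}{Name: "hello"}
+
+// MarshalNoEscape is intended to let the argument stay on the stack
+// instead of escaping to the heap.
+b, err := json.MarshalNoEscape(v)
+if err != nil {
+    // handle error
+}
+_ = b
+```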
+
+### Encoding using opcode sequence
+
+I explained that you can use `typeptr` to call a pre-built process from type information.
+
+In other libraries, this dedicated process is implemented as a function call (for example, via an anonymous function), but function calls are inherently slow and should be avoided as much as possible.
+
+Therefore, `go-json` adopted an instruction-based execution system, the approach also used to implement virtual machines for programming languages.
+
+When a type is encoded for the first time, the opcode ( instruction ) sequence required for encoding it is created.
+From the second time onward, `typeptr` is used to fetch the cached, pre-built opcode sequence and encoding proceeds based on it. An example of the opcode sequence is shown below.
+
+```go
+json.Marshal(struct{
+    X int `json:"x"`
+    Y string `json:"y"`
+}{X: 1, Y: "hello"})
+```
+
+When encoding a struct like the one above, a sequence of opcodes like the following is created:
+
+```
+- opStructFieldHead ( `{` )
+- opStructFieldInt ( `"x": 1,` )
+- opStructFieldString ( `"y": "hello"` )
+- opStructEnd ( `}` )
+- opEnd
+```
+
+※ When each operation is processed, the characters shown on the right are written.
+
+In addition, each opcode is managed by the following structure ( pseudo code ).
+
+```go
+type opType int
+const (
+    opStructFieldHead opType = iota
+    opStructFieldInt
+    opStructFieldString
+    opStructEnd
+    opEnd
+)
+type opcode struct {
+    op     opType
+    key    []byte
+    offset uintptr // field offset used by the encode loop below
+    next   *opcode
+}
+```
+
+The process of encoding using the opcode sequence is roughly implemented as follows.
+
+```go
+func encode(code *opcode, b []byte, p unsafe.Pointer) ([]byte, error) {
+    for {
+        switch code.op {
+        case opStructFieldHead:
+            b = append(b, '{')
+            code = code.next
+        case opStructFieldInt:
+            b = append(b, code.key...)
+            b = appendInt(b, (*int)(unsafe.Pointer(uintptr(p)+code.offset)))
+            code = code.next
+        case opStructFieldString:
+            b = append(b, code.key...)
+            b = appendString(b, (*string)(unsafe.Pointer(uintptr(p)+code.offset)))
+            code = code.next
+        case opStructEnd:
+            b = append(b, '}')
+            code = code.next
+        case opEnd:
+            goto END
+        }
+    }
+END:
+    return b, nil
+}
+```
+
+In this way, a single huge `switch-case` encodes by walking the linked list of opcodes, avoiding unnecessary function calls.
+
+### Opcode sequence optimization
+
+One of the advantages of encoding using the opcode sequence is the ease of optimization.
+The opcode sequence mentioned above is actually converted into the following optimized operations and used.
+
+```
+- opStructFieldHeadInt ( `{"x": 1,` )
+- opStructEndString ( `"y": "hello"}` )
+- opEnd
+```
+
+It has been reduced from 5 opcodes to 3 opcodes!
+Reducing the number of opcodes means reducing the number of `switch-case` branches.
+In other words, the closer the number of operations is to 1, the faster the processing can be performed.
+
+In `go-json`, in addition to reducing the number of opcodes itself as shown above, encoding is sped up by preparing opcodes for optimized paths, as in the sketch below.
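+
+For example, a fused opcode can emit the opening brace and the first field in a single branch ( pseudo code, following the `encode` loop above ):
+
+```go
+        case opStructFieldHeadInt:
+            // "{" and the first int field are written by one operation
+            b = append(b, '{')
+            b = append(b, code.key...)
+            b = appendInt(b, (*int)(unsafe.Pointer(uintptr(p)+code.offset)))
+            code = code.next
+```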
+
+### Change recursive call from CALL to JMP
+
+Recursive processing is required during encoding if the type is defined recursively as follows:
+
+```go
+type T struct {
+    X int
+    U *U
+}
+
+type U struct {
+    T *T
+}
+
+b, err := json.Marshal(&T{
+    X: 1,
+    U: &U{
+        T: &T{
+            X: 2,
+        },
+    },
+})
+fmt.Println(string(b)) // {"X":1,"U":{"T":{"X":2,"U":null}}}
+```
+
+In `go-json`, recursive processing is handled by the `opStructFieldRecursive` operation type.
+
+In this operation, after acquiring the opcode sequence used for the recursive processing, the encoder does **not** call itself recursively; instead, it saves the necessary state itself and moves on to the next operation.
+
+Implementing recursion with a `JMP` operation while avoiding `CALL` is a well-known technique for building high-speed virtual machines.
+
+For more details, please refer to [the article](https://engineering.mercari.com/blog/entry/1599563768-081104c850) ( but Japanese only ).
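+
+Roughly, it looks like the following ( pseudo code in the style of the `encode` loop above; `retCodes`, `recursiveHead`, and `opStructFieldRecursiveEnd` are illustrative names, not go-json's actual implementation ):
+
+```go
+        case opStructFieldRecursive:
+            // save a "return address" instead of calling encode() again (no CALL)
+            retCodes = append(retCodes, code.next)
+            code = code.recursiveHead // JMP into the nested type's opcode sequence
+        case opStructFieldRecursiveEnd:
+            // pop the saved opcode and continue in the same loop (no RET)
+            code = retCodes[len(retCodes)-1]
+            retCodes = retCodes[:len(retCodes)-1]
+```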
+
+### Dispatch by typeptr from map to slice
+
+When retrieving the data cached from the type information by `typeptr`, a map is usually used.
+A map requires exclusive control, so a naive implementation uses `sync.Map`.
+
+However, this is slow, so it's a good idea to use the `atomic` package for exclusive control as implemented by `segmentio/encoding/json` ( https://github.com/segmentio/encoding/blob/master/json/codec.go#L41-L55 ).
+
+This implementation slows down writes in exchange for fast reads, but it works well because of the nature of the library: the same type is encoded far more often than a new type is registered.
+
+However, as a result of profiling, I noticed that `runtime.mapaccess2` accounts for a significant percentage of the execution time. So I considered whether the lookup could be changed from a map to a slice.
+
+There is an API named `typelinks` defined in the `runtime` package that the `reflect` package uses internally.
+This allows you to get all the type information defined in the binary at runtime.
+
+Being able to acquire all type information means that, by constructing a slice in advance sized to the total number of types, it is possible to look up by the value of `typeptr` without worrying about out-of-range access.
+
+However, if there is too much type information, it will use a lot of memory, so by default this optimization is only used if the slice size fits within **2MiB**.
+
+If this approach is not available, it will fall back to the `atomic` based process described above.
+
+If you want to know more, please refer to the implementation [here](https://github.com/goccy/go-json/blob/master/internal/runtime/type.go#L36-L100)
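+
+A minimal sketch of the idea ( the names are illustrative, not go-json's actual API ):
+
+```go
+type opcodeSet struct{ /* cached, pre-built opcode sequence */ }
+
+var (
+    baseTypeAddr  uintptr      // smallest typeptr obtained via runtime typelinks
+    typeToCodeSet []*opcodeSet // sized in advance so every typeptr in the binary fits
+)
+
+func lookupCodeSet(typeptr uintptr) *opcodeSet {
+    // plain slice indexing: no hashing, no bucket probing, no locking
+    return typeToCodeSet[typeptr-baseTypeAddr]
+}
+```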
+
+## Decoder
+
+### Dispatch by typeptr from map to slice
+
+Like the encoder, the decoder also uses typeptr to call the dedicated process.
+
+### Faster termination character inspection using NUL character
+
+In order to decode, you have to traverse the input buffer one character at a time.
+If you check whether the buffer has reached the end at every step, it becomes very slow.
+
+`buf` : a `[]byte` variable that holds the input passed to the decoder
+`cursor` : an `int64` variable that holds the current read position
+
+```go
+buflen := len(buf)
+for ; cursor < buflen; cursor++ { // cursor is compared with buflen on every iteration, which is slow
+    switch buf[cursor] {
+    case ' ', '\n', '\r', '\t':
+    }
+}
+```
+
+Therefore, by adding the `NUL` (`\000`) character to the end of the read buffer as shown below, it is possible to check the termination character at the same time as other characters.
+
+```go
+for {
+    switch buf[cursor] {
+    case ' ', '\n', '\r', '\t':
+    case '\000':
+        return nil
+    }
+    cursor++
+}
+```
+
+### Use Boundary Check Elimination
+
+Even with the `NUL`-character optimization, the Go compiler performs a bounds check every time, although `buf[cursor]` can never cause an out-of-range access.
+
+Therefore, `go-json` eliminates the bounds check by fetching characters in hot spots through pointer operations, as in the following code.
+
+```go
+func char(ptr unsafe.Pointer, offset int64) byte {
+	return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset)))
+}
+
+p := (*sliceHeader)(unsafe.Pointer(&buf)).data
+for {
+    switch char(p, cursor) {
+    case ' ', '\n', '\r', '\t':
+    case '\000':
+        return nil
+    }
+    cursor++
+}
+```
+
+### Checking the existence of fields of struct using Bitmaps
+
+Profiling showed that, when decoding structs, the field lookup process was taking a long time.
+
+For example, consider decoding a string like `{"a":1,"b":2,"c":3}` into the following structure:
+
+```go
+type T struct {
+    A int `json:"a"`
+    B int `json:"b"`
+    C int `json:"c"`
+}
+```
+
+It turned out that looking up the decoding routine for a field from the field name, as shown below, takes a lot of time during decoding.
+
+```go
+fieldName := decodeKey(buf, cursor) // "a" or "b" or "c"
+decoder, exists := fieldToDecoderMap[fieldName] // so slow
+if exists {
+    decoder(buf, cursor)
+} else {
+    skipValue(buf, cursor)
+}
+```
+
+To improve this, `json-iterator/go` is optimized to branch with a switch-case when the struct has 10 or fewer fields (switch-case is faster than a map). However, since the value hashed with the FNV algorithm is used for the conditional branch, there is a risk of hash collisions. `gojay` handles this part quickly by having the library user write the `switch-case` themselves.
+
+
+`go-json` considers and implements a new approach that is different from these. I call this **bitmap field optimization**.
+
+The range of values per character can be represented by `[256]byte`. Also, if the struct has 8 or fewer fields, an `int8` can represent the state of each field.
+In other words, it has the following structure.
+
+- Base ( 8bit ): `00000000`
+- Key "a": `00000001` ( assign key "a" to the first bit )
+- Key "b": `00000010` ( assign key "b" to the second bit )
+- Key "c": `00000100` ( assign key "c" to the third bit )
+
+The bitmap structure is the following:
+
+```
+        | key index(0) |
+------------------------
+ 0      | 00000000     |
+ 1      | 00000000     |
+~~      |              |
+97 (a)  | 00000001     |
+98 (b)  | 00000010     |
+99 (c)  | 00000100     |
+~~      |              |
+255     | 00000000     |
+```
+
+You can think of this as a bitmap with a height of `256` and a width equal to the maximum string length of the field names.
+In other words, it can be represented by the following type.
+
+```go
+[maxFieldKeyLength][256]int8
+```
+
+When decoding each character of a field name, check whether the character can still match a field by referring to the pre-built bitmap, like the following.
+
+```go
+var curBit int8 = math.MaxInt8 // 01111111
+
+c := char(buf, cursor)
+bit := bitmap[keyIdx][c]
+curBit &= bit
+if curBit == 0 {
+    // not found field
+}
+```
+
+If `curBit` is not `0` by the end of the field string, the string may have matched one of the fields.
+However, if the decoded string is shorter than the field string, you can get a false hit.
+
+- input: `{"a":1}`
+```go
+type T struct {
+    X int `json:"abc"`
+}
+```
+※ Since `a` is shorter than `abc`, decoding can reach the end of the field characters without `curBit` becoming 0.
+
+Rest assured: in this case it doesn't matter, because a false hit can be detected by comparing the string length of `a` with the string length of `abc`.
+
+Finally, calculate the position of the bit where `1` is set and get the corresponding value, and you're done.
+
+Using this technique, field lookups are possible with only bitwise operations and access to slices.
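+
+Putting it together, here is a small self-contained sketch of the lookup for the 3-field struct above ( using `uint8` and illustrative helper names for simplicity, not go-json's internal API ):
+
+```go
+package main
+
+import (
+    "fmt"
+    "math/bits"
+)
+
+func main() {
+    fields := []string{"a", "b", "c"}
+    const maxKeyLen = 1 // the longest field name here is 1 byte
+
+    // bitmap[keyIdx][char] has bit i set if field i's name has that char at position keyIdx.
+    var bitmap [maxKeyLen][256]uint8
+    for i, name := range fields {
+        for idx := 0; idx < len(name); idx++ {
+            bitmap[idx][name[idx]] |= 1 << uint(i)
+        }
+    }
+
+    key := "b" // the decoded object key
+    curBit := uint8(0xff)
+    for idx := 0; idx < len(key); idx++ {
+        curBit &= bitmap[idx][key[idx]]
+        if curBit == 0 {
+            fmt.Println("field not found")
+            return
+        }
+    }
+    // The position of the lowest set bit identifies the candidate field.
+    // A final length comparison guards against false hits on shorter keys.
+    fieldIdx := bits.TrailingZeros8(curBit)
+    if len(key) == len(fields[fieldIdx]) {
+        fmt.Println("matched field:", fields[fieldIdx]) // matched field: b
+    }
+}
+```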
+
+`go-json` uses a similar technique for structs with 9 to 16 fields. In that case, the bitmap is constructed with the type `[maxKeyLen][256]int16`.
+
+Currently, in addition to the limit on the number of fields, this optimization is not performed when the maximum field name length is long (specifically, 64 bytes or more), in order to save memory.
+
+### Others
+
+I have done many other optimizations as well. I will find time to write about them. If you have any questions about what's written here or about other optimizations, please visit the `#go-json` channel on `gophers.slack.com`.
+
+## Reference
+
+Regarding the background of go-json, the following articles are available (in Japanese only).
+
+- https://speakerdeck.com/goccy/zui-su-falsejsonraiburariwoqiu-mete
+- https://engineering.mercari.com/blog/entry/1599563768-081104c850/
+
+# Looking for Sponsors
+
+I'm looking for sponsors for this library. It is being developed as a personal project in my spare time. If you want a quick response or problem resolution when using this library in your project, please register as a [sponsor](https://github.com/sponsors/goccy); I will cooperate as much as possible. Of course, this library is released under the MIT license, so you can use it freely, free of charge.
+
+# License
+
+MIT
diff --git a/vendor/github.com/goccy/go-json/color.go b/vendor/github.com/goccy/go-json/color.go
new file mode 100644
index 0000000000..e80b22b486
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/color.go
@@ -0,0 +1,68 @@
+package json
+
+import (
+	"fmt"
+
+	"github.com/goccy/go-json/internal/encoder"
+)
+
+type (
+	ColorFormat = encoder.ColorFormat
+	ColorScheme = encoder.ColorScheme
+)
+
+const escape = "\x1b"
+
+type colorAttr int
+
+//nolint:deadcode,varcheck
+const (
+	fgBlackColor colorAttr = iota + 30
+	fgRedColor
+	fgGreenColor
+	fgYellowColor
+	fgBlueColor
+	fgMagentaColor
+	fgCyanColor
+	fgWhiteColor
+)
+
+//nolint:deadcode,varcheck
+const (
+	fgHiBlackColor colorAttr = iota + 90
+	fgHiRedColor
+	fgHiGreenColor
+	fgHiYellowColor
+	fgHiBlueColor
+	fgHiMagentaColor
+	fgHiCyanColor
+	fgHiWhiteColor
+)
+
+func createColorFormat(attr colorAttr) ColorFormat {
+	return ColorFormat{
+		Header: wrapColor(attr),
+		Footer: resetColor(),
+	}
+}
+
+func wrapColor(attr colorAttr) string {
+	return fmt.Sprintf("%s[%dm", escape, attr)
+}
+
+func resetColor() string {
+	return wrapColor(colorAttr(0))
+}
+
+var (
+	DefaultColorScheme = &ColorScheme{
+		Int:       createColorFormat(fgHiMagentaColor),
+		Uint:      createColorFormat(fgHiMagentaColor),
+		Float:     createColorFormat(fgHiMagentaColor),
+		Bool:      createColorFormat(fgHiYellowColor),
+		String:    createColorFormat(fgHiGreenColor),
+		Binary:    createColorFormat(fgHiRedColor),
+		ObjectKey: createColorFormat(fgHiCyanColor),
+		Null:      createColorFormat(fgBlueColor),
+	}
+)
diff --git a/vendor/github.com/goccy/go-json/decode.go b/vendor/github.com/goccy/go-json/decode.go
new file mode 100644
index 0000000000..74c6ac3bca
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/decode.go
@@ -0,0 +1,263 @@
+package json
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"reflect"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/decoder"
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type Decoder struct {
+	s *decoder.Stream
+}
+
+const (
+	nul = '\000'
+)
+
+type emptyInterface struct {
+	typ *runtime.Type
+	ptr unsafe.Pointer
+}
+
+func unmarshal(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
+	src := make([]byte, len(data)+1) // append nul byte to the end
+	copy(src, data)
+
+	header := (*emptyInterface)(unsafe.Pointer(&v))
+
+	if err := validateType(header.typ, uintptr(header.ptr)); err != nil {
+		return err
+	}
+	dec, err := decoder.CompileToGetDecoder(header.typ)
+	if err != nil {
+		return err
+	}
+	ctx := decoder.TakeRuntimeContext()
+	ctx.Buf = src
+	ctx.Option.Flags = 0
+	for _, optFunc := range optFuncs {
+		optFunc(ctx.Option)
+	}
+	cursor, err := dec.Decode(ctx, 0, 0, header.ptr)
+	if err != nil {
+		decoder.ReleaseRuntimeContext(ctx)
+		return err
+	}
+	decoder.ReleaseRuntimeContext(ctx)
+	return validateEndBuf(src, cursor)
+}
+
+func unmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
+	src := make([]byte, len(data)+1) // append nul byte to the end
+	copy(src, data)
+
+	header := (*emptyInterface)(unsafe.Pointer(&v))
+
+	if err := validateType(header.typ, uintptr(header.ptr)); err != nil {
+		return err
+	}
+	dec, err := decoder.CompileToGetDecoder(header.typ)
+	if err != nil {
+		return err
+	}
+	rctx := decoder.TakeRuntimeContext()
+	rctx.Buf = src
+	rctx.Option.Flags = 0
+	rctx.Option.Flags |= decoder.ContextOption
+	rctx.Option.Context = ctx
+	for _, optFunc := range optFuncs {
+		optFunc(rctx.Option)
+	}
+	cursor, err := dec.Decode(rctx, 0, 0, header.ptr)
+	if err != nil {
+		decoder.ReleaseRuntimeContext(rctx)
+		return err
+	}
+	decoder.ReleaseRuntimeContext(rctx)
+	return validateEndBuf(src, cursor)
+}
+
+var (
+	pathDecoder = decoder.NewPathDecoder()
+)
+
+func extractFromPath(path *Path, data []byte, optFuncs ...DecodeOptionFunc) ([][]byte, error) {
+	if path.path.RootSelectorOnly {
+		return [][]byte{data}, nil
+	}
+	src := make([]byte, len(data)+1) // append nul byte to the end
+	copy(src, data)
+
+	ctx := decoder.TakeRuntimeContext()
+	ctx.Buf = src
+	ctx.Option.Flags = 0
+	ctx.Option.Flags |= decoder.PathOption
+	ctx.Option.Path = path.path
+	for _, optFunc := range optFuncs {
+		optFunc(ctx.Option)
+	}
+	paths, cursor, err := pathDecoder.DecodePath(ctx, 0, 0)
+	if err != nil {
+		decoder.ReleaseRuntimeContext(ctx)
+		return nil, err
+	}
+	decoder.ReleaseRuntimeContext(ctx)
+	if err := validateEndBuf(src, cursor); err != nil {
+		return nil, err
+	}
+	return paths, nil
+}
+
+func unmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
+	src := make([]byte, len(data)+1) // append nul byte to the end
+	copy(src, data)
+
+	header := (*emptyInterface)(unsafe.Pointer(&v))
+
+	if err := validateType(header.typ, uintptr(header.ptr)); err != nil {
+		return err
+	}
+	dec, err := decoder.CompileToGetDecoder(header.typ)
+	if err != nil {
+		return err
+	}
+
+	ctx := decoder.TakeRuntimeContext()
+	ctx.Buf = src
+	ctx.Option.Flags = 0
+	for _, optFunc := range optFuncs {
+		optFunc(ctx.Option)
+	}
+	cursor, err := dec.Decode(ctx, 0, 0, noescape(header.ptr))
+	if err != nil {
+		decoder.ReleaseRuntimeContext(ctx)
+		return err
+	}
+	decoder.ReleaseRuntimeContext(ctx)
+	return validateEndBuf(src, cursor)
+}
+
+func validateEndBuf(src []byte, cursor int64) error {
+	for {
+		switch src[cursor] {
+		case ' ', '\t', '\n', '\r':
+			cursor++
+			continue
+		case nul:
+			return nil
+		}
+		return errors.ErrSyntax(
+			fmt.Sprintf("invalid character '%c' after top-level value", src[cursor]),
+			cursor+1,
+		)
+	}
+}
+
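+// noescape hides p from escape analysis: the xor with 0 is a no-op at runtime,
+// but it breaks the data-flow dependency the compiler tracks, so p is not forced to the heap.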
+//nolint:staticcheck
+//go:nosplit
+func noescape(p unsafe.Pointer) unsafe.Pointer {
+	x := uintptr(p)
+	return unsafe.Pointer(x ^ 0)
+}
+
+func validateType(typ *runtime.Type, p uintptr) error {
+	if typ == nil || typ.Kind() != reflect.Ptr || p == 0 {
+		return &InvalidUnmarshalError{Type: runtime.RType2Type(typ)}
+	}
+	return nil
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may
+// read data from r beyond the JSON values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	s := decoder.NewStream(r)
+	return &Decoder{
+		s: s,
+	}
+}
+
+// Buffered returns a reader of the data remaining in the Decoder's
+// buffer. The reader is valid until the next call to Decode.
+func (d *Decoder) Buffered() io.Reader {
+	return d.s.Buffered()
+}
+
+// Decode reads the next JSON-encoded value from its
+// input and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about
+// the conversion of JSON into a Go value.
+func (d *Decoder) Decode(v interface{}) error {
+	return d.DecodeWithOption(v)
+}
+
+// DecodeContext reads the next JSON-encoded value from its
+// input and stores it in the value pointed to by v with context.Context.
+func (d *Decoder) DecodeContext(ctx context.Context, v interface{}) error {
+	d.s.Option.Flags |= decoder.ContextOption
+	d.s.Option.Context = ctx
+	return d.DecodeWithOption(v)
+}
+
+func (d *Decoder) DecodeWithOption(v interface{}, optFuncs ...DecodeOptionFunc) error {
+	header := (*emptyInterface)(unsafe.Pointer(&v))
+	typ := header.typ
+	ptr := uintptr(header.ptr)
+	typeptr := uintptr(unsafe.Pointer(typ))
+	// noescape trick for header.typ ( reflect.*rtype )
+	copiedType := *(**runtime.Type)(unsafe.Pointer(&typeptr))
+
+	if err := validateType(copiedType, ptr); err != nil {
+		return err
+	}
+
+	dec, err := decoder.CompileToGetDecoder(typ)
+	if err != nil {
+		return err
+	}
+	if err := d.s.PrepareForDecode(); err != nil {
+		return err
+	}
+	s := d.s
+	for _, optFunc := range optFuncs {
+		optFunc(s.Option)
+	}
+	if err := dec.DecodeStream(s, 0, header.ptr); err != nil {
+		return err
+	}
+	s.Reset()
+	return nil
+}
+
+func (d *Decoder) More() bool {
+	return d.s.More()
+}
+
+func (d *Decoder) Token() (Token, error) {
+	return d.s.Token()
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+func (d *Decoder) DisallowUnknownFields() {
+	d.s.DisallowUnknownFields = true
+}
+
+func (d *Decoder) InputOffset() int64 {
+	return d.s.TotalOffset()
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (d *Decoder) UseNumber() {
+	d.s.UseNumber = true
+}
diff --git a/vendor/github.com/goccy/go-json/docker-compose.yml b/vendor/github.com/goccy/go-json/docker-compose.yml
new file mode 100644
index 0000000000..db40c79ad5
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/docker-compose.yml
@@ -0,0 +1,13 @@
+version: '2'
+services:
+  go-json:
+    image: golang:1.18
+    volumes:
+      - '.:/go/src/go-json'
+    deploy:
+      resources:
+        limits:
+          memory: 620M
+    working_dir: /go/src/go-json
+    command: |
+      sh -c "go test -c . && ls go-json.test"
diff --git a/vendor/github.com/goccy/go-json/encode.go b/vendor/github.com/goccy/go-json/encode.go
new file mode 100644
index 0000000000..4bd899f38b
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/encode.go
@@ -0,0 +1,326 @@
+package json
+
+import (
+	"context"
+	"io"
+	"os"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/encoder"
+	"github.com/goccy/go-json/internal/encoder/vm"
+	"github.com/goccy/go-json/internal/encoder/vm_color"
+	"github.com/goccy/go-json/internal/encoder/vm_color_indent"
+	"github.com/goccy/go-json/internal/encoder/vm_indent"
+)
+
+// An Encoder writes JSON values to an output stream.
+type Encoder struct {
+	w                 io.Writer
+	enabledIndent     bool
+	enabledHTMLEscape bool
+	prefix            string
+	indentStr         string
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{w: w, enabledHTMLEscape: true}
+}
+
+// Encode writes the JSON encoding of v to the stream, followed by a newline character.
+//
+// See the documentation for Marshal for details about the conversion of Go values to JSON.
+func (e *Encoder) Encode(v interface{}) error {
+	return e.EncodeWithOption(v)
+}
+
+// EncodeWithOption call Encode with EncodeOption.
+func (e *Encoder) EncodeWithOption(v interface{}, optFuncs ...EncodeOptionFunc) error {
+	ctx := encoder.TakeRuntimeContext()
+	ctx.Option.Flag = 0
+
+	err := e.encodeWithOption(ctx, v, optFuncs...)
+
+	encoder.ReleaseRuntimeContext(ctx)
+	return err
+}
+
+// EncodeContext call Encode with context.Context and EncodeOption.
+func (e *Encoder) EncodeContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) error {
+	rctx := encoder.TakeRuntimeContext()
+	rctx.Option.Flag = 0
+	rctx.Option.Flag |= encoder.ContextOption
+	rctx.Option.Context = ctx
+
+	err := e.encodeWithOption(rctx, v, optFuncs...)
+
+	encoder.ReleaseRuntimeContext(rctx)
+	return err
+}
+
+func (e *Encoder) encodeWithOption(ctx *encoder.RuntimeContext, v interface{}, optFuncs ...EncodeOptionFunc) error {
+	if e.enabledHTMLEscape {
+		ctx.Option.Flag |= encoder.HTMLEscapeOption
+	}
+	ctx.Option.Flag |= encoder.NormalizeUTF8Option
+	ctx.Option.DebugOut = os.Stdout
+	for _, optFunc := range optFuncs {
+		optFunc(ctx.Option)
+	}
+	var (
+		buf []byte
+		err error
+	)
+	if e.enabledIndent {
+		buf, err = encodeIndent(ctx, v, e.prefix, e.indentStr)
+	} else {
+		buf, err = encode(ctx, v)
+	}
+	if err != nil {
+		return err
+	}
+	if e.enabledIndent {
+		buf = buf[:len(buf)-2]
+	} else {
+		buf = buf[:len(buf)-1]
+	}
+	buf = append(buf, '\n')
+	if _, err := e.w.Write(buf); err != nil {
+		return err
+	}
+	return nil
+}
+
+// SetEscapeHTML specifies whether problematic HTML characters should be escaped inside JSON quoted strings.
+// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e to avoid certain safety problems that can arise when embedding JSON in HTML.
+//
+// In non-HTML settings where the escaping interferes with the readability of the output, SetEscapeHTML(false) disables this behavior.
+func (e *Encoder) SetEscapeHTML(on bool) {
+	e.enabledHTMLEscape = on
+}
+
+// SetIndent instructs the encoder to format each subsequent encoded value as if indented by the package-level function Indent(dst, src, prefix, indent).
+// Calling SetIndent("", "") disables indentation.
+func (e *Encoder) SetIndent(prefix, indent string) {
+	if prefix == "" && indent == "" {
+		e.enabledIndent = false
+		return
+	}
+	e.prefix = prefix
+	e.indentStr = indent
+	e.enabledIndent = true
+}
+
+func marshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) {
+	rctx := encoder.TakeRuntimeContext()
+	rctx.Option.Flag = 0
+	rctx.Option.Flag = encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option | encoder.ContextOption
+	rctx.Option.Context = ctx
+	for _, optFunc := range optFuncs {
+		optFunc(rctx.Option)
+	}
+
+	buf, err := encode(rctx, v)
+	if err != nil {
+		encoder.ReleaseRuntimeContext(rctx)
+		return nil, err
+	}
+
+	// this line exists to escape call of `runtime.makeslicecopy` .
+	// if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`,
+	// dst buffer size and src buffer size are different.
+	// in this case, compiler uses `runtime.makeslicecopy`, but it is slow.
+	buf = buf[:len(buf)-1]
+	copied := make([]byte, len(buf))
+	copy(copied, buf)
+
+	encoder.ReleaseRuntimeContext(rctx)
+	return copied, nil
+}
+
+func marshal(v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) {
+	ctx := encoder.TakeRuntimeContext()
+
+	ctx.Option.Flag = 0
+	ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option)
+	for _, optFunc := range optFuncs {
+		optFunc(ctx.Option)
+	}
+
+	buf, err := encode(ctx, v)
+	if err != nil {
+		encoder.ReleaseRuntimeContext(ctx)
+		return nil, err
+	}
+
+	// this line exists to escape call of `runtime.makeslicecopy` .
+	// if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`,
+	// dst buffer size and src buffer size are different.
+	// in this case, compiler uses `runtime.makeslicecopy`, but it is slow.
+	buf = buf[:len(buf)-1]
+	copied := make([]byte, len(buf))
+	copy(copied, buf)
+
+	encoder.ReleaseRuntimeContext(ctx)
+	return copied, nil
+}
+
+func marshalNoEscape(v interface{}) ([]byte, error) {
+	ctx := encoder.TakeRuntimeContext()
+
+	ctx.Option.Flag = 0
+	ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option)
+
+	buf, err := encodeNoEscape(ctx, v)
+	if err != nil {
+		encoder.ReleaseRuntimeContext(ctx)
+		return nil, err
+	}
+
+	// this line exists to escape call of `runtime.makeslicecopy` .
+	// if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`,
+	// dst buffer size and src buffer size are different.
+	// in this case, compiler uses `runtime.makeslicecopy`, but it is slow.
+	buf = buf[:len(buf)-1]
+	copied := make([]byte, len(buf))
+	copy(copied, buf)
+
+	encoder.ReleaseRuntimeContext(ctx)
+	return copied, nil
+}
+
+func marshalIndent(v interface{}, prefix, indent string, optFuncs ...EncodeOptionFunc) ([]byte, error) {
+	ctx := encoder.TakeRuntimeContext()
+
+	ctx.Option.Flag = 0
+	ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option | encoder.IndentOption)
+	for _, optFunc := range optFuncs {
+		optFunc(ctx.Option)
+	}
+
+	buf, err := encodeIndent(ctx, v, prefix, indent)
+	if err != nil {
+		encoder.ReleaseRuntimeContext(ctx)
+		return nil, err
+	}
+
+	buf = buf[:len(buf)-2]
+	copied := make([]byte, len(buf))
+	copy(copied, buf)
+
+	encoder.ReleaseRuntimeContext(ctx)
+	return copied, nil
+}
+
+func encode(ctx *encoder.RuntimeContext, v interface{}) ([]byte, error) {
+	b := ctx.Buf[:0]
+	if v == nil {
+		b = encoder.AppendNull(ctx, b)
+		b = encoder.AppendComma(ctx, b)
+		return b, nil
+	}
+	header := (*emptyInterface)(unsafe.Pointer(&v))
+	typ := header.typ
+
+	typeptr := uintptr(unsafe.Pointer(typ))
+	codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr)
+	if err != nil {
+		return nil, err
+	}
+
+	p := uintptr(header.ptr)
+	ctx.Init(p, codeSet.CodeLength)
+	ctx.KeepRefs = append(ctx.KeepRefs, header.ptr)
+
+	buf, err := encodeRunCode(ctx, b, codeSet)
+	if err != nil {
+		return nil, err
+	}
+	ctx.Buf = buf
+	return buf, nil
+}
+
+func encodeNoEscape(ctx *encoder.RuntimeContext, v interface{}) ([]byte, error) {
+	b := ctx.Buf[:0]
+	if v == nil {
+		b = encoder.AppendNull(ctx, b)
+		b = encoder.AppendComma(ctx, b)
+		return b, nil
+	}
+	header := (*emptyInterface)(unsafe.Pointer(&v))
+	typ := header.typ
+
+	typeptr := uintptr(unsafe.Pointer(typ))
+	codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr)
+	if err != nil {
+		return nil, err
+	}
+
+	p := uintptr(header.ptr)
+	ctx.Init(p, codeSet.CodeLength)
+	buf, err := encodeRunCode(ctx, b, codeSet)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx.Buf = buf
+	return buf, nil
+}
+
+func encodeIndent(ctx *encoder.RuntimeContext, v interface{}, prefix, indent string) ([]byte, error) {
+	b := ctx.Buf[:0]
+	if v == nil {
+		b = encoder.AppendNull(ctx, b)
+		b = encoder.AppendCommaIndent(ctx, b)
+		return b, nil
+	}
+	header := (*emptyInterface)(unsafe.Pointer(&v))
+	typ := header.typ
+
+	typeptr := uintptr(unsafe.Pointer(typ))
+	codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr)
+	if err != nil {
+		return nil, err
+	}
+
+	p := uintptr(header.ptr)
+	ctx.Init(p, codeSet.CodeLength)
+	buf, err := encodeRunIndentCode(ctx, b, codeSet, prefix, indent)
+
+	ctx.KeepRefs = append(ctx.KeepRefs, header.ptr)
+
+	if err != nil {
+		return nil, err
+	}
+
+	ctx.Buf = buf
+	return buf, nil
+}
+
+func encodeRunCode(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
+	if (ctx.Option.Flag & encoder.DebugOption) != 0 {
+		if (ctx.Option.Flag & encoder.ColorizeOption) != 0 {
+			return vm_color.DebugRun(ctx, b, codeSet)
+		}
+		return vm.DebugRun(ctx, b, codeSet)
+	}
+	if (ctx.Option.Flag & encoder.ColorizeOption) != 0 {
+		return vm_color.Run(ctx, b, codeSet)
+	}
+	return vm.Run(ctx, b, codeSet)
+}
+
+func encodeRunIndentCode(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet, prefix, indent string) ([]byte, error) {
+	ctx.Prefix = []byte(prefix)
+	ctx.IndentStr = []byte(indent)
+	if (ctx.Option.Flag & encoder.DebugOption) != 0 {
+		if (ctx.Option.Flag & encoder.ColorizeOption) != 0 {
+			return vm_color_indent.DebugRun(ctx, b, codeSet)
+		}
+		return vm_indent.DebugRun(ctx, b, codeSet)
+	}
+	if (ctx.Option.Flag & encoder.ColorizeOption) != 0 {
+		return vm_color_indent.Run(ctx, b, codeSet)
+	}
+	return vm_indent.Run(ctx, b, codeSet)
+}
diff --git a/vendor/github.com/goccy/go-json/error.go b/vendor/github.com/goccy/go-json/error.go
new file mode 100644
index 0000000000..5b2dcee50e
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/error.go
@@ -0,0 +1,41 @@
+package json
+
+import (
+	"github.com/goccy/go-json/internal/errors"
+)
+
+// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// attempting to encode a string value with invalid UTF-8 sequences.
+// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// replacing invalid bytes with the Unicode replacement rune U+FFFD.
+//
+// Deprecated: No longer used; kept for compatibility.
+type InvalidUTF8Error = errors.InvalidUTF8Error
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError = errors.InvalidUnmarshalError
+
+// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method.
+type MarshalerError = errors.MarshalerError
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError = errors.SyntaxError
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+//
+// Deprecated: No longer used; kept for compatibility.
+type UnmarshalFieldError = errors.UnmarshalFieldError
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError = errors.UnmarshalTypeError
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError = errors.UnsupportedTypeError
+
+type UnsupportedValueError = errors.UnsupportedValueError
+
+type PathError = errors.PathError
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go b/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go
new file mode 100644
index 0000000000..b6876cf0d0
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go
@@ -0,0 +1,41 @@
+package decoder
+
+import (
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type anonymousFieldDecoder struct {
+	structType *runtime.Type
+	offset     uintptr
+	dec        Decoder
+}
+
+func newAnonymousFieldDecoder(structType *runtime.Type, offset uintptr, dec Decoder) *anonymousFieldDecoder {
+	return &anonymousFieldDecoder{
+		structType: structType,
+		offset:     offset,
+		dec:        dec,
+	}
+}
+
+func (d *anonymousFieldDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	if *(*unsafe.Pointer)(p) == nil {
+		*(*unsafe.Pointer)(p) = unsafe_New(d.structType)
+	}
+	p = *(*unsafe.Pointer)(p)
+	return d.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+d.offset))
+}
+
+func (d *anonymousFieldDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	if *(*unsafe.Pointer)(p) == nil {
+		*(*unsafe.Pointer)(p) = unsafe_New(d.structType)
+	}
+	p = *(*unsafe.Pointer)(p)
+	return d.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+d.offset))
+}
+
+func (d *anonymousFieldDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return d.dec.DecodePath(ctx, cursor, depth)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/array.go b/vendor/github.com/goccy/go-json/internal/decoder/array.go
new file mode 100644
index 0000000000..4b23ed43fe
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/array.go
@@ -0,0 +1,176 @@
+package decoder
+
+import (
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type arrayDecoder struct {
+	elemType     *runtime.Type
+	size         uintptr
+	valueDecoder Decoder
+	alen         int
+	structName   string
+	fieldName    string
+	zeroValue    unsafe.Pointer
+}
+
+func newArrayDecoder(dec Decoder, elemType *runtime.Type, alen int, structName, fieldName string) *arrayDecoder {
+	// workaround to avoid checkptr errors. cannot use `*(*unsafe.Pointer)(unsafe_New(elemType))` directly.
+	zeroValuePtr := unsafe_New(elemType)
+	zeroValue := **(**unsafe.Pointer)(unsafe.Pointer(&zeroValuePtr))
+	return &arrayDecoder{
+		valueDecoder: dec,
+		elemType:     elemType,
+		size:         elemType.Size(),
+		alen:         alen,
+		structName:   structName,
+		fieldName:    fieldName,
+		zeroValue:    zeroValue,
+	}
+}
+
+func (d *arrayDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	depth++
+	if depth > maxDecodeNestingDepth {
+		return errors.ErrExceededMaxDepth(s.char(), s.cursor)
+	}
+
+	for {
+		switch s.char() {
+		case ' ', '\n', '\t', '\r':
+		case 'n':
+			if err := nullBytes(s); err != nil {
+				return err
+			}
+			return nil
+		case '[':
+			idx := 0
+			s.cursor++
+			if s.skipWhiteSpace() == ']' {
+				for idx < d.alen {
+					*(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue
+					idx++
+				}
+				s.cursor++
+				return nil
+			}
+			for {
+				if idx < d.alen {
+					if err := d.valueDecoder.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+uintptr(idx)*d.size)); err != nil {
+						return err
+					}
+				} else {
+					if err := s.skipValue(depth); err != nil {
+						return err
+					}
+				}
+				idx++
+				switch s.skipWhiteSpace() {
+				case ']':
+					for idx < d.alen {
+						*(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue
+						idx++
+					}
+					s.cursor++
+					return nil
+				case ',':
+					s.cursor++
+					continue
+				case nul:
+					if s.read() {
+						s.cursor++
+						continue
+					}
+					goto ERROR
+				default:
+					goto ERROR
+				}
+			}
+		case nul:
+			if s.read() {
+				continue
+			}
+			goto ERROR
+		default:
+			goto ERROR
+		}
+		s.cursor++
+	}
+ERROR:
+	return errors.ErrUnexpectedEndOfJSON("array", s.totalOffset())
+}
+
+func (d *arrayDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	buf := ctx.Buf
+	depth++
+	if depth > maxDecodeNestingDepth {
+		return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
+	}
+
+	for {
+		switch buf[cursor] {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+			continue
+		case 'n':
+			if err := validateNull(buf, cursor); err != nil {
+				return 0, err
+			}
+			cursor += 4
+			return cursor, nil
+		case '[':
+			idx := 0
+			cursor++
+			cursor = skipWhiteSpace(buf, cursor)
+			if buf[cursor] == ']' {
+				for idx < d.alen {
+					*(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue
+					idx++
+				}
+				cursor++
+				return cursor, nil
+			}
+			for {
+				if idx < d.alen {
+					c, err := d.valueDecoder.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+uintptr(idx)*d.size))
+					if err != nil {
+						return 0, err
+					}
+					cursor = c
+				} else {
+					c, err := skipValue(buf, cursor, depth)
+					if err != nil {
+						return 0, err
+					}
+					cursor = c
+				}
+				idx++
+				cursor = skipWhiteSpace(buf, cursor)
+				switch buf[cursor] {
+				case ']':
+					for idx < d.alen {
+						*(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue
+						idx++
+					}
+					cursor++
+					return cursor, nil
+				case ',':
+					cursor++
+					continue
+				default:
+					return 0, errors.ErrInvalidCharacter(buf[cursor], "array", cursor)
+				}
+			}
+		default:
+			return 0, errors.ErrUnexpectedEndOfJSON("array", cursor)
+		}
+	}
+}
+
+func (d *arrayDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return nil, 0, fmt.Errorf("json: array decoder does not support decode path")
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/assign.go b/vendor/github.com/goccy/go-json/internal/decoder/assign.go
new file mode 100644
index 0000000000..c53e6ad9fc
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/assign.go
@@ -0,0 +1,438 @@
+package decoder
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+var (
+	nilValue = reflect.ValueOf(nil)
+)
+
+func AssignValue(src, dst reflect.Value) error {
+	if dst.Type().Kind() != reflect.Ptr {
+		return fmt.Errorf("invalid dst type. required pointer type: %T", dst.Type())
+	}
+	casted, err := castValue(dst.Elem().Type(), src)
+	if err != nil {
+		return err
+	}
+	dst.Elem().Set(casted)
+	return nil
+}
+
+func castValue(t reflect.Type, v reflect.Value) (reflect.Value, error) {
+	switch t.Kind() {
+	case reflect.Int:
+		vv, err := castInt(v)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(int(vv.Int())), nil
+	case reflect.Int8:
+		vv, err := castInt(v)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(int8(vv.Int())), nil
+	case reflect.Int16:
+		vv, err := castInt(v)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(int16(vv.Int())), nil
+	case reflect.Int32:
+		vv, err := castInt(v)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(int32(vv.Int())), nil
+	case reflect.Int64:
+		return castInt(v)
+	case reflect.Uint:
+		vv, err := castUint(v)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(uint(vv.Uint())), nil
+	case reflect.Uint8:
+		vv, err := castUint(v)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(uint8(vv.Uint())), nil
+	case reflect.Uint16:
+		vv, err := castUint(v)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(uint16(vv.Uint())), nil
+	case reflect.Uint32:
+		vv, err := castUint(v)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(uint32(vv.Uint())), nil
+	case reflect.Uint64:
+		return castUint(v)
+	case reflect.Uintptr:
+		vv, err := castUint(v)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(uintptr(vv.Uint())), nil
+	case reflect.String:
+		return castString(v)
+	case reflect.Bool:
+		return castBool(v)
+	case reflect.Float32:
+		vv, err := castFloat(v)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(float32(vv.Float())), nil
+	case reflect.Float64:
+		return castFloat(v)
+	case reflect.Array:
+		return castArray(t, v)
+	case reflect.Slice:
+		return castSlice(t, v)
+	case reflect.Map:
+		return castMap(t, v)
+	case reflect.Struct:
+		return castStruct(t, v)
+	}
+	return v, nil
+}
+
+func castInt(v reflect.Value) (reflect.Value, error) {
+	switch v.Type().Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v, nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return reflect.ValueOf(int64(v.Uint())), nil
+	case reflect.String:
+		i64, err := strconv.ParseInt(v.String(), 10, 64)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(i64), nil
+	case reflect.Bool:
+		if v.Bool() {
+			return reflect.ValueOf(int64(1)), nil
+		}
+		return reflect.ValueOf(int64(0)), nil
+	case reflect.Float32, reflect.Float64:
+		return reflect.ValueOf(int64(v.Float())), nil
+	case reflect.Array:
+		if v.Len() > 0 {
+			return castInt(v.Index(0))
+		}
+		return nilValue, fmt.Errorf("failed to cast to int64 from empty array")
+	case reflect.Slice:
+		if v.Len() > 0 {
+			return castInt(v.Index(0))
+		}
+		return nilValue, fmt.Errorf("failed to cast to int64 from empty slice")
+	case reflect.Interface:
+		return castInt(reflect.ValueOf(v.Interface()))
+	case reflect.Map:
+		return nilValue, fmt.Errorf("failed to cast to int64 from map")
+	case reflect.Struct:
+		return nilValue, fmt.Errorf("failed to cast to int64 from struct")
+	case reflect.Ptr:
+		return castInt(v.Elem())
+	}
+	return nilValue, fmt.Errorf("failed to cast to int64 from %s", v.Type().Kind())
+}
+
+func castUint(v reflect.Value) (reflect.Value, error) {
+	switch v.Type().Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return reflect.ValueOf(uint64(v.Int())), nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v, nil
+	case reflect.String:
+		u64, err := strconv.ParseUint(v.String(), 10, 64)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(u64), nil
+	case reflect.Bool:
+		if v.Bool() {
+			return reflect.ValueOf(uint64(1)), nil
+		}
+		return reflect.ValueOf(uint64(0)), nil
+	case reflect.Float32, reflect.Float64:
+		return reflect.ValueOf(uint64(v.Float())), nil
+	case reflect.Array:
+		if v.Len() > 0 {
+			return castUint(v.Index(0))
+		}
+		return nilValue, fmt.Errorf("failed to cast to uint64 from empty array")
+	case reflect.Slice:
+		if v.Len() > 0 {
+			return castUint(v.Index(0))
+		}
+		return nilValue, fmt.Errorf("failed to cast to uint64 from empty slice")
+	case reflect.Interface:
+		return castUint(reflect.ValueOf(v.Interface()))
+	case reflect.Map:
+		return nilValue, fmt.Errorf("failed to cast to uint64 from map")
+	case reflect.Struct:
+		return nilValue, fmt.Errorf("failed to cast to uint64 from struct")
+	case reflect.Ptr:
+		return castUint(v.Elem())
+	}
+	return nilValue, fmt.Errorf("failed to cast to uint64 from %s", v.Type().Kind())
+}
+
+func castString(v reflect.Value) (reflect.Value, error) {
+	switch v.Type().Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return reflect.ValueOf(fmt.Sprint(v.Int())), nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return reflect.ValueOf(fmt.Sprint(v.Uint())), nil
+	case reflect.String:
+		return v, nil
+	case reflect.Bool:
+		if v.Bool() {
+			return reflect.ValueOf("true"), nil
+		}
+		return reflect.ValueOf("false"), nil
+	case reflect.Float32, reflect.Float64:
+		return reflect.ValueOf(fmt.Sprint(v.Float())), nil
+	case reflect.Array:
+		if v.Len() > 0 {
+			return castString(v.Index(0))
+		}
+		return nilValue, fmt.Errorf("failed to cast to string from empty array")
+	case reflect.Slice:
+		if v.Len() > 0 {
+			return castString(v.Index(0))
+		}
+		return nilValue, fmt.Errorf("failed to cast to string from empty slice")
+	case reflect.Interface:
+		return castString(reflect.ValueOf(v.Interface()))
+	case reflect.Map:
+		return nilValue, fmt.Errorf("failed to cast to string from map")
+	case reflect.Struct:
+		return nilValue, fmt.Errorf("failed to cast to string from struct")
+	case reflect.Ptr:
+		return castString(v.Elem())
+	}
+	return nilValue, fmt.Errorf("failed to cast to string from %s", v.Type().Kind())
+}
+
+func castBool(v reflect.Value) (reflect.Value, error) {
+	switch v.Type().Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch v.Int() {
+		case 0:
+			return reflect.ValueOf(false), nil
+		case 1:
+			return reflect.ValueOf(true), nil
+		}
+		return nilValue, fmt.Errorf("failed to cast to bool from %d", v.Int())
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch v.Uint() {
+		case 0:
+			return reflect.ValueOf(false), nil
+		case 1:
+			return reflect.ValueOf(true), nil
+		}
+		return nilValue, fmt.Errorf("failed to cast to bool from %d", v.Uint())
+	case reflect.String:
+		b, err := strconv.ParseBool(v.String())
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(b), nil
+	case reflect.Bool:
+		return v, nil
+	case reflect.Float32, reflect.Float64:
+		switch v.Float() {
+		case 0:
+			return reflect.ValueOf(false), nil
+		case 1:
+			return reflect.ValueOf(true), nil
+		}
+		return nilValue, fmt.Errorf("failed to cast to bool from %f", v.Float())
+	case reflect.Array:
+		if v.Len() > 0 {
+			return castBool(v.Index(0))
+		}
+		return nilValue, fmt.Errorf("failed to cast to string from empty array")
+	case reflect.Slice:
+		if v.Len() > 0 {
+			return castBool(v.Index(0))
+		}
+		return nilValue, fmt.Errorf("failed to cast to string from empty slice")
+	case reflect.Interface:
+		return castBool(reflect.ValueOf(v.Interface()))
+	case reflect.Map:
+		return nilValue, fmt.Errorf("failed to cast to string from map")
+	case reflect.Struct:
+		return nilValue, fmt.Errorf("failed to cast to string from struct")
+	case reflect.Ptr:
+		return castBool(v.Elem())
+	}
+	return nilValue, fmt.Errorf("failed to cast to bool from %s", v.Type().Kind())
+}
+
+func castFloat(v reflect.Value) (reflect.Value, error) {
+	switch v.Type().Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return reflect.ValueOf(float64(v.Int())), nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return reflect.ValueOf(float64(v.Uint())), nil
+	case reflect.String:
+		f64, err := strconv.ParseFloat(v.String(), 64)
+		if err != nil {
+			return nilValue, err
+		}
+		return reflect.ValueOf(f64), nil
+	case reflect.Bool:
+		if v.Bool() {
+			return reflect.ValueOf(float64(1)), nil
+		}
+		return reflect.ValueOf(float64(0)), nil
+	case reflect.Float32, reflect.Float64:
+		return v, nil
+	case reflect.Array:
+		if v.Len() > 0 {
+			return castFloat(v.Index(0))
+		}
+		return nilValue, fmt.Errorf("failed to cast to float64 from empty array")
+	case reflect.Slice:
+		if v.Len() > 0 {
+			return castFloat(v.Index(0))
+		}
+		return nilValue, fmt.Errorf("failed to cast to float64 from empty slice")
+	case reflect.Interface:
+		return castFloat(reflect.ValueOf(v.Interface()))
+	case reflect.Map:
+		return nilValue, fmt.Errorf("failed to cast to float64 from map")
+	case reflect.Struct:
+		return nilValue, fmt.Errorf("failed to cast to float64 from struct")
+	case reflect.Ptr:
+		return castFloat(v.Elem())
+	}
+	return nilValue, fmt.Errorf("failed to cast to float64 from %s", v.Type().Kind())
+}
+
+func castArray(t reflect.Type, v reflect.Value) (reflect.Value, error) {
+	kind := v.Type().Kind()
+	if kind == reflect.Interface {
+		return castArray(t, reflect.ValueOf(v.Interface()))
+	}
+	if kind != reflect.Slice && kind != reflect.Array {
+		return nilValue, fmt.Errorf("failed to cast to array from %s", kind)
+	}
+	if t.Elem() == v.Type().Elem() {
+		return v, nil
+	}
+	if t.Len() != v.Len() {
+		return nilValue, fmt.Errorf("failed to cast [%d]array from slice of %d length", t.Len(), v.Len())
+	}
+	ret := reflect.New(t).Elem()
+	for i := 0; i < v.Len(); i++ {
+		vv, err := castValue(t.Elem(), v.Index(i))
+		if err != nil {
+			return nilValue, err
+		}
+		ret.Index(i).Set(vv)
+	}
+	return ret, nil
+}
+
+func castSlice(t reflect.Type, v reflect.Value) (reflect.Value, error) {
+	kind := v.Type().Kind()
+	if kind == reflect.Interface {
+		return castSlice(t, reflect.ValueOf(v.Interface()))
+	}
+	if kind != reflect.Slice && kind != reflect.Array {
+		return nilValue, fmt.Errorf("failed to cast to slice from %s", kind)
+	}
+	if t.Elem() == v.Type().Elem() {
+		return v, nil
+	}
+	ret := reflect.MakeSlice(t, v.Len(), v.Len())
+	for i := 0; i < v.Len(); i++ {
+		vv, err := castValue(t.Elem(), v.Index(i))
+		if err != nil {
+			return nilValue, err
+		}
+		ret.Index(i).Set(vv)
+	}
+	return ret, nil
+}
+
+func castMap(t reflect.Type, v reflect.Value) (reflect.Value, error) {
+	ret := reflect.MakeMap(t)
+	switch v.Type().Kind() {
+	case reflect.Map:
+		iter := v.MapRange()
+		for iter.Next() {
+			key, err := castValue(t.Key(), iter.Key())
+			if err != nil {
+				return nilValue, err
+			}
+			value, err := castValue(t.Elem(), iter.Value())
+			if err != nil {
+				return nilValue, err
+			}
+			ret.SetMapIndex(key, value)
+		}
+		return ret, nil
+	case reflect.Interface:
+		return castMap(t, reflect.ValueOf(v.Interface()))
+	case reflect.Slice:
+		if v.Len() > 0 {
+			return castMap(t, v.Index(0))
+		}
+		return nilValue, fmt.Errorf("failed to cast to map from empty slice")
+	}
+	return nilValue, fmt.Errorf("failed to cast to map from %s", v.Type().Kind())
+}
+
+func castStruct(t reflect.Type, v reflect.Value) (reflect.Value, error) {
+	ret := reflect.New(t).Elem()
+	switch v.Type().Kind() {
+	case reflect.Map:
+		iter := v.MapRange()
+		for iter.Next() {
+			key := iter.Key()
+			k, err := castString(key)
+			if err != nil {
+				return nilValue, err
+			}
+			fieldName := k.String()
+			field, ok := t.FieldByName(fieldName)
+			if ok {
+				value, err := castValue(field.Type, iter.Value())
+				if err != nil {
+					return nilValue, err
+				}
+				ret.FieldByName(fieldName).Set(value)
+			}
+		}
+		return ret, nil
+	case reflect.Struct:
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			ret.FieldByName(name).Set(v.FieldByName(name))
+		}
+		return ret, nil
+	case reflect.Interface:
+		return castStruct(t, reflect.ValueOf(v.Interface()))
+	case reflect.Slice:
+		if v.Len() > 0 {
+			return castStruct(t, v.Index(0))
+		}
+		return nilValue, fmt.Errorf("failed to cast to struct from empty slice")
+	default:
+		return nilValue, fmt.Errorf("failed to cast to struct from %s", v.Type().Kind())
+	}
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/bool.go b/vendor/github.com/goccy/go-json/internal/decoder/bool.go
new file mode 100644
index 0000000000..ba6cf5bc49
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/bool.go
@@ -0,0 +1,83 @@
+package decoder
+
+import (
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+)
+
+type boolDecoder struct {
+	structName string
+	fieldName  string
+}
+
+func newBoolDecoder(structName, fieldName string) *boolDecoder {
+	return &boolDecoder{structName: structName, fieldName: fieldName}
+}
+
+func (d *boolDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	c := s.skipWhiteSpace()
+	for {
+		switch c {
+		case 't':
+			if err := trueBytes(s); err != nil {
+				return err
+			}
+			**(**bool)(unsafe.Pointer(&p)) = true
+			return nil
+		case 'f':
+			if err := falseBytes(s); err != nil {
+				return err
+			}
+			**(**bool)(unsafe.Pointer(&p)) = false
+			return nil
+		case 'n':
+			if err := nullBytes(s); err != nil {
+				return err
+			}
+			return nil
+		case nul:
+			if s.read() {
+				c = s.char()
+				continue
+			}
+			goto ERROR
+		}
+		break
+	}
+ERROR:
+	return errors.ErrUnexpectedEndOfJSON("bool", s.totalOffset())
+}
+
+func (d *boolDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	buf := ctx.Buf
+	cursor = skipWhiteSpace(buf, cursor)
+	switch buf[cursor] {
+	case 't':
+		if err := validateTrue(buf, cursor); err != nil {
+			return 0, err
+		}
+		cursor += 4
+		**(**bool)(unsafe.Pointer(&p)) = true
+		return cursor, nil
+	case 'f':
+		if err := validateFalse(buf, cursor); err != nil {
+			return 0, err
+		}
+		cursor += 5
+		**(**bool)(unsafe.Pointer(&p)) = false
+		return cursor, nil
+	case 'n':
+		if err := validateNull(buf, cursor); err != nil {
+			return 0, err
+		}
+		cursor += 4
+		return cursor, nil
+	}
+	return 0, errors.ErrUnexpectedEndOfJSON("bool", cursor)
+}
+
+func (d *boolDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return nil, 0, fmt.Errorf("json: bool decoder does not support decode path")
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/bytes.go b/vendor/github.com/goccy/go-json/internal/decoder/bytes.go
new file mode 100644
index 0000000000..939bf43274
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/bytes.go
@@ -0,0 +1,118 @@
+package decoder
+
+import (
+	"encoding/base64"
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type bytesDecoder struct {
+	typ           *runtime.Type
+	sliceDecoder  Decoder
+	stringDecoder *stringDecoder
+	structName    string
+	fieldName     string
+}
+
+func byteUnmarshalerSliceDecoder(typ *runtime.Type, structName string, fieldName string) Decoder {
+	var unmarshalDecoder Decoder
+	switch {
+	case runtime.PtrTo(typ).Implements(unmarshalJSONType):
+		unmarshalDecoder = newUnmarshalJSONDecoder(runtime.PtrTo(typ), structName, fieldName)
+	case runtime.PtrTo(typ).Implements(unmarshalTextType):
+		unmarshalDecoder = newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName)
+	default:
+		unmarshalDecoder, _ = compileUint8(typ, structName, fieldName)
+	}
+	return newSliceDecoder(unmarshalDecoder, typ, 1, structName, fieldName)
+}
+
+func newBytesDecoder(typ *runtime.Type, structName string, fieldName string) *bytesDecoder {
+	return &bytesDecoder{
+		typ:           typ,
+		sliceDecoder:  byteUnmarshalerSliceDecoder(typ, structName, fieldName),
+		stringDecoder: newStringDecoder(structName, fieldName),
+		structName:    structName,
+		fieldName:     fieldName,
+	}
+}
+
+func (d *bytesDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	bytes, err := d.decodeStreamBinary(s, depth, p)
+	if err != nil {
+		return err
+	}
+	if bytes == nil {
+		s.reset()
+		return nil
+	}
+	decodedLen := base64.StdEncoding.DecodedLen(len(bytes))
+	buf := make([]byte, decodedLen)
+	n, err := base64.StdEncoding.Decode(buf, bytes)
+	if err != nil {
+		return err
+	}
+	*(*[]byte)(p) = buf[:n]
+	s.reset()
+	return nil
+}
+
+func (d *bytesDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	bytes, c, err := d.decodeBinary(ctx, cursor, depth, p)
+	if err != nil {
+		return 0, err
+	}
+	if bytes == nil {
+		return c, nil
+	}
+	cursor = c
+	decodedLen := base64.StdEncoding.DecodedLen(len(bytes))
+	b := make([]byte, decodedLen)
+	n, err := base64.StdEncoding.Decode(b, bytes)
+	if err != nil {
+		return 0, err
+	}
+	*(*[]byte)(p) = b[:n]
+	return cursor, nil
+}
+
+func (d *bytesDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return nil, 0, fmt.Errorf("json: []byte decoder does not support decode path")
+}
+
+func (d *bytesDecoder) decodeStreamBinary(s *Stream, depth int64, p unsafe.Pointer) ([]byte, error) {
+	c := s.skipWhiteSpace()
+	if c == '[' {
+		if d.sliceDecoder == nil {
+			return nil, &errors.UnmarshalTypeError{
+				Type:   runtime.RType2Type(d.typ),
+				Offset: s.totalOffset(),
+			}
+		}
+		err := d.sliceDecoder.DecodeStream(s, depth, p)
+		return nil, err
+	}
+	return d.stringDecoder.decodeStreamByte(s)
+}
+
+func (d *bytesDecoder) decodeBinary(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) ([]byte, int64, error) {
+	buf := ctx.Buf
+	cursor = skipWhiteSpace(buf, cursor)
+	if buf[cursor] == '[' {
+		if d.sliceDecoder == nil {
+			return nil, 0, &errors.UnmarshalTypeError{
+				Type:   runtime.RType2Type(d.typ),
+				Offset: cursor,
+			}
+		}
+		c, err := d.sliceDecoder.Decode(ctx, cursor, depth, p)
+		if err != nil {
+			return nil, 0, err
+		}
+		return nil, c, nil
+	}
+	return d.stringDecoder.decodeByte(buf, cursor)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile.go b/vendor/github.com/goccy/go-json/internal/decoder/compile.go
new file mode 100644
index 0000000000..fab6437647
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/compile.go
@@ -0,0 +1,487 @@
+package decoder
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+	"sync/atomic"
+	"unicode"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+var (
+	jsonNumberType   = reflect.TypeOf(json.Number(""))
+	typeAddr         *runtime.TypeAddr
+	cachedDecoderMap unsafe.Pointer // map[uintptr]decoder
+	cachedDecoder    []Decoder
+)
+
+func init() {
+	typeAddr = runtime.AnalyzeTypeAddr()
+	if typeAddr == nil {
+		typeAddr = &runtime.TypeAddr{}
+	}
+	cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1)
+}
+
+func loadDecoderMap() map[uintptr]Decoder {
+	p := atomic.LoadPointer(&cachedDecoderMap)
+	return *(*map[uintptr]Decoder)(unsafe.Pointer(&p))
+}
+
+func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) {
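+	// Copy-on-write: build a fresh map containing the new entry plus all
+	// existing entries, then publish it atomically so concurrent readers of
+	// cachedDecoderMap never observe a partially written map.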
+	newDecoderMap := make(map[uintptr]Decoder, len(m)+1)
+	newDecoderMap[typ] = dec
+
+	for k, v := range m {
+		newDecoderMap[k] = v
+	}
+
+	atomic.StorePointer(&cachedDecoderMap, *(*unsafe.Pointer)(unsafe.Pointer(&newDecoderMap)))
+}
+
+func compileToGetDecoderSlowPath(typeptr uintptr, typ *runtime.Type) (Decoder, error) {
+	decoderMap := loadDecoderMap()
+	if dec, exists := decoderMap[typeptr]; exists {
+		return dec, nil
+	}
+
+	dec, err := compileHead(typ, map[uintptr]Decoder{})
+	if err != nil {
+		return nil, err
+	}
+	storeDecoder(typeptr, dec, decoderMap)
+	return dec, nil
+}
+
+func compileHead(typ *runtime.Type, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
+	switch {
+	case implementsUnmarshalJSONType(runtime.PtrTo(typ)):
+		return newUnmarshalJSONDecoder(runtime.PtrTo(typ), "", ""), nil
+	case runtime.PtrTo(typ).Implements(unmarshalTextType):
+		return newUnmarshalTextDecoder(runtime.PtrTo(typ), "", ""), nil
+	}
+	return compile(typ.Elem(), "", "", structTypeToDecoder)
+}
+
+func compile(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
+	switch {
+	case implementsUnmarshalJSONType(runtime.PtrTo(typ)):
+		return newUnmarshalJSONDecoder(runtime.PtrTo(typ), structName, fieldName), nil
+	case runtime.PtrTo(typ).Implements(unmarshalTextType):
+		return newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName), nil
+	}
+
+	switch typ.Kind() {
+	case reflect.Ptr:
+		return compilePtr(typ, structName, fieldName, structTypeToDecoder)
+	case reflect.Struct:
+		return compileStruct(typ, structName, fieldName, structTypeToDecoder)
+	case reflect.Slice:
+		elem := typ.Elem()
+		if elem.Kind() == reflect.Uint8 {
+			return compileBytes(elem, structName, fieldName)
+		}
+		return compileSlice(typ, structName, fieldName, structTypeToDecoder)
+	case reflect.Array:
+		return compileArray(typ, structName, fieldName, structTypeToDecoder)
+	case reflect.Map:
+		return compileMap(typ, structName, fieldName, structTypeToDecoder)
+	case reflect.Interface:
+		return compileInterface(typ, structName, fieldName)
+	case reflect.Uintptr:
+		return compileUint(typ, structName, fieldName)
+	case reflect.Int:
+		return compileInt(typ, structName, fieldName)
+	case reflect.Int8:
+		return compileInt8(typ, structName, fieldName)
+	case reflect.Int16:
+		return compileInt16(typ, structName, fieldName)
+	case reflect.Int32:
+		return compileInt32(typ, structName, fieldName)
+	case reflect.Int64:
+		return compileInt64(typ, structName, fieldName)
+	case reflect.Uint:
+		return compileUint(typ, structName, fieldName)
+	case reflect.Uint8:
+		return compileUint8(typ, structName, fieldName)
+	case reflect.Uint16:
+		return compileUint16(typ, structName, fieldName)
+	case reflect.Uint32:
+		return compileUint32(typ, structName, fieldName)
+	case reflect.Uint64:
+		return compileUint64(typ, structName, fieldName)
+	case reflect.String:
+		return compileString(typ, structName, fieldName)
+	case reflect.Bool:
+		return compileBool(structName, fieldName)
+	case reflect.Float32:
+		return compileFloat32(structName, fieldName)
+	case reflect.Float64:
+		return compileFloat64(structName, fieldName)
+	case reflect.Func:
+		return compileFunc(typ, structName, fieldName)
+	}
+	return newInvalidDecoder(typ, structName, fieldName), nil
+}
+
+func isStringTagSupportedType(typ *runtime.Type) bool {
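+	// The `json:",string"` option is only honored for scalar kinds; types with
+	// custom unmarshalers and composite kinds keep their regular decoders.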
+	switch {
+	case implementsUnmarshalJSONType(runtime.PtrTo(typ)):
+		return false
+	case runtime.PtrTo(typ).Implements(unmarshalTextType):
+		return false
+	}
+	switch typ.Kind() {
+	case reflect.Map:
+		return false
+	case reflect.Slice:
+		return false
+	case reflect.Array:
+		return false
+	case reflect.Struct:
+		return false
+	case reflect.Interface:
+		return false
+	}
+	return true
+}
+
+func compileMapKey(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
+	if runtime.PtrTo(typ).Implements(unmarshalTextType) {
+		return newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName), nil
+	}
+	if typ.Kind() == reflect.String {
+		return newStringDecoder(structName, fieldName), nil
+	}
+	dec, err := compile(typ, structName, fieldName, structTypeToDecoder)
+	if err != nil {
+		return nil, err
+	}
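+	// JSON object keys are always strings: unwrap pointer decoders and wrap
+	// scalar decoders so the quoted key can be parsed as the underlying type.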
+	for {
+		switch t := dec.(type) {
+		case *stringDecoder, *interfaceDecoder:
+			return dec, nil
+		case *boolDecoder, *intDecoder, *uintDecoder, *numberDecoder:
+			return newWrappedStringDecoder(typ, dec, structName, fieldName), nil
+		case *ptrDecoder:
+			dec = t.dec
+		default:
+			return newInvalidDecoder(typ, structName, fieldName), nil
+		}
+	}
+}
+
+func compilePtr(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
+	dec, err := compile(typ.Elem(), structName, fieldName, structTypeToDecoder)
+	if err != nil {
+		return nil, err
+	}
+	return newPtrDecoder(dec, typ.Elem(), structName, fieldName), nil
+}
+
+func compileInt(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) {
+		*(*int)(p) = int(v)
+	}), nil
+}
+
+func compileInt8(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) {
+		*(*int8)(p) = int8(v)
+	}), nil
+}
+
+func compileInt16(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) {
+		*(*int16)(p) = int16(v)
+	}), nil
+}
+
+func compileInt32(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) {
+		*(*int32)(p) = int32(v)
+	}), nil
+}
+
+func compileInt64(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) {
+		*(*int64)(p) = v
+	}), nil
+}
+
+func compileUint(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) {
+		*(*uint)(p) = uint(v)
+	}), nil
+}
+
+func compileUint8(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) {
+		*(*uint8)(p) = uint8(v)
+	}), nil
+}
+
+func compileUint16(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) {
+		*(*uint16)(p) = uint16(v)
+	}), nil
+}
+
+func compileUint32(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) {
+		*(*uint32)(p) = uint32(v)
+	}), nil
+}
+
+func compileUint64(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) {
+		*(*uint64)(p) = v
+	}), nil
+}
+
+func compileFloat32(structName, fieldName string) (Decoder, error) {
+	return newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) {
+		*(*float32)(p) = float32(v)
+	}), nil
+}
+
+func compileFloat64(structName, fieldName string) (Decoder, error) {
+	return newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) {
+		*(*float64)(p) = v
+	}), nil
+}
+
+func compileString(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	if typ == runtime.Type2RType(jsonNumberType) {
+		return newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) {
+			*(*json.Number)(p) = v
+		}), nil
+	}
+	return newStringDecoder(structName, fieldName), nil
+}
+
+func compileBool(structName, fieldName string) (Decoder, error) {
+	return newBoolDecoder(structName, fieldName), nil
+}
+
+func compileBytes(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newBytesDecoder(typ, structName, fieldName), nil
+}
+
+func compileSlice(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
+	elem := typ.Elem()
+	decoder, err := compile(elem, structName, fieldName, structTypeToDecoder)
+	if err != nil {
+		return nil, err
+	}
+	return newSliceDecoder(decoder, elem, elem.Size(), structName, fieldName), nil
+}
+
+func compileArray(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
+	elem := typ.Elem()
+	decoder, err := compile(elem, structName, fieldName, structTypeToDecoder)
+	if err != nil {
+		return nil, err
+	}
+	return newArrayDecoder(decoder, elem, typ.Len(), structName, fieldName), nil
+}
+
+func compileMap(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
+	keyDec, err := compileMapKey(typ.Key(), structName, fieldName, structTypeToDecoder)
+	if err != nil {
+		return nil, err
+	}
+	valueDec, err := compile(typ.Elem(), structName, fieldName, structTypeToDecoder)
+	if err != nil {
+		return nil, err
+	}
+	return newMapDecoder(typ, typ.Key(), keyDec, typ.Elem(), valueDec, structName, fieldName), nil
+}
+
+func compileInterface(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newInterfaceDecoder(typ, structName, fieldName), nil
+}
+
+func compileFunc(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
+	return newFuncDecoder(typ, structName, fieldName), nil
+}
+
+func typeToStructTags(typ *runtime.Type) runtime.StructTags {
+	tags := runtime.StructTags{}
+	fieldNum := typ.NumField()
+	for i := 0; i < fieldNum; i++ {
+		field := typ.Field(i)
+		if runtime.IsIgnoredStructField(field) {
+			continue
+		}
+		tags = append(tags, runtime.StructTagFromField(field))
+	}
+	return tags
+}
+
+func compileStruct(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
+	fieldNum := typ.NumField()
+	fieldMap := map[string]*structFieldSet{}
+	typeptr := uintptr(unsafe.Pointer(typ))
+	if dec, exists := structTypeToDecoder[typeptr]; exists {
+		return dec, nil
+	}
+	structDec := newStructDecoder(structName, fieldName, fieldMap)
+	structTypeToDecoder[typeptr] = structDec
+	structName = typ.Name()
+	tags := typeToStructTags(typ)
+	allFields := []*structFieldSet{}
+	for i := 0; i < fieldNum; i++ {
+		field := typ.Field(i)
+		if runtime.IsIgnoredStructField(field) {
+			continue
+		}
+		isUnexportedField := unicode.IsLower([]rune(field.Name)[0])
+		tag := runtime.StructTagFromField(field)
+		dec, err := compile(runtime.Type2RType(field.Type), structName, field.Name, structTypeToDecoder)
+		if err != nil {
+			return nil, err
+		}
+		if field.Anonymous && !tag.IsTaggedKey {
+			if stDec, ok := dec.(*structDecoder); ok {
+				if runtime.Type2RType(field.Type) == typ {
+					// recursive definition
+					continue
+				}
+				for k, v := range stDec.fieldMap {
+					if tags.ExistsKey(k) {
+						continue
+					}
+					fieldSet := &structFieldSet{
+						dec:         v.dec,
+						offset:      field.Offset + v.offset,
+						isTaggedKey: v.isTaggedKey,
+						key:         k,
+						keyLen:      int64(len(k)),
+					}
+					allFields = append(allFields, fieldSet)
+				}
+			} else if pdec, ok := dec.(*ptrDecoder); ok {
+				contentDec := pdec.contentDecoder()
+				if pdec.typ == typ {
+					// recursive definition
+					continue
+				}
+				var fieldSetErr error
+				if isUnexportedField {
+					fieldSetErr = fmt.Errorf(
+						"json: cannot set embedded pointer to unexported struct: %v",
+						field.Type.Elem(),
+					)
+				}
+				if dec, ok := contentDec.(*structDecoder); ok {
+					for k, v := range dec.fieldMap {
+						if tags.ExistsKey(k) {
+							continue
+						}
+						fieldSet := &structFieldSet{
+							dec:         newAnonymousFieldDecoder(pdec.typ, v.offset, v.dec),
+							offset:      field.Offset,
+							isTaggedKey: v.isTaggedKey,
+							key:         k,
+							keyLen:      int64(len(k)),
+							err:         fieldSetErr,
+						}
+						allFields = append(allFields, fieldSet)
+					}
+				} else {
+					fieldSet := &structFieldSet{
+						dec:         pdec,
+						offset:      field.Offset,
+						isTaggedKey: tag.IsTaggedKey,
+						key:         field.Name,
+						keyLen:      int64(len(field.Name)),
+					}
+					allFields = append(allFields, fieldSet)
+				}
+			} else {
+				fieldSet := &structFieldSet{
+					dec:         dec,
+					offset:      field.Offset,
+					isTaggedKey: tag.IsTaggedKey,
+					key:         field.Name,
+					keyLen:      int64(len(field.Name)),
+				}
+				allFields = append(allFields, fieldSet)
+			}
+		} else {
+			if tag.IsString && isStringTagSupportedType(runtime.Type2RType(field.Type)) {
+				dec = newWrappedStringDecoder(runtime.Type2RType(field.Type), dec, structName, field.Name)
+			}
+			var key string
+			if tag.Key != "" {
+				key = tag.Key
+			} else {
+				key = field.Name
+			}
+			fieldSet := &structFieldSet{
+				dec:         dec,
+				offset:      field.Offset,
+				isTaggedKey: tag.IsTaggedKey,
+				key:         key,
+				keyLen:      int64(len(key)),
+			}
+			allFields = append(allFields, fieldSet)
+		}
+	}
+	for _, set := range filterDuplicatedFields(allFields) {
+		fieldMap[set.key] = set
+		lower := strings.ToLower(set.key)
+		if _, exists := fieldMap[lower]; !exists {
+			// first win
+			fieldMap[lower] = set
+		}
+	}
+	delete(structTypeToDecoder, typeptr)
+	structDec.tryOptimize()
+	return structDec, nil
+}
+
+func filterDuplicatedFields(allFields []*structFieldSet) []*structFieldSet {
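+	// When several fields map to the same JSON key, explicitly tagged fields
+	// take precedence; keys that do not resolve to exactly one field are
+	// dropped entirely.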
+	fieldMap := map[string][]*structFieldSet{}
+	for _, field := range allFields {
+		fieldMap[field.key] = append(fieldMap[field.key], field)
+	}
+	duplicatedFieldMap := map[string]struct{}{}
+	for k, sets := range fieldMap {
+		sets = filterFieldSets(sets)
+		if len(sets) != 1 {
+			duplicatedFieldMap[k] = struct{}{}
+		}
+	}
+
+	filtered := make([]*structFieldSet, 0, len(allFields))
+	for _, field := range allFields {
+		if _, exists := duplicatedFieldMap[field.key]; exists {
+			continue
+		}
+		filtered = append(filtered, field)
+	}
+	return filtered
+}
+
+func filterFieldSets(sets []*structFieldSet) []*structFieldSet {
+	if len(sets) == 1 {
+		return sets
+	}
+	filtered := make([]*structFieldSet, 0, len(sets))
+	for _, set := range sets {
+		if set.isTaggedKey {
+			filtered = append(filtered, set)
+		}
+	}
+	return filtered
+}
+
+func implementsUnmarshalJSONType(typ *runtime.Type) bool {
+	return typ.Implements(unmarshalJSONType) || typ.Implements(unmarshalJSONContextType)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go
new file mode 100644
index 0000000000..eb7e2b1345
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go
@@ -0,0 +1,29 @@
+//go:build !race
+// +build !race
+
+package decoder
+
+import (
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) {
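+	// Fast path: types whose address falls inside the pre-analyzed range are
+	// cached in a slice indexed by address; other types fall back to the
+	// map-based slow path.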
+	typeptr := uintptr(unsafe.Pointer(typ))
+	if typeptr > typeAddr.MaxTypeAddr {
+		return compileToGetDecoderSlowPath(typeptr, typ)
+	}
+
+	index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift
+	if dec := cachedDecoder[index]; dec != nil {
+		return dec, nil
+	}
+
+	dec, err := compileHead(typ, map[uintptr]Decoder{})
+	if err != nil {
+		return nil, err
+	}
+	cachedDecoder[index] = dec
+	return dec, nil
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go
new file mode 100644
index 0000000000..49cdda4a17
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go
@@ -0,0 +1,37 @@
+//go:build race
+// +build race
+
+package decoder
+
+import (
+	"sync"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+var decMu sync.RWMutex
+
+func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) {
+	typeptr := uintptr(unsafe.Pointer(typ))
+	if typeptr > typeAddr.MaxTypeAddr {
+		return compileToGetDecoderSlowPath(typeptr, typ)
+	}
+
+	index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift
+	decMu.RLock()
+	if dec := cachedDecoder[index]; dec != nil {
+		decMu.RUnlock()
+		return dec, nil
+	}
+	decMu.RUnlock()
+
+	dec, err := compileHead(typ, map[uintptr]Decoder{})
+	if err != nil {
+		return nil, err
+	}
+	decMu.Lock()
+	cachedDecoder[index] = dec
+	decMu.Unlock()
+	return dec, nil
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/context.go b/vendor/github.com/goccy/go-json/internal/decoder/context.go
new file mode 100644
index 0000000000..cb2ffdafd0
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/context.go
@@ -0,0 +1,254 @@
+package decoder
+
+import (
+	"sync"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+)
+
+type RuntimeContext struct {
+	Buf    []byte
+	Option *Option
+}
+
+var (
+	runtimeContextPool = sync.Pool{
+		New: func() interface{} {
+			return &RuntimeContext{
+				Option: &Option{},
+			}
+		},
+	}
+)
+
+func TakeRuntimeContext() *RuntimeContext {
+	return runtimeContextPool.Get().(*RuntimeContext)
+}
+
+func ReleaseRuntimeContext(ctx *RuntimeContext) {
+	runtimeContextPool.Put(ctx)
+}
+
+var (
+	isWhiteSpace = [256]bool{}
+)
+
+func init() {
+	isWhiteSpace[' '] = true
+	isWhiteSpace['\n'] = true
+	isWhiteSpace['\t'] = true
+	isWhiteSpace['\r'] = true
+}
+
+func char(ptr unsafe.Pointer, offset int64) byte {
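+	// Read the byte at ptr+offset directly via pointer arithmetic, avoiding a
+	// slice bounds check.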
+	return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset)))
+}
+
+func skipWhiteSpace(buf []byte, cursor int64) int64 {
+	for isWhiteSpace[buf[cursor]] {
+		cursor++
+	}
+	return cursor
+}
+
+func skipObject(buf []byte, cursor, depth int64) (int64, error) {
+	braceCount := 1
+	for {
+		switch buf[cursor] {
+		case '{':
+			braceCount++
+			depth++
+			if depth > maxDecodeNestingDepth {
+				return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
+			}
+		case '}':
+			depth--
+			braceCount--
+			if braceCount == 0 {
+				return cursor + 1, nil
+			}
+		case '[':
+			depth++
+			if depth > maxDecodeNestingDepth {
+				return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
+			}
+		case ']':
+			depth--
+		case '"':
+			for {
+				cursor++
+				switch buf[cursor] {
+				case '\\':
+					cursor++
+					if buf[cursor] == nul {
+						return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor)
+					}
+				case '"':
+					goto SWITCH_OUT
+				case nul:
+					return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor)
+				}
+			}
+		case nul:
+			return 0, errors.ErrUnexpectedEndOfJSON("object of object", cursor)
+		}
+	SWITCH_OUT:
+		cursor++
+	}
+}
+
+func skipArray(buf []byte, cursor, depth int64) (int64, error) {
+	bracketCount := 1
+	for {
+		switch buf[cursor] {
+		case '[':
+			bracketCount++
+			depth++
+			if depth > maxDecodeNestingDepth {
+				return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
+			}
+		case ']':
+			bracketCount--
+			depth--
+			if bracketCount == 0 {
+				return cursor + 1, nil
+			}
+		case '{':
+			depth++
+			if depth > maxDecodeNestingDepth {
+				return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
+			}
+		case '}':
+			depth--
+		case '"':
+			for {
+				cursor++
+				switch buf[cursor] {
+				case '\\':
+					cursor++
+					if buf[cursor] == nul {
+						return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor)
+					}
+				case '"':
+					goto SWITCH_OUT
+				case nul:
+					return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor)
+				}
+			}
+		case nul:
+			return 0, errors.ErrUnexpectedEndOfJSON("array of object", cursor)
+		}
+	SWITCH_OUT:
+		cursor++
+	}
+}
+
+func skipValue(buf []byte, cursor, depth int64) (int64, error) {
+	for {
+		switch buf[cursor] {
+		case ' ', '\t', '\n', '\r':
+			cursor++
+			continue
+		case '{':
+			return skipObject(buf, cursor+1, depth+1)
+		case '[':
+			return skipArray(buf, cursor+1, depth+1)
+		case '"':
+			for {
+				cursor++
+				switch buf[cursor] {
+				case '\\':
+					cursor++
+					if buf[cursor] == nul {
+						return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor)
+					}
+				case '"':
+					return cursor + 1, nil
+				case nul:
+					return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor)
+				}
+			}
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			for {
+				cursor++
+				if floatTable[buf[cursor]] {
+					continue
+				}
+				break
+			}
+			return cursor, nil
+		case 't':
+			if err := validateTrue(buf, cursor); err != nil {
+				return 0, err
+			}
+			cursor += 4
+			return cursor, nil
+		case 'f':
+			if err := validateFalse(buf, cursor); err != nil {
+				return 0, err
+			}
+			cursor += 5
+			return cursor, nil
+		case 'n':
+			if err := validateNull(buf, cursor); err != nil {
+				return 0, err
+			}
+			cursor += 4
+			return cursor, nil
+		default:
+			return cursor, errors.ErrUnexpectedEndOfJSON("null", cursor)
+		}
+	}
+}
+
+func validateTrue(buf []byte, cursor int64) error {
+	if cursor+3 >= int64(len(buf)) {
+		return errors.ErrUnexpectedEndOfJSON("true", cursor)
+	}
+	if buf[cursor+1] != 'r' {
+		return errors.ErrInvalidCharacter(buf[cursor+1], "true", cursor)
+	}
+	if buf[cursor+2] != 'u' {
+		return errors.ErrInvalidCharacter(buf[cursor+2], "true", cursor)
+	}
+	if buf[cursor+3] != 'e' {
+		return errors.ErrInvalidCharacter(buf[cursor+3], "true", cursor)
+	}
+	return nil
+}
+
+func validateFalse(buf []byte, cursor int64) error {
+	if cursor+4 >= int64(len(buf)) {
+		return errors.ErrUnexpectedEndOfJSON("false", cursor)
+	}
+	if buf[cursor+1] != 'a' {
+		return errors.ErrInvalidCharacter(buf[cursor+1], "false", cursor)
+	}
+	if buf[cursor+2] != 'l' {
+		return errors.ErrInvalidCharacter(buf[cursor+2], "false", cursor)
+	}
+	if buf[cursor+3] != 's' {
+		return errors.ErrInvalidCharacter(buf[cursor+3], "false", cursor)
+	}
+	if buf[cursor+4] != 'e' {
+		return errors.ErrInvalidCharacter(buf[cursor+4], "false", cursor)
+	}
+	return nil
+}
+
+func validateNull(buf []byte, cursor int64) error {
+	if cursor+3 >= int64(len(buf)) {
+		return errors.ErrUnexpectedEndOfJSON("null", cursor)
+	}
+	if buf[cursor+1] != 'u' {
+		return errors.ErrInvalidCharacter(buf[cursor+1], "null", cursor)
+	}
+	if buf[cursor+2] != 'l' {
+		return errors.ErrInvalidCharacter(buf[cursor+2], "null", cursor)
+	}
+	if buf[cursor+3] != 'l' {
+		return errors.ErrInvalidCharacter(buf[cursor+3], "null", cursor)
+	}
+	return nil
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/float.go b/vendor/github.com/goccy/go-json/internal/decoder/float.go
new file mode 100644
index 0000000000..9b2eb8b35a
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/float.go
@@ -0,0 +1,170 @@
+package decoder
+
+import (
+	"strconv"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+)
+
+type floatDecoder struct {
+	op         func(unsafe.Pointer, float64)
+	structName string
+	fieldName  string
+}
+
+func newFloatDecoder(structName, fieldName string, op func(unsafe.Pointer, float64)) *floatDecoder {
+	return &floatDecoder{op: op, structName: structName, fieldName: fieldName}
+}
+
+var (
+	floatTable = [256]bool{
+		'0': true,
+		'1': true,
+		'2': true,
+		'3': true,
+		'4': true,
+		'5': true,
+		'6': true,
+		'7': true,
+		'8': true,
+		'9': true,
+		'.': true,
+		'e': true,
+		'E': true,
+		'+': true,
+		'-': true,
+	}
+
+	validEndNumberChar = [256]bool{
+		nul:  true,
+		' ':  true,
+		'\t': true,
+		'\r': true,
+		'\n': true,
+		',':  true,
+		':':  true,
+		'}':  true,
+		']':  true,
+	}
+)
+
+func floatBytes(s *Stream) []byte {
+	start := s.cursor
+	for {
+		s.cursor++
+		if floatTable[s.char()] {
+			continue
+		} else if s.char() == nul {
+			if s.read() {
+				s.cursor-- // for retry current character
+				continue
+			}
+		}
+		break
+	}
+	return s.buf[start:s.cursor]
+}
+
+func (d *floatDecoder) decodeStreamByte(s *Stream) ([]byte, error) {
+	for {
+		switch s.char() {
+		case ' ', '\n', '\t', '\r':
+			s.cursor++
+			continue
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return floatBytes(s), nil
+		case 'n':
+			if err := nullBytes(s); err != nil {
+				return nil, err
+			}
+			return nil, nil
+		case nul:
+			if s.read() {
+				continue
+			}
+			goto ERROR
+		default:
+			goto ERROR
+		}
+	}
+ERROR:
+	return nil, errors.ErrUnexpectedEndOfJSON("float", s.totalOffset())
+}
+
+func (d *floatDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) {
+	for {
+		switch buf[cursor] {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+			continue
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			start := cursor
+			cursor++
+			for floatTable[buf[cursor]] {
+				cursor++
+			}
+			num := buf[start:cursor]
+			return num, cursor, nil
+		case 'n':
+			if err := validateNull(buf, cursor); err != nil {
+				return nil, 0, err
+			}
+			cursor += 4
+			return nil, cursor, nil
+		default:
+			return nil, 0, errors.ErrUnexpectedEndOfJSON("float", cursor)
+		}
+	}
+}
+
+func (d *floatDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	bytes, err := d.decodeStreamByte(s)
+	if err != nil {
+		return err
+	}
+	if bytes == nil {
+		return nil
+	}
+	str := *(*string)(unsafe.Pointer(&bytes))
+	f64, err := strconv.ParseFloat(str, 64)
+	if err != nil {
+		return errors.ErrSyntax(err.Error(), s.totalOffset())
+	}
+	d.op(p, f64)
+	return nil
+}
+
+func (d *floatDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	buf := ctx.Buf
+	bytes, c, err := d.decodeByte(buf, cursor)
+	if err != nil {
+		return 0, err
+	}
+	if bytes == nil {
+		return c, nil
+	}
+	cursor = c
+	if !validEndNumberChar[buf[cursor]] {
+		return 0, errors.ErrUnexpectedEndOfJSON("float", cursor)
+	}
+	s := *(*string)(unsafe.Pointer(&bytes))
+	f64, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return 0, errors.ErrSyntax(err.Error(), cursor)
+	}
+	d.op(p, f64)
+	return cursor, nil
+}
+
+func (d *floatDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	buf := ctx.Buf
+	bytes, c, err := d.decodeByte(buf, cursor)
+	if err != nil {
+		return nil, 0, err
+	}
+	if bytes == nil {
+		return [][]byte{nullbytes}, c, nil
+	}
+	return [][]byte{bytes}, c, nil
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/func.go b/vendor/github.com/goccy/go-json/internal/decoder/func.go
new file mode 100644
index 0000000000..4cc12ca81f
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/func.go
@@ -0,0 +1,146 @@
+package decoder
+
+import (
+	"bytes"
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type funcDecoder struct {
+	typ        *runtime.Type
+	structName string
+	fieldName  string
+}
+
+func newFuncDecoder(typ *runtime.Type, structName, fieldName string) *funcDecoder {
+	fnDecoder := &funcDecoder{typ, structName, fieldName}
+	return fnDecoder
+}
+
+func (d *funcDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	s.skipWhiteSpace()
+	start := s.cursor
+	if err := s.skipValue(depth); err != nil {
+		return err
+	}
+	src := s.buf[start:s.cursor]
+	if len(src) > 0 {
+		switch src[0] {
+		case '"':
+			return &errors.UnmarshalTypeError{
+				Value:  "string",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: s.totalOffset(),
+			}
+		case '[':
+			return &errors.UnmarshalTypeError{
+				Value:  "array",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: s.totalOffset(),
+			}
+		case '{':
+			return &errors.UnmarshalTypeError{
+				Value:  "object",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: s.totalOffset(),
+			}
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return &errors.UnmarshalTypeError{
+				Value:  "number",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: s.totalOffset(),
+			}
+		case 'n':
+			if err := nullBytes(s); err != nil {
+				return err
+			}
+			*(*unsafe.Pointer)(p) = nil
+			return nil
+		case 't':
+			if err := trueBytes(s); err == nil {
+				return &errors.UnmarshalTypeError{
+					Value:  "boolean",
+					Type:   runtime.RType2Type(d.typ),
+					Offset: s.totalOffset(),
+				}
+			}
+		case 'f':
+			if err := falseBytes(s); err == nil {
+				return &errors.UnmarshalTypeError{
+					Value:  "boolean",
+					Type:   runtime.RType2Type(d.typ),
+					Offset: s.totalOffset(),
+				}
+			}
+		}
+	}
+	return errors.ErrInvalidBeginningOfValue(s.buf[s.cursor], s.totalOffset())
+}
+
+func (d *funcDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	buf := ctx.Buf
+	cursor = skipWhiteSpace(buf, cursor)
+	start := cursor
+	end, err := skipValue(buf, cursor, depth)
+	if err != nil {
+		return 0, err
+	}
+	src := buf[start:end]
+	if len(src) > 0 {
+		switch src[0] {
+		case '"':
+			return 0, &errors.UnmarshalTypeError{
+				Value:  "string",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: start,
+			}
+		case '[':
+			return 0, &errors.UnmarshalTypeError{
+				Value:  "array",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: start,
+			}
+		case '{':
+			return 0, &errors.UnmarshalTypeError{
+				Value:  "object",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: start,
+			}
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return 0, &errors.UnmarshalTypeError{
+				Value:  "number",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: start,
+			}
+		case 'n':
+			if bytes.Equal(src, nullbytes) {
+				*(*unsafe.Pointer)(p) = nil
+				return end, nil
+			}
+		case 't':
+			if err := validateTrue(buf, start); err == nil {
+				return 0, &errors.UnmarshalTypeError{
+					Value:  "boolean",
+					Type:   runtime.RType2Type(d.typ),
+					Offset: start,
+				}
+			}
+		case 'f':
+			if err := validateFalse(buf, start); err == nil {
+				return 0, &errors.UnmarshalTypeError{
+					Value:  "boolean",
+					Type:   runtime.RType2Type(d.typ),
+					Offset: start,
+				}
+			}
+		}
+	}
+	return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor)
+}
+
+func (d *funcDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return nil, 0, fmt.Errorf("json: func decoder does not support decode path")
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/int.go b/vendor/github.com/goccy/go-json/internal/decoder/int.go
new file mode 100644
index 0000000000..1a7f081994
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/int.go
@@ -0,0 +1,246 @@
+package decoder
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type intDecoder struct {
+	typ        *runtime.Type
+	kind       reflect.Kind
+	op         func(unsafe.Pointer, int64)
+	structName string
+	fieldName  string
+}
+
+func newIntDecoder(typ *runtime.Type, structName, fieldName string, op func(unsafe.Pointer, int64)) *intDecoder {
+	return &intDecoder{
+		typ:        typ,
+		kind:       typ.Kind(),
+		op:         op,
+		structName: structName,
+		fieldName:  fieldName,
+	}
+}
+
+func (d *intDecoder) typeError(buf []byte, offset int64) *errors.UnmarshalTypeError {
+	return &errors.UnmarshalTypeError{
+		Value:  fmt.Sprintf("number %s", string(buf)),
+		Type:   runtime.RType2Type(d.typ),
+		Struct: d.structName,
+		Field:  d.fieldName,
+		Offset: offset,
+	}
+}
+
+var (
+	pow10i64 = [...]int64{
+		1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
+		1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18,
+	}
+	pow10i64Len = len(pow10i64)
+)
+
+func (d *intDecoder) parseInt(b []byte) (int64, error) {
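+	// Parse an ASCII decimal using a power-of-ten lookup table instead of
+	// strconv; inputs longer than the table (19 digits) are rejected.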
+	isNegative := false
+	if b[0] == '-' {
+		b = b[1:]
+		isNegative = true
+	}
+	maxDigit := len(b)
+	if maxDigit > pow10i64Len {
+		return 0, fmt.Errorf("invalid length of number")
+	}
+	sum := int64(0)
+	for i := 0; i < maxDigit; i++ {
+		c := int64(b[i]) - 48
+		digitValue := pow10i64[maxDigit-i-1]
+		sum += c * digitValue
+	}
+	if isNegative {
+		return -1 * sum, nil
+	}
+	return sum, nil
+}
+
+var (
+	numTable = [256]bool{
+		'0': true,
+		'1': true,
+		'2': true,
+		'3': true,
+		'4': true,
+		'5': true,
+		'6': true,
+		'7': true,
+		'8': true,
+		'9': true,
+	}
+)
+
+var (
+	numZeroBuf = []byte{'0'}
+)
+
+func (d *intDecoder) decodeStreamByte(s *Stream) ([]byte, error) {
+	for {
+		switch s.char() {
+		case ' ', '\n', '\t', '\r':
+			s.cursor++
+			continue
+		case '-':
+			start := s.cursor
+			for {
+				s.cursor++
+				if numTable[s.char()] {
+					continue
+				} else if s.char() == nul {
+					if s.read() {
+						s.cursor-- // for retry current character
+						continue
+					}
+				}
+				break
+			}
+			num := s.buf[start:s.cursor]
+			if len(num) < 2 {
+				goto ERROR
+			}
+			return num, nil
+		case '0':
+			s.cursor++
+			return numZeroBuf, nil
+		case '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			start := s.cursor
+			for {
+				s.cursor++
+				if numTable[s.char()] {
+					continue
+				} else if s.char() == nul {
+					if s.read() {
+						s.cursor-- // for retry current character
+						continue
+					}
+				}
+				break
+			}
+			num := s.buf[start:s.cursor]
+			return num, nil
+		case 'n':
+			if err := nullBytes(s); err != nil {
+				return nil, err
+			}
+			return nil, nil
+		case nul:
+			if s.read() {
+				continue
+			}
+			goto ERROR
+		default:
+			return nil, d.typeError([]byte{s.char()}, s.totalOffset())
+		}
+	}
+ERROR:
+	return nil, errors.ErrUnexpectedEndOfJSON("number(integer)", s.totalOffset())
+}
+
+func (d *intDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) {
+	b := (*sliceHeader)(unsafe.Pointer(&buf)).data
+	for {
+		switch char(b, cursor) {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+			continue
+		case '0':
+			cursor++
+			return numZeroBuf, cursor, nil
+		case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			start := cursor
+			cursor++
+			for numTable[char(b, cursor)] {
+				cursor++
+			}
+			num := buf[start:cursor]
+			return num, cursor, nil
+		case 'n':
+			if err := validateNull(buf, cursor); err != nil {
+				return nil, 0, err
+			}
+			cursor += 4
+			return nil, cursor, nil
+		default:
+			return nil, 0, d.typeError([]byte{char(b, cursor)}, cursor)
+		}
+	}
+}
+
+func (d *intDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	bytes, err := d.decodeStreamByte(s)
+	if err != nil {
+		return err
+	}
+	if bytes == nil {
+		return nil
+	}
+	i64, err := d.parseInt(bytes)
+	if err != nil {
+		return d.typeError(bytes, s.totalOffset())
+	}
+	switch d.kind {
+	case reflect.Int8:
+		if i64 < -1*(1<<7) || (1<<7) <= i64 {
+			return d.typeError(bytes, s.totalOffset())
+		}
+	case reflect.Int16:
+		if i64 < -1*(1<<15) || (1<<15) <= i64 {
+			return d.typeError(bytes, s.totalOffset())
+		}
+	case reflect.Int32:
+		if i64 < -1*(1<<31) || (1<<31) <= i64 {
+			return d.typeError(bytes, s.totalOffset())
+		}
+	}
+	d.op(p, i64)
+	s.reset()
+	return nil
+}
+
+func (d *intDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	bytes, c, err := d.decodeByte(ctx.Buf, cursor)
+	if err != nil {
+		return 0, err
+	}
+	if bytes == nil {
+		return c, nil
+	}
+	cursor = c
+
+	i64, err := d.parseInt(bytes)
+	if err != nil {
+		return 0, d.typeError(bytes, cursor)
+	}
+	switch d.kind {
+	case reflect.Int8:
+		if i64 < -1*(1<<7) || (1<<7) <= i64 {
+			return 0, d.typeError(bytes, cursor)
+		}
+	case reflect.Int16:
+		if i64 < -1*(1<<15) || (1<<15) <= i64 {
+			return 0, d.typeError(bytes, cursor)
+		}
+	case reflect.Int32:
+		if i64 < -1*(1<<31) || (1<<31) <= i64 {
+			return 0, d.typeError(bytes, cursor)
+		}
+	}
+	d.op(p, i64)
+	return cursor, nil
+}
+
+func (d *intDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return nil, 0, fmt.Errorf("json: int decoder does not support decode path")
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/interface.go b/vendor/github.com/goccy/go-json/internal/decoder/interface.go
new file mode 100644
index 0000000000..45c69ab8c7
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/interface.go
@@ -0,0 +1,528 @@
+package decoder
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"reflect"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type interfaceDecoder struct {
+	typ           *runtime.Type
+	structName    string
+	fieldName     string
+	sliceDecoder  *sliceDecoder
+	mapDecoder    *mapDecoder
+	floatDecoder  *floatDecoder
+	numberDecoder *numberDecoder
+	stringDecoder *stringDecoder
+}
+
+func newEmptyInterfaceDecoder(structName, fieldName string) *interfaceDecoder {
+	ifaceDecoder := &interfaceDecoder{
+		typ:        emptyInterfaceType,
+		structName: structName,
+		fieldName:  fieldName,
+		floatDecoder: newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) {
+			*(*interface{})(p) = v
+		}),
+		numberDecoder: newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) {
+			*(*interface{})(p) = v
+		}),
+		stringDecoder: newStringDecoder(structName, fieldName),
+	}
+	ifaceDecoder.sliceDecoder = newSliceDecoder(
+		ifaceDecoder,
+		emptyInterfaceType,
+		emptyInterfaceType.Size(),
+		structName, fieldName,
+	)
+	ifaceDecoder.mapDecoder = newMapDecoder(
+		interfaceMapType,
+		stringType,
+		ifaceDecoder.stringDecoder,
+		interfaceMapType.Elem(),
+		ifaceDecoder,
+		structName,
+		fieldName,
+	)
+	return ifaceDecoder
+}
+
+func newInterfaceDecoder(typ *runtime.Type, structName, fieldName string) *interfaceDecoder {
+	emptyIfaceDecoder := newEmptyInterfaceDecoder(structName, fieldName)
+	stringDecoder := newStringDecoder(structName, fieldName)
+	return &interfaceDecoder{
+		typ:        typ,
+		structName: structName,
+		fieldName:  fieldName,
+		sliceDecoder: newSliceDecoder(
+			emptyIfaceDecoder,
+			emptyInterfaceType,
+			emptyInterfaceType.Size(),
+			structName, fieldName,
+		),
+		mapDecoder: newMapDecoder(
+			interfaceMapType,
+			stringType,
+			stringDecoder,
+			interfaceMapType.Elem(),
+			emptyIfaceDecoder,
+			structName,
+			fieldName,
+		),
+		floatDecoder: newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) {
+			*(*interface{})(p) = v
+		}),
+		numberDecoder: newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) {
+			*(*interface{})(p) = v
+		}),
+		stringDecoder: stringDecoder,
+	}
+}
+
+func (d *interfaceDecoder) numDecoder(s *Stream) Decoder {
+	if s.UseNumber {
+		return d.numberDecoder
+	}
+	return d.floatDecoder
+}
+
+var (
+	emptyInterfaceType = runtime.Type2RType(reflect.TypeOf((*interface{})(nil)).Elem())
+	EmptyInterfaceType = emptyInterfaceType
+	interfaceMapType   = runtime.Type2RType(
+		reflect.TypeOf((*map[string]interface{})(nil)).Elem(),
+	)
+	stringType = runtime.Type2RType(
+		reflect.TypeOf(""),
+	)
+)
+
+func decodeStreamUnmarshaler(s *Stream, depth int64, unmarshaler json.Unmarshaler) error {
+	start := s.cursor
+	if err := s.skipValue(depth); err != nil {
+		return err
+	}
+	src := s.buf[start:s.cursor]
+	dst := make([]byte, len(src))
+	copy(dst, src)
+
+	if err := unmarshaler.UnmarshalJSON(dst); err != nil {
+		return err
+	}
+	return nil
+}
+
+func decodeStreamUnmarshalerContext(s *Stream, depth int64, unmarshaler unmarshalerContext) error {
+	start := s.cursor
+	if err := s.skipValue(depth); err != nil {
+		return err
+	}
+	src := s.buf[start:s.cursor]
+	dst := make([]byte, len(src))
+	copy(dst, src)
+
+	if err := unmarshaler.UnmarshalJSON(s.Option.Context, dst); err != nil {
+		return err
+	}
+	return nil
+}
+
+func decodeUnmarshaler(buf []byte, cursor, depth int64, unmarshaler json.Unmarshaler) (int64, error) {
+	cursor = skipWhiteSpace(buf, cursor)
+	start := cursor
+	end, err := skipValue(buf, cursor, depth)
+	if err != nil {
+		return 0, err
+	}
+	src := buf[start:end]
+	dst := make([]byte, len(src))
+	copy(dst, src)
+
+	if err := unmarshaler.UnmarshalJSON(dst); err != nil {
+		return 0, err
+	}
+	return end, nil
+}
+
+func decodeUnmarshalerContext(ctx *RuntimeContext, buf []byte, cursor, depth int64, unmarshaler unmarshalerContext) (int64, error) {
+	cursor = skipWhiteSpace(buf, cursor)
+	start := cursor
+	end, err := skipValue(buf, cursor, depth)
+	if err != nil {
+		return 0, err
+	}
+	src := buf[start:end]
+	dst := make([]byte, len(src))
+	copy(dst, src)
+
+	if err := unmarshaler.UnmarshalJSON(ctx.Option.Context, dst); err != nil {
+		return 0, err
+	}
+	return end, nil
+}
+
+func decodeStreamTextUnmarshaler(s *Stream, depth int64, unmarshaler encoding.TextUnmarshaler, p unsafe.Pointer) error {
+	start := s.cursor
+	if err := s.skipValue(depth); err != nil {
+		return err
+	}
+	src := s.buf[start:s.cursor]
+	if bytes.Equal(src, nullbytes) {
+		*(*unsafe.Pointer)(p) = nil
+		return nil
+	}
+
+	dst := make([]byte, len(src))
+	copy(dst, src)
+
+	if err := unmarshaler.UnmarshalText(dst); err != nil {
+		return err
+	}
+	return nil
+}
+
+func decodeTextUnmarshaler(buf []byte, cursor, depth int64, unmarshaler encoding.TextUnmarshaler, p unsafe.Pointer) (int64, error) {
+	cursor = skipWhiteSpace(buf, cursor)
+	start := cursor
+	end, err := skipValue(buf, cursor, depth)
+	if err != nil {
+		return 0, err
+	}
+	src := buf[start:end]
+	if bytes.Equal(src, nullbytes) {
+		*(*unsafe.Pointer)(p) = nil
+		return end, nil
+	}
+	if s, ok := unquoteBytes(src); ok {
+		src = s
+	}
+	if err := unmarshaler.UnmarshalText(src); err != nil {
+		return 0, err
+	}
+	return end, nil
+}
+
+func (d *interfaceDecoder) decodeStreamEmptyInterface(s *Stream, depth int64, p unsafe.Pointer) error {
+	c := s.skipWhiteSpace()
+	for {
+		switch c {
+		case '{':
+			var v map[string]interface{}
+			ptr := unsafe.Pointer(&v)
+			if err := d.mapDecoder.DecodeStream(s, depth, ptr); err != nil {
+				return err
+			}
+			*(*interface{})(p) = v
+			return nil
+		case '[':
+			var v []interface{}
+			ptr := unsafe.Pointer(&v)
+			if err := d.sliceDecoder.DecodeStream(s, depth, ptr); err != nil {
+				return err
+			}
+			*(*interface{})(p) = v
+			return nil
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return d.numDecoder(s).DecodeStream(s, depth, p)
+		case '"':
+			s.cursor++
+			start := s.cursor
+			for {
+				switch s.char() {
+				case '\\':
+					if _, err := decodeEscapeString(s, nil); err != nil {
+						return err
+					}
+				case '"':
+					literal := s.buf[start:s.cursor]
+					s.cursor++
+					*(*interface{})(p) = string(literal)
+					return nil
+				case nul:
+					if s.read() {
+						continue
+					}
+					return errors.ErrUnexpectedEndOfJSON("string", s.totalOffset())
+				}
+				s.cursor++
+			}
+		case 't':
+			if err := trueBytes(s); err != nil {
+				return err
+			}
+			**(**interface{})(unsafe.Pointer(&p)) = true
+			return nil
+		case 'f':
+			if err := falseBytes(s); err != nil {
+				return err
+			}
+			**(**interface{})(unsafe.Pointer(&p)) = false
+			return nil
+		case 'n':
+			if err := nullBytes(s); err != nil {
+				return err
+			}
+			*(*interface{})(p) = nil
+			return nil
+		case nul:
+			if s.read() {
+				c = s.char()
+				continue
+			}
+		}
+		break
+	}
+	return errors.ErrInvalidBeginningOfValue(c, s.totalOffset())
+}
+
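+// emptyInterface mirrors the runtime layout of interface{} (a type pointer and
+// a data pointer), so an interface value can be assembled from a *runtime.Type
+// and an unsafe.Pointer.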
+type emptyInterface struct {
+	typ *runtime.Type
+	ptr unsafe.Pointer
+}
+
+func (d *interfaceDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	runtimeInterfaceValue := *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: d.typ,
+		ptr: p,
+	}))
+	rv := reflect.ValueOf(runtimeInterfaceValue)
+	if rv.NumMethod() > 0 && rv.CanInterface() {
+		if u, ok := rv.Interface().(unmarshalerContext); ok {
+			return decodeStreamUnmarshalerContext(s, depth, u)
+		}
+		if u, ok := rv.Interface().(json.Unmarshaler); ok {
+			return decodeStreamUnmarshaler(s, depth, u)
+		}
+		if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+			return decodeStreamTextUnmarshaler(s, depth, u, p)
+		}
+		if s.skipWhiteSpace() == 'n' {
+			if err := nullBytes(s); err != nil {
+				return err
+			}
+			*(*interface{})(p) = nil
+			return nil
+		}
+		return d.errUnmarshalType(rv.Type(), s.totalOffset())
+	}
+	iface := rv.Interface()
+	ifaceHeader := (*emptyInterface)(unsafe.Pointer(&iface))
+	typ := ifaceHeader.typ
+	if ifaceHeader.ptr == nil || d.typ == typ || typ == nil {
+		// concrete type is empty interface
+		return d.decodeStreamEmptyInterface(s, depth, p)
+	}
+	if typ.Kind() == reflect.Ptr && typ.Elem() == d.typ || typ.Kind() != reflect.Ptr {
+		return d.decodeStreamEmptyInterface(s, depth, p)
+	}
+	if s.skipWhiteSpace() == 'n' {
+		if err := nullBytes(s); err != nil {
+			return err
+		}
+		*(*interface{})(p) = nil
+		return nil
+	}
+	decoder, err := CompileToGetDecoder(typ)
+	if err != nil {
+		return err
+	}
+	return decoder.DecodeStream(s, depth, ifaceHeader.ptr)
+}
+
+func (d *interfaceDecoder) errUnmarshalType(typ reflect.Type, offset int64) *errors.UnmarshalTypeError {
+	return &errors.UnmarshalTypeError{
+		Value:  typ.String(),
+		Type:   typ,
+		Offset: offset,
+		Struct: d.structName,
+		Field:  d.fieldName,
+	}
+}
+
+func (d *interfaceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	buf := ctx.Buf
+	runtimeInterfaceValue := *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: d.typ,
+		ptr: p,
+	}))
+	rv := reflect.ValueOf(runtimeInterfaceValue)
+	if rv.NumMethod() > 0 && rv.CanInterface() {
+		if u, ok := rv.Interface().(unmarshalerContext); ok {
+			return decodeUnmarshalerContext(ctx, buf, cursor, depth, u)
+		}
+		if u, ok := rv.Interface().(json.Unmarshaler); ok {
+			return decodeUnmarshaler(buf, cursor, depth, u)
+		}
+		if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+			return decodeTextUnmarshaler(buf, cursor, depth, u, p)
+		}
+		cursor = skipWhiteSpace(buf, cursor)
+		if buf[cursor] == 'n' {
+			if err := validateNull(buf, cursor); err != nil {
+				return 0, err
+			}
+			cursor += 4
+			**(**interface{})(unsafe.Pointer(&p)) = nil
+			return cursor, nil
+		}
+		return 0, d.errUnmarshalType(rv.Type(), cursor)
+	}
+
+	iface := rv.Interface()
+	ifaceHeader := (*emptyInterface)(unsafe.Pointer(&iface))
+	typ := ifaceHeader.typ
+	if ifaceHeader.ptr == nil || d.typ == typ || typ == nil {
+		// concrete type is empty interface
+		return d.decodeEmptyInterface(ctx, cursor, depth, p)
+	}
+	if typ.Kind() == reflect.Ptr && typ.Elem() == d.typ || typ.Kind() != reflect.Ptr {
+		return d.decodeEmptyInterface(ctx, cursor, depth, p)
+	}
+	cursor = skipWhiteSpace(buf, cursor)
+	if buf[cursor] == 'n' {
+		if err := validateNull(buf, cursor); err != nil {
+			return 0, err
+		}
+		cursor += 4
+		**(**interface{})(unsafe.Pointer(&p)) = nil
+		return cursor, nil
+	}
+	decoder, err := CompileToGetDecoder(typ)
+	if err != nil {
+		return 0, err
+	}
+	return decoder.Decode(ctx, cursor, depth, ifaceHeader.ptr)
+}
+
+func (d *interfaceDecoder) decodeEmptyInterface(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	buf := ctx.Buf
+	cursor = skipWhiteSpace(buf, cursor)
+	switch buf[cursor] {
+	case '{':
+		var v map[string]interface{}
+		ptr := unsafe.Pointer(&v)
+		cursor, err := d.mapDecoder.Decode(ctx, cursor, depth, ptr)
+		if err != nil {
+			return 0, err
+		}
+		**(**interface{})(unsafe.Pointer(&p)) = v
+		return cursor, nil
+	case '[':
+		var v []interface{}
+		ptr := unsafe.Pointer(&v)
+		cursor, err := d.sliceDecoder.Decode(ctx, cursor, depth, ptr)
+		if err != nil {
+			return 0, err
+		}
+		**(**interface{})(unsafe.Pointer(&p)) = v
+		return cursor, nil
+	case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		return d.floatDecoder.Decode(ctx, cursor, depth, p)
+	case '"':
+		var v string
+		ptr := unsafe.Pointer(&v)
+		cursor, err := d.stringDecoder.Decode(ctx, cursor, depth, ptr)
+		if err != nil {
+			return 0, err
+		}
+		**(**interface{})(unsafe.Pointer(&p)) = v
+		return cursor, nil
+	case 't':
+		if err := validateTrue(buf, cursor); err != nil {
+			return 0, err
+		}
+		cursor += 4
+		**(**interface{})(unsafe.Pointer(&p)) = true
+		return cursor, nil
+	case 'f':
+		if err := validateFalse(buf, cursor); err != nil {
+			return 0, err
+		}
+		cursor += 5
+		**(**interface{})(unsafe.Pointer(&p)) = false
+		return cursor, nil
+	case 'n':
+		if err := validateNull(buf, cursor); err != nil {
+			return 0, err
+		}
+		cursor += 4
+		**(**interface{})(unsafe.Pointer(&p)) = nil
+		return cursor, nil
+	}
+	return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor)
+}
+
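+// NewPathDecoder returns a Decoder for empty-interface values, wiring up the
+// float, number, string, slice and map sub-decoders so that arbitrary JSON
+// values can be decoded or traversed via DecodePath.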
+func NewPathDecoder() Decoder {
+	ifaceDecoder := &interfaceDecoder{
+		typ:        emptyInterfaceType,
+		structName: "",
+		fieldName:  "",
+		floatDecoder: newFloatDecoder("", "", func(p unsafe.Pointer, v float64) {
+			*(*interface{})(p) = v
+		}),
+		numberDecoder: newNumberDecoder("", "", func(p unsafe.Pointer, v json.Number) {
+			*(*interface{})(p) = v
+		}),
+		stringDecoder: newStringDecoder("", ""),
+	}
+	ifaceDecoder.sliceDecoder = newSliceDecoder(
+		ifaceDecoder,
+		emptyInterfaceType,
+		emptyInterfaceType.Size(),
+		"", "",
+	)
+	ifaceDecoder.mapDecoder = newMapDecoder(
+		interfaceMapType,
+		stringType,
+		ifaceDecoder.stringDecoder,
+		interfaceMapType.Elem(),
+		ifaceDecoder,
+		"", "",
+	)
+	return ifaceDecoder
+}
+
+var (
+	truebytes  = []byte("true")
+	falsebytes = []byte("false")
+)
+
+func (d *interfaceDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	buf := ctx.Buf
+	cursor = skipWhiteSpace(buf, cursor)
+	switch buf[cursor] {
+	case '{':
+		return d.mapDecoder.DecodePath(ctx, cursor, depth)
+	case '[':
+		return d.sliceDecoder.DecodePath(ctx, cursor, depth)
+	case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		return d.floatDecoder.DecodePath(ctx, cursor, depth)
+	case '"':
+		return d.stringDecoder.DecodePath(ctx, cursor, depth)
+	case 't':
+		if err := validateTrue(buf, cursor); err != nil {
+			return nil, 0, err
+		}
+		cursor += 4
+		return [][]byte{truebytes}, cursor, nil
+	case 'f':
+		if err := validateFalse(buf, cursor); err != nil {
+			return nil, 0, err
+		}
+		cursor += 5
+		return [][]byte{falsebytes}, cursor, nil
+	case 'n':
+		if err := validateNull(buf, cursor); err != nil {
+			return nil, 0, err
+		}
+		cursor += 4
+		return [][]byte{nullbytes}, cursor, nil
+	}
+	return nil, cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/invalid.go b/vendor/github.com/goccy/go-json/internal/decoder/invalid.go
new file mode 100644
index 0000000000..4c9721b098
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/invalid.go
@@ -0,0 +1,55 @@
+package decoder
+
+import (
+	"reflect"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type invalidDecoder struct {
+	typ        *runtime.Type
+	kind       reflect.Kind
+	structName string
+	fieldName  string
+}
+
+func newInvalidDecoder(typ *runtime.Type, structName, fieldName string) *invalidDecoder {
+	return &invalidDecoder{
+		typ:        typ,
+		kind:       typ.Kind(),
+		structName: structName,
+		fieldName:  fieldName,
+	}
+}
+
+func (d *invalidDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	return &errors.UnmarshalTypeError{
+		Value:  "object",
+		Type:   runtime.RType2Type(d.typ),
+		Offset: s.totalOffset(),
+		Struct: d.structName,
+		Field:  d.fieldName,
+	}
+}
+
+func (d *invalidDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	return 0, &errors.UnmarshalTypeError{
+		Value:  "object",
+		Type:   runtime.RType2Type(d.typ),
+		Offset: cursor,
+		Struct: d.structName,
+		Field:  d.fieldName,
+	}
+}
+
+func (d *invalidDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return nil, 0, &errors.UnmarshalTypeError{
+		Value:  "object",
+		Type:   runtime.RType2Type(d.typ),
+		Offset: cursor,
+		Struct: d.structName,
+		Field:  d.fieldName,
+	}
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/map.go b/vendor/github.com/goccy/go-json/internal/decoder/map.go
new file mode 100644
index 0000000000..07a9caea65
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/map.go
@@ -0,0 +1,280 @@
+package decoder
+
+import (
+	"reflect"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type mapDecoder struct {
+	mapType                 *runtime.Type
+	keyType                 *runtime.Type
+	valueType               *runtime.Type
+	canUseAssignFaststrType bool
+	keyDecoder              Decoder
+	valueDecoder            Decoder
+	structName              string
+	fieldName               string
+}
+
+func newMapDecoder(mapType *runtime.Type, keyType *runtime.Type, keyDec Decoder, valueType *runtime.Type, valueDec Decoder, structName, fieldName string) *mapDecoder {
+	return &mapDecoder{
+		mapType:                 mapType,
+		keyDecoder:              keyDec,
+		keyType:                 keyType,
+		canUseAssignFaststrType: canUseAssignFaststrType(keyType, valueType),
+		valueType:               valueType,
+		valueDecoder:            valueDec,
+		structName:              structName,
+		fieldName:               fieldName,
+	}
+}
+
+const (
+	mapMaxElemSize = 128
+)
+
+// See detail: https://github.com/goccy/go-json/pull/283
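+//
+// The mapassign_faststr fast path is only safe when the map key is a string
+// and the element is small enough (<= mapMaxElemSize bytes) to be stored
+// inline in the map bucket rather than behind a pointer.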
+func canUseAssignFaststrType(key *runtime.Type, value *runtime.Type) bool {
+	indirectElem := value.Size() > mapMaxElemSize
+	if indirectElem {
+		return false
+	}
+	return key.Kind() == reflect.String
+}
+
+//go:linkname makemap reflect.makemap
+func makemap(*runtime.Type, int) unsafe.Pointer
+
+//nolint:golint
+//go:linkname mapassign_faststr runtime.mapassign_faststr
+//go:noescape
+func mapassign_faststr(t *runtime.Type, m unsafe.Pointer, s string) unsafe.Pointer
+
+//go:linkname mapassign reflect.mapassign
+//go:noescape
+func mapassign(t *runtime.Type, m unsafe.Pointer, k, v unsafe.Pointer)
+
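+// mapassign stores the key/value pair into the map m, using the runtime's
+// string fast path when canUseAssignFaststrType determined it is safe.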
+func (d *mapDecoder) mapassign(t *runtime.Type, m, k, v unsafe.Pointer) {
+	if d.canUseAssignFaststrType {
+		mapV := mapassign_faststr(t, m, *(*string)(k))
+		typedmemmove(d.valueType, mapV, v)
+	} else {
+		mapassign(t, m, k, v)
+	}
+}
+
+func (d *mapDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	depth++
+	if depth > maxDecodeNestingDepth {
+		return errors.ErrExceededMaxDepth(s.char(), s.cursor)
+	}
+
+	switch s.skipWhiteSpace() {
+	case 'n':
+		if err := nullBytes(s); err != nil {
+			return err
+		}
+		**(**unsafe.Pointer)(unsafe.Pointer(&p)) = nil
+		return nil
+	case '{':
+	default:
+		return errors.ErrExpected("{ character for map value", s.totalOffset())
+	}
+	mapValue := *(*unsafe.Pointer)(p)
+	if mapValue == nil {
+		mapValue = makemap(d.mapType, 0)
+	}
+	s.cursor++
+	if s.skipWhiteSpace() == '}' {
+		*(*unsafe.Pointer)(p) = mapValue
+		s.cursor++
+		return nil
+	}
+	for {
+		k := unsafe_New(d.keyType)
+		if err := d.keyDecoder.DecodeStream(s, depth, k); err != nil {
+			return err
+		}
+		s.skipWhiteSpace()
+		if !s.equalChar(':') {
+			return errors.ErrExpected("colon after object key", s.totalOffset())
+		}
+		s.cursor++
+		v := unsafe_New(d.valueType)
+		if err := d.valueDecoder.DecodeStream(s, depth, v); err != nil {
+			return err
+		}
+		d.mapassign(d.mapType, mapValue, k, v)
+		s.skipWhiteSpace()
+		if s.equalChar('}') {
+			**(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue
+			s.cursor++
+			return nil
+		}
+		if !s.equalChar(',') {
+			return errors.ErrExpected("comma after object value", s.totalOffset())
+		}
+		s.cursor++
+	}
+}
+
+func (d *mapDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	buf := ctx.Buf
+	depth++
+	if depth > maxDecodeNestingDepth {
+		return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
+	}
+
+	cursor = skipWhiteSpace(buf, cursor)
+	buflen := int64(len(buf))
+	if buflen < 2 {
+		return 0, errors.ErrExpected("{} for map", cursor)
+	}
+	switch buf[cursor] {
+	case 'n':
+		if err := validateNull(buf, cursor); err != nil {
+			return 0, err
+		}
+		cursor += 4
+		**(**unsafe.Pointer)(unsafe.Pointer(&p)) = nil
+		return cursor, nil
+	case '{':
+	default:
+		return 0, errors.ErrExpected("{ character for map value", cursor)
+	}
+	cursor++
+	cursor = skipWhiteSpace(buf, cursor)
+	mapValue := *(*unsafe.Pointer)(p)
+	if mapValue == nil {
+		mapValue = makemap(d.mapType, 0)
+	}
+	if buf[cursor] == '}' {
+		**(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue
+		cursor++
+		return cursor, nil
+	}
+	for {
+		k := unsafe_New(d.keyType)
+		keyCursor, err := d.keyDecoder.Decode(ctx, cursor, depth, k)
+		if err != nil {
+			return 0, err
+		}
+		cursor = skipWhiteSpace(buf, keyCursor)
+		if buf[cursor] != ':' {
+			return 0, errors.ErrExpected("colon after object key", cursor)
+		}
+		cursor++
+		v := unsafe_New(d.valueType)
+		valueCursor, err := d.valueDecoder.Decode(ctx, cursor, depth, v)
+		if err != nil {
+			return 0, err
+		}
+		d.mapassign(d.mapType, mapValue, k, v)
+		cursor = skipWhiteSpace(buf, valueCursor)
+		if buf[cursor] == '}' {
+			**(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue
+			cursor++
+			return cursor, nil
+		}
+		if buf[cursor] != ',' {
+			return 0, errors.ErrExpected("comma after object value", cursor)
+		}
+		cursor++
+	}
+}
+
+func (d *mapDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	buf := ctx.Buf
+	depth++
+	if depth > maxDecodeNestingDepth {
+		return nil, 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
+	}
+
+	cursor = skipWhiteSpace(buf, cursor)
+	buflen := int64(len(buf))
+	if buflen < 2 {
+		return nil, 0, errors.ErrExpected("{} for map", cursor)
+	}
+	switch buf[cursor] {
+	case 'n':
+		if err := validateNull(buf, cursor); err != nil {
+			return nil, 0, err
+		}
+		cursor += 4
+		return [][]byte{nullbytes}, cursor, nil
+	case '{':
+	default:
+		return nil, 0, errors.ErrExpected("{ character for map value", cursor)
+	}
+	cursor++
+	cursor = skipWhiteSpace(buf, cursor)
+	if buf[cursor] == '}' {
+		cursor++
+		return nil, cursor, nil
+	}
+	keyDecoder, ok := d.keyDecoder.(*stringDecoder)
+	if !ok {
+		return nil, 0, &errors.UnmarshalTypeError{
+			Value:  "string",
+			Type:   reflect.TypeOf(""),
+			Offset: cursor,
+			Struct: d.structName,
+			Field:  d.fieldName,
+		}
+	}
+	ret := [][]byte{}
+	for {
+		key, keyCursor, err := keyDecoder.decodeByte(buf, cursor)
+		if err != nil {
+			return nil, 0, err
+		}
+		cursor = skipWhiteSpace(buf, keyCursor)
+		if buf[cursor] != ':' {
+			return nil, 0, errors.ErrExpected("colon after object key", cursor)
+		}
+		cursor++
+		child, found, err := ctx.Option.Path.Field(string(key))
+		if err != nil {
+			return nil, 0, err
+		}
+		if found {
+			if child != nil {
+				oldPath := ctx.Option.Path.node
+				ctx.Option.Path.node = child
+				paths, c, err := d.valueDecoder.DecodePath(ctx, cursor, depth)
+				if err != nil {
+					return nil, 0, err
+				}
+				ctx.Option.Path.node = oldPath
+				ret = append(ret, paths...)
+				cursor = c
+			} else {
+				start := cursor
+				end, err := skipValue(buf, cursor, depth)
+				if err != nil {
+					return nil, 0, err
+				}
+				ret = append(ret, buf[start:end])
+				cursor = end
+			}
+		} else {
+			c, err := skipValue(buf, cursor, depth)
+			if err != nil {
+				return nil, 0, err
+			}
+			cursor = c
+		}
+		cursor = skipWhiteSpace(buf, cursor)
+		if buf[cursor] == '}' {
+			cursor++
+			return ret, cursor, nil
+		}
+		if buf[cursor] != ',' {
+			return nil, 0, errors.ErrExpected("comma after object value", cursor)
+		}
+		cursor++
+	}
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/number.go b/vendor/github.com/goccy/go-json/internal/decoder/number.go
new file mode 100644
index 0000000000..10e5435e6c
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/number.go
@@ -0,0 +1,123 @@
+package decoder
+
+import (
+	"encoding/json"
+	"strconv"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+)
+
+type numberDecoder struct {
+	stringDecoder *stringDecoder
+	op            func(unsafe.Pointer, json.Number)
+	structName    string
+	fieldName     string
+}
+
+func newNumberDecoder(structName, fieldName string, op func(unsafe.Pointer, json.Number)) *numberDecoder {
+	return &numberDecoder{
+		stringDecoder: newStringDecoder(structName, fieldName),
+		op:            op,
+		structName:    structName,
+		fieldName:     fieldName,
+	}
+}
+
+func (d *numberDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	bytes, err := d.decodeStreamByte(s)
+	if err != nil {
+		return err
+	}
+	if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&bytes)), 64); err != nil {
+		return errors.ErrSyntax(err.Error(), s.totalOffset())
+	}
+	d.op(p, json.Number(string(bytes)))
+	s.reset()
+	return nil
+}
+
+func (d *numberDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	bytes, c, err := d.decodeByte(ctx.Buf, cursor)
+	if err != nil {
+		return 0, err
+	}
+	if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&bytes)), 64); err != nil {
+		return 0, errors.ErrSyntax(err.Error(), c)
+	}
+	cursor = c
+	s := *(*string)(unsafe.Pointer(&bytes))
+	d.op(p, json.Number(s))
+	return cursor, nil
+}
+
+func (d *numberDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	bytes, c, err := d.decodeByte(ctx.Buf, cursor)
+	if err != nil {
+		return nil, 0, err
+	}
+	if bytes == nil {
+		return [][]byte{nullbytes}, c, nil
+	}
+	return [][]byte{bytes}, c, nil
+}
+
+func (d *numberDecoder) decodeStreamByte(s *Stream) ([]byte, error) {
+	start := s.cursor
+	for {
+		switch s.char() {
+		case ' ', '\n', '\t', '\r':
+			s.cursor++
+			continue
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return floatBytes(s), nil
+		case 'n':
+			if err := nullBytes(s); err != nil {
+				return nil, err
+			}
+			return nil, nil
+		case '"':
+			return d.stringDecoder.decodeStreamByte(s)
+		case nul:
+			if s.read() {
+				continue
+			}
+			goto ERROR
+		default:
+			goto ERROR
+		}
+	}
+ERROR:
+	if s.cursor == start {
+		return nil, errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset())
+	}
+	return nil, errors.ErrUnexpectedEndOfJSON("json.Number", s.totalOffset())
+}
+
+func (d *numberDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) {
+	for {
+		switch buf[cursor] {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+			continue
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			start := cursor
+			cursor++
+			for floatTable[buf[cursor]] {
+				cursor++
+			}
+			num := buf[start:cursor]
+			return num, cursor, nil
+		case 'n':
+			if err := validateNull(buf, cursor); err != nil {
+				return nil, 0, err
+			}
+			cursor += 4
+			return nil, cursor, nil
+		case '"':
+			return d.stringDecoder.decodeByte(buf, cursor)
+		default:
+			return nil, 0, errors.ErrUnexpectedEndOfJSON("json.Number", cursor)
+		}
+	}
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/option.go b/vendor/github.com/goccy/go-json/internal/decoder/option.go
new file mode 100644
index 0000000000..502f772eba
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/option.go
@@ -0,0 +1,17 @@
+package decoder
+
+import "context"
+
+type OptionFlags uint8
+
+const (
+	FirstWinOption OptionFlags = 1 << iota
+	ContextOption
+	PathOption
+)
+
+type Option struct {
+	Flags   OptionFlags
+	Context context.Context
+	Path    *Path
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/path.go b/vendor/github.com/goccy/go-json/internal/decoder/path.go
new file mode 100644
index 0000000000..a15ff69e3c
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/path.go
@@ -0,0 +1,670 @@
+package decoder
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
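+// PathString is the textual form of a JSON Path expression,
+// e.g. "$.store.book[0].title", "$..author" or "$['store']['book'][*]".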
+type PathString string
+
+func (s PathString) Build() (*Path, error) {
+	builder := new(PathBuilder)
+	return builder.Build([]rune(s))
+}
+
+type PathBuilder struct {
+	root                    PathNode
+	node                    PathNode
+	singleQuotePathSelector bool
+	doubleQuotePathSelector bool
+}
+
+func (b *PathBuilder) Build(buf []rune) (*Path, error) {
+	node, err := b.build(buf)
+	if err != nil {
+		return nil, err
+	}
+	return &Path{
+		node:                    node,
+		RootSelectorOnly:        node == nil,
+		SingleQuotePathSelector: b.singleQuotePathSelector,
+		DoubleQuotePathSelector: b.doubleQuotePathSelector,
+	}, nil
+}
+
+func (b *PathBuilder) build(buf []rune) (PathNode, error) {
+	if len(buf) == 0 {
+		return nil, errors.ErrEmptyPath()
+	}
+	if buf[0] != '$' {
+		return nil, errors.ErrInvalidPath("JSON Path must start with a $ character")
+	}
+	if len(buf) == 1 {
+		return nil, nil
+	}
+	buf = buf[1:]
+	offset, err := b.buildNext(buf)
+	if err != nil {
+		return nil, err
+	}
+	if len(buf) > offset {
+		return nil, errors.ErrInvalidPath("remain invalid path %q", buf[offset:])
+	}
+	return b.root, nil
+}
+
+func (b *PathBuilder) buildNextCharIfExists(buf []rune, cursor int) (int, error) {
+	if len(buf) > cursor {
+		offset, err := b.buildNext(buf[cursor:])
+		if err != nil {
+			return 0, err
+		}
+		return cursor + 1 + offset, nil
+	}
+	return cursor, nil
+}
+
+func (b *PathBuilder) buildNext(buf []rune) (int, error) {
+	switch buf[0] {
+	case '.':
+		if len(buf) == 1 {
+			return 0, errors.ErrInvalidPath("JSON Path ends with dot character")
+		}
+		offset, err := b.buildSelector(buf[1:])
+		if err != nil {
+			return 0, err
+		}
+		return offset + 1, nil
+	case '[':
+		if len(buf) == 1 {
+			return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character")
+		}
+		offset, err := b.buildIndex(buf[1:])
+		if err != nil {
+			return 0, err
+		}
+		return offset + 1, nil
+	default:
+		return 0, errors.ErrInvalidPath("expect dot or left bracket character. but found %c character", buf[0])
+	}
+}
+
+func (b *PathBuilder) buildSelector(buf []rune) (int, error) {
+	switch buf[0] {
+	case '.':
+		if len(buf) == 1 {
+			return 0, errors.ErrInvalidPath("JSON Path ends with double dot character")
+		}
+		offset, err := b.buildPathRecursive(buf[1:])
+		if err != nil {
+			return 0, err
+		}
+		return 1 + offset, nil
+	case '[', ']', '$', '*':
+		return 0, errors.ErrInvalidPath("found invalid path character %c after dot", buf[0])
+	}
+	for cursor := 0; cursor < len(buf); cursor++ {
+		switch buf[cursor] {
+		case '$', '*', ']':
+			return 0, errors.ErrInvalidPath("found %c character in field selector context", buf[cursor])
+		case '.':
+			if cursor+1 >= len(buf) {
+				return 0, errors.ErrInvalidPath("JSON Path ends with dot character")
+			}
+			selector := buf[:cursor]
+			b.addSelectorNode(string(selector))
+			offset, err := b.buildSelector(buf[cursor+1:])
+			if err != nil {
+				return 0, err
+			}
+			return cursor + 1 + offset, nil
+		case '[':
+			if cursor+1 >= len(buf) {
+				return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character")
+			}
+			selector := buf[:cursor]
+			b.addSelectorNode(string(selector))
+			offset, err := b.buildIndex(buf[cursor+1:])
+			if err != nil {
+				return 0, err
+			}
+			return cursor + 1 + offset, nil
+		case '"':
+			if cursor+1 >= len(buf) {
+				return 0, errors.ErrInvalidPath("JSON Path ends with double quote character")
+			}
+			offset, err := b.buildQuoteSelector(buf[cursor+1:], DoubleQuotePathSelector)
+			if err != nil {
+				return 0, err
+			}
+			return cursor + 1 + offset, nil
+		}
+	}
+	b.addSelectorNode(string(buf))
+	return len(buf), nil
+}
+
+func (b *PathBuilder) buildQuoteSelector(buf []rune, sel QuotePathSelector) (int, error) {
+	switch buf[0] {
+	case '[', ']', '$', '.', '*', '\'', '"':
+		return 0, errors.ErrInvalidPath("found invalid path character %c after quote", buf[0])
+	}
+	for cursor := 0; cursor < len(buf); cursor++ {
+		switch buf[cursor] {
+		case '\'':
+			if sel != SingleQuotePathSelector {
+				return 0, errors.ErrInvalidPath("found double quote character in field selector with single quote context")
+			}
+			if len(buf) <= cursor+1 {
+				return 0, errors.ErrInvalidPath("JSON Path ends with single quote character in field selector context")
+			}
+			if buf[cursor+1] != ']' {
+				return 0, errors.ErrInvalidPath("expect right bracket for field selector with single quote but found %c", buf[cursor+1])
+			}
+			selector := buf[:cursor]
+			b.addSelectorNode(string(selector))
+			b.singleQuotePathSelector = true
+			return b.buildNextCharIfExists(buf, cursor+2)
+		case '"':
+			if sel != DoubleQuotePathSelector {
+				return 0, errors.ErrInvalidPath("found single quote character in field selector with double quote context")
+			}
+			selector := buf[:cursor]
+			b.addSelectorNode(string(selector))
+			b.doubleQuotePathSelector = true
+			return b.buildNextCharIfExists(buf, cursor+1)
+		}
+	}
+	return 0, errors.ErrInvalidPath("couldn't find quote character in selector quote path context")
+}
+
+func (b *PathBuilder) buildPathRecursive(buf []rune) (int, error) {
+	switch buf[0] {
+	case '.', '[', ']', '$', '*':
+		return 0, errors.ErrInvalidPath("found invalid path character %c after double dot", buf[0])
+	}
+	for cursor := 0; cursor < len(buf); cursor++ {
+		switch buf[cursor] {
+		case '$', '*', ']':
+			return 0, errors.ErrInvalidPath("found %c character in field selector context", buf[cursor])
+		case '.':
+			if cursor+1 >= len(buf) {
+				return 0, errors.ErrInvalidPath("JSON Path ends with dot character")
+			}
+			selector := buf[:cursor]
+			b.addRecursiveNode(string(selector))
+			offset, err := b.buildSelector(buf[cursor+1:])
+			if err != nil {
+				return 0, err
+			}
+			return cursor + 1 + offset, nil
+		case '[':
+			if cursor+1 >= len(buf) {
+				return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character")
+			}
+			selector := buf[:cursor]
+			b.addRecursiveNode(string(selector))
+			offset, err := b.buildIndex(buf[cursor+1:])
+			if err != nil {
+				return 0, err
+			}
+			return cursor + 1 + offset, nil
+		}
+	}
+	b.addRecursiveNode(string(buf))
+	return len(buf), nil
+}
+
+func (b *PathBuilder) buildIndex(buf []rune) (int, error) {
+	switch buf[0] {
+	case '.', '[', ']', '$':
+		return 0, errors.ErrInvalidPath("found invalid path character %c after left bracket", buf[0])
+	case '\'':
+		if len(buf) == 1 {
+			return 0, errors.ErrInvalidPath("JSON Path ends with single quote character")
+		}
+		offset, err := b.buildQuoteSelector(buf[1:], SingleQuotePathSelector)
+		if err != nil {
+			return 0, err
+		}
+		return 1 + offset, nil
+	case '*':
+		if len(buf) == 1 {
+			return 0, errors.ErrInvalidPath("JSON Path ends with star character")
+		}
+		if buf[1] != ']' {
+			return 0, errors.ErrInvalidPath("expect right bracket character for index all path but found %c character", buf[1])
+		}
+		b.addIndexAllNode()
+		offset := len("*]")
+		if len(buf) > 2 {
+			buildOffset, err := b.buildNext(buf[2:])
+			if err != nil {
+				return 0, err
+			}
+			return offset + buildOffset, nil
+		}
+		return offset, nil
+	}
+
+	for cursor := 0; cursor < len(buf); cursor++ {
+		switch buf[cursor] {
+		case ']':
+			index, err := strconv.ParseInt(string(buf[:cursor]), 10, 64)
+			if err != nil {
+				return 0, errors.ErrInvalidPath("%q is unexpected index path", buf[:cursor])
+			}
+			b.addIndexNode(int(index))
+			return b.buildNextCharIfExists(buf, cursor+1)
+		}
+	}
+	return 0, errors.ErrInvalidPath("couldn't find right bracket character in index path context")
+}
+
+func (b *PathBuilder) addIndexAllNode() {
+	node := newPathIndexAllNode()
+	if b.root == nil {
+		b.root = node
+		b.node = node
+	} else {
+		b.node = b.node.chain(node)
+	}
+}
+
+func (b *PathBuilder) addRecursiveNode(selector string) {
+	node := newPathRecursiveNode(selector)
+	if b.root == nil {
+		b.root = node
+		b.node = node
+	} else {
+		b.node = b.node.chain(node)
+	}
+}
+
+func (b *PathBuilder) addSelectorNode(name string) {
+	node := newPathSelectorNode(name)
+	if b.root == nil {
+		b.root = node
+		b.node = node
+	} else {
+		b.node = b.node.chain(node)
+	}
+}
+
+func (b *PathBuilder) addIndexNode(idx int) {
+	node := newPathIndexNode(idx)
+	if b.root == nil {
+		b.root = node
+		b.node = node
+	} else {
+		b.node = b.node.chain(node)
+	}
+}
+
+type QuotePathSelector int
+
+const (
+	SingleQuotePathSelector QuotePathSelector = 1
+	DoubleQuotePathSelector QuotePathSelector = 2
+)
+
+type Path struct {
+	node                    PathNode
+	RootSelectorOnly        bool
+	SingleQuotePathSelector bool
+	DoubleQuotePathSelector bool
+}
+
+func (p *Path) Field(sel string) (PathNode, bool, error) {
+	if p.node == nil {
+		return nil, false, nil
+	}
+	return p.node.Field(sel)
+}
+
+func (p *Path) Get(src, dst reflect.Value) error {
+	if p.node == nil {
+		return nil
+	}
+	return p.node.Get(src, dst)
+}
+
+func (p *Path) String() string {
+	if p.node == nil {
+		return "$"
+	}
+	return p.node.String()
+}
+
+type PathNode interface {
+	fmt.Stringer
+	Index(idx int) (PathNode, bool, error)
+	Field(fieldName string) (PathNode, bool, error)
+	Get(src, dst reflect.Value) error
+	chain(PathNode) PathNode
+	target() bool
+	single() bool
+}
+
+type BasePathNode struct {
+	child PathNode
+}
+
+func (n *BasePathNode) chain(node PathNode) PathNode {
+	n.child = node
+	return node
+}
+
+func (n *BasePathNode) target() bool {
+	return n.child == nil
+}
+
+func (n *BasePathNode) single() bool {
+	return true
+}
+
+type PathSelectorNode struct {
+	*BasePathNode
+	selector string
+}
+
+func newPathSelectorNode(selector string) *PathSelectorNode {
+	return &PathSelectorNode{
+		BasePathNode: &BasePathNode{},
+		selector:     selector,
+	}
+}
+
+func (n *PathSelectorNode) Index(idx int) (PathNode, bool, error) {
+	return nil, false, &errors.PathError{}
+}
+
+func (n *PathSelectorNode) Field(fieldName string) (PathNode, bool, error) {
+	if n.selector == fieldName {
+		return n.child, true, nil
+	}
+	return nil, false, nil
+}
+
+func (n *PathSelectorNode) Get(src, dst reflect.Value) error {
+	switch src.Type().Kind() {
+	case reflect.Map:
+		iter := src.MapRange()
+		for iter.Next() {
+			key, ok := iter.Key().Interface().(string)
+			if !ok {
+				return fmt.Errorf("invalid map key type %T", src.Type().Key())
+			}
+			child, found, err := n.Field(key)
+			if err != nil {
+				return err
+			}
+			if found {
+				if child != nil {
+					return child.Get(iter.Value(), dst)
+				}
+				return AssignValue(iter.Value(), dst)
+			}
+		}
+	case reflect.Struct:
+		typ := src.Type()
+		for i := 0; i < typ.NumField(); i++ {
+			tag := runtime.StructTagFromField(typ.Field(i))
+			child, found, err := n.Field(tag.Key)
+			if err != nil {
+				return err
+			}
+			if found {
+				if child != nil {
+					return child.Get(src.Field(i), dst)
+				}
+				return AssignValue(src.Field(i), dst)
+			}
+		}
+	case reflect.Ptr:
+		return n.Get(src.Elem(), dst)
+	case reflect.Interface:
+		return n.Get(reflect.ValueOf(src.Interface()), dst)
+	case reflect.Float64, reflect.String, reflect.Bool:
+		return AssignValue(src, dst)
+	}
+	return fmt.Errorf("failed to get %s value from %s", n.selector, src.Type())
+}
+
+func (n *PathSelectorNode) String() string {
+	s := fmt.Sprintf(".%s", n.selector)
+	if n.child != nil {
+		s += n.child.String()
+	}
+	return s
+}
+
+type PathIndexNode struct {
+	*BasePathNode
+	selector int
+}
+
+func newPathIndexNode(selector int) *PathIndexNode {
+	return &PathIndexNode{
+		BasePathNode: &BasePathNode{},
+		selector:     selector,
+	}
+}
+
+func (n *PathIndexNode) Index(idx int) (PathNode, bool, error) {
+	if n.selector == idx {
+		return n.child, true, nil
+	}
+	return nil, false, nil
+}
+
+func (n *PathIndexNode) Field(fieldName string) (PathNode, bool, error) {
+	return nil, false, &errors.PathError{}
+}
+
+func (n *PathIndexNode) Get(src, dst reflect.Value) error {
+	switch src.Type().Kind() {
+	case reflect.Array, reflect.Slice:
+		if src.Len() > n.selector {
+			if n.child != nil {
+				return n.child.Get(src.Index(n.selector), dst)
+			}
+			return AssignValue(src.Index(n.selector), dst)
+		}
+	case reflect.Ptr:
+		return n.Get(src.Elem(), dst)
+	case reflect.Interface:
+		return n.Get(reflect.ValueOf(src.Interface()), dst)
+	}
+	return fmt.Errorf("failed to get [%d] value from %s", n.selector, src.Type())
+}
+
+func (n *PathIndexNode) String() string {
+	s := fmt.Sprintf("[%d]", n.selector)
+	if n.child != nil {
+		s += n.child.String()
+	}
+	return s
+}
+
+type PathIndexAllNode struct {
+	*BasePathNode
+}
+
+func newPathIndexAllNode() *PathIndexAllNode {
+	return &PathIndexAllNode{
+		BasePathNode: &BasePathNode{},
+	}
+}
+
+func (n *PathIndexAllNode) Index(idx int) (PathNode, bool, error) {
+	return n.child, true, nil
+}
+
+func (n *PathIndexAllNode) Field(fieldName string) (PathNode, bool, error) {
+	return nil, false, &errors.PathError{}
+}
+
+func (n *PathIndexAllNode) Get(src, dst reflect.Value) error {
+	switch src.Type().Kind() {
+	case reflect.Array, reflect.Slice:
+		var arr []interface{}
+		for i := 0; i < src.Len(); i++ {
+			var v interface{}
+			rv := reflect.ValueOf(&v)
+			if n.child != nil {
+				if err := n.child.Get(src.Index(i), rv); err != nil {
+					return err
+				}
+			} else {
+				if err := AssignValue(src.Index(i), rv); err != nil {
+					return err
+				}
+			}
+			arr = append(arr, v)
+		}
+		if err := AssignValue(reflect.ValueOf(arr), dst); err != nil {
+			return err
+		}
+		return nil
+	case reflect.Ptr:
+		return n.Get(src.Elem(), dst)
+	case reflect.Interface:
+		return n.Get(reflect.ValueOf(src.Interface()), dst)
+	}
+	return fmt.Errorf("failed to get all value from %s", src.Type())
+}
+
+func (n *PathIndexAllNode) String() string {
+	s := "[*]"
+	if n.child != nil {
+		s += n.child.String()
+	}
+	return s
+}
+
+type PathRecursiveNode struct {
+	*BasePathNode
+	selector string
+}
+
+func newPathRecursiveNode(selector string) *PathRecursiveNode {
+	node := newPathSelectorNode(selector)
+	return &PathRecursiveNode{
+		BasePathNode: &BasePathNode{
+			child: node,
+		},
+		selector: selector,
+	}
+}
+
+func (n *PathRecursiveNode) Field(fieldName string) (PathNode, bool, error) {
+	if n.selector == fieldName {
+		return n.child, true, nil
+	}
+	return nil, false, nil
+}
+
+func (n *PathRecursiveNode) Index(_ int) (PathNode, bool, error) {
+	return n, true, nil
+}
+
+func valueToSliceValue(v interface{}) []interface{} {
+	rv := reflect.ValueOf(v)
+	ret := []interface{}{}
+	if rv.Type().Kind() == reflect.Slice || rv.Type().Kind() == reflect.Array {
+		for i := 0; i < rv.Len(); i++ {
+			ret = append(ret, rv.Index(i).Interface())
+		}
+		return ret
+	}
+	return []interface{}{v}
+}
+
+func (n *PathRecursiveNode) Get(src, dst reflect.Value) error {
+	if n.child == nil {
+		return fmt.Errorf("failed to get by recursive path ..%s", n.selector)
+	}
+	var arr []interface{}
+	switch src.Type().Kind() {
+	case reflect.Map:
+		iter := src.MapRange()
+		for iter.Next() {
+			key, ok := iter.Key().Interface().(string)
+			if !ok {
+				return fmt.Errorf("invalid map key type %T", src.Type().Key())
+			}
+			child, found, err := n.Field(key)
+			if err != nil {
+				return err
+			}
+			if found {
+				var v interface{}
+				rv := reflect.ValueOf(&v)
+				_ = child.Get(iter.Value(), rv)
+				arr = append(arr, valueToSliceValue(v)...)
+			} else {
+				var v interface{}
+				rv := reflect.ValueOf(&v)
+				_ = n.Get(iter.Value(), rv)
+				if v != nil {
+					arr = append(arr, valueToSliceValue(v)...)
+				}
+			}
+		}
+		_ = AssignValue(reflect.ValueOf(arr), dst)
+		return nil
+	case reflect.Struct:
+		typ := src.Type()
+		for i := 0; i < typ.NumField(); i++ {
+			tag := runtime.StructTagFromField(typ.Field(i))
+			child, found, err := n.Field(tag.Key)
+			if err != nil {
+				return err
+			}
+			if found {
+				var v interface{}
+				rv := reflect.ValueOf(&v)
+				_ = child.Get(src.Field(i), rv)
+				arr = append(arr, valueToSliceValue(v)...)
+			} else {
+				var v interface{}
+				rv := reflect.ValueOf(&v)
+				_ = n.Get(src.Field(i), rv)
+				if v != nil {
+					arr = append(arr, valueToSliceValue(v)...)
+				}
+			}
+		}
+		_ = AssignValue(reflect.ValueOf(arr), dst)
+		return nil
+	case reflect.Array, reflect.Slice:
+		for i := 0; i < src.Len(); i++ {
+			var v interface{}
+			rv := reflect.ValueOf(&v)
+			_ = n.Get(src.Index(i), rv)
+			if v != nil {
+				arr = append(arr, valueToSliceValue(v)...)
+			}
+		}
+		_ = AssignValue(reflect.ValueOf(arr), dst)
+		return nil
+	case reflect.Ptr:
+		return n.Get(src.Elem(), dst)
+	case reflect.Interface:
+		return n.Get(reflect.ValueOf(src.Interface()), dst)
+	}
+	return fmt.Errorf("failed to get %s value from %s", n.selector, src.Type())
+}
+
+func (n *PathRecursiveNode) String() string {
+	s := fmt.Sprintf("..%s", n.selector)
+	if n.child != nil {
+		s += n.child.String()
+	}
+	return s
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/ptr.go b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go
new file mode 100644
index 0000000000..de12e105c6
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go
@@ -0,0 +1,96 @@
+package decoder
+
+import (
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type ptrDecoder struct {
+	dec        Decoder
+	typ        *runtime.Type
+	structName string
+	fieldName  string
+}
+
+func newPtrDecoder(dec Decoder, typ *runtime.Type, structName, fieldName string) *ptrDecoder {
+	return &ptrDecoder{
+		dec:        dec,
+		typ:        typ,
+		structName: structName,
+		fieldName:  fieldName,
+	}
+}
+
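+// contentDecoder unwraps nested ptrDecoders and returns the innermost
+// non-pointer decoder.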
+func (d *ptrDecoder) contentDecoder() Decoder {
+	dec, ok := d.dec.(*ptrDecoder)
+	if !ok {
+		return d.dec
+	}
+	return dec.contentDecoder()
+}
+
+//nolint:golint
+//go:linkname unsafe_New reflect.unsafe_New
+func unsafe_New(*runtime.Type) unsafe.Pointer
+
+func UnsafeNew(t *runtime.Type) unsafe.Pointer {
+	return unsafe_New(t)
+}
+
+func (d *ptrDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	if s.skipWhiteSpace() == nul {
+		s.read()
+	}
+	if s.char() == 'n' {
+		if err := nullBytes(s); err != nil {
+			return err
+		}
+		*(*unsafe.Pointer)(p) = nil
+		return nil
+	}
+	var newptr unsafe.Pointer
+	if *(*unsafe.Pointer)(p) == nil {
+		newptr = unsafe_New(d.typ)
+		*(*unsafe.Pointer)(p) = newptr
+	} else {
+		newptr = *(*unsafe.Pointer)(p)
+	}
+	if err := d.dec.DecodeStream(s, depth, newptr); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (d *ptrDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	buf := ctx.Buf
+	cursor = skipWhiteSpace(buf, cursor)
+	if buf[cursor] == 'n' {
+		if err := validateNull(buf, cursor); err != nil {
+			return 0, err
+		}
+		if p != nil {
+			*(*unsafe.Pointer)(p) = nil
+		}
+		cursor += 4
+		return cursor, nil
+	}
+	var newptr unsafe.Pointer
+	if *(*unsafe.Pointer)(p) == nil {
+		newptr = unsafe_New(d.typ)
+		*(*unsafe.Pointer)(p) = newptr
+	} else {
+		newptr = *(*unsafe.Pointer)(p)
+	}
+	c, err := d.dec.Decode(ctx, cursor, depth, newptr)
+	if err != nil {
+		return 0, err
+	}
+	cursor = c
+	return cursor, nil
+}
+
+func (d *ptrDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return nil, 0, fmt.Errorf("json: ptr decoder does not support decode path")
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/slice.go b/vendor/github.com/goccy/go-json/internal/decoder/slice.go
new file mode 100644
index 0000000000..30a23e4b51
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/slice.go
@@ -0,0 +1,380 @@
+package decoder
+
+import (
+	"reflect"
+	"sync"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+var (
+	sliceType = runtime.Type2RType(
+		reflect.TypeOf((*sliceHeader)(nil)).Elem(),
+	)
+	nilSlice = unsafe.Pointer(&sliceHeader{})
+)
+
+type sliceDecoder struct {
+	elemType          *runtime.Type
+	isElemPointerType bool
+	valueDecoder      Decoder
+	size              uintptr
+	arrayPool         sync.Pool
+	structName        string
+	fieldName         string
+}
+
+// If reflect.SliceHeader were used here, the data field would be a uintptr,
+// so the Go compiler could not trace the reference created by newArray().
+// The data field is therefore declared as unsafe.Pointer.
+type sliceHeader struct {
+	data unsafe.Pointer
+	len  int
+	cap  int
+}
+
+const (
+	defaultSliceCapacity = 2
+)
+
+func newSliceDecoder(dec Decoder, elemType *runtime.Type, size uintptr, structName, fieldName string) *sliceDecoder {
+	return &sliceDecoder{
+		valueDecoder:      dec,
+		elemType:          elemType,
+		isElemPointerType: elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map,
+		size:              size,
+		arrayPool: sync.Pool{
+			New: func() interface{} {
+				return &sliceHeader{
+					data: newArray(elemType, defaultSliceCapacity),
+					len:  0,
+					cap:  defaultSliceCapacity,
+				}
+			},
+		},
+		structName: structName,
+		fieldName:  fieldName,
+	}
+}
+
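+// newSlice takes a sliceHeader from the pool and, when src already holds
+// elements, copies them into it, replacing the pooled backing array with a
+// larger one if needed.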
+func (d *sliceDecoder) newSlice(src *sliceHeader) *sliceHeader {
+	slice := d.arrayPool.Get().(*sliceHeader)
+	if src.len > 0 {
+		// copy original elem
+		if slice.cap < src.cap {
+			data := newArray(d.elemType, src.cap)
+			slice = &sliceHeader{data: data, len: src.len, cap: src.cap}
+		} else {
+			slice.len = src.len
+		}
+		copySlice(d.elemType, *slice, *src)
+	} else {
+		slice.len = 0
+	}
+	return slice
+}
+
+func (d *sliceDecoder) releaseSlice(p *sliceHeader) {
+	d.arrayPool.Put(p)
+}
+
+//go:linkname copySlice reflect.typedslicecopy
+func copySlice(elemType *runtime.Type, dst, src sliceHeader) int
+
+//go:linkname newArray reflect.unsafe_NewArray
+func newArray(*runtime.Type, int) unsafe.Pointer
+
+//go:linkname typedmemmove reflect.typedmemmove
+func typedmemmove(t *runtime.Type, dst, src unsafe.Pointer)
+
+func (d *sliceDecoder) errNumber(offset int64) *errors.UnmarshalTypeError {
+	return &errors.UnmarshalTypeError{
+		Value:  "number",
+		Type:   reflect.SliceOf(runtime.RType2Type(d.elemType)),
+		Struct: d.structName,
+		Field:  d.fieldName,
+		Offset: offset,
+	}
+}
+
+func (d *sliceDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	depth++
+	if depth > maxDecodeNestingDepth {
+		return errors.ErrExceededMaxDepth(s.char(), s.cursor)
+	}
+
+	for {
+		switch s.char() {
+		case ' ', '\n', '\t', '\r':
+			s.cursor++
+			continue
+		case 'n':
+			if err := nullBytes(s); err != nil {
+				return err
+			}
+			typedmemmove(sliceType, p, nilSlice)
+			return nil
+		case '[':
+			s.cursor++
+			if s.skipWhiteSpace() == ']' {
+				dst := (*sliceHeader)(p)
+				if dst.data == nil {
+					dst.data = newArray(d.elemType, 0)
+				} else {
+					dst.len = 0
+				}
+				s.cursor++
+				return nil
+			}
+			idx := 0
+			slice := d.newSlice((*sliceHeader)(p))
+			srcLen := slice.len
+			capacity := slice.cap
+			data := slice.data
+			for {
+				if capacity <= idx {
+					src := sliceHeader{data: data, len: idx, cap: capacity}
+					capacity *= 2
+					data = newArray(d.elemType, capacity)
+					dst := sliceHeader{data: data, len: idx, cap: capacity}
+					copySlice(d.elemType, dst, src)
+				}
+				ep := unsafe.Pointer(uintptr(data) + uintptr(idx)*d.size)
+
+				// if srcLen is greater than idx, keep the original reference
+				if srcLen <= idx {
+					if d.isElemPointerType {
+						**(**unsafe.Pointer)(unsafe.Pointer(&ep)) = nil // initialize elem pointer
+					} else {
+						// assign new element to the slice
+						typedmemmove(d.elemType, ep, unsafe_New(d.elemType))
+					}
+				}
+
+				if err := d.valueDecoder.DecodeStream(s, depth, ep); err != nil {
+					return err
+				}
+				s.skipWhiteSpace()
+			RETRY:
+				switch s.char() {
+				case ']':
+					slice.cap = capacity
+					slice.len = idx + 1
+					slice.data = data
+					dst := (*sliceHeader)(p)
+					dst.len = idx + 1
+					if dst.len > dst.cap {
+						dst.data = newArray(d.elemType, dst.len)
+						dst.cap = dst.len
+					}
+					copySlice(d.elemType, *dst, *slice)
+					d.releaseSlice(slice)
+					s.cursor++
+					return nil
+				case ',':
+					idx++
+				case nul:
+					if s.read() {
+						goto RETRY
+					}
+					slice.cap = capacity
+					slice.data = data
+					d.releaseSlice(slice)
+					goto ERROR
+				default:
+					slice.cap = capacity
+					slice.data = data
+					d.releaseSlice(slice)
+					goto ERROR
+				}
+				s.cursor++
+			}
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return d.errNumber(s.totalOffset())
+		case nul:
+			if s.read() {
+				continue
+			}
+			goto ERROR
+		default:
+			goto ERROR
+		}
+	}
+ERROR:
+	return errors.ErrUnexpectedEndOfJSON("slice", s.totalOffset())
+}
+
+func (d *sliceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	buf := ctx.Buf
+	depth++
+	if depth > maxDecodeNestingDepth {
+		return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
+	}
+
+	for {
+		switch buf[cursor] {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+			continue
+		case 'n':
+			if err := validateNull(buf, cursor); err != nil {
+				return 0, err
+			}
+			cursor += 4
+			typedmemmove(sliceType, p, nilSlice)
+			return cursor, nil
+		case '[':
+			cursor++
+			cursor = skipWhiteSpace(buf, cursor)
+			if buf[cursor] == ']' {
+				dst := (*sliceHeader)(p)
+				if dst.data == nil {
+					dst.data = newArray(d.elemType, 0)
+				} else {
+					dst.len = 0
+				}
+				cursor++
+				return cursor, nil
+			}
+			idx := 0
+			slice := d.newSlice((*sliceHeader)(p))
+			srcLen := slice.len
+			capacity := slice.cap
+			data := slice.data
+			for {
+				if capacity <= idx {
+					src := sliceHeader{data: data, len: idx, cap: capacity}
+					capacity *= 2
+					data = newArray(d.elemType, capacity)
+					dst := sliceHeader{data: data, len: idx, cap: capacity}
+					copySlice(d.elemType, dst, src)
+				}
+				ep := unsafe.Pointer(uintptr(data) + uintptr(idx)*d.size)
+				// if srcLen is greater than idx, keep the original reference
+				if srcLen <= idx {
+					if d.isElemPointerType {
+						**(**unsafe.Pointer)(unsafe.Pointer(&ep)) = nil // initialize elem pointer
+					} else {
+						// assign new element to the slice
+						typedmemmove(d.elemType, ep, unsafe_New(d.elemType))
+					}
+				}
+				c, err := d.valueDecoder.Decode(ctx, cursor, depth, ep)
+				if err != nil {
+					return 0, err
+				}
+				cursor = c
+				cursor = skipWhiteSpace(buf, cursor)
+				switch buf[cursor] {
+				case ']':
+					slice.cap = capacity
+					slice.len = idx + 1
+					slice.data = data
+					dst := (*sliceHeader)(p)
+					dst.len = idx + 1
+					if dst.len > dst.cap {
+						dst.data = newArray(d.elemType, dst.len)
+						dst.cap = dst.len
+					}
+					copySlice(d.elemType, *dst, *slice)
+					d.releaseSlice(slice)
+					cursor++
+					return cursor, nil
+				case ',':
+					idx++
+				default:
+					slice.cap = capacity
+					slice.data = data
+					d.releaseSlice(slice)
+					return 0, errors.ErrInvalidCharacter(buf[cursor], "slice", cursor)
+				}
+				cursor++
+			}
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return 0, d.errNumber(cursor)
+		default:
+			return 0, errors.ErrUnexpectedEndOfJSON("slice", cursor)
+		}
+	}
+}
+
+func (d *sliceDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	buf := ctx.Buf
+	depth++
+	if depth > maxDecodeNestingDepth {
+		return nil, 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
+	}
+
+	ret := [][]byte{}
+	for {
+		switch buf[cursor] {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+			continue
+		case 'n':
+			if err := validateNull(buf, cursor); err != nil {
+				return nil, 0, err
+			}
+			cursor += 4
+			return [][]byte{nullbytes}, cursor, nil
+		case '[':
+			cursor++
+			cursor = skipWhiteSpace(buf, cursor)
+			if buf[cursor] == ']' {
+				cursor++
+				return ret, cursor, nil
+			}
+			idx := 0
+			for {
+				child, found, err := ctx.Option.Path.node.Index(idx)
+				if err != nil {
+					return nil, 0, err
+				}
+				if found {
+					if child != nil {
+						oldPath := ctx.Option.Path.node
+						ctx.Option.Path.node = child
+						paths, c, err := d.valueDecoder.DecodePath(ctx, cursor, depth)
+						if err != nil {
+							return nil, 0, err
+						}
+						ctx.Option.Path.node = oldPath
+						ret = append(ret, paths...)
+						cursor = c
+					} else {
+						start := cursor
+						end, err := skipValue(buf, cursor, depth)
+						if err != nil {
+							return nil, 0, err
+						}
+						ret = append(ret, buf[start:end])
+						cursor = end
+					}
+				} else {
+					c, err := skipValue(buf, cursor, depth)
+					if err != nil {
+						return nil, 0, err
+					}
+					cursor = c
+				}
+				cursor = skipWhiteSpace(buf, cursor)
+				switch buf[cursor] {
+				case ']':
+					cursor++
+					return ret, cursor, nil
+				case ',':
+					idx++
+				default:
+					return nil, 0, errors.ErrInvalidCharacter(buf[cursor], "slice", cursor)
+				}
+				cursor++
+			}
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return nil, 0, d.errNumber(cursor)
+		default:
+			return nil, 0, errors.ErrUnexpectedEndOfJSON("slice", cursor)
+		}
+	}
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/stream.go b/vendor/github.com/goccy/go-json/internal/decoder/stream.go
new file mode 100644
index 0000000000..a383f72596
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/stream.go
@@ -0,0 +1,556 @@
+package decoder
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"strconv"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+)
+
+const (
+	initBufSize = 512
+)
+
+type Stream struct {
+	buf                   []byte
+	bufSize               int64
+	length                int64
+	r                     io.Reader
+	offset                int64
+	cursor                int64
+	filledBuffer          bool
+	allRead               bool
+	UseNumber             bool
+	DisallowUnknownFields bool
+	Option                *Option
+}
+
+func NewStream(r io.Reader) *Stream {
+	return &Stream{
+		r:       r,
+		bufSize: initBufSize,
+		buf:     make([]byte, initBufSize),
+		Option:  &Option{},
+	}
+}
+
+func (s *Stream) TotalOffset() int64 {
+	return s.totalOffset()
+}
+
+func (s *Stream) Buffered() io.Reader {
+	buflen := int64(len(s.buf))
+	for i := s.cursor; i < buflen; i++ {
+		if s.buf[i] == nul {
+			return bytes.NewReader(s.buf[s.cursor:i])
+		}
+	}
+	return bytes.NewReader(s.buf[s.cursor:])
+}
+
+func (s *Stream) PrepareForDecode() error {
+	for {
+		switch s.char() {
+		case ' ', '\t', '\r', '\n':
+			s.cursor++
+			continue
+		case ',', ':':
+			s.cursor++
+			return nil
+		case nul:
+			if s.read() {
+				continue
+			}
+			return io.EOF
+		}
+		break
+	}
+	return nil
+}
+
+func (s *Stream) totalOffset() int64 {
+	return s.offset + s.cursor
+}
+
+func (s *Stream) char() byte {
+	return s.buf[s.cursor]
+}
+
+func (s *Stream) equalChar(c byte) bool {
+	cur := s.buf[s.cursor]
+	if cur == nul {
+		s.read()
+		cur = s.buf[s.cursor]
+	}
+	return cur == c
+}
+
+func (s *Stream) stat() ([]byte, int64, unsafe.Pointer) {
+	return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data
+}
+
+func (s *Stream) bufptr() unsafe.Pointer {
+	return (*sliceHeader)(unsafe.Pointer(&s.buf)).data
+}
+
+func (s *Stream) statForRetry() ([]byte, int64, unsafe.Pointer) {
+	s.cursor-- // for retry (the caller advances the cursor position on each loop iteration)
+	return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data
+}
+
+func (s *Stream) Reset() {
+	s.reset()
+	s.bufSize = int64(len(s.buf))
+}
+
+func (s *Stream) More() bool {
+	for {
+		switch s.char() {
+		case ' ', '\n', '\r', '\t':
+			s.cursor++
+			continue
+		case '}', ']':
+			return false
+		case nul:
+			if s.read() {
+				continue
+			}
+			return false
+		}
+		break
+	}
+	return true
+}
+
+func (s *Stream) Token() (interface{}, error) {
+	for {
+		c := s.char()
+		switch c {
+		case ' ', '\n', '\r', '\t':
+			s.cursor++
+		case '{', '[', ']', '}':
+			s.cursor++
+			return json.Delim(c), nil
+		case ',', ':':
+			s.cursor++
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			bytes := floatBytes(s)
+			str := *(*string)(unsafe.Pointer(&bytes))
+			if s.UseNumber {
+				return json.Number(str), nil
+			}
+			f64, err := strconv.ParseFloat(str, 64)
+			if err != nil {
+				return nil, err
+			}
+			return f64, nil
+		case '"':
+			bytes, err := stringBytes(s)
+			if err != nil {
+				return nil, err
+			}
+			return string(bytes), nil
+		case 't':
+			if err := trueBytes(s); err != nil {
+				return nil, err
+			}
+			return true, nil
+		case 'f':
+			if err := falseBytes(s); err != nil {
+				return nil, err
+			}
+			return false, nil
+		case 'n':
+			if err := nullBytes(s); err != nil {
+				return nil, err
+			}
+			return nil, nil
+		case nul:
+			if s.read() {
+				continue
+			}
+			goto END
+		default:
+			return nil, errors.ErrInvalidCharacter(s.char(), "token", s.totalOffset())
+		}
+	}
+END:
+	return nil, io.EOF
+}
+
+func (s *Stream) reset() {
+	s.offset += s.cursor
+	s.buf = s.buf[s.cursor:]
+	s.length -= s.cursor
+	s.cursor = 0
+}
+
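+// readBuf doubles the buffer size when the previous read filled it completely,
+// then returns the unused tail of the buffer (after the remaining valid,
+// non-nul bytes) for the next read to fill.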
+func (s *Stream) readBuf() []byte {
+	if s.filledBuffer {
+		s.bufSize *= 2
+		remainBuf := s.buf
+		s.buf = make([]byte, s.bufSize)
+		copy(s.buf, remainBuf)
+	}
+	remainLen := s.length - s.cursor
+	remainNotNulCharNum := int64(0)
+	for i := int64(0); i < remainLen; i++ {
+		if s.buf[s.cursor+i] == nul {
+			break
+		}
+		remainNotNulCharNum++
+	}
+	s.length = s.cursor + remainNotNulCharNum
+	return s.buf[s.cursor+remainNotNulCharNum:]
+}
+
+func (s *Stream) read() bool {
+	if s.allRead {
+		return false
+	}
+	buf := s.readBuf()
+	last := len(buf) - 1
+	buf[last] = nul
+	n, err := s.r.Read(buf[:last])
+	s.length += int64(n)
+	if n == last {
+		s.filledBuffer = true
+	} else {
+		s.filledBuffer = false
+	}
+	if err == io.EOF {
+		s.allRead = true
+	} else if err != nil {
+		return false
+	}
+	return true
+}
+
+func (s *Stream) skipWhiteSpace() byte {
+	p := s.bufptr()
+LOOP:
+	c := char(p, s.cursor)
+	switch c {
+	case ' ', '\n', '\t', '\r':
+		s.cursor++
+		goto LOOP
+	case nul:
+		if s.read() {
+			p = s.bufptr()
+			goto LOOP
+		}
+	}
+	return c
+}
+
+func (s *Stream) skipObject(depth int64) error {
+	braceCount := 1
+	_, cursor, p := s.stat()
+	for {
+		switch char(p, cursor) {
+		case '{':
+			braceCount++
+			depth++
+			if depth > maxDecodeNestingDepth {
+				return errors.ErrExceededMaxDepth(s.char(), s.cursor)
+			}
+		case '}':
+			braceCount--
+			depth--
+			if braceCount == 0 {
+				s.cursor = cursor + 1
+				return nil
+			}
+		case '[':
+			depth++
+			if depth > maxDecodeNestingDepth {
+				return errors.ErrExceededMaxDepth(s.char(), s.cursor)
+			}
+		case ']':
+			depth--
+		case '"':
+			for {
+				cursor++
+				switch char(p, cursor) {
+				case '\\':
+					cursor++
+					if char(p, cursor) == nul {
+						s.cursor = cursor
+						if s.read() {
+							_, cursor, p = s.stat()
+							continue
+						}
+						return errors.ErrUnexpectedEndOfJSON("string of object", cursor)
+					}
+				case '"':
+					goto SWITCH_OUT
+				case nul:
+					s.cursor = cursor
+					if s.read() {
+						_, cursor, p = s.statForRetry()
+						continue
+					}
+					return errors.ErrUnexpectedEndOfJSON("string of object", cursor)
+				}
+			}
+		case nul:
+			s.cursor = cursor
+			if s.read() {
+				_, cursor, p = s.stat()
+				continue
+			}
+			return errors.ErrUnexpectedEndOfJSON("object of object", cursor)
+		}
+	SWITCH_OUT:
+		cursor++
+	}
+}
+
+func (s *Stream) skipArray(depth int64) error {
+	bracketCount := 1
+	_, cursor, p := s.stat()
+	for {
+		switch char(p, cursor) {
+		case '[':
+			bracketCount++
+			depth++
+			if depth > maxDecodeNestingDepth {
+				return errors.ErrExceededMaxDepth(s.char(), s.cursor)
+			}
+		case ']':
+			bracketCount--
+			depth--
+			if bracketCount == 0 {
+				s.cursor = cursor + 1
+				return nil
+			}
+		case '{':
+			depth++
+			if depth > maxDecodeNestingDepth {
+				return errors.ErrExceededMaxDepth(s.char(), s.cursor)
+			}
+		case '}':
+			depth--
+		case '"':
+			for {
+				cursor++
+				switch char(p, cursor) {
+				case '\\':
+					cursor++
+					if char(p, cursor) == nul {
+						s.cursor = cursor
+						if s.read() {
+							_, cursor, p = s.stat()
+							continue
+						}
+						return errors.ErrUnexpectedEndOfJSON("string of object", cursor)
+					}
+				case '"':
+					goto SWITCH_OUT
+				case nul:
+					s.cursor = cursor
+					if s.read() {
+						_, cursor, p = s.statForRetry()
+						continue
+					}
+					return errors.ErrUnexpectedEndOfJSON("string of object", cursor)
+				}
+			}
+		case nul:
+			s.cursor = cursor
+			if s.read() {
+				_, cursor, p = s.stat()
+				continue
+			}
+			return errors.ErrUnexpectedEndOfJSON("array of object", cursor)
+		}
+	SWITCH_OUT:
+		cursor++
+	}
+}
+
+func (s *Stream) skipValue(depth int64) error {
+	_, cursor, p := s.stat()
+	for {
+		switch char(p, cursor) {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+			continue
+		case nul:
+			s.cursor = cursor
+			if s.read() {
+				_, cursor, p = s.stat()
+				continue
+			}
+			return errors.ErrUnexpectedEndOfJSON("value of object", s.totalOffset())
+		case '{':
+			s.cursor = cursor + 1
+			return s.skipObject(depth + 1)
+		case '[':
+			s.cursor = cursor + 1
+			return s.skipArray(depth + 1)
+		case '"':
+			for {
+				cursor++
+				switch char(p, cursor) {
+				case '\\':
+					cursor++
+					if char(p, cursor) == nul {
+						s.cursor = cursor
+						if s.read() {
+							_, cursor, p = s.stat()
+							continue
+						}
+						return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset())
+					}
+				case '"':
+					s.cursor = cursor + 1
+					return nil
+				case nul:
+					s.cursor = cursor
+					if s.read() {
+						_, cursor, p = s.statForRetry()
+						continue
+					}
+					return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset())
+				}
+			}
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			for {
+				cursor++
+				c := char(p, cursor)
+				if floatTable[c] {
+					continue
+				} else if c == nul {
+					if s.read() {
+						_, cursor, p = s.stat()
+						continue
+					}
+				}
+				s.cursor = cursor
+				return nil
+			}
+		case 't':
+			s.cursor = cursor
+			if err := trueBytes(s); err != nil {
+				return err
+			}
+			return nil
+		case 'f':
+			s.cursor = cursor
+			if err := falseBytes(s); err != nil {
+				return err
+			}
+			return nil
+		case 'n':
+			s.cursor = cursor
+			if err := nullBytes(s); err != nil {
+				return err
+			}
+			return nil
+		}
+		cursor++
+	}
+}
+
+func nullBytes(s *Stream) error {
+	// current cursor's character is 'n'
+	s.cursor++
+	if s.char() != 'u' {
+		if err := retryReadNull(s); err != nil {
+			return err
+		}
+	}
+	s.cursor++
+	if s.char() != 'l' {
+		if err := retryReadNull(s); err != nil {
+			return err
+		}
+	}
+	s.cursor++
+	if s.char() != 'l' {
+		if err := retryReadNull(s); err != nil {
+			return err
+		}
+	}
+	s.cursor++
+	return nil
+}
+
+func retryReadNull(s *Stream) error {
+	if s.char() == nul && s.read() {
+		return nil
+	}
+	return errors.ErrInvalidCharacter(s.char(), "null", s.totalOffset())
+}
+
+func trueBytes(s *Stream) error {
+	// current cursor's character is 't'
+	s.cursor++
+	if s.char() != 'r' {
+		if err := retryReadTrue(s); err != nil {
+			return err
+		}
+	}
+	s.cursor++
+	if s.char() != 'u' {
+		if err := retryReadTrue(s); err != nil {
+			return err
+		}
+	}
+	s.cursor++
+	if s.char() != 'e' {
+		if err := retryReadTrue(s); err != nil {
+			return err
+		}
+	}
+	s.cursor++
+	return nil
+}
+
+func retryReadTrue(s *Stream) error {
+	if s.char() == nul && s.read() {
+		return nil
+	}
+	return errors.ErrInvalidCharacter(s.char(), "bool(true)", s.totalOffset())
+}
+
+func falseBytes(s *Stream) error {
+	// current cursor's character is 'f'
+	s.cursor++
+	if s.char() != 'a' {
+		if err := retryReadFalse(s); err != nil {
+			return err
+		}
+	}
+	s.cursor++
+	if s.char() != 'l' {
+		if err := retryReadFalse(s); err != nil {
+			return err
+		}
+	}
+	s.cursor++
+	if s.char() != 's' {
+		if err := retryReadFalse(s); err != nil {
+			return err
+		}
+	}
+	s.cursor++
+	if s.char() != 'e' {
+		if err := retryReadFalse(s); err != nil {
+			return err
+		}
+	}
+	s.cursor++
+	return nil
+}
+
+func retryReadFalse(s *Stream) error {
+	if s.char() == nul && s.read() {
+		return nil
+	}
+	return errors.ErrInvalidCharacter(s.char(), "bool(false)", s.totalOffset())
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/string.go b/vendor/github.com/goccy/go-json/internal/decoder/string.go
new file mode 100644
index 0000000000..32602c908a
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/string.go
@@ -0,0 +1,452 @@
+package decoder
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+)
+
+type stringDecoder struct {
+	structName string
+	fieldName  string
+}
+
+func newStringDecoder(structName, fieldName string) *stringDecoder {
+	return &stringDecoder{
+		structName: structName,
+		fieldName:  fieldName,
+	}
+}
+
+func (d *stringDecoder) errUnmarshalType(typeName string, offset int64) *errors.UnmarshalTypeError {
+	return &errors.UnmarshalTypeError{
+		Value:  typeName,
+		Type:   reflect.TypeOf(""),
+		Offset: offset,
+		Struct: d.structName,
+		Field:  d.fieldName,
+	}
+}
+
+func (d *stringDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	bytes, err := d.decodeStreamByte(s)
+	if err != nil {
+		return err
+	}
+	if bytes == nil {
+		return nil
+	}
+	**(**string)(unsafe.Pointer(&p)) = *(*string)(unsafe.Pointer(&bytes))
+	s.reset()
+	return nil
+}
+
+func (d *stringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	bytes, c, err := d.decodeByte(ctx.Buf, cursor)
+	if err != nil {
+		return 0, err
+	}
+	if bytes == nil {
+		return c, nil
+	}
+	cursor = c
+	**(**string)(unsafe.Pointer(&p)) = *(*string)(unsafe.Pointer(&bytes))
+	return cursor, nil
+}
+
+func (d *stringDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	bytes, c, err := d.decodeByte(ctx.Buf, cursor)
+	if err != nil {
+		return nil, 0, err
+	}
+	if bytes == nil {
+		return [][]byte{nullbytes}, c, nil
+	}
+	return [][]byte{bytes}, c, nil
+}
+
+var (
+	hexToInt = [256]int{
+		'0': 0,
+		'1': 1,
+		'2': 2,
+		'3': 3,
+		'4': 4,
+		'5': 5,
+		'6': 6,
+		'7': 7,
+		'8': 8,
+		'9': 9,
+		'A': 10,
+		'B': 11,
+		'C': 12,
+		'D': 13,
+		'E': 14,
+		'F': 15,
+		'a': 10,
+		'b': 11,
+		'c': 12,
+		'd': 13,
+		'e': 14,
+		'f': 15,
+	}
+)
+
+func unicodeToRune(code []byte) rune {
+	var r rune
+	for i := 0; i < len(code); i++ {
+		r = r*16 + rune(hexToInt[code[i]])
+	}
+	return r
+}
+
+func readAtLeast(s *Stream, n int64, p *unsafe.Pointer) bool {
+	for s.cursor+n >= s.length {
+		if !s.read() {
+			return false
+		}
+		*p = s.bufptr()
+	}
+	return true
+}
+
+func decodeUnicodeRune(s *Stream, p unsafe.Pointer) (rune, int64, unsafe.Pointer, error) {
+	const defaultOffset = 5
+	const surrogateOffset = 11
+
+	if !readAtLeast(s, defaultOffset, &p) {
+		return rune(0), 0, nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset())
+	}
+
+	r := unicodeToRune(s.buf[s.cursor+1 : s.cursor+defaultOffset])
+	if utf16.IsSurrogate(r) {
+		if !readAtLeast(s, surrogateOffset, &p) {
+			return unicode.ReplacementChar, defaultOffset, p, nil
+		}
+		if s.buf[s.cursor+defaultOffset] != '\\' || s.buf[s.cursor+defaultOffset+1] != 'u' {
+			return unicode.ReplacementChar, defaultOffset, p, nil
+		}
+		r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset])
+		if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar {
+			return r, surrogateOffset, p, nil
+		}
+	}
+	return r, defaultOffset, p, nil
+}
+
+func decodeUnicode(s *Stream, p unsafe.Pointer) (unsafe.Pointer, error) {
+	const backSlashAndULen = 2 // length of \u
+
+	r, offset, pp, err := decodeUnicodeRune(s, p)
+	if err != nil {
+		return nil, err
+	}
+	unicode := []byte(string(r))
+	unicodeLen := int64(len(unicode))
+	s.buf = append(append(s.buf[:s.cursor-1], unicode...), s.buf[s.cursor+offset:]...)
+	unicodeOrgLen := offset - 1
+	s.length = s.length - (backSlashAndULen + (unicodeOrgLen - unicodeLen))
+	s.cursor = s.cursor - backSlashAndULen + unicodeLen
+	return pp, nil
+}
+
+func decodeEscapeString(s *Stream, p unsafe.Pointer) (unsafe.Pointer, error) {
+	s.cursor++
+RETRY:
+	switch s.buf[s.cursor] {
+	case '"':
+		s.buf[s.cursor] = '"'
+	case '\\':
+		s.buf[s.cursor] = '\\'
+	case '/':
+		s.buf[s.cursor] = '/'
+	case 'b':
+		s.buf[s.cursor] = '\b'
+	case 'f':
+		s.buf[s.cursor] = '\f'
+	case 'n':
+		s.buf[s.cursor] = '\n'
+	case 'r':
+		s.buf[s.cursor] = '\r'
+	case 't':
+		s.buf[s.cursor] = '\t'
+	case 'u':
+		return decodeUnicode(s, p)
+	case nul:
+		if !s.read() {
+			return nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset())
+		}
+		p = s.bufptr()
+		goto RETRY
+	default:
+		return nil, errors.ErrUnexpectedEndOfJSON("string", s.totalOffset())
+	}
+	s.buf = append(s.buf[:s.cursor-1], s.buf[s.cursor:]...)
+	s.length--
+	s.cursor--
+	p = s.bufptr()
+	return p, nil
+}
+
+var (
+	runeErrBytes    = []byte(string(utf8.RuneError))
+	runeErrBytesLen = int64(len(runeErrBytes))
+)
+
+func stringBytes(s *Stream) ([]byte, error) {
+	_, cursor, p := s.stat()
+	cursor++ // skip double quote char
+	start := cursor
+	for {
+		switch char(p, cursor) {
+		case '\\':
+			s.cursor = cursor
+			pp, err := decodeEscapeString(s, p)
+			if err != nil {
+				return nil, err
+			}
+			p = pp
+			cursor = s.cursor
+		case '"':
+			literal := s.buf[start:cursor]
+			cursor++
+			s.cursor = cursor
+			return literal, nil
+		case
+			// 0x00 is nul, 0x5c is '\\', 0x22 is '"'.
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, // 0x00-0x0F
+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, // 0x10-0x1F
+			0x20, 0x21 /*0x22,*/, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, // 0x20-0x2F
+			0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, // 0x30-0x3F
+			0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, // 0x40-0x4F
+			0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B /*0x5C,*/, 0x5D, 0x5E, 0x5F, // 0x50-0x5F
+			0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, // 0x60-0x6F
+			0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F: // 0x70-0x7F
+			// character is ASCII. skip to next char
+		case
+			0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F, // 0x80-0x8F
+			0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F, // 0x90-0x9F
+			0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, // 0xA0-0xAF
+			0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF, // 0xB0-0xBF
+			0xC0, 0xC1, // 0xC0-0xC1
+			0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF: // 0xF5-0xFF
+			// character is invalid
+			s.buf = append(append(append([]byte{}, s.buf[:cursor]...), runeErrBytes...), s.buf[cursor+1:]...)
+			_, _, p = s.stat()
+			cursor += runeErrBytesLen
+			s.length += runeErrBytesLen
+			continue
+		case nul:
+			s.cursor = cursor
+			if s.read() {
+				_, cursor, p = s.stat()
+				continue
+			}
+			goto ERROR
+		case 0xEF:
+			// RuneError is {0xEF, 0xBF, 0xBD}
+			if s.buf[cursor+1] == 0xBF && s.buf[cursor+2] == 0xBD {
+				// found RuneError: skip
+				cursor += 2
+				break
+			}
+			fallthrough
+		default:
+			// multi-byte character
+			if !utf8.FullRune(s.buf[cursor : len(s.buf)-1]) {
+				s.cursor = cursor
+				if s.read() {
+					_, cursor, p = s.stat()
+					continue
+				}
+				goto ERROR
+			}
+			r, size := utf8.DecodeRune(s.buf[cursor:])
+			if r == utf8.RuneError {
+				s.buf = append(append(append([]byte{}, s.buf[:cursor]...), runeErrBytes...), s.buf[cursor+1:]...)
+				cursor += runeErrBytesLen
+				s.length += runeErrBytesLen
+				_, _, p = s.stat()
+			} else {
+				cursor += int64(size)
+			}
+			continue
+		}
+		cursor++
+	}
+ERROR:
+	return nil, errors.ErrUnexpectedEndOfJSON("string", s.totalOffset())
+}
+
+func (d *stringDecoder) decodeStreamByte(s *Stream) ([]byte, error) {
+	for {
+		switch s.char() {
+		case ' ', '\n', '\t', '\r':
+			s.cursor++
+			continue
+		case '[':
+			return nil, d.errUnmarshalType("array", s.totalOffset())
+		case '{':
+			return nil, d.errUnmarshalType("object", s.totalOffset())
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return nil, d.errUnmarshalType("number", s.totalOffset())
+		case '"':
+			return stringBytes(s)
+		case 'n':
+			if err := nullBytes(s); err != nil {
+				return nil, err
+			}
+			return nil, nil
+		case nul:
+			if s.read() {
+				continue
+			}
+		}
+		break
+	}
+	return nil, errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset())
+}
+
+func (d *stringDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) {
+	for {
+		switch buf[cursor] {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+		case '[':
+			return nil, 0, d.errUnmarshalType("array", cursor)
+		case '{':
+			return nil, 0, d.errUnmarshalType("object", cursor)
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return nil, 0, d.errUnmarshalType("number", cursor)
+		case '"':
+			cursor++
+			start := cursor
+			b := (*sliceHeader)(unsafe.Pointer(&buf)).data
+			escaped := 0
+			for {
+				switch char(b, cursor) {
+				case '\\':
+					escaped++
+					cursor++
+					switch char(b, cursor) {
+					case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
+						cursor++
+					case 'u':
+						buflen := int64(len(buf))
+						if cursor+5 >= buflen {
+							return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor)
+						}
+						for i := int64(1); i <= 4; i++ {
+							c := char(b, cursor+i)
+							if !(('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')) {
+								return nil, 0, errors.ErrSyntax(fmt.Sprintf("json: invalid character %c in \\u hexadecimal character escape", c), cursor+i)
+							}
+						}
+						cursor += 5
+					default:
+						return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor)
+					}
+					continue
+				case '"':
+					literal := buf[start:cursor]
+					if escaped > 0 {
+						literal = literal[:unescapeString(literal)]
+					}
+					cursor++
+					return literal, cursor, nil
+				case nul:
+					return nil, 0, errors.ErrUnexpectedEndOfJSON("string", cursor)
+				}
+				cursor++
+			}
+		case 'n':
+			if err := validateNull(buf, cursor); err != nil {
+				return nil, 0, err
+			}
+			cursor += 4
+			return nil, cursor, nil
+		default:
+			return nil, 0, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor)
+		}
+	}
+}
+
+var unescapeMap = [256]byte{
+	'"':  '"',
+	'\\': '\\',
+	'/':  '/',
+	'b':  '\b',
+	'f':  '\f',
+	'n':  '\n',
+	'r':  '\r',
+	't':  '\t',
+}
+
+func unsafeAdd(ptr unsafe.Pointer, offset int) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(ptr) + uintptr(offset))
+}
+
+func unescapeString(buf []byte) int {
+	p := (*sliceHeader)(unsafe.Pointer(&buf)).data
+	end := unsafeAdd(p, len(buf))
+	src := unsafeAdd(p, bytes.IndexByte(buf, '\\'))
+	dst := src
+	for src != end {
+		c := char(src, 0)
+		if c == '\\' {
+			escapeChar := char(src, 1)
+			if escapeChar != 'u' {
+				*(*byte)(dst) = unescapeMap[escapeChar]
+				src = unsafeAdd(src, 2)
+				dst = unsafeAdd(dst, 1)
+			} else {
+				v1 := hexToInt[char(src, 2)]
+				v2 := hexToInt[char(src, 3)]
+				v3 := hexToInt[char(src, 4)]
+				v4 := hexToInt[char(src, 5)]
+				code := rune((v1 << 12) | (v2 << 8) | (v3 << 4) | v4)
+				if code >= 0xd800 && code < 0xdc00 && uintptr(unsafeAdd(src, 11)) < uintptr(end) {
+					if char(src, 6) == '\\' && char(src, 7) == 'u' {
+						v1 := hexToInt[char(src, 8)]
+						v2 := hexToInt[char(src, 9)]
+						v3 := hexToInt[char(src, 10)]
+						v4 := hexToInt[char(src, 11)]
+						lo := rune((v1 << 12) | (v2 << 8) | (v3 << 4) | v4)
+						if lo >= 0xdc00 && lo < 0xe000 {
+							code = (code-0xd800)<<10 | (lo - 0xdc00) + 0x10000
+							src = unsafeAdd(src, 6)
+						}
+					}
+				}
+				var b [utf8.UTFMax]byte
+				n := utf8.EncodeRune(b[:], code)
+				switch n {
+				case 4:
+					*(*byte)(unsafeAdd(dst, 3)) = b[3]
+					fallthrough
+				case 3:
+					*(*byte)(unsafeAdd(dst, 2)) = b[2]
+					fallthrough
+				case 2:
+					*(*byte)(unsafeAdd(dst, 1)) = b[1]
+					fallthrough
+				case 1:
+					*(*byte)(unsafeAdd(dst, 0)) = b[0]
+				}
+				src = unsafeAdd(src, 6)
+				dst = unsafeAdd(dst, n)
+			}
+		} else {
+			*(*byte)(dst) = c
+			src = unsafeAdd(src, 1)
+			dst = unsafeAdd(dst, 1)
+		}
+	}
+	return int(uintptr(dst) - uintptr(p))
+}
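
Note on the vendored string decoder above: JSON escapes code points outside the Basic Multilingual Plane as UTF-16 surrogate pairs (for example \uD83D\uDE00 for U+1F600), and decodeUnicodeRune / unescapeString fold the high and low halves back into a single rune. Below is a minimal standalone sketch of that folding using only the standard library; the function name and inputs are illustrative, not part of the vendored API.

package main

import (
	"fmt"
	"strconv"
	"unicode/utf16"
)

// foldSurrogates mirrors the idea used above: parse the four hex digits after
// each \u, and combine a high surrogate (0xD800-0xDBFF) with the following
// low surrogate (0xDC00-0xDFFF) into one rune.
func foldSurrogates(hi, lo string) rune {
	h, _ := strconv.ParseUint(hi, 16, 32) // e.g. "d83d"
	l, _ := strconv.ParseUint(lo, 16, 32) // e.g. "de00"
	return utf16.DecodeRune(rune(h), rune(l))
}

func main() {
	// "\ud83d\ude00" is the JSON escape sequence for U+1F600.
	fmt.Printf("%c\n", foldSurrogates("d83d", "de00"))
}
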
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/struct.go b/vendor/github.com/goccy/go-json/internal/decoder/struct.go
new file mode 100644
index 0000000000..313da153b3
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/struct.go
@@ -0,0 +1,845 @@
+package decoder
+
+import (
+	"fmt"
+	"math"
+	"math/bits"
+	"sort"
+	"strings"
+	"unicode"
+	"unicode/utf16"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+)
+
+type structFieldSet struct {
+	dec         Decoder
+	offset      uintptr
+	isTaggedKey bool
+	fieldIdx    int
+	key         string
+	keyLen      int64
+	err         error
+}
+
+type structDecoder struct {
+	fieldMap           map[string]*structFieldSet
+	fieldUniqueNameNum int
+	stringDecoder      *stringDecoder
+	structName         string
+	fieldName          string
+	isTriedOptimize    bool
+	keyBitmapUint8     [][256]uint8
+	keyBitmapUint16    [][256]uint16
+	sortedFieldSets    []*structFieldSet
+	keyDecoder         func(*structDecoder, []byte, int64) (int64, *structFieldSet, error)
+	keyStreamDecoder   func(*structDecoder, *Stream) (*structFieldSet, string, error)
+}
+
+var (
+	largeToSmallTable [256]byte
+)
+
+func init() {
+	for i := 0; i < 256; i++ {
+		c := i
+		if 'A' <= c && c <= 'Z' {
+			c += 'a' - 'A'
+		}
+		largeToSmallTable[i] = byte(c)
+	}
+}
+
+func toASCIILower(s string) string {
+	b := []byte(s)
+	for i := range b {
+		b[i] = largeToSmallTable[b[i]]
+	}
+	return string(b)
+}
+
+func newStructDecoder(structName, fieldName string, fieldMap map[string]*structFieldSet) *structDecoder {
+	return &structDecoder{
+		fieldMap:         fieldMap,
+		stringDecoder:    newStringDecoder(structName, fieldName),
+		structName:       structName,
+		fieldName:        fieldName,
+		keyDecoder:       decodeKey,
+		keyStreamDecoder: decodeKeyStream,
+	}
+}
+
+const (
+	allowOptimizeMaxKeyLen   = 64
+	allowOptimizeMaxFieldLen = 16
+)
+
+func (d *structDecoder) tryOptimize() {
+	fieldUniqueNameMap := map[string]int{}
+	fieldIdx := -1
+	for k, v := range d.fieldMap {
+		lower := strings.ToLower(k)
+		idx, exists := fieldUniqueNameMap[lower]
+		if exists {
+			v.fieldIdx = idx
+		} else {
+			fieldIdx++
+			v.fieldIdx = fieldIdx
+		}
+		fieldUniqueNameMap[lower] = fieldIdx
+	}
+	d.fieldUniqueNameNum = len(fieldUniqueNameMap)
+
+	if d.isTriedOptimize {
+		return
+	}
+	fieldMap := map[string]*structFieldSet{}
+	conflicted := map[string]struct{}{}
+	for k, v := range d.fieldMap {
+		key := strings.ToLower(k)
+		if key != k {
+			if key != toASCIILower(k) {
+				d.isTriedOptimize = true
+				return
+			}
+			// the same lower-cased key already exists (e.g. Hello and HELLO share the same lower-case key)
+			if _, exists := conflicted[key]; exists {
+				d.isTriedOptimize = true
+				return
+			}
+			conflicted[key] = struct{}{}
+		}
+		if field, exists := fieldMap[key]; exists {
+			if field != v {
+				d.isTriedOptimize = true
+				return
+			}
+		}
+		fieldMap[key] = v
+	}
+
+	if len(fieldMap) > allowOptimizeMaxFieldLen {
+		d.isTriedOptimize = true
+		return
+	}
+
+	var maxKeyLen int
+	sortedKeys := []string{}
+	for key := range fieldMap {
+		keyLen := len(key)
+		if keyLen > allowOptimizeMaxKeyLen {
+			d.isTriedOptimize = true
+			return
+		}
+		if maxKeyLen < keyLen {
+			maxKeyLen = keyLen
+		}
+		sortedKeys = append(sortedKeys, key)
+	}
+	sort.Strings(sortedKeys)
+
+	// By allocating one slot more than `maxKeyLen`,
+	// we avoid comparing the key index against the bitmap length on every iteration.
+	bitmapLen := maxKeyLen + 1
+	if len(sortedKeys) <= 8 {
+		keyBitmap := make([][256]uint8, bitmapLen)
+		for i, key := range sortedKeys {
+			for j := 0; j < len(key); j++ {
+				c := key[j]
+				keyBitmap[j][c] |= (1 << uint(i))
+			}
+			d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key])
+		}
+		d.keyBitmapUint8 = keyBitmap
+		d.keyDecoder = decodeKeyByBitmapUint8
+		d.keyStreamDecoder = decodeKeyByBitmapUint8Stream
+	} else {
+		keyBitmap := make([][256]uint16, bitmapLen)
+		for i, key := range sortedKeys {
+			for j := 0; j < len(key); j++ {
+				c := key[j]
+				keyBitmap[j][c] |= (1 << uint(i))
+			}
+			d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key])
+		}
+		d.keyBitmapUint16 = keyBitmap
+		d.keyDecoder = decodeKeyByBitmapUint16
+		d.keyStreamDecoder = decodeKeyByBitmapUint16Stream
+	}
+}
+
+// decode from '\uXXXX'
+func decodeKeyCharByUnicodeRune(buf []byte, cursor int64) ([]byte, int64, error) {
+	const defaultOffset = 4
+	const surrogateOffset = 6
+
+	if cursor+defaultOffset >= int64(len(buf)) {
+		return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor)
+	}
+
+	r := unicodeToRune(buf[cursor : cursor+defaultOffset])
+	if utf16.IsSurrogate(r) {
+		cursor += defaultOffset
+		if cursor+surrogateOffset >= int64(len(buf)) || buf[cursor] != '\\' || buf[cursor+1] != 'u' {
+			return []byte(string(unicode.ReplacementChar)), cursor + defaultOffset - 1, nil
+		}
+		cursor += 2
+		r2 := unicodeToRune(buf[cursor : cursor+defaultOffset])
+		if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar {
+			return []byte(string(r)), cursor + defaultOffset - 1, nil
+		}
+	}
+	return []byte(string(r)), cursor + defaultOffset - 1, nil
+}
+
+func decodeKeyCharByEscapedChar(buf []byte, cursor int64) ([]byte, int64, error) {
+	c := buf[cursor]
+	cursor++
+	switch c {
+	case '"':
+		return []byte{'"'}, cursor, nil
+	case '\\':
+		return []byte{'\\'}, cursor, nil
+	case '/':
+		return []byte{'/'}, cursor, nil
+	case 'b':
+		return []byte{'\b'}, cursor, nil
+	case 'f':
+		return []byte{'\f'}, cursor, nil
+	case 'n':
+		return []byte{'\n'}, cursor, nil
+	case 'r':
+		return []byte{'\r'}, cursor, nil
+	case 't':
+		return []byte{'\t'}, cursor, nil
+	case 'u':
+		return decodeKeyCharByUnicodeRune(buf, cursor)
+	}
+	return nil, cursor, nil
+}
+
+func decodeKeyByBitmapUint8(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) {
+	var (
+		curBit uint8 = math.MaxUint8
+	)
+	b := (*sliceHeader)(unsafe.Pointer(&buf)).data
+	for {
+		switch char(b, cursor) {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+		case '"':
+			cursor++
+			c := char(b, cursor)
+			switch c {
+			case '"':
+				cursor++
+				return cursor, nil, nil
+			case nul:
+				return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor)
+			}
+			keyIdx := 0
+			bitmap := d.keyBitmapUint8
+			start := cursor
+			for {
+				c := char(b, cursor)
+				switch c {
+				case '"':
+					fieldSetIndex := bits.TrailingZeros8(curBit)
+					field := d.sortedFieldSets[fieldSetIndex]
+					keyLen := cursor - start
+					cursor++
+					if keyLen < field.keyLen {
+						// early match
+						return cursor, nil, nil
+					}
+					return cursor, field, nil
+				case nul:
+					return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor)
+				case '\\':
+					cursor++
+					chars, nextCursor, err := decodeKeyCharByEscapedChar(buf, cursor)
+					if err != nil {
+						return 0, nil, err
+					}
+					for _, c := range chars {
+						curBit &= bitmap[keyIdx][largeToSmallTable[c]]
+						if curBit == 0 {
+							return decodeKeyNotFound(b, cursor)
+						}
+						keyIdx++
+					}
+					cursor = nextCursor
+				default:
+					curBit &= bitmap[keyIdx][largeToSmallTable[c]]
+					if curBit == 0 {
+						return decodeKeyNotFound(b, cursor)
+					}
+					keyIdx++
+				}
+				cursor++
+			}
+		default:
+			return cursor, nil, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor)
+		}
+	}
+}
+
+func decodeKeyByBitmapUint16(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) {
+	var (
+		curBit uint16 = math.MaxUint16
+	)
+	b := (*sliceHeader)(unsafe.Pointer(&buf)).data
+	for {
+		switch char(b, cursor) {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+		case '"':
+			cursor++
+			c := char(b, cursor)
+			switch c {
+			case '"':
+				cursor++
+				return cursor, nil, nil
+			case nul:
+				return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor)
+			}
+			keyIdx := 0
+			bitmap := d.keyBitmapUint16
+			start := cursor
+			for {
+				c := char(b, cursor)
+				switch c {
+				case '"':
+					fieldSetIndex := bits.TrailingZeros16(curBit)
+					field := d.sortedFieldSets[fieldSetIndex]
+					keyLen := cursor - start
+					cursor++
+					if keyLen < field.keyLen {
+						// early match
+						return cursor, nil, nil
+					}
+					return cursor, field, nil
+				case nul:
+					return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor)
+				case '\\':
+					cursor++
+					chars, nextCursor, err := decodeKeyCharByEscapedChar(buf, cursor)
+					if err != nil {
+						return 0, nil, err
+					}
+					for _, c := range chars {
+						curBit &= bitmap[keyIdx][largeToSmallTable[c]]
+						if curBit == 0 {
+							return decodeKeyNotFound(b, cursor)
+						}
+						keyIdx++
+					}
+					cursor = nextCursor
+				default:
+					curBit &= bitmap[keyIdx][largeToSmallTable[c]]
+					if curBit == 0 {
+						return decodeKeyNotFound(b, cursor)
+					}
+					keyIdx++
+				}
+				cursor++
+			}
+		default:
+			return cursor, nil, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor)
+		}
+	}
+}
+
+func decodeKeyNotFound(b unsafe.Pointer, cursor int64) (int64, *structFieldSet, error) {
+	for {
+		cursor++
+		switch char(b, cursor) {
+		case '"':
+			cursor++
+			return cursor, nil, nil
+		case '\\':
+			cursor++
+			if char(b, cursor) == nul {
+				return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor)
+			}
+		case nul:
+			return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor)
+		}
+	}
+}
+
+func decodeKey(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) {
+	key, c, err := d.stringDecoder.decodeByte(buf, cursor)
+	if err != nil {
+		return 0, nil, err
+	}
+	cursor = c
+	k := *(*string)(unsafe.Pointer(&key))
+	field, exists := d.fieldMap[k]
+	if !exists {
+		return cursor, nil, nil
+	}
+	return cursor, field, nil
+}
+
+func decodeKeyByBitmapUint8Stream(d *structDecoder, s *Stream) (*structFieldSet, string, error) {
+	var (
+		curBit uint8 = math.MaxUint8
+	)
+	_, cursor, p := s.stat()
+	for {
+		switch char(p, cursor) {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+		case nul:
+			s.cursor = cursor
+			if s.read() {
+				_, cursor, p = s.stat()
+				continue
+			}
+			return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset())
+		case '"':
+			cursor++
+		FIRST_CHAR:
+			start := cursor
+			switch char(p, cursor) {
+			case '"':
+				cursor++
+				s.cursor = cursor
+				return nil, "", nil
+			case nul:
+				s.cursor = cursor
+				if s.read() {
+					_, cursor, p = s.stat()
+					goto FIRST_CHAR
+				}
+				return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset())
+			}
+			keyIdx := 0
+			bitmap := d.keyBitmapUint8
+			for {
+				c := char(p, cursor)
+				switch c {
+				case '"':
+					fieldSetIndex := bits.TrailingZeros8(curBit)
+					field := d.sortedFieldSets[fieldSetIndex]
+					keyLen := cursor - start
+					cursor++
+					s.cursor = cursor
+					if keyLen < field.keyLen {
+						// early match
+						return nil, field.key, nil
+					}
+					return field, field.key, nil
+				case nul:
+					s.cursor = cursor
+					if s.read() {
+						_, cursor, p = s.stat()
+						continue
+					}
+					return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset())
+				case '\\':
+					s.cursor = cursor + 1 // skip '\' char
+					chars, err := decodeKeyCharByEscapeCharStream(s)
+					if err != nil {
+						return nil, "", err
+					}
+					cursor = s.cursor
+					for _, c := range chars {
+						curBit &= bitmap[keyIdx][largeToSmallTable[c]]
+						if curBit == 0 {
+							s.cursor = cursor
+							return decodeKeyNotFoundStream(s, start)
+						}
+						keyIdx++
+					}
+				default:
+					curBit &= bitmap[keyIdx][largeToSmallTable[c]]
+					if curBit == 0 {
+						s.cursor = cursor
+						return decodeKeyNotFoundStream(s, start)
+					}
+					keyIdx++
+				}
+				cursor++
+			}
+		default:
+			return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset())
+		}
+	}
+}
+
+func decodeKeyByBitmapUint16Stream(d *structDecoder, s *Stream) (*structFieldSet, string, error) {
+	var (
+		curBit uint16 = math.MaxUint16
+	)
+	_, cursor, p := s.stat()
+	for {
+		switch char(p, cursor) {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+		case nul:
+			s.cursor = cursor
+			if s.read() {
+				_, cursor, p = s.stat()
+				continue
+			}
+			return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset())
+		case '"':
+			cursor++
+		FIRST_CHAR:
+			start := cursor
+			switch char(p, cursor) {
+			case '"':
+				cursor++
+				s.cursor = cursor
+				return nil, "", nil
+			case nul:
+				s.cursor = cursor
+				if s.read() {
+					_, cursor, p = s.stat()
+					goto FIRST_CHAR
+				}
+				return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset())
+			}
+			keyIdx := 0
+			bitmap := d.keyBitmapUint16
+			for {
+				c := char(p, cursor)
+				switch c {
+				case '"':
+					fieldSetIndex := bits.TrailingZeros16(curBit)
+					field := d.sortedFieldSets[fieldSetIndex]
+					keyLen := cursor - start
+					cursor++
+					s.cursor = cursor
+					if keyLen < field.keyLen {
+						// early match
+						return nil, field.key, nil
+					}
+					return field, field.key, nil
+				case nul:
+					s.cursor = cursor
+					if s.read() {
+						_, cursor, p = s.stat()
+						continue
+					}
+					return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset())
+				case '\\':
+					s.cursor = cursor + 1 // skip '\' char
+					chars, err := decodeKeyCharByEscapeCharStream(s)
+					if err != nil {
+						return nil, "", err
+					}
+					cursor = s.cursor
+					for _, c := range chars {
+						curBit &= bitmap[keyIdx][largeToSmallTable[c]]
+						if curBit == 0 {
+							s.cursor = cursor
+							return decodeKeyNotFoundStream(s, start)
+						}
+						keyIdx++
+					}
+				default:
+					curBit &= bitmap[keyIdx][largeToSmallTable[c]]
+					if curBit == 0 {
+						s.cursor = cursor
+						return decodeKeyNotFoundStream(s, start)
+					}
+					keyIdx++
+				}
+				cursor++
+			}
+		default:
+			return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset())
+		}
+	}
+}
+
+// decode from '\uXXXX'
+func decodeKeyCharByUnicodeRuneStream(s *Stream) ([]byte, error) {
+	const defaultOffset = 4
+	const surrogateOffset = 6
+
+	if s.cursor+defaultOffset >= s.length {
+		if !s.read() {
+			return nil, errors.ErrInvalidCharacter(s.char(), "escaped unicode char", s.totalOffset())
+		}
+	}
+
+	r := unicodeToRune(s.buf[s.cursor : s.cursor+defaultOffset])
+	if utf16.IsSurrogate(r) {
+		s.cursor += defaultOffset
+		if s.cursor+surrogateOffset >= s.length {
+			s.read()
+		}
+		if s.cursor+surrogateOffset >= s.length || s.buf[s.cursor] != '\\' || s.buf[s.cursor+1] != 'u' {
+			s.cursor += defaultOffset - 1
+			return []byte(string(unicode.ReplacementChar)), nil
+		}
+		r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset])
+		if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar {
+			s.cursor += defaultOffset - 1
+			return []byte(string(r)), nil
+		}
+	}
+	s.cursor += defaultOffset - 1
+	return []byte(string(r)), nil
+}
+
+func decodeKeyCharByEscapeCharStream(s *Stream) ([]byte, error) {
+	c := s.buf[s.cursor]
+	s.cursor++
+RETRY:
+	switch c {
+	case '"':
+		return []byte{'"'}, nil
+	case '\\':
+		return []byte{'\\'}, nil
+	case '/':
+		return []byte{'/'}, nil
+	case 'b':
+		return []byte{'\b'}, nil
+	case 'f':
+		return []byte{'\f'}, nil
+	case 'n':
+		return []byte{'\n'}, nil
+	case 'r':
+		return []byte{'\r'}, nil
+	case 't':
+		return []byte{'\t'}, nil
+	case 'u':
+		return decodeKeyCharByUnicodeRuneStream(s)
+	case nul:
+		if !s.read() {
+			return nil, errors.ErrInvalidCharacter(s.char(), "escaped char", s.totalOffset())
+		}
+		goto RETRY
+	default:
+		return nil, errors.ErrUnexpectedEndOfJSON("struct field", s.totalOffset())
+	}
+}
+
+func decodeKeyNotFoundStream(s *Stream, start int64) (*structFieldSet, string, error) {
+	buf, cursor, p := s.stat()
+	for {
+		cursor++
+		switch char(p, cursor) {
+		case '"':
+			b := buf[start:cursor]
+			key := *(*string)(unsafe.Pointer(&b))
+			cursor++
+			s.cursor = cursor
+			return nil, key, nil
+		case '\\':
+			cursor++
+			if char(p, cursor) == nul {
+				s.cursor = cursor
+				if !s.read() {
+					return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset())
+				}
+				buf, cursor, p = s.statForRetry()
+			}
+		case nul:
+			s.cursor = cursor
+			if !s.read() {
+				return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset())
+			}
+			buf, cursor, p = s.statForRetry()
+		}
+	}
+}
+
+func decodeKeyStream(d *structDecoder, s *Stream) (*structFieldSet, string, error) {
+	key, err := d.stringDecoder.decodeStreamByte(s)
+	if err != nil {
+		return nil, "", err
+	}
+	k := *(*string)(unsafe.Pointer(&key))
+	return d.fieldMap[k], k, nil
+}
+
+func (d *structDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	depth++
+	if depth > maxDecodeNestingDepth {
+		return errors.ErrExceededMaxDepth(s.char(), s.cursor)
+	}
+
+	c := s.skipWhiteSpace()
+	switch c {
+	case 'n':
+		if err := nullBytes(s); err != nil {
+			return err
+		}
+		return nil
+	default:
+		if s.char() != '{' {
+			return errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset())
+		}
+	}
+	s.cursor++
+	if s.skipWhiteSpace() == '}' {
+		s.cursor++
+		return nil
+	}
+	var (
+		seenFields   map[int]struct{}
+		seenFieldNum int
+	)
+	firstWin := (s.Option.Flags & FirstWinOption) != 0
+	if firstWin {
+		seenFields = make(map[int]struct{}, d.fieldUniqueNameNum)
+	}
+	for {
+		s.reset()
+		field, key, err := d.keyStreamDecoder(d, s)
+		if err != nil {
+			return err
+		}
+		if s.skipWhiteSpace() != ':' {
+			return errors.ErrExpected("colon after object key", s.totalOffset())
+		}
+		s.cursor++
+		if field != nil {
+			if field.err != nil {
+				return field.err
+			}
+			if firstWin {
+				if _, exists := seenFields[field.fieldIdx]; exists {
+					if err := s.skipValue(depth); err != nil {
+						return err
+					}
+				} else {
+					if err := field.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+field.offset)); err != nil {
+						return err
+					}
+					seenFieldNum++
+					if d.fieldUniqueNameNum <= seenFieldNum {
+						return s.skipObject(depth)
+					}
+					seenFields[field.fieldIdx] = struct{}{}
+				}
+			} else {
+				if err := field.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+field.offset)); err != nil {
+					return err
+				}
+			}
+		} else if s.DisallowUnknownFields {
+			return fmt.Errorf("json: unknown field %q", key)
+		} else {
+			if err := s.skipValue(depth); err != nil {
+				return err
+			}
+		}
+		c := s.skipWhiteSpace()
+		if c == '}' {
+			s.cursor++
+			return nil
+		}
+		if c != ',' {
+			return errors.ErrExpected("comma after object element", s.totalOffset())
+		}
+		s.cursor++
+	}
+}
+
+func (d *structDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	buf := ctx.Buf
+	depth++
+	if depth > maxDecodeNestingDepth {
+		return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
+	}
+	buflen := int64(len(buf))
+	cursor = skipWhiteSpace(buf, cursor)
+	b := (*sliceHeader)(unsafe.Pointer(&buf)).data
+	switch char(b, cursor) {
+	case 'n':
+		if err := validateNull(buf, cursor); err != nil {
+			return 0, err
+		}
+		cursor += 4
+		return cursor, nil
+	case '{':
+	default:
+		return 0, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor)
+	}
+	cursor++
+	cursor = skipWhiteSpace(buf, cursor)
+	if buf[cursor] == '}' {
+		cursor++
+		return cursor, nil
+	}
+	var (
+		seenFields   map[int]struct{}
+		seenFieldNum int
+	)
+	firstWin := (ctx.Option.Flags & FirstWinOption) != 0
+	if firstWin {
+		seenFields = make(map[int]struct{}, d.fieldUniqueNameNum)
+	}
+	for {
+		c, field, err := d.keyDecoder(d, buf, cursor)
+		if err != nil {
+			return 0, err
+		}
+		cursor = skipWhiteSpace(buf, c)
+		if char(b, cursor) != ':' {
+			return 0, errors.ErrExpected("colon after object key", cursor)
+		}
+		cursor++
+		if cursor >= buflen {
+			return 0, errors.ErrExpected("object value after colon", cursor)
+		}
+		if field != nil {
+			if field.err != nil {
+				return 0, field.err
+			}
+			if firstWin {
+				if _, exists := seenFields[field.fieldIdx]; exists {
+					c, err := skipValue(buf, cursor, depth)
+					if err != nil {
+						return 0, err
+					}
+					cursor = c
+				} else {
+					c, err := field.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+field.offset))
+					if err != nil {
+						return 0, err
+					}
+					cursor = c
+					seenFieldNum++
+					if d.fieldUniqueNameNum <= seenFieldNum {
+						return skipObject(buf, cursor, depth)
+					}
+					seenFields[field.fieldIdx] = struct{}{}
+				}
+			} else {
+				c, err := field.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+field.offset))
+				if err != nil {
+					return 0, err
+				}
+				cursor = c
+			}
+		} else {
+			c, err := skipValue(buf, cursor, depth)
+			if err != nil {
+				return 0, err
+			}
+			cursor = c
+		}
+		cursor = skipWhiteSpace(buf, cursor)
+		if char(b, cursor) == '}' {
+			cursor++
+			return cursor, nil
+		}
+		if char(b, cursor) != ',' {
+			return 0, errors.ErrExpected("comma after object element", cursor)
+		}
+		cursor++
+	}
+}
+
+func (d *structDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return nil, 0, fmt.Errorf("json: struct decoder does not support decode path")
+}
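
For orientation, the decodeKeyByBitmapUint8/Uint16 fast paths above match struct keys without allocating: each candidate field gets one bit, bitmap[position][byte] records which candidates have that byte at that position, and ANDing the bits while scanning leaves at most a few survivors to verify. A simplified sketch of the same idea follows; the helper and candidate names are hypothetical, not the vendored implementation, and the vendored decoder additionally lower-cases bytes for case-insensitive matching.

package main

import (
	"fmt"
	"math/bits"
)

// matchKey returns the index of the candidate that equals key, or -1.
// Candidates are limited to 8 so a single uint8 carries one bit per
// candidate, mirroring the keyBitmapUint8 fast path above.
func matchKey(candidates []string, key string) int {
	maxLen := 0
	for _, c := range candidates {
		if len(c) > maxLen {
			maxLen = len(c)
		}
	}
	if len(key) > maxLen {
		return -1
	}
	bitmap := make([][256]uint8, maxLen+1)
	for i, c := range candidates {
		for j := 0; j < len(c); j++ {
			bitmap[j][c[j]] |= 1 << uint(i)
		}
	}
	cur := uint8(0xFF)
	for i := 0; i < len(key); i++ {
		cur &= bitmap[i][key[i]]
		if cur == 0 {
			return -1 // no candidate has this byte at this position
		}
	}
	// Verify the surviving candidates; a longer candidate may share only a prefix.
	for cur != 0 {
		idx := bits.TrailingZeros8(cur)
		if idx < len(candidates) && candidates[idx] == key {
			return idx
		}
		cur &^= 1 << uint(idx)
	}
	return -1
}

func main() {
	fmt.Println(matchKey([]string{"id", "name", "namespace"}, "name")) // 1
}
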
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/type.go b/vendor/github.com/goccy/go-json/internal/decoder/type.go
new file mode 100644
index 0000000000..beaf3ab866
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/type.go
@@ -0,0 +1,30 @@
+package decoder
+
+import (
+	"context"
+	"encoding"
+	"encoding/json"
+	"reflect"
+	"unsafe"
+)
+
+type Decoder interface {
+	Decode(*RuntimeContext, int64, int64, unsafe.Pointer) (int64, error)
+	DecodePath(*RuntimeContext, int64, int64) ([][]byte, int64, error)
+	DecodeStream(*Stream, int64, unsafe.Pointer) error
+}
+
+const (
+	nul                   = '\000'
+	maxDecodeNestingDepth = 10000
+)
+
+type unmarshalerContext interface {
+	UnmarshalJSON(context.Context, []byte) error
+}
+
+var (
+	unmarshalJSONType        = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
+	unmarshalJSONContextType = reflect.TypeOf((*unmarshalerContext)(nil)).Elem()
+	unmarshalTextType        = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/uint.go b/vendor/github.com/goccy/go-json/internal/decoder/uint.go
new file mode 100644
index 0000000000..4131731b8e
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/uint.go
@@ -0,0 +1,194 @@
+package decoder
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type uintDecoder struct {
+	typ        *runtime.Type
+	kind       reflect.Kind
+	op         func(unsafe.Pointer, uint64)
+	structName string
+	fieldName  string
+}
+
+func newUintDecoder(typ *runtime.Type, structName, fieldName string, op func(unsafe.Pointer, uint64)) *uintDecoder {
+	return &uintDecoder{
+		typ:        typ,
+		kind:       typ.Kind(),
+		op:         op,
+		structName: structName,
+		fieldName:  fieldName,
+	}
+}
+
+func (d *uintDecoder) typeError(buf []byte, offset int64) *errors.UnmarshalTypeError {
+	return &errors.UnmarshalTypeError{
+		Value:  fmt.Sprintf("number %s", string(buf)),
+		Type:   runtime.RType2Type(d.typ),
+		Offset: offset,
+	}
+}
+
+var (
+	pow10u64 = [...]uint64{
+		1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
+		1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+	}
+	pow10u64Len = len(pow10u64)
+)
+
+func (d *uintDecoder) parseUint(b []byte) (uint64, error) {
+	maxDigit := len(b)
+	if maxDigit > pow10u64Len {
+		return 0, fmt.Errorf("invalid length of number")
+	}
+	sum := uint64(0)
+	for i := 0; i < maxDigit; i++ {
+		c := uint64(b[i]) - 48
+		digitValue := pow10u64[maxDigit-i-1]
+		sum += c * digitValue
+	}
+	return sum, nil
+}
+
+func (d *uintDecoder) decodeStreamByte(s *Stream) ([]byte, error) {
+	for {
+		switch s.char() {
+		case ' ', '\n', '\t', '\r':
+			s.cursor++
+			continue
+		case '0':
+			s.cursor++
+			return numZeroBuf, nil
+		case '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			start := s.cursor
+			for {
+				s.cursor++
+				if numTable[s.char()] {
+					continue
+				} else if s.char() == nul {
+					if s.read() {
+				s.cursor-- // back up and retry the current character
+						continue
+					}
+				}
+				break
+			}
+			num := s.buf[start:s.cursor]
+			return num, nil
+		case 'n':
+			if err := nullBytes(s); err != nil {
+				return nil, err
+			}
+			return nil, nil
+		case nul:
+			if s.read() {
+				continue
+			}
+		default:
+			return nil, d.typeError([]byte{s.char()}, s.totalOffset())
+		}
+		break
+	}
+	return nil, errors.ErrUnexpectedEndOfJSON("number(unsigned integer)", s.totalOffset())
+}
+
+func (d *uintDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) {
+	for {
+		switch buf[cursor] {
+		case ' ', '\n', '\t', '\r':
+			cursor++
+			continue
+		case '0':
+			cursor++
+			return numZeroBuf, cursor, nil
+		case '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			start := cursor
+			cursor++
+			for numTable[buf[cursor]] {
+				cursor++
+			}
+			num := buf[start:cursor]
+			return num, cursor, nil
+		case 'n':
+			if err := validateNull(buf, cursor); err != nil {
+				return nil, 0, err
+			}
+			cursor += 4
+			return nil, cursor, nil
+		default:
+			return nil, 0, d.typeError([]byte{buf[cursor]}, cursor)
+		}
+	}
+}
+
+func (d *uintDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	bytes, err := d.decodeStreamByte(s)
+	if err != nil {
+		return err
+	}
+	if bytes == nil {
+		return nil
+	}
+	u64, err := d.parseUint(bytes)
+	if err != nil {
+		return d.typeError(bytes, s.totalOffset())
+	}
+	switch d.kind {
+	case reflect.Uint8:
+		if (1 << 8) <= u64 {
+			return d.typeError(bytes, s.totalOffset())
+		}
+	case reflect.Uint16:
+		if (1 << 16) <= u64 {
+			return d.typeError(bytes, s.totalOffset())
+		}
+	case reflect.Uint32:
+		if (1 << 32) <= u64 {
+			return d.typeError(bytes, s.totalOffset())
+		}
+	}
+	d.op(p, u64)
+	return nil
+}
+
+func (d *uintDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	bytes, c, err := d.decodeByte(ctx.Buf, cursor)
+	if err != nil {
+		return 0, err
+	}
+	if bytes == nil {
+		return c, nil
+	}
+	cursor = c
+	u64, err := d.parseUint(bytes)
+	if err != nil {
+		return 0, d.typeError(bytes, cursor)
+	}
+	switch d.kind {
+	case reflect.Uint8:
+		if (1 << 8) <= u64 {
+			return 0, d.typeError(bytes, cursor)
+		}
+	case reflect.Uint16:
+		if (1 << 16) <= u64 {
+			return 0, d.typeError(bytes, cursor)
+		}
+	case reflect.Uint32:
+		if (1 << 32) <= u64 {
+			return 0, d.typeError(bytes, cursor)
+		}
+	}
+	d.op(p, u64)
+	return cursor, nil
+}
+
+func (d *uintDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return nil, 0, fmt.Errorf("json: uint decoder does not support decode path")
+}
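
parseUint above skips strconv and instead multiplies each ASCII digit by a power of ten taken from a lookup table; inputs longer than the table are rejected up front rather than overflow-checked. A self-contained sketch of that approach follows (the example value is arbitrary).

package main

import "fmt"

var pow10 = [...]uint64{
	1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
	1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
}

// parseUint sums digit * 10^(position from the right); inputs longer than
// the table are rejected instead of being overflow-checked.
func parseUint(b []byte) (uint64, error) {
	if len(b) == 0 || len(b) > len(pow10) {
		return 0, fmt.Errorf("invalid length of number")
	}
	var sum uint64
	for i, c := range b {
		sum += uint64(c-'0') * pow10[len(b)-i-1]
	}
	return sum, nil
}

func main() {
	n, _ := parseUint([]byte("12345"))
	fmt.Println(n) // 12345
}
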
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go
new file mode 100644
index 0000000000..4cd6dbd573
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go
@@ -0,0 +1,104 @@
+package decoder
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type unmarshalJSONDecoder struct {
+	typ        *runtime.Type
+	structName string
+	fieldName  string
+}
+
+func newUnmarshalJSONDecoder(typ *runtime.Type, structName, fieldName string) *unmarshalJSONDecoder {
+	return &unmarshalJSONDecoder{
+		typ:        typ,
+		structName: structName,
+		fieldName:  fieldName,
+	}
+}
+
+func (d *unmarshalJSONDecoder) annotateError(cursor int64, err error) {
+	switch e := err.(type) {
+	case *errors.UnmarshalTypeError:
+		e.Struct = d.structName
+		e.Field = d.fieldName
+	case *errors.SyntaxError:
+		e.Offset = cursor
+	}
+}
+
+func (d *unmarshalJSONDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	s.skipWhiteSpace()
+	start := s.cursor
+	if err := s.skipValue(depth); err != nil {
+		return err
+	}
+	src := s.buf[start:s.cursor]
+	dst := make([]byte, len(src))
+	copy(dst, src)
+
+	v := *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: d.typ,
+		ptr: p,
+	}))
+	switch v := v.(type) {
+	case unmarshalerContext:
+		var ctx context.Context
+		if (s.Option.Flags & ContextOption) != 0 {
+			ctx = s.Option.Context
+		} else {
+			ctx = context.Background()
+		}
+		if err := v.UnmarshalJSON(ctx, dst); err != nil {
+			d.annotateError(s.cursor, err)
+			return err
+		}
+	case json.Unmarshaler:
+		if err := v.UnmarshalJSON(dst); err != nil {
+			d.annotateError(s.cursor, err)
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *unmarshalJSONDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	buf := ctx.Buf
+	cursor = skipWhiteSpace(buf, cursor)
+	start := cursor
+	end, err := skipValue(buf, cursor, depth)
+	if err != nil {
+		return 0, err
+	}
+	src := buf[start:end]
+	dst := make([]byte, len(src))
+	copy(dst, src)
+
+	v := *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: d.typ,
+		ptr: p,
+	}))
+	if (ctx.Option.Flags & ContextOption) != 0 {
+		if err := v.(unmarshalerContext).UnmarshalJSON(ctx.Option.Context, dst); err != nil {
+			d.annotateError(cursor, err)
+			return 0, err
+		}
+	} else {
+		if err := v.(json.Unmarshaler).UnmarshalJSON(dst); err != nil {
+			d.annotateError(cursor, err)
+			return 0, err
+		}
+	}
+	return end, nil
+}
+
+func (d *unmarshalJSONDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return nil, 0, fmt.Errorf("json: unmarshal json decoder does not support decode path")
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go
new file mode 100644
index 0000000000..6d37993f07
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go
@@ -0,0 +1,285 @@
+package decoder
+
+import (
+	"bytes"
+	"encoding"
+	"fmt"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type unmarshalTextDecoder struct {
+	typ        *runtime.Type
+	structName string
+	fieldName  string
+}
+
+func newUnmarshalTextDecoder(typ *runtime.Type, structName, fieldName string) *unmarshalTextDecoder {
+	return &unmarshalTextDecoder{
+		typ:        typ,
+		structName: structName,
+		fieldName:  fieldName,
+	}
+}
+
+func (d *unmarshalTextDecoder) annotateError(cursor int64, err error) {
+	switch e := err.(type) {
+	case *errors.UnmarshalTypeError:
+		e.Struct = d.structName
+		e.Field = d.fieldName
+	case *errors.SyntaxError:
+		e.Offset = cursor
+	}
+}
+
+var (
+	nullbytes = []byte(`null`)
+)
+
+func (d *unmarshalTextDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	s.skipWhiteSpace()
+	start := s.cursor
+	if err := s.skipValue(depth); err != nil {
+		return err
+	}
+	src := s.buf[start:s.cursor]
+	if len(src) > 0 {
+		switch src[0] {
+		case '[':
+			return &errors.UnmarshalTypeError{
+				Value:  "array",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: s.totalOffset(),
+			}
+		case '{':
+			return &errors.UnmarshalTypeError{
+				Value:  "object",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: s.totalOffset(),
+			}
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return &errors.UnmarshalTypeError{
+				Value:  "number",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: s.totalOffset(),
+			}
+		case 'n':
+			if bytes.Equal(src, nullbytes) {
+				*(*unsafe.Pointer)(p) = nil
+				return nil
+			}
+		}
+	}
+	dst := make([]byte, len(src))
+	copy(dst, src)
+
+	if b, ok := unquoteBytes(dst); ok {
+		dst = b
+	}
+	v := *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: d.typ,
+		ptr: p,
+	}))
+	if err := v.(encoding.TextUnmarshaler).UnmarshalText(dst); err != nil {
+		d.annotateError(s.cursor, err)
+		return err
+	}
+	return nil
+}
+
+func (d *unmarshalTextDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	buf := ctx.Buf
+	cursor = skipWhiteSpace(buf, cursor)
+	start := cursor
+	end, err := skipValue(buf, cursor, depth)
+	if err != nil {
+		return 0, err
+	}
+	src := buf[start:end]
+	if len(src) > 0 {
+		switch src[0] {
+		case '[':
+			return 0, &errors.UnmarshalTypeError{
+				Value:  "array",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: start,
+			}
+		case '{':
+			return 0, &errors.UnmarshalTypeError{
+				Value:  "object",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: start,
+			}
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return 0, &errors.UnmarshalTypeError{
+				Value:  "number",
+				Type:   runtime.RType2Type(d.typ),
+				Offset: start,
+			}
+		case 'n':
+			if bytes.Equal(src, nullbytes) {
+				*(*unsafe.Pointer)(p) = nil
+				return end, nil
+			}
+		}
+	}
+
+	if s, ok := unquoteBytes(src); ok {
+		src = s
+	}
+	v := *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: d.typ,
+		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)),
+	}))
+	if err := v.(encoding.TextUnmarshaler).UnmarshalText(src); err != nil {
+		d.annotateError(cursor, err)
+		return 0, err
+	}
+	return end, nil
+}
+
+func (d *unmarshalTextDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return nil, 0, fmt.Errorf("json: unmarshal text decoder does not support decode path")
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+	length := len(s)
+	if length < 2 || s[0] != '"' || s[length-1] != '"' {
+		return
+	}
+	s = s[1 : length-1]
+	length -= 2
+
+	// Check for unusual characters. If there are none,
+	// then no unquoting is needed, so return a slice of the
+	// original bytes.
+	r := 0
+	for r < length {
+		c := s[r]
+		if c == '\\' || c == '"' || c < ' ' {
+			break
+		}
+		if c < utf8.RuneSelf {
+			r++
+			continue
+		}
+		rr, size := utf8.DecodeRune(s[r:])
+		if rr == utf8.RuneError && size == 1 {
+			break
+		}
+		r += size
+	}
+	if r == length {
+		return s, true
+	}
+
+	b := make([]byte, length+2*utf8.UTFMax)
+	w := copy(b, s[0:r])
+	for r < length {
+		// Out of room? Can only happen if s is full of
+		// malformed UTF-8 and we're replacing each
+		// byte with RuneError.
+		if w >= len(b)-2*utf8.UTFMax {
+			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+			copy(nb, b[0:w])
+			b = nb
+		}
+		switch c := s[r]; {
+		case c == '\\':
+			r++
+			if r >= length {
+				return
+			}
+			switch s[r] {
+			default:
+				return
+			case '"', '\\', '/', '\'':
+				b[w] = s[r]
+				r++
+				w++
+			case 'b':
+				b[w] = '\b'
+				r++
+				w++
+			case 'f':
+				b[w] = '\f'
+				r++
+				w++
+			case 'n':
+				b[w] = '\n'
+				r++
+				w++
+			case 'r':
+				b[w] = '\r'
+				r++
+				w++
+			case 't':
+				b[w] = '\t'
+				r++
+				w++
+			case 'u':
+				r--
+				rr := getu4(s[r:])
+				if rr < 0 {
+					return
+				}
+				r += 6
+				if utf16.IsSurrogate(rr) {
+					rr1 := getu4(s[r:])
+					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+						// A valid pair; consume.
+						r += 6
+						w += utf8.EncodeRune(b[w:], dec)
+						break
+					}
+					// Invalid surrogate; fall back to replacement rune.
+					rr = unicode.ReplacementChar
+				}
+				w += utf8.EncodeRune(b[w:], rr)
+			}
+
+		// Quote, control characters are invalid.
+		case c == '"', c < ' ':
+			return
+
+		// ASCII
+		case c < utf8.RuneSelf:
+			b[w] = c
+			r++
+			w++
+
+		// Coerce to well-formed UTF-8.
+		default:
+			rr, size := utf8.DecodeRune(s[r:])
+			r += size
+			w += utf8.EncodeRune(b[w:], rr)
+		}
+	}
+	return b[0:w], true
+}
+
+func getu4(s []byte) rune {
+	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+		return -1
+	}
+	var r rune
+	for _, c := range s[2:6] {
+		switch {
+		case '0' <= c && c <= '9':
+			c = c - '0'
+		case 'a' <= c && c <= 'f':
+			c = c - 'a' + 10
+		case 'A' <= c && c <= 'F':
+			c = c - 'A' + 10
+		default:
+			return -1
+		}
+		r = r*16 + rune(c)
+	}
+	return r
+}
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go b/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go
new file mode 100644
index 0000000000..0c4e2e6eac
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go
@@ -0,0 +1,73 @@
+package decoder
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type wrappedStringDecoder struct {
+	typ           *runtime.Type
+	dec           Decoder
+	stringDecoder *stringDecoder
+	structName    string
+	fieldName     string
+	isPtrType     bool
+}
+
+func newWrappedStringDecoder(typ *runtime.Type, dec Decoder, structName, fieldName string) *wrappedStringDecoder {
+	return &wrappedStringDecoder{
+		typ:           typ,
+		dec:           dec,
+		stringDecoder: newStringDecoder(structName, fieldName),
+		structName:    structName,
+		fieldName:     fieldName,
+		isPtrType:     typ.Kind() == reflect.Ptr,
+	}
+}
+
+func (d *wrappedStringDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
+	bytes, err := d.stringDecoder.decodeStreamByte(s)
+	if err != nil {
+		return err
+	}
+	if bytes == nil {
+		if d.isPtrType {
+			*(*unsafe.Pointer)(p) = nil
+		}
+		return nil
+	}
+	b := make([]byte, len(bytes)+1)
+	copy(b, bytes)
+	if _, err := d.dec.Decode(&RuntimeContext{Buf: b}, 0, depth, p); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (d *wrappedStringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
+	bytes, c, err := d.stringDecoder.decodeByte(ctx.Buf, cursor)
+	if err != nil {
+		return 0, err
+	}
+	if bytes == nil {
+		if d.isPtrType {
+			*(*unsafe.Pointer)(p) = nil
+		}
+		return c, nil
+	}
+	bytes = append(bytes, nul)
+	oldBuf := ctx.Buf
+	ctx.Buf = bytes
+	if _, err := d.dec.Decode(ctx, 0, depth, p); err != nil {
+		return 0, err
+	}
+	ctx.Buf = oldBuf
+	return c, nil
+}
+
+func (d *wrappedStringDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
+	return nil, 0, fmt.Errorf("json: wrapped string decoder does not support decode path")
+}
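
wrappedStringDecoder above backs the `json:",string"` struct-tag option: the value arrives wrapped in a JSON string, and the inner decoder is re-run on the unquoted bytes. The standard encoding/json package exposes the same behaviour, which the usage-level example below illustrates; the type and field names are made up.

package main

import (
	"encoding/json"
	"fmt"
)

// Port is tagged ",string", so the JSON carries it as "8080" rather than 8080;
// the decoder strips the quotes and then parses the number.
type Config struct {
	Port int `json:"port,string"`
}

func main() {
	var c Config
	if err := json.Unmarshal([]byte(`{"port":"8080"}`), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Port) // 8080
}
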
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/code.go b/vendor/github.com/goccy/go-json/internal/encoder/code.go
new file mode 100644
index 0000000000..5b08faefc7
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/code.go
@@ -0,0 +1,1023 @@
+package encoder
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type Code interface {
+	Kind() CodeKind
+	ToOpcode(*compileContext) Opcodes
+	Filter(*FieldQuery) Code
+}
+
+type AnonymousCode interface {
+	ToAnonymousOpcode(*compileContext) Opcodes
+}
+
+type Opcodes []*Opcode
+
+func (o Opcodes) First() *Opcode {
+	if len(o) == 0 {
+		return nil
+	}
+	return o[0]
+}
+
+func (o Opcodes) Last() *Opcode {
+	if len(o) == 0 {
+		return nil
+	}
+	return o[len(o)-1]
+}
+
+func (o Opcodes) Add(codes ...*Opcode) Opcodes {
+	return append(o, codes...)
+}
+
+type CodeKind int
+
+const (
+	CodeKindInterface CodeKind = iota
+	CodeKindPtr
+	CodeKindInt
+	CodeKindUint
+	CodeKindFloat
+	CodeKindString
+	CodeKindBool
+	CodeKindStruct
+	CodeKindMap
+	CodeKindSlice
+	CodeKindArray
+	CodeKindBytes
+	CodeKindMarshalJSON
+	CodeKindMarshalText
+	CodeKindRecursive
+)
+
+type IntCode struct {
+	typ      *runtime.Type
+	bitSize  uint8
+	isString bool
+	isPtr    bool
+}
+
+func (c *IntCode) Kind() CodeKind {
+	return CodeKindInt
+}
+
+func (c *IntCode) ToOpcode(ctx *compileContext) Opcodes {
+	var code *Opcode
+	switch {
+	case c.isPtr:
+		code = newOpCode(ctx, c.typ, OpIntPtr)
+	case c.isString:
+		code = newOpCode(ctx, c.typ, OpIntString)
+	default:
+		code = newOpCode(ctx, c.typ, OpInt)
+	}
+	code.NumBitSize = c.bitSize
+	ctx.incIndex()
+	return Opcodes{code}
+}
+
+func (c *IntCode) Filter(_ *FieldQuery) Code {
+	return c
+}
+
+type UintCode struct {
+	typ      *runtime.Type
+	bitSize  uint8
+	isString bool
+	isPtr    bool
+}
+
+func (c *UintCode) Kind() CodeKind {
+	return CodeKindUint
+}
+
+func (c *UintCode) ToOpcode(ctx *compileContext) Opcodes {
+	var code *Opcode
+	switch {
+	case c.isPtr:
+		code = newOpCode(ctx, c.typ, OpUintPtr)
+	case c.isString:
+		code = newOpCode(ctx, c.typ, OpUintString)
+	default:
+		code = newOpCode(ctx, c.typ, OpUint)
+	}
+	code.NumBitSize = c.bitSize
+	ctx.incIndex()
+	return Opcodes{code}
+}
+
+func (c *UintCode) Filter(_ *FieldQuery) Code {
+	return c
+}
+
+type FloatCode struct {
+	typ     *runtime.Type
+	bitSize uint8
+	isPtr   bool
+}
+
+func (c *FloatCode) Kind() CodeKind {
+	return CodeKindFloat
+}
+
+func (c *FloatCode) ToOpcode(ctx *compileContext) Opcodes {
+	var code *Opcode
+	switch {
+	case c.isPtr:
+		switch c.bitSize {
+		case 32:
+			code = newOpCode(ctx, c.typ, OpFloat32Ptr)
+		default:
+			code = newOpCode(ctx, c.typ, OpFloat64Ptr)
+		}
+	default:
+		switch c.bitSize {
+		case 32:
+			code = newOpCode(ctx, c.typ, OpFloat32)
+		default:
+			code = newOpCode(ctx, c.typ, OpFloat64)
+		}
+	}
+	ctx.incIndex()
+	return Opcodes{code}
+}
+
+func (c *FloatCode) Filter(_ *FieldQuery) Code {
+	return c
+}
+
+type StringCode struct {
+	typ   *runtime.Type
+	isPtr bool
+}
+
+func (c *StringCode) Kind() CodeKind {
+	return CodeKindString
+}
+
+func (c *StringCode) ToOpcode(ctx *compileContext) Opcodes {
+	isJSONNumberType := c.typ == runtime.Type2RType(jsonNumberType)
+	var code *Opcode
+	if c.isPtr {
+		if isJSONNumberType {
+			code = newOpCode(ctx, c.typ, OpNumberPtr)
+		} else {
+			code = newOpCode(ctx, c.typ, OpStringPtr)
+		}
+	} else {
+		if isJSONNumberType {
+			code = newOpCode(ctx, c.typ, OpNumber)
+		} else {
+			code = newOpCode(ctx, c.typ, OpString)
+		}
+	}
+	ctx.incIndex()
+	return Opcodes{code}
+}
+
+func (c *StringCode) Filter(_ *FieldQuery) Code {
+	return c
+}
+
+type BoolCode struct {
+	typ   *runtime.Type
+	isPtr bool
+}
+
+func (c *BoolCode) Kind() CodeKind {
+	return CodeKindBool
+}
+
+func (c *BoolCode) ToOpcode(ctx *compileContext) Opcodes {
+	var code *Opcode
+	switch {
+	case c.isPtr:
+		code = newOpCode(ctx, c.typ, OpBoolPtr)
+	default:
+		code = newOpCode(ctx, c.typ, OpBool)
+	}
+	ctx.incIndex()
+	return Opcodes{code}
+}
+
+func (c *BoolCode) Filter(_ *FieldQuery) Code {
+	return c
+}
+
+type BytesCode struct {
+	typ   *runtime.Type
+	isPtr bool
+}
+
+func (c *BytesCode) Kind() CodeKind {
+	return CodeKindBytes
+}
+
+func (c *BytesCode) ToOpcode(ctx *compileContext) Opcodes {
+	var code *Opcode
+	switch {
+	case c.isPtr:
+		code = newOpCode(ctx, c.typ, OpBytesPtr)
+	default:
+		code = newOpCode(ctx, c.typ, OpBytes)
+	}
+	ctx.incIndex()
+	return Opcodes{code}
+}
+
+func (c *BytesCode) Filter(_ *FieldQuery) Code {
+	return c
+}
+
+type SliceCode struct {
+	typ   *runtime.Type
+	value Code
+}
+
+func (c *SliceCode) Kind() CodeKind {
+	return CodeKindSlice
+}
+
+func (c *SliceCode) ToOpcode(ctx *compileContext) Opcodes {
+	// header => opcode => elem => end
+	//             ^        |
+	//             |________|
+	size := c.typ.Elem().Size()
+	header := newSliceHeaderCode(ctx, c.typ)
+	ctx.incIndex()
+
+	ctx.incIndent()
+	codes := c.value.ToOpcode(ctx)
+	ctx.decIndent()
+
+	codes.First().Flags |= IndirectFlags
+	elemCode := newSliceElemCode(ctx, c.typ.Elem(), header, size)
+	ctx.incIndex()
+	end := newOpCode(ctx, c.typ, OpSliceEnd)
+	ctx.incIndex()
+	header.End = end
+	header.Next = codes.First()
+	codes.Last().Next = elemCode
+	elemCode.Next = codes.First()
+	elemCode.End = end
+	return Opcodes{header}.Add(codes...).Add(elemCode).Add(end)
+}
+
+func (c *SliceCode) Filter(_ *FieldQuery) Code {
+	return c
+}
+
+type ArrayCode struct {
+	typ   *runtime.Type
+	value Code
+}
+
+func (c *ArrayCode) Kind() CodeKind {
+	return CodeKindArray
+}
+
+func (c *ArrayCode) ToOpcode(ctx *compileContext) Opcodes {
+	// header => opcode => elem => end
+	//             ^        |
+	//             |________|
+	elem := c.typ.Elem()
+	alen := c.typ.Len()
+	size := elem.Size()
+
+	header := newArrayHeaderCode(ctx, c.typ, alen)
+	ctx.incIndex()
+
+	ctx.incIndent()
+	codes := c.value.ToOpcode(ctx)
+	ctx.decIndent()
+
+	codes.First().Flags |= IndirectFlags
+
+	elemCode := newArrayElemCode(ctx, elem, header, alen, size)
+	ctx.incIndex()
+
+	end := newOpCode(ctx, c.typ, OpArrayEnd)
+	ctx.incIndex()
+
+	header.End = end
+	header.Next = codes.First()
+	codes.Last().Next = elemCode
+	elemCode.Next = codes.First()
+	elemCode.End = end
+
+	return Opcodes{header}.Add(codes...).Add(elemCode).Add(end)
+}
+
+func (c *ArrayCode) Filter(_ *FieldQuery) Code {
+	return c
+}
+
+type MapCode struct {
+	typ   *runtime.Type
+	key   Code
+	value Code
+}
+
+func (c *MapCode) Kind() CodeKind {
+	return CodeKindMap
+}
+
+func (c *MapCode) ToOpcode(ctx *compileContext) Opcodes {
+	// header => code => value => code => key => code => value => code => end
+	//                                     ^                       |
+	//                                     |_______________________|
+	header := newMapHeaderCode(ctx, c.typ)
+	ctx.incIndex()
+
+	keyCodes := c.key.ToOpcode(ctx)
+
+	value := newMapValueCode(ctx, c.typ.Elem(), header)
+	ctx.incIndex()
+
+	ctx.incIndent()
+	valueCodes := c.value.ToOpcode(ctx)
+	ctx.decIndent()
+
+	valueCodes.First().Flags |= IndirectFlags
+
+	key := newMapKeyCode(ctx, c.typ.Key(), header)
+	ctx.incIndex()
+
+	end := newMapEndCode(ctx, c.typ, header)
+	ctx.incIndex()
+
+	header.Next = keyCodes.First()
+	keyCodes.Last().Next = value
+	value.Next = valueCodes.First()
+	valueCodes.Last().Next = key
+	key.Next = keyCodes.First()
+
+	header.End = end
+	key.End = end
+	value.End = end
+	return Opcodes{header}.Add(keyCodes...).Add(value).Add(valueCodes...).Add(key).Add(end)
+}
+
+func (c *MapCode) Filter(_ *FieldQuery) Code {
+	return c
+}
+
+type StructCode struct {
+	typ                       *runtime.Type
+	fields                    []*StructFieldCode
+	isPtr                     bool
+	disableIndirectConversion bool
+	isIndirect                bool
+	isRecursive               bool
+}
+
+func (c *StructCode) Kind() CodeKind {
+	return CodeKindStruct
+}
+
+func (c *StructCode) lastFieldCode(field *StructFieldCode, firstField *Opcode) *Opcode {
+	if isEmbeddedStruct(field) {
+		return c.lastAnonymousFieldCode(firstField)
+	}
+	lastField := firstField
+	for lastField.NextField != nil {
+		lastField = lastField.NextField
+	}
+	return lastField
+}
+
+func (c *StructCode) lastAnonymousFieldCode(firstField *Opcode) *Opcode {
+	// firstField is a special StructHead operation for an anonymous structure,
+	// so the StructHead's next operation is the true struct head operation.
+	for firstField.Op == OpStructHead || firstField.Op == OpStructField {
+		firstField = firstField.Next
+	}
+	lastField := firstField
+	for lastField.NextField != nil {
+		lastField = lastField.NextField
+	}
+	return lastField
+}
+
+func (c *StructCode) ToOpcode(ctx *compileContext) Opcodes {
+	// header => code => structField => code => end
+	//                        ^          |
+	//                        |__________|
+	if c.isRecursive {
+		recursive := newRecursiveCode(ctx, c.typ, &CompiledCode{})
+		recursive.Type = c.typ
+		ctx.incIndex()
+		*ctx.recursiveCodes = append(*ctx.recursiveCodes, recursive)
+		return Opcodes{recursive}
+	}
+	codes := Opcodes{}
+	var prevField *Opcode
+	ctx.incIndent()
+	for idx, field := range c.fields {
+		isFirstField := idx == 0
+		isEndField := idx == len(c.fields)-1
+		fieldCodes := field.ToOpcode(ctx, isFirstField, isEndField)
+		for _, code := range fieldCodes {
+			if c.isIndirect {
+				code.Flags |= IndirectFlags
+			}
+		}
+		firstField := fieldCodes.First()
+		if len(codes) > 0 {
+			codes.Last().Next = firstField
+			firstField.Idx = codes.First().Idx
+		}
+		if prevField != nil {
+			prevField.NextField = firstField
+		}
+		if isEndField {
+			endField := fieldCodes.Last()
+			if len(codes) > 0 {
+				codes.First().End = endField
+			} else {
+				firstField.End = endField
+			}
+			codes = codes.Add(fieldCodes...)
+			break
+		}
+		prevField = c.lastFieldCode(field, firstField)
+		codes = codes.Add(fieldCodes...)
+	}
+	if len(codes) == 0 {
+		head := &Opcode{
+			Op:         OpStructHead,
+			Idx:        opcodeOffset(ctx.ptrIndex),
+			Type:       c.typ,
+			DisplayIdx: ctx.opcodeIndex,
+			Indent:     ctx.indent,
+		}
+		ctx.incOpcodeIndex()
+		end := &Opcode{
+			Op:         OpStructEnd,
+			Idx:        opcodeOffset(ctx.ptrIndex),
+			DisplayIdx: ctx.opcodeIndex,
+			Indent:     ctx.indent,
+		}
+		head.NextField = end
+		head.Next = end
+		head.End = end
+		codes = codes.Add(head, end)
+		ctx.incIndex()
+	}
+	ctx.decIndent()
+	ctx.structTypeToCodes[uintptr(unsafe.Pointer(c.typ))] = codes
+	return codes
+}
+
+func (c *StructCode) ToAnonymousOpcode(ctx *compileContext) Opcodes {
+	// header => code => structField => code => end
+	//                        ^          |
+	//                        |__________|
+	if c.isRecursive {
+		recursive := newRecursiveCode(ctx, c.typ, &CompiledCode{})
+		recursive.Type = c.typ
+		ctx.incIndex()
+		*ctx.recursiveCodes = append(*ctx.recursiveCodes, recursive)
+		return Opcodes{recursive}
+	}
+	codes := Opcodes{}
+	var prevField *Opcode
+	for idx, field := range c.fields {
+		isFirstField := idx == 0
+		isEndField := idx == len(c.fields)-1
+		fieldCodes := field.ToAnonymousOpcode(ctx, isFirstField, isEndField)
+		for _, code := range fieldCodes {
+			if c.isIndirect {
+				code.Flags |= IndirectFlags
+			}
+		}
+		firstField := fieldCodes.First()
+		if len(codes) > 0 {
+			codes.Last().Next = firstField
+			firstField.Idx = codes.First().Idx
+		}
+		if prevField != nil {
+			prevField.NextField = firstField
+		}
+		if isEndField {
+			lastField := fieldCodes.Last()
+			if len(codes) > 0 {
+				codes.First().End = lastField
+			} else {
+				firstField.End = lastField
+			}
+		}
+		prevField = firstField
+		codes = codes.Add(fieldCodes...)
+	}
+	return codes
+}
+
+func (c *StructCode) removeFieldsByTags(tags runtime.StructTags) {
+	fields := make([]*StructFieldCode, 0, len(c.fields))
+	for _, field := range c.fields {
+		if field.isAnonymous {
+			structCode := field.getAnonymousStruct()
+			if structCode != nil && !structCode.isRecursive {
+				structCode.removeFieldsByTags(tags)
+				if len(structCode.fields) > 0 {
+					fields = append(fields, field)
+				}
+				continue
+			}
+		}
+		if tags.ExistsKey(field.key) {
+			continue
+		}
+		fields = append(fields, field)
+	}
+	c.fields = fields
+}
+
+func (c *StructCode) enableIndirect() {
+	if c.isIndirect {
+		return
+	}
+	c.isIndirect = true
+	if len(c.fields) == 0 {
+		return
+	}
+	structCode := c.fields[0].getStruct()
+	if structCode == nil {
+		return
+	}
+	structCode.enableIndirect()
+}
+
+func (c *StructCode) Filter(query *FieldQuery) Code {
+	fieldMap := map[string]*FieldQuery{}
+	for _, field := range query.Fields {
+		fieldMap[field.Name] = field
+	}
+	fields := make([]*StructFieldCode, 0, len(c.fields))
+	for _, field := range c.fields {
+		query, exists := fieldMap[field.key]
+		if !exists {
+			continue
+		}
+		fieldCode := &StructFieldCode{
+			typ:                field.typ,
+			key:                field.key,
+			tag:                field.tag,
+			value:              field.value,
+			offset:             field.offset,
+			isAnonymous:        field.isAnonymous,
+			isTaggedKey:        field.isTaggedKey,
+			isNilableType:      field.isNilableType,
+			isNilCheck:         field.isNilCheck,
+			isAddrForMarshaler: field.isAddrForMarshaler,
+			isNextOpPtrType:    field.isNextOpPtrType,
+		}
+		if len(query.Fields) > 0 {
+			fieldCode.value = fieldCode.value.Filter(query)
+		}
+		fields = append(fields, fieldCode)
+	}
+	return &StructCode{
+		typ:                       c.typ,
+		fields:                    fields,
+		isPtr:                     c.isPtr,
+		disableIndirectConversion: c.disableIndirectConversion,
+		isIndirect:                c.isIndirect,
+		isRecursive:               c.isRecursive,
+	}
+}
+
+type StructFieldCode struct {
+	typ                *runtime.Type
+	key                string
+	tag                *runtime.StructTag
+	value              Code
+	offset             uintptr
+	isAnonymous        bool
+	isTaggedKey        bool
+	isNilableType      bool
+	isNilCheck         bool
+	isAddrForMarshaler bool
+	isNextOpPtrType    bool
+	isMarshalerContext bool
+}
+
+func (c *StructFieldCode) getStruct() *StructCode {
+	value := c.value
+	ptr, ok := value.(*PtrCode)
+	if ok {
+		value = ptr.value
+	}
+	structCode, ok := value.(*StructCode)
+	if ok {
+		return structCode
+	}
+	return nil
+}
+
+func (c *StructFieldCode) getAnonymousStruct() *StructCode {
+	if !c.isAnonymous {
+		return nil
+	}
+	return c.getStruct()
+}
+
+func optimizeStructHeader(code *Opcode, tag *runtime.StructTag) OpType {
+	headType := code.ToHeaderType(tag.IsString)
+	if tag.IsOmitEmpty {
+		headType = headType.HeadToOmitEmptyHead()
+	}
+	return headType
+}
+
+func optimizeStructField(code *Opcode, tag *runtime.StructTag) OpType {
+	fieldType := code.ToFieldType(tag.IsString)
+	if tag.IsOmitEmpty {
+		fieldType = fieldType.FieldToOmitEmptyField()
+	}
+	return fieldType
+}
+
+func (c *StructFieldCode) headerOpcodes(ctx *compileContext, field *Opcode, valueCodes Opcodes) Opcodes {
+	value := valueCodes.First()
+	op := optimizeStructHeader(value, c.tag)
+	field.Op = op
+	if value.Flags&MarshalerContextFlags != 0 {
+		field.Flags |= MarshalerContextFlags
+	}
+	field.NumBitSize = value.NumBitSize
+	field.PtrNum = value.PtrNum
+	field.FieldQuery = value.FieldQuery
+	fieldCodes := Opcodes{field}
+	if op.IsMultipleOpHead() {
+		field.Next = value
+		fieldCodes = fieldCodes.Add(valueCodes...)
+	} else {
+		ctx.decIndex()
+	}
+	return fieldCodes
+}
+
+func (c *StructFieldCode) fieldOpcodes(ctx *compileContext, field *Opcode, valueCodes Opcodes) Opcodes {
+	value := valueCodes.First()
+	op := optimizeStructField(value, c.tag)
+	field.Op = op
+	if value.Flags&MarshalerContextFlags != 0 {
+		field.Flags |= MarshalerContextFlags
+	}
+	field.NumBitSize = value.NumBitSize
+	field.PtrNum = value.PtrNum
+	field.FieldQuery = value.FieldQuery
+
+	fieldCodes := Opcodes{field}
+	if op.IsMultipleOpField() {
+		field.Next = value
+		fieldCodes = fieldCodes.Add(valueCodes...)
+	} else {
+		ctx.decIndex()
+	}
+	return fieldCodes
+}
+
+func (c *StructFieldCode) addStructEndCode(ctx *compileContext, codes Opcodes) Opcodes {
+	end := &Opcode{
+		Op:         OpStructEnd,
+		Idx:        opcodeOffset(ctx.ptrIndex),
+		DisplayIdx: ctx.opcodeIndex,
+		Indent:     ctx.indent,
+	}
+	codes.Last().Next = end
+	code := codes.First()
+	for code.Op == OpStructField || code.Op == OpStructHead {
+		code = code.Next
+	}
+	for code.NextField != nil {
+		code = code.NextField
+	}
+	code.NextField = end
+
+	codes = codes.Add(end)
+	ctx.incOpcodeIndex()
+	return codes
+}
+
+func (c *StructFieldCode) structKey(ctx *compileContext) string {
+	if ctx.escapeKey {
+		rctx := &RuntimeContext{Option: &Option{Flag: HTMLEscapeOption}}
+		return fmt.Sprintf(`%s:`, string(AppendString(rctx, []byte{}, c.key)))
+	}
+	return fmt.Sprintf(`"%s":`, c.key)
+}
+
+func (c *StructFieldCode) flags() OpFlags {
+	var flags OpFlags
+	if c.isTaggedKey {
+		flags |= IsTaggedKeyFlags
+	}
+	if c.isNilableType {
+		flags |= IsNilableTypeFlags
+	}
+	if c.isNilCheck {
+		flags |= NilCheckFlags
+	}
+	if c.isAddrForMarshaler {
+		flags |= AddrForMarshalerFlags
+	}
+	if c.isNextOpPtrType {
+		flags |= IsNextOpPtrTypeFlags
+	}
+	if c.isAnonymous {
+		flags |= AnonymousKeyFlags
+	}
+	if c.isMarshalerContext {
+		flags |= MarshalerContextFlags
+	}
+	return flags
+}
+
+func (c *StructFieldCode) toValueOpcodes(ctx *compileContext) Opcodes {
+	if c.isAnonymous {
+		anonymCode, ok := c.value.(AnonymousCode)
+		if ok {
+			return anonymCode.ToAnonymousOpcode(ctx)
+		}
+	}
+	return c.value.ToOpcode(ctx)
+}
+
+func (c *StructFieldCode) ToOpcode(ctx *compileContext, isFirstField, isEndField bool) Opcodes {
+	field := &Opcode{
+		Idx:        opcodeOffset(ctx.ptrIndex),
+		Flags:      c.flags(),
+		Key:        c.structKey(ctx),
+		Offset:     uint32(c.offset),
+		Type:       c.typ,
+		DisplayIdx: ctx.opcodeIndex,
+		Indent:     ctx.indent,
+		DisplayKey: c.key,
+	}
+	ctx.incIndex()
+	valueCodes := c.toValueOpcodes(ctx)
+	if isFirstField {
+		codes := c.headerOpcodes(ctx, field, valueCodes)
+		if isEndField {
+			codes = c.addStructEndCode(ctx, codes)
+		}
+		return codes
+	}
+	codes := c.fieldOpcodes(ctx, field, valueCodes)
+	if isEndField {
+		if isEnableStructEndOptimization(c.value) {
+			field.Op = field.Op.FieldToEnd()
+		} else {
+			codes = c.addStructEndCode(ctx, codes)
+		}
+	}
+	return codes
+}
+
+func (c *StructFieldCode) ToAnonymousOpcode(ctx *compileContext, isFirstField, isEndField bool) Opcodes {
+	field := &Opcode{
+		Idx:        opcodeOffset(ctx.ptrIndex),
+		Flags:      c.flags() | AnonymousHeadFlags,
+		Key:        c.structKey(ctx),
+		Offset:     uint32(c.offset),
+		Type:       c.typ,
+		DisplayIdx: ctx.opcodeIndex,
+		Indent:     ctx.indent,
+		DisplayKey: c.key,
+	}
+	ctx.incIndex()
+	valueCodes := c.toValueOpcodes(ctx)
+	if isFirstField {
+		return c.headerOpcodes(ctx, field, valueCodes)
+	}
+	return c.fieldOpcodes(ctx, field, valueCodes)
+}
+
+func isEnableStructEndOptimization(value Code) bool {
+	switch value.Kind() {
+	case CodeKindInt,
+		CodeKindUint,
+		CodeKindFloat,
+		CodeKindString,
+		CodeKindBool,
+		CodeKindBytes:
+		return true
+	case CodeKindPtr:
+		return isEnableStructEndOptimization(value.(*PtrCode).value)
+	default:
+		return false
+	}
+}
+
+type InterfaceCode struct {
+	typ        *runtime.Type
+	fieldQuery *FieldQuery
+	isPtr      bool
+}
+
+func (c *InterfaceCode) Kind() CodeKind {
+	return CodeKindInterface
+}
+
+func (c *InterfaceCode) ToOpcode(ctx *compileContext) Opcodes {
+	var code *Opcode
+	switch {
+	case c.isPtr:
+		code = newOpCode(ctx, c.typ, OpInterfacePtr)
+	default:
+		code = newOpCode(ctx, c.typ, OpInterface)
+	}
+	code.FieldQuery = c.fieldQuery
+	if c.typ.NumMethod() > 0 {
+		code.Flags |= NonEmptyInterfaceFlags
+	}
+	ctx.incIndex()
+	return Opcodes{code}
+}
+
+func (c *InterfaceCode) Filter(query *FieldQuery) Code {
+	return &InterfaceCode{
+		typ:        c.typ,
+		fieldQuery: query,
+		isPtr:      c.isPtr,
+	}
+}
+
+type MarshalJSONCode struct {
+	typ                *runtime.Type
+	fieldQuery         *FieldQuery
+	isAddrForMarshaler bool
+	isNilableType      bool
+	isMarshalerContext bool
+}
+
+func (c *MarshalJSONCode) Kind() CodeKind {
+	return CodeKindMarshalJSON
+}
+
+func (c *MarshalJSONCode) ToOpcode(ctx *compileContext) Opcodes {
+	code := newOpCode(ctx, c.typ, OpMarshalJSON)
+	code.FieldQuery = c.fieldQuery
+	if c.isAddrForMarshaler {
+		code.Flags |= AddrForMarshalerFlags
+	}
+	if c.isMarshalerContext {
+		code.Flags |= MarshalerContextFlags
+	}
+	if c.isNilableType {
+		code.Flags |= IsNilableTypeFlags
+	} else {
+		code.Flags &= ^IsNilableTypeFlags
+	}
+	ctx.incIndex()
+	return Opcodes{code}
+}
+
+func (c *MarshalJSONCode) Filter(query *FieldQuery) Code {
+	return &MarshalJSONCode{
+		typ:                c.typ,
+		fieldQuery:         query,
+		isAddrForMarshaler: c.isAddrForMarshaler,
+		isNilableType:      c.isNilableType,
+		isMarshalerContext: c.isMarshalerContext,
+	}
+}
+
+type MarshalTextCode struct {
+	typ                *runtime.Type
+	fieldQuery         *FieldQuery
+	isAddrForMarshaler bool
+	isNilableType      bool
+}
+
+func (c *MarshalTextCode) Kind() CodeKind {
+	return CodeKindMarshalText
+}
+
+func (c *MarshalTextCode) ToOpcode(ctx *compileContext) Opcodes {
+	code := newOpCode(ctx, c.typ, OpMarshalText)
+	code.FieldQuery = c.fieldQuery
+	if c.isAddrForMarshaler {
+		code.Flags |= AddrForMarshalerFlags
+	}
+	if c.isNilableType {
+		code.Flags |= IsNilableTypeFlags
+	} else {
+		code.Flags &= ^IsNilableTypeFlags
+	}
+	ctx.incIndex()
+	return Opcodes{code}
+}
+
+func (c *MarshalTextCode) Filter(query *FieldQuery) Code {
+	return &MarshalTextCode{
+		typ:                c.typ,
+		fieldQuery:         query,
+		isAddrForMarshaler: c.isAddrForMarshaler,
+		isNilableType:      c.isNilableType,
+	}
+}
+
+type PtrCode struct {
+	typ    *runtime.Type
+	value  Code
+	ptrNum uint8
+}
+
+func (c *PtrCode) Kind() CodeKind {
+	return CodeKindPtr
+}
+
+func (c *PtrCode) ToOpcode(ctx *compileContext) Opcodes {
+	codes := c.value.ToOpcode(ctx)
+	codes.First().Op = convertPtrOp(codes.First())
+	codes.First().PtrNum = c.ptrNum
+	return codes
+}
+
+func (c *PtrCode) ToAnonymousOpcode(ctx *compileContext) Opcodes {
+	var codes Opcodes
+	anonymCode, ok := c.value.(AnonymousCode)
+	if ok {
+		codes = anonymCode.ToAnonymousOpcode(ctx)
+	} else {
+		codes = c.value.ToOpcode(ctx)
+	}
+	codes.First().Op = convertPtrOp(codes.First())
+	codes.First().PtrNum = c.ptrNum
+	return codes
+}
+
+func (c *PtrCode) Filter(query *FieldQuery) Code {
+	return &PtrCode{
+		typ:    c.typ,
+		value:  c.value.Filter(query),
+		ptrNum: c.ptrNum,
+	}
+}
+
+func convertPtrOp(code *Opcode) OpType {
+	ptrHeadOp := code.Op.HeadToPtrHead()
+	if code.Op != ptrHeadOp {
+		if code.PtrNum > 0 {
+			// ptr field and ptr head
+			code.PtrNum--
+		}
+		return ptrHeadOp
+	}
+	switch code.Op {
+	case OpInt:
+		return OpIntPtr
+	case OpUint:
+		return OpUintPtr
+	case OpFloat32:
+		return OpFloat32Ptr
+	case OpFloat64:
+		return OpFloat64Ptr
+	case OpString:
+		return OpStringPtr
+	case OpBool:
+		return OpBoolPtr
+	case OpBytes:
+		return OpBytesPtr
+	case OpNumber:
+		return OpNumberPtr
+	case OpArray:
+		return OpArrayPtr
+	case OpSlice:
+		return OpSlicePtr
+	case OpMap:
+		return OpMapPtr
+	case OpMarshalJSON:
+		return OpMarshalJSONPtr
+	case OpMarshalText:
+		return OpMarshalTextPtr
+	case OpInterface:
+		return OpInterfacePtr
+	case OpRecursive:
+		return OpRecursivePtr
+	}
+	return code.Op
+}
+
+func isEmbeddedStruct(field *StructFieldCode) bool {
+	if !field.isAnonymous {
+		return false
+	}
+	t := field.typ
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	return t.Kind() == reflect.Struct
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compact.go b/vendor/github.com/goccy/go-json/internal/encoder/compact.go
new file mode 100644
index 0000000000..0eb9545d89
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/compact.go
@@ -0,0 +1,286 @@
+package encoder
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+)
+
+var (
+	isWhiteSpace = [256]bool{
+		' ':  true,
+		'\n': true,
+		'\t': true,
+		'\r': true,
+	}
+	isHTMLEscapeChar = [256]bool{
+		'<': true,
+		'>': true,
+		'&': true,
+	}
+	nul = byte('\000')
+)
+
+func Compact(buf *bytes.Buffer, src []byte, escape bool) error {
+	if len(src) == 0 {
+		return errors.ErrUnexpectedEndOfJSON("", 0)
+	}
+	buf.Grow(len(src))
+	dst := buf.Bytes()
+
+	ctx := TakeRuntimeContext()
+	ctxBuf := ctx.Buf[:0]
+	ctxBuf = append(append(ctxBuf, src...), nul)
+	ctx.Buf = ctxBuf
+
+	if err := compactAndWrite(buf, dst, ctxBuf, escape); err != nil {
+		ReleaseRuntimeContext(ctx)
+		return err
+	}
+	ReleaseRuntimeContext(ctx)
+	return nil
+}
+
+func compactAndWrite(buf *bytes.Buffer, dst []byte, src []byte, escape bool) error {
+	dst, err := compact(dst, src, escape)
+	if err != nil {
+		return err
+	}
+	if _, err := buf.Write(dst); err != nil {
+		return err
+	}
+	return nil
+}
+
+func compact(dst, src []byte, escape bool) ([]byte, error) {
+	buf, cursor, err := compactValue(dst, src, 0, escape)
+	if err != nil {
+		return nil, err
+	}
+	if err := validateEndBuf(src, cursor); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+func validateEndBuf(src []byte, cursor int64) error {
+	for {
+		switch src[cursor] {
+		case ' ', '\t', '\n', '\r':
+			cursor++
+			continue
+		case nul:
+			return nil
+		}
+		return errors.ErrSyntax(
+			fmt.Sprintf("invalid character '%c' after top-level value", src[cursor]),
+			cursor+1,
+		)
+	}
+}
+
+func skipWhiteSpace(buf []byte, cursor int64) int64 {
+LOOP:
+	if isWhiteSpace[buf[cursor]] {
+		cursor++
+		goto LOOP
+	}
+	return cursor
+}
+
+func compactValue(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) {
+	for {
+		switch src[cursor] {
+		case ' ', '\t', '\n', '\r':
+			cursor++
+			continue
+		case '{':
+			return compactObject(dst, src, cursor, escape)
+		case '}':
+			return nil, 0, errors.ErrSyntax("unexpected character '}'", cursor)
+		case '[':
+			return compactArray(dst, src, cursor, escape)
+		case ']':
+			return nil, 0, errors.ErrSyntax("unexpected character ']'", cursor)
+		case '"':
+			return compactString(dst, src, cursor, escape)
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return compactNumber(dst, src, cursor)
+		case 't':
+			return compactTrue(dst, src, cursor)
+		case 'f':
+			return compactFalse(dst, src, cursor)
+		case 'n':
+			return compactNull(dst, src, cursor)
+		default:
+			return nil, 0, errors.ErrSyntax(fmt.Sprintf("unexpected character '%c'", src[cursor]), cursor)
+		}
+	}
+}
+
+func compactObject(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) {
+	if src[cursor] == '{' {
+		dst = append(dst, '{')
+	} else {
+		return nil, 0, errors.ErrExpected("expected { character for object value", cursor)
+	}
+	cursor = skipWhiteSpace(src, cursor+1)
+	if src[cursor] == '}' {
+		dst = append(dst, '}')
+		return dst, cursor + 1, nil
+	}
+	var err error
+	for {
+		cursor = skipWhiteSpace(src, cursor)
+		dst, cursor, err = compactString(dst, src, cursor, escape)
+		if err != nil {
+			return nil, 0, err
+		}
+		cursor = skipWhiteSpace(src, cursor)
+		if src[cursor] != ':' {
+			return nil, 0, errors.ErrExpected("colon after object key", cursor)
+		}
+		dst = append(dst, ':')
+		dst, cursor, err = compactValue(dst, src, cursor+1, escape)
+		if err != nil {
+			return nil, 0, err
+		}
+		cursor = skipWhiteSpace(src, cursor)
+		switch src[cursor] {
+		case '}':
+			dst = append(dst, '}')
+			cursor++
+			return dst, cursor, nil
+		case ',':
+			dst = append(dst, ',')
+		default:
+			return nil, 0, errors.ErrExpected("comma after object value", cursor)
+		}
+		cursor++
+	}
+}
+
+func compactArray(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) {
+	if src[cursor] == '[' {
+		dst = append(dst, '[')
+	} else {
+		return nil, 0, errors.ErrExpected("expected [ character for array value", cursor)
+	}
+	cursor = skipWhiteSpace(src, cursor+1)
+	if src[cursor] == ']' {
+		dst = append(dst, ']')
+		return dst, cursor + 1, nil
+	}
+	var err error
+	for {
+		dst, cursor, err = compactValue(dst, src, cursor, escape)
+		if err != nil {
+			return nil, 0, err
+		}
+		cursor = skipWhiteSpace(src, cursor)
+		switch src[cursor] {
+		case ']':
+			dst = append(dst, ']')
+			cursor++
+			return dst, cursor, nil
+		case ',':
+			dst = append(dst, ',')
+		default:
+			return nil, 0, errors.ErrExpected("comma after array value", cursor)
+		}
+		cursor++
+	}
+}
+
+func compactString(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) {
+	if src[cursor] != '"' {
+		return nil, 0, errors.ErrInvalidCharacter(src[cursor], "string", cursor)
+	}
+	start := cursor
+	for {
+		cursor++
+		c := src[cursor]
+		if escape {
+			if isHTMLEscapeChar[c] {
+				dst = append(dst, src[start:cursor]...)
+				dst = append(dst, `\u00`...)
+				dst = append(dst, hex[c>>4], hex[c&0xF])
+				start = cursor + 1
+			} else if c == 0xE2 && cursor+2 < int64(len(src)) && src[cursor+1] == 0x80 && src[cursor+2]&^1 == 0xA8 {
+				dst = append(dst, src[start:cursor]...)
+				dst = append(dst, `\u202`...)
+				dst = append(dst, hex[src[cursor+2]&0xF])
+				cursor += 2
+				start = cursor + 3
+			}
+		}
+		switch c {
+		case '\\':
+			cursor++
+			if src[cursor] == nul {
+				return nil, 0, errors.ErrUnexpectedEndOfJSON("string", int64(len(src)))
+			}
+		case '"':
+			cursor++
+			return append(dst, src[start:cursor]...), cursor, nil
+		case nul:
+			return nil, 0, errors.ErrUnexpectedEndOfJSON("string", int64(len(src)))
+		}
+	}
+}
+
+func compactNumber(dst, src []byte, cursor int64) ([]byte, int64, error) {
+	start := cursor
+	for {
+		cursor++
+		if floatTable[src[cursor]] {
+			continue
+		}
+		break
+	}
+	num := src[start:cursor]
+	if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&num)), 64); err != nil {
+		return nil, 0, err
+	}
+	dst = append(dst, num...)
+	return dst, cursor, nil
+}
+
+func compactTrue(dst, src []byte, cursor int64) ([]byte, int64, error) {
+	if cursor+3 >= int64(len(src)) {
+		return nil, 0, errors.ErrUnexpectedEndOfJSON("true", cursor)
+	}
+	if !bytes.Equal(src[cursor:cursor+4], []byte(`true`)) {
+		return nil, 0, errors.ErrInvalidCharacter(src[cursor], "true", cursor)
+	}
+	dst = append(dst, "true"...)
+	cursor += 4
+	return dst, cursor, nil
+}
+
+func compactFalse(dst, src []byte, cursor int64) ([]byte, int64, error) {
+	if cursor+4 >= int64(len(src)) {
+		return nil, 0, errors.ErrUnexpectedEndOfJSON("false", cursor)
+	}
+	if !bytes.Equal(src[cursor:cursor+5], []byte(`false`)) {
+		return nil, 0, errors.ErrInvalidCharacter(src[cursor], "false", cursor)
+	}
+	dst = append(dst, "false"...)
+	cursor += 5
+	return dst, cursor, nil
+}
+
+func compactNull(dst, src []byte, cursor int64) ([]byte, int64, error) {
+	if cursor+3 >= int64(len(src)) {
+		return nil, 0, errors.ErrUnexpectedEndOfJSON("null", cursor)
+	}
+	if !bytes.Equal(src[cursor:cursor+4], []byte(`null`)) {
+		return nil, 0, errors.ErrInvalidCharacter(src[cursor], "null", cursor)
+	}
+	dst = append(dst, "null"...)
+	cursor += 4
+	return dst, cursor, nil
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go
new file mode 100644
index 0000000000..3ae39ba8c7
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go
@@ -0,0 +1,935 @@
+package encoder
+
+import (
+	"context"
+	"encoding"
+	"encoding/json"
+	"reflect"
+	"sync/atomic"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type marshalerContext interface {
+	MarshalJSON(context.Context) ([]byte, error)
+}
+
+var (
+	marshalJSONType        = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
+	marshalJSONContextType = reflect.TypeOf((*marshalerContext)(nil)).Elem()
+	marshalTextType        = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+	jsonNumberType         = reflect.TypeOf(json.Number(""))
+	cachedOpcodeSets       []*OpcodeSet
+	cachedOpcodeMap        unsafe.Pointer // map[uintptr]*OpcodeSet
+	typeAddr               *runtime.TypeAddr
+)
+
+func init() {
+	typeAddr = runtime.AnalyzeTypeAddr()
+	if typeAddr == nil {
+		typeAddr = &runtime.TypeAddr{}
+	}
+	cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1)
+}
+
+func loadOpcodeMap() map[uintptr]*OpcodeSet {
+	p := atomic.LoadPointer(&cachedOpcodeMap)
+	return *(*map[uintptr]*OpcodeSet)(unsafe.Pointer(&p))
+}
+
+func storeOpcodeSet(typ uintptr, set *OpcodeSet, m map[uintptr]*OpcodeSet) {
+	newOpcodeMap := make(map[uintptr]*OpcodeSet, len(m)+1)
+	newOpcodeMap[typ] = set
+
+	for k, v := range m {
+		newOpcodeMap[k] = v
+	}
+
+	atomic.StorePointer(&cachedOpcodeMap, *(*unsafe.Pointer)(unsafe.Pointer(&newOpcodeMap)))
+}
+
+func compileToGetCodeSetSlowPath(typeptr uintptr) (*OpcodeSet, error) {
+	opcodeMap := loadOpcodeMap()
+	if codeSet, exists := opcodeMap[typeptr]; exists {
+		return codeSet, nil
+	}
+	codeSet, err := newCompiler().compile(typeptr)
+	if err != nil {
+		return nil, err
+	}
+	storeOpcodeSet(typeptr, codeSet, opcodeMap)
+	return codeSet, nil
+}
+
+func getFilteredCodeSetIfNeeded(ctx *RuntimeContext, codeSet *OpcodeSet) (*OpcodeSet, error) {
+	if (ctx.Option.Flag & ContextOption) == 0 {
+		return codeSet, nil
+	}
+	query := FieldQueryFromContext(ctx.Option.Context)
+	if query == nil {
+		return codeSet, nil
+	}
+	ctx.Option.Flag |= FieldQueryOption
+	cacheCodeSet := codeSet.getQueryCache(query.Hash())
+	if cacheCodeSet != nil {
+		return cacheCodeSet, nil
+	}
+	queryCodeSet, err := newCompiler().codeToOpcodeSet(codeSet.Type, codeSet.Code.Filter(query))
+	if err != nil {
+		return nil, err
+	}
+	codeSet.setQueryCache(query.Hash(), queryCodeSet)
+	return queryCodeSet, nil
+}
+
+type Compiler struct {
+	structTypeToCode map[uintptr]*StructCode
+}
+
+func newCompiler() *Compiler {
+	return &Compiler{
+		structTypeToCode: map[uintptr]*StructCode{},
+	}
+}
+
+func (c *Compiler) compile(typeptr uintptr) (*OpcodeSet, error) {
+	// noescape trick for header.typ ( reflect.*rtype )
+	typ := *(**runtime.Type)(unsafe.Pointer(&typeptr))
+	code, err := c.typeToCode(typ)
+	if err != nil {
+		return nil, err
+	}
+	return c.codeToOpcodeSet(typ, code)
+}
+
+func (c *Compiler) codeToOpcodeSet(typ *runtime.Type, code Code) (*OpcodeSet, error) {
+	noescapeKeyCode := c.codeToOpcode(&compileContext{
+		structTypeToCodes: map[uintptr]Opcodes{},
+		recursiveCodes:    &Opcodes{},
+	}, typ, code)
+	if err := noescapeKeyCode.Validate(); err != nil {
+		return nil, err
+	}
+	escapeKeyCode := c.codeToOpcode(&compileContext{
+		structTypeToCodes: map[uintptr]Opcodes{},
+		recursiveCodes:    &Opcodes{},
+		escapeKey:         true,
+	}, typ, code)
+	noescapeKeyCode = copyOpcode(noescapeKeyCode)
+	escapeKeyCode = copyOpcode(escapeKeyCode)
+	setTotalLengthToInterfaceOp(noescapeKeyCode)
+	setTotalLengthToInterfaceOp(escapeKeyCode)
+	interfaceNoescapeKeyCode := copyToInterfaceOpcode(noescapeKeyCode)
+	interfaceEscapeKeyCode := copyToInterfaceOpcode(escapeKeyCode)
+	codeLength := noescapeKeyCode.TotalLength()
+	return &OpcodeSet{
+		Type:                     typ,
+		NoescapeKeyCode:          noescapeKeyCode,
+		EscapeKeyCode:            escapeKeyCode,
+		InterfaceNoescapeKeyCode: interfaceNoescapeKeyCode,
+		InterfaceEscapeKeyCode:   interfaceEscapeKeyCode,
+		CodeLength:               codeLength,
+		EndCode:                  ToEndCode(interfaceNoescapeKeyCode),
+		Code:                     code,
+		QueryCache:               map[string]*OpcodeSet{},
+	}, nil
+}
+
+func (c *Compiler) typeToCode(typ *runtime.Type) (Code, error) {
+	switch {
+	case c.implementsMarshalJSON(typ):
+		return c.marshalJSONCode(typ)
+	case c.implementsMarshalText(typ):
+		return c.marshalTextCode(typ)
+	}
+
+	isPtr := false
+	orgType := typ
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		isPtr = true
+	}
+	switch {
+	case c.implementsMarshalJSON(typ):
+		return c.marshalJSONCode(orgType)
+	case c.implementsMarshalText(typ):
+		return c.marshalTextCode(orgType)
+	}
+	switch typ.Kind() {
+	case reflect.Slice:
+		elem := typ.Elem()
+		if elem.Kind() == reflect.Uint8 {
+			p := runtime.PtrTo(elem)
+			if !c.implementsMarshalJSONType(p) && !p.Implements(marshalTextType) {
+				return c.bytesCode(typ, isPtr)
+			}
+		}
+		return c.sliceCode(typ)
+	case reflect.Map:
+		if isPtr {
+			return c.ptrCode(runtime.PtrTo(typ))
+		}
+		return c.mapCode(typ)
+	case reflect.Struct:
+		return c.structCode(typ, isPtr)
+	case reflect.Int:
+		return c.intCode(typ, isPtr)
+	case reflect.Int8:
+		return c.int8Code(typ, isPtr)
+	case reflect.Int16:
+		return c.int16Code(typ, isPtr)
+	case reflect.Int32:
+		return c.int32Code(typ, isPtr)
+	case reflect.Int64:
+		return c.int64Code(typ, isPtr)
+	case reflect.Uint, reflect.Uintptr:
+		return c.uintCode(typ, isPtr)
+	case reflect.Uint8:
+		return c.uint8Code(typ, isPtr)
+	case reflect.Uint16:
+		return c.uint16Code(typ, isPtr)
+	case reflect.Uint32:
+		return c.uint32Code(typ, isPtr)
+	case reflect.Uint64:
+		return c.uint64Code(typ, isPtr)
+	case reflect.Float32:
+		return c.float32Code(typ, isPtr)
+	case reflect.Float64:
+		return c.float64Code(typ, isPtr)
+	case reflect.String:
+		return c.stringCode(typ, isPtr)
+	case reflect.Bool:
+		return c.boolCode(typ, isPtr)
+	case reflect.Interface:
+		return c.interfaceCode(typ, isPtr)
+	default:
+		if isPtr && typ.Implements(marshalTextType) {
+			typ = orgType
+		}
+		return c.typeToCodeWithPtr(typ, isPtr)
+	}
+}
+
+func (c *Compiler) typeToCodeWithPtr(typ *runtime.Type, isPtr bool) (Code, error) {
+	switch {
+	case c.implementsMarshalJSON(typ):
+		return c.marshalJSONCode(typ)
+	case c.implementsMarshalText(typ):
+		return c.marshalTextCode(typ)
+	}
+	switch typ.Kind() {
+	case reflect.Ptr:
+		return c.ptrCode(typ)
+	case reflect.Slice:
+		elem := typ.Elem()
+		if elem.Kind() == reflect.Uint8 {
+			p := runtime.PtrTo(elem)
+			if !c.implementsMarshalJSONType(p) && !p.Implements(marshalTextType) {
+				return c.bytesCode(typ, false)
+			}
+		}
+		return c.sliceCode(typ)
+	case reflect.Array:
+		return c.arrayCode(typ)
+	case reflect.Map:
+		return c.mapCode(typ)
+	case reflect.Struct:
+		return c.structCode(typ, isPtr)
+	case reflect.Interface:
+		return c.interfaceCode(typ, false)
+	case reflect.Int:
+		return c.intCode(typ, false)
+	case reflect.Int8:
+		return c.int8Code(typ, false)
+	case reflect.Int16:
+		return c.int16Code(typ, false)
+	case reflect.Int32:
+		return c.int32Code(typ, false)
+	case reflect.Int64:
+		return c.int64Code(typ, false)
+	case reflect.Uint:
+		return c.uintCode(typ, false)
+	case reflect.Uint8:
+		return c.uint8Code(typ, false)
+	case reflect.Uint16:
+		return c.uint16Code(typ, false)
+	case reflect.Uint32:
+		return c.uint32Code(typ, false)
+	case reflect.Uint64:
+		return c.uint64Code(typ, false)
+	case reflect.Uintptr:
+		return c.uintCode(typ, false)
+	case reflect.Float32:
+		return c.float32Code(typ, false)
+	case reflect.Float64:
+		return c.float64Code(typ, false)
+	case reflect.String:
+		return c.stringCode(typ, false)
+	case reflect.Bool:
+		return c.boolCode(typ, false)
+	}
+	return nil, &errors.UnsupportedTypeError{Type: runtime.RType2Type(typ)}
+}
+
+const intSize = 32 << (^uint(0) >> 63)
+
+//nolint:unparam
+func (c *Compiler) intCode(typ *runtime.Type, isPtr bool) (*IntCode, error) {
+	return &IntCode{typ: typ, bitSize: intSize, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) int8Code(typ *runtime.Type, isPtr bool) (*IntCode, error) {
+	return &IntCode{typ: typ, bitSize: 8, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) int16Code(typ *runtime.Type, isPtr bool) (*IntCode, error) {
+	return &IntCode{typ: typ, bitSize: 16, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) int32Code(typ *runtime.Type, isPtr bool) (*IntCode, error) {
+	return &IntCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) int64Code(typ *runtime.Type, isPtr bool) (*IntCode, error) {
+	return &IntCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) uintCode(typ *runtime.Type, isPtr bool) (*UintCode, error) {
+	return &UintCode{typ: typ, bitSize: intSize, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) uint8Code(typ *runtime.Type, isPtr bool) (*UintCode, error) {
+	return &UintCode{typ: typ, bitSize: 8, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) uint16Code(typ *runtime.Type, isPtr bool) (*UintCode, error) {
+	return &UintCode{typ: typ, bitSize: 16, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) uint32Code(typ *runtime.Type, isPtr bool) (*UintCode, error) {
+	return &UintCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) uint64Code(typ *runtime.Type, isPtr bool) (*UintCode, error) {
+	return &UintCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) float32Code(typ *runtime.Type, isPtr bool) (*FloatCode, error) {
+	return &FloatCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) float64Code(typ *runtime.Type, isPtr bool) (*FloatCode, error) {
+	return &FloatCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) stringCode(typ *runtime.Type, isPtr bool) (*StringCode, error) {
+	return &StringCode{typ: typ, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) boolCode(typ *runtime.Type, isPtr bool) (*BoolCode, error) {
+	return &BoolCode{typ: typ, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) intStringCode(typ *runtime.Type) (*IntCode, error) {
+	return &IntCode{typ: typ, bitSize: intSize, isString: true}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) int8StringCode(typ *runtime.Type) (*IntCode, error) {
+	return &IntCode{typ: typ, bitSize: 8, isString: true}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) int16StringCode(typ *runtime.Type) (*IntCode, error) {
+	return &IntCode{typ: typ, bitSize: 16, isString: true}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) int32StringCode(typ *runtime.Type) (*IntCode, error) {
+	return &IntCode{typ: typ, bitSize: 32, isString: true}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) int64StringCode(typ *runtime.Type) (*IntCode, error) {
+	return &IntCode{typ: typ, bitSize: 64, isString: true}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) uintStringCode(typ *runtime.Type) (*UintCode, error) {
+	return &UintCode{typ: typ, bitSize: intSize, isString: true}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) uint8StringCode(typ *runtime.Type) (*UintCode, error) {
+	return &UintCode{typ: typ, bitSize: 8, isString: true}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) uint16StringCode(typ *runtime.Type) (*UintCode, error) {
+	return &UintCode{typ: typ, bitSize: 16, isString: true}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) uint32StringCode(typ *runtime.Type) (*UintCode, error) {
+	return &UintCode{typ: typ, bitSize: 32, isString: true}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) uint64StringCode(typ *runtime.Type) (*UintCode, error) {
+	return &UintCode{typ: typ, bitSize: 64, isString: true}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) bytesCode(typ *runtime.Type, isPtr bool) (*BytesCode, error) {
+	return &BytesCode{typ: typ, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) interfaceCode(typ *runtime.Type, isPtr bool) (*InterfaceCode, error) {
+	return &InterfaceCode{typ: typ, isPtr: isPtr}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) marshalJSONCode(typ *runtime.Type) (*MarshalJSONCode, error) {
+	return &MarshalJSONCode{
+		typ:                typ,
+		isAddrForMarshaler: c.isPtrMarshalJSONType(typ),
+		isNilableType:      c.isNilableType(typ),
+		isMarshalerContext: typ.Implements(marshalJSONContextType) || runtime.PtrTo(typ).Implements(marshalJSONContextType),
+	}, nil
+}
+
+//nolint:unparam
+func (c *Compiler) marshalTextCode(typ *runtime.Type) (*MarshalTextCode, error) {
+	return &MarshalTextCode{
+		typ:                typ,
+		isAddrForMarshaler: c.isPtrMarshalTextType(typ),
+		isNilableType:      c.isNilableType(typ),
+	}, nil
+}
+
+func (c *Compiler) ptrCode(typ *runtime.Type) (*PtrCode, error) {
+	code, err := c.typeToCodeWithPtr(typ.Elem(), true)
+	if err != nil {
+		return nil, err
+	}
+	ptr, ok := code.(*PtrCode)
+	if ok {
+		return &PtrCode{typ: typ, value: ptr.value, ptrNum: ptr.ptrNum + 1}, nil
+	}
+	return &PtrCode{typ: typ, value: code, ptrNum: 1}, nil
+}
+
+func (c *Compiler) sliceCode(typ *runtime.Type) (*SliceCode, error) {
+	elem := typ.Elem()
+	code, err := c.listElemCode(elem)
+	if err != nil {
+		return nil, err
+	}
+	if code.Kind() == CodeKindStruct {
+		structCode := code.(*StructCode)
+		structCode.enableIndirect()
+	}
+	return &SliceCode{typ: typ, value: code}, nil
+}
+
+func (c *Compiler) arrayCode(typ *runtime.Type) (*ArrayCode, error) {
+	elem := typ.Elem()
+	code, err := c.listElemCode(elem)
+	if err != nil {
+		return nil, err
+	}
+	if code.Kind() == CodeKindStruct {
+		structCode := code.(*StructCode)
+		structCode.enableIndirect()
+	}
+	return &ArrayCode{typ: typ, value: code}, nil
+}
+
+func (c *Compiler) mapCode(typ *runtime.Type) (*MapCode, error) {
+	keyCode, err := c.mapKeyCode(typ.Key())
+	if err != nil {
+		return nil, err
+	}
+	valueCode, err := c.mapValueCode(typ.Elem())
+	if err != nil {
+		return nil, err
+	}
+	if valueCode.Kind() == CodeKindStruct {
+		structCode := valueCode.(*StructCode)
+		structCode.enableIndirect()
+	}
+	return &MapCode{typ: typ, key: keyCode, value: valueCode}, nil
+}
+
+func (c *Compiler) listElemCode(typ *runtime.Type) (Code, error) {
+	switch {
+	case c.isPtrMarshalJSONType(typ):
+		return c.marshalJSONCode(typ)
+	case !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType):
+		return c.marshalTextCode(typ)
+	case typ.Kind() == reflect.Map:
+		return c.ptrCode(runtime.PtrTo(typ))
+	default:
+		// isPtr was originally used to indicate whether the top-level type is a pointer.
+		// However, since the address of a slice/array element can always be taken, isPtr is explicitly set to true here.
+		// See here for related issues: https://github.com/goccy/go-json/issues/370
+		code, err := c.typeToCodeWithPtr(typ, true)
+		if err != nil {
+			return nil, err
+		}
+		ptr, ok := code.(*PtrCode)
+		if ok {
+			if ptr.value.Kind() == CodeKindMap {
+				ptr.ptrNum++
+			}
+		}
+		return code, nil
+	}
+}
+
+func (c *Compiler) mapKeyCode(typ *runtime.Type) (Code, error) {
+	switch {
+	case c.implementsMarshalText(typ):
+		return c.marshalTextCode(typ)
+	}
+	switch typ.Kind() {
+	case reflect.Ptr:
+		return c.ptrCode(typ)
+	case reflect.String:
+		return c.stringCode(typ, false)
+	case reflect.Int:
+		return c.intStringCode(typ)
+	case reflect.Int8:
+		return c.int8StringCode(typ)
+	case reflect.Int16:
+		return c.int16StringCode(typ)
+	case reflect.Int32:
+		return c.int32StringCode(typ)
+	case reflect.Int64:
+		return c.int64StringCode(typ)
+	case reflect.Uint:
+		return c.uintStringCode(typ)
+	case reflect.Uint8:
+		return c.uint8StringCode(typ)
+	case reflect.Uint16:
+		return c.uint16StringCode(typ)
+	case reflect.Uint32:
+		return c.uint32StringCode(typ)
+	case reflect.Uint64:
+		return c.uint64StringCode(typ)
+	case reflect.Uintptr:
+		return c.uintStringCode(typ)
+	}
+	return nil, &errors.UnsupportedTypeError{Type: runtime.RType2Type(typ)}
+}
+
+func (c *Compiler) mapValueCode(typ *runtime.Type) (Code, error) {
+	switch typ.Kind() {
+	case reflect.Map:
+		return c.ptrCode(runtime.PtrTo(typ))
+	default:
+		code, err := c.typeToCodeWithPtr(typ, false)
+		if err != nil {
+			return nil, err
+		}
+		ptr, ok := code.(*PtrCode)
+		if ok {
+			if ptr.value.Kind() == CodeKindMap {
+				ptr.ptrNum++
+			}
+		}
+		return code, nil
+	}
+}
+
+func (c *Compiler) structCode(typ *runtime.Type, isPtr bool) (*StructCode, error) {
+	typeptr := uintptr(unsafe.Pointer(typ))
+	if code, exists := c.structTypeToCode[typeptr]; exists {
+		derefCode := *code
+		derefCode.isRecursive = true
+		return &derefCode, nil
+	}
+	indirect := runtime.IfaceIndir(typ)
+	code := &StructCode{typ: typ, isPtr: isPtr, isIndirect: indirect}
+	c.structTypeToCode[typeptr] = code
+
+	fieldNum := typ.NumField()
+	tags := c.typeToStructTags(typ)
+	fields := []*StructFieldCode{}
+	for i, tag := range tags {
+		isOnlyOneFirstField := i == 0 && fieldNum == 1
+		field, err := c.structFieldCode(code, tag, isPtr, isOnlyOneFirstField)
+		if err != nil {
+			return nil, err
+		}
+		if field.isAnonymous {
+			structCode := field.getAnonymousStruct()
+			if structCode != nil {
+				structCode.removeFieldsByTags(tags)
+				if c.isAssignableIndirect(field, isPtr) {
+					if indirect {
+						structCode.isIndirect = true
+					} else {
+						structCode.isIndirect = false
+					}
+				}
+			}
+		} else {
+			structCode := field.getStruct()
+			if structCode != nil {
+				if indirect {
+					// if parent is indirect type, set child indirect property to true
+					structCode.isIndirect = true
+				} else {
+					// if parent is not indirect type, set child indirect property to false.
+					// but if parent's indirect is false and isPtr is true, then indirect must be true.
+					// Do this only if indirectConversion is enabled at the end of compileStruct.
+					structCode.isIndirect = false
+				}
+			}
+		}
+		fields = append(fields, field)
+	}
+	fieldMap := c.getFieldMap(fields)
+	duplicatedFieldMap := c.getDuplicatedFieldMap(fieldMap)
+	code.fields = c.filteredDuplicatedFields(fields, duplicatedFieldMap)
+	if !code.disableIndirectConversion && !indirect && isPtr {
+		code.enableIndirect()
+	}
+	delete(c.structTypeToCode, typeptr)
+	return code, nil
+}
+
+func toElemType(t *runtime.Type) *runtime.Type {
+	for t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	return t
+}
+
+func (c *Compiler) structFieldCode(structCode *StructCode, tag *runtime.StructTag, isPtr, isOnlyOneFirstField bool) (*StructFieldCode, error) {
+	field := tag.Field
+	fieldType := runtime.Type2RType(field.Type)
+	isIndirectSpecialCase := isPtr && isOnlyOneFirstField
+	fieldCode := &StructFieldCode{
+		typ:           fieldType,
+		key:           tag.Key,
+		tag:           tag,
+		offset:        field.Offset,
+		isAnonymous:   field.Anonymous && !tag.IsTaggedKey && toElemType(fieldType).Kind() == reflect.Struct,
+		isTaggedKey:   tag.IsTaggedKey,
+		isNilableType: c.isNilableType(fieldType),
+		isNilCheck:    true,
+	}
+	switch {
+	case c.isMovePointerPositionFromHeadToFirstMarshalJSONFieldCase(fieldType, isIndirectSpecialCase):
+		code, err := c.marshalJSONCode(fieldType)
+		if err != nil {
+			return nil, err
+		}
+		fieldCode.value = code
+		fieldCode.isAddrForMarshaler = true
+		fieldCode.isNilCheck = false
+		structCode.isIndirect = false
+		structCode.disableIndirectConversion = true
+	case c.isMovePointerPositionFromHeadToFirstMarshalTextFieldCase(fieldType, isIndirectSpecialCase):
+		code, err := c.marshalTextCode(fieldType)
+		if err != nil {
+			return nil, err
+		}
+		fieldCode.value = code
+		fieldCode.isAddrForMarshaler = true
+		fieldCode.isNilCheck = false
+		structCode.isIndirect = false
+		structCode.disableIndirectConversion = true
+	case isPtr && c.isPtrMarshalJSONType(fieldType):
+		// *struct{ field T }
+		// func (*T) MarshalJSON() ([]byte, error)
+		code, err := c.marshalJSONCode(fieldType)
+		if err != nil {
+			return nil, err
+		}
+		fieldCode.value = code
+		fieldCode.isAddrForMarshaler = true
+		fieldCode.isNilCheck = false
+	case isPtr && c.isPtrMarshalTextType(fieldType):
+		// *struct{ field T }
+		// func (*T) MarshalText() ([]byte, error)
+		code, err := c.marshalTextCode(fieldType)
+		if err != nil {
+			return nil, err
+		}
+		fieldCode.value = code
+		fieldCode.isAddrForMarshaler = true
+		fieldCode.isNilCheck = false
+	default:
+		code, err := c.typeToCodeWithPtr(fieldType, isPtr)
+		if err != nil {
+			return nil, err
+		}
+		switch code.Kind() {
+		case CodeKindPtr, CodeKindInterface:
+			fieldCode.isNextOpPtrType = true
+		}
+		fieldCode.value = code
+	}
+	return fieldCode, nil
+}
+
+func (c *Compiler) isAssignableIndirect(fieldCode *StructFieldCode, isPtr bool) bool {
+	if isPtr {
+		return false
+	}
+	codeType := fieldCode.value.Kind()
+	if codeType == CodeKindMarshalJSON {
+		return false
+	}
+	if codeType == CodeKindMarshalText {
+		return false
+	}
+	return true
+}
+
+func (c *Compiler) getFieldMap(fields []*StructFieldCode) map[string][]*StructFieldCode {
+	fieldMap := map[string][]*StructFieldCode{}
+	for _, field := range fields {
+		if field.isAnonymous {
+			for k, v := range c.getAnonymousFieldMap(field) {
+				fieldMap[k] = append(fieldMap[k], v...)
+			}
+			continue
+		}
+		fieldMap[field.key] = append(fieldMap[field.key], field)
+	}
+	return fieldMap
+}
+
+func (c *Compiler) getAnonymousFieldMap(field *StructFieldCode) map[string][]*StructFieldCode {
+	fieldMap := map[string][]*StructFieldCode{}
+	structCode := field.getAnonymousStruct()
+	if structCode == nil || structCode.isRecursive {
+		fieldMap[field.key] = append(fieldMap[field.key], field)
+		return fieldMap
+	}
+	for k, v := range c.getFieldMapFromAnonymousParent(structCode.fields) {
+		fieldMap[k] = append(fieldMap[k], v...)
+	}
+	return fieldMap
+}
+
+func (c *Compiler) getFieldMapFromAnonymousParent(fields []*StructFieldCode) map[string][]*StructFieldCode {
+	fieldMap := map[string][]*StructFieldCode{}
+	for _, field := range fields {
+		if field.isAnonymous {
+			for k, v := range c.getAnonymousFieldMap(field) {
+				// Do not handle tagged key when embedding more than once
+				for _, vv := range v {
+					vv.isTaggedKey = false
+				}
+				fieldMap[k] = append(fieldMap[k], v...)
+			}
+			continue
+		}
+		fieldMap[field.key] = append(fieldMap[field.key], field)
+	}
+	return fieldMap
+}
+
+func (c *Compiler) getDuplicatedFieldMap(fieldMap map[string][]*StructFieldCode) map[*StructFieldCode]struct{} {
+	duplicatedFieldMap := map[*StructFieldCode]struct{}{}
+	for _, fields := range fieldMap {
+		if len(fields) == 1 {
+			continue
+		}
+		if c.isTaggedKeyOnly(fields) {
+			for _, field := range fields {
+				if field.isTaggedKey {
+					continue
+				}
+				duplicatedFieldMap[field] = struct{}{}
+			}
+		} else {
+			for _, field := range fields {
+				duplicatedFieldMap[field] = struct{}{}
+			}
+		}
+	}
+	return duplicatedFieldMap
+}
+
+func (c *Compiler) filteredDuplicatedFields(fields []*StructFieldCode, duplicatedFieldMap map[*StructFieldCode]struct{}) []*StructFieldCode {
+	filteredFields := make([]*StructFieldCode, 0, len(fields))
+	for _, field := range fields {
+		if field.isAnonymous {
+			structCode := field.getAnonymousStruct()
+			if structCode != nil && !structCode.isRecursive {
+				structCode.fields = c.filteredDuplicatedFields(structCode.fields, duplicatedFieldMap)
+				if len(structCode.fields) > 0 {
+					filteredFields = append(filteredFields, field)
+				}
+				continue
+			}
+		}
+		if _, exists := duplicatedFieldMap[field]; exists {
+			continue
+		}
+		filteredFields = append(filteredFields, field)
+	}
+	return filteredFields
+}
+
+func (c *Compiler) isTaggedKeyOnly(fields []*StructFieldCode) bool {
+	var taggedKeyFieldCount int
+	for _, field := range fields {
+		if field.isTaggedKey {
+			taggedKeyFieldCount++
+		}
+	}
+	return taggedKeyFieldCount == 1
+}
+
+func (c *Compiler) typeToStructTags(typ *runtime.Type) runtime.StructTags {
+	tags := runtime.StructTags{}
+	fieldNum := typ.NumField()
+	for i := 0; i < fieldNum; i++ {
+		field := typ.Field(i)
+		if runtime.IsIgnoredStructField(field) {
+			continue
+		}
+		tags = append(tags, runtime.StructTagFromField(field))
+	}
+	return tags
+}
+
+// *struct{ field T } => struct { field *T }
+// func (*T) MarshalJSON() ([]byte, error)
+func (c *Compiler) isMovePointerPositionFromHeadToFirstMarshalJSONFieldCase(typ *runtime.Type, isIndirectSpecialCase bool) bool {
+	return isIndirectSpecialCase && !c.isNilableType(typ) && c.isPtrMarshalJSONType(typ)
+}
+
+// *struct{ field T } => struct { field *T }
+// func (*T) MarshalText() ([]byte, error)
+func (c *Compiler) isMovePointerPositionFromHeadToFirstMarshalTextFieldCase(typ *runtime.Type, isIndirectSpecialCase bool) bool {
+	return isIndirectSpecialCase && !c.isNilableType(typ) && c.isPtrMarshalTextType(typ)
+}
+
+func (c *Compiler) implementsMarshalJSON(typ *runtime.Type) bool {
+	if !c.implementsMarshalJSONType(typ) {
+		return false
+	}
+	if typ.Kind() != reflect.Ptr {
+		return true
+	}
+	// type kind is reflect.Ptr
+	if !c.implementsMarshalJSONType(typ.Elem()) {
+		return true
+	}
+	// needs to dereference
+	return false
+}
+
+func (c *Compiler) implementsMarshalText(typ *runtime.Type) bool {
+	if !typ.Implements(marshalTextType) {
+		return false
+	}
+	if typ.Kind() != reflect.Ptr {
+		return true
+	}
+	// type kind is reflect.Ptr
+	if !typ.Elem().Implements(marshalTextType) {
+		return true
+	}
+	// needs to dereference
+	return false
+}
+
+func (c *Compiler) isNilableType(typ *runtime.Type) bool {
+	if !runtime.IfaceIndir(typ) {
+		return true
+	}
+	switch typ.Kind() {
+	case reflect.Ptr:
+		return true
+	case reflect.Map:
+		return true
+	case reflect.Func:
+		return true
+	default:
+		return false
+	}
+}
+
+func (c *Compiler) implementsMarshalJSONType(typ *runtime.Type) bool {
+	return typ.Implements(marshalJSONType) || typ.Implements(marshalJSONContextType)
+}
+
+func (c *Compiler) isPtrMarshalJSONType(typ *runtime.Type) bool {
+	return !c.implementsMarshalJSONType(typ) && c.implementsMarshalJSONType(runtime.PtrTo(typ))
+}
+
+func (c *Compiler) isPtrMarshalTextType(typ *runtime.Type) bool {
+	return !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType)
+}
+
+func (c *Compiler) codeToOpcode(ctx *compileContext, typ *runtime.Type, code Code) *Opcode {
+	codes := code.ToOpcode(ctx)
+	codes.Last().Next = newEndOp(ctx, typ)
+	c.linkRecursiveCode(ctx)
+	return codes.First()
+}
+
+func (c *Compiler) linkRecursiveCode(ctx *compileContext) {
+	recursiveCodes := map[uintptr]*CompiledCode{}
+	for _, recursive := range *ctx.recursiveCodes {
+		typeptr := uintptr(unsafe.Pointer(recursive.Type))
+		codes := ctx.structTypeToCodes[typeptr]
+		if recursiveCode, ok := recursiveCodes[typeptr]; ok {
+			*recursive.Jmp = *recursiveCode
+			continue
+		}
+
+		code := copyOpcode(codes.First())
+		code.Op = code.Op.PtrHeadToHead()
+		lastCode := newEndOp(&compileContext{}, recursive.Type)
+		lastCode.Op = OpRecursiveEnd
+
+		// OpRecursiveEnd must be set before calling TotalLength
+		code.End.Next = lastCode
+
+		totalLength := code.TotalLength()
+
+		// Idx, ElemIdx, and Length must be set after calling TotalLength
+		lastCode.Idx = uint32((totalLength + 1) * uintptrSize)
+		lastCode.ElemIdx = lastCode.Idx + uintptrSize
+		lastCode.Length = lastCode.Idx + 2*uintptrSize
+
+		// extend length to alloc slot for elemIdx + length
+		curTotalLength := uintptr(recursive.TotalLength()) + 3
+		nextTotalLength := uintptr(totalLength) + 3
+
+		compiled := recursive.Jmp
+		compiled.Code = code
+		compiled.CurLen = curTotalLength
+		compiled.NextLen = nextTotalLength
+		compiled.Linked = true
+
+		recursiveCodes[typeptr] = compiled
+	}
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go
new file mode 100644
index 0000000000..20c93cbf70
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go
@@ -0,0 +1,32 @@
+//go:build !race
+// +build !race
+
+package encoder
+
+func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) {
+	if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr {
+		codeSet, err := compileToGetCodeSetSlowPath(typeptr)
+		if err != nil {
+			return nil, err
+		}
+		return getFilteredCodeSetIfNeeded(ctx, codeSet)
+	}
+	index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift
+	if codeSet := cachedOpcodeSets[index]; codeSet != nil {
+		filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet)
+		if err != nil {
+			return nil, err
+		}
+		return filtered, nil
+	}
+	codeSet, err := newCompiler().compile(typeptr)
+	if err != nil {
+		return nil, err
+	}
+	filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet)
+	if err != nil {
+		return nil, err
+	}
+	cachedOpcodeSets[index] = codeSet
+	return filtered, nil
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go
new file mode 100644
index 0000000000..13ba23fdff
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go
@@ -0,0 +1,45 @@
+//go:build race
+// +build race
+
+package encoder
+
+import (
+	"sync"
+)
+
+var setsMu sync.RWMutex
+
+func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) {
+	if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr {
+		codeSet, err := compileToGetCodeSetSlowPath(typeptr)
+		if err != nil {
+			return nil, err
+		}
+		return getFilteredCodeSetIfNeeded(ctx, codeSet)
+	}
+	index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift
+	setsMu.RLock()
+	if codeSet := cachedOpcodeSets[index]; codeSet != nil {
+		filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet)
+		if err != nil {
+			setsMu.RUnlock()
+			return nil, err
+		}
+		setsMu.RUnlock()
+		return filtered, nil
+	}
+	setsMu.RUnlock()
+
+	codeSet, err := newCompiler().compile(typeptr)
+	if err != nil {
+		return nil, err
+	}
+	filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet)
+	if err != nil {
+		return nil, err
+	}
+	setsMu.Lock()
+	cachedOpcodeSets[index] = codeSet
+	setsMu.Unlock()
+	return filtered, nil
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/context.go b/vendor/github.com/goccy/go-json/internal/encoder/context.go
new file mode 100644
index 0000000000..3833d0c86d
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/context.go
@@ -0,0 +1,105 @@
+package encoder
+
+import (
+	"context"
+	"sync"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+type compileContext struct {
+	opcodeIndex       uint32
+	ptrIndex          int
+	indent            uint32
+	escapeKey         bool
+	structTypeToCodes map[uintptr]Opcodes
+	recursiveCodes    *Opcodes
+}
+
+func (c *compileContext) incIndent() {
+	c.indent++
+}
+
+func (c *compileContext) decIndent() {
+	c.indent--
+}
+
+func (c *compileContext) incIndex() {
+	c.incOpcodeIndex()
+	c.incPtrIndex()
+}
+
+func (c *compileContext) decIndex() {
+	c.decOpcodeIndex()
+	c.decPtrIndex()
+}
+
+func (c *compileContext) incOpcodeIndex() {
+	c.opcodeIndex++
+}
+
+func (c *compileContext) decOpcodeIndex() {
+	c.opcodeIndex--
+}
+
+func (c *compileContext) incPtrIndex() {
+	c.ptrIndex++
+}
+
+func (c *compileContext) decPtrIndex() {
+	c.ptrIndex--
+}
+
+const (
+	bufSize = 1024
+)
+
+var (
+	runtimeContextPool = sync.Pool{
+		New: func() interface{} {
+			return &RuntimeContext{
+				Buf:      make([]byte, 0, bufSize),
+				Ptrs:     make([]uintptr, 128),
+				KeepRefs: make([]unsafe.Pointer, 0, 8),
+				Option:   &Option{},
+			}
+		},
+	}
+)
+
+type RuntimeContext struct {
+	Context    context.Context
+	Buf        []byte
+	MarshalBuf []byte
+	Ptrs       []uintptr
+	KeepRefs   []unsafe.Pointer
+	SeenPtr    []uintptr
+	BaseIndent uint32
+	Prefix     []byte
+	IndentStr  []byte
+	Option     *Option
+}
+
+func (c *RuntimeContext) Init(p uintptr, codelen int) {
+	if len(c.Ptrs) < codelen {
+		c.Ptrs = make([]uintptr, codelen)
+	}
+	c.Ptrs[0] = p
+	c.KeepRefs = c.KeepRefs[:0]
+	c.SeenPtr = c.SeenPtr[:0]
+	c.BaseIndent = 0
+}
+
+func (c *RuntimeContext) Ptr() uintptr {
+	header := (*runtime.SliceHeader)(unsafe.Pointer(&c.Ptrs))
+	return uintptr(header.Data)
+}
+
+func TakeRuntimeContext() *RuntimeContext {
+	return runtimeContextPool.Get().(*RuntimeContext)
+}
+
+func ReleaseRuntimeContext(ctx *RuntimeContext) {
+	runtimeContextPool.Put(ctx)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go b/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go
new file mode 100644
index 0000000000..35c959d481
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go
@@ -0,0 +1,126 @@
+package encoder
+
+import "unicode/utf8"
+
+const (
+	// The default lowest and highest continuation byte.
+	locb = 128 //0b10000000
+	hicb = 191 //0b10111111
+
+	// The names of these constants are chosen to give nice alignment in the
+	// table below. The first nibble is an index into acceptRanges or F for
+	// special one-byte cases. The second nibble is the Rune length or the
+	// Status for the special one-byte case.
+	xx = 0xF1 // invalid: size 1
+	as = 0xF0 // ASCII: size 1
+	s1 = 0x02 // accept 0, size 2
+	s2 = 0x13 // accept 1, size 3
+	s3 = 0x03 // accept 0, size 3
+	s4 = 0x23 // accept 2, size 3
+	s5 = 0x34 // accept 3, size 4
+	s6 = 0x04 // accept 0, size 4
+	s7 = 0x44 // accept 4, size 4
+)
+
+// first is information about the first byte in a UTF-8 sequence.
+var first = [256]uint8{
+	//   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
+	//   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
+	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
+	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
+	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
+	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
+	xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
+	s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
+	s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
+	s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
+}
+
+const (
+	lineSep      = byte(168) //'\u2028'
+	paragraphSep = byte(169) //'\u2029'
+)
+
+type decodeRuneState int
+
+const (
+	validUTF8State decodeRuneState = iota
+	runeErrorState
+	lineSepState
+	paragraphSepState
+)
+
+func decodeRuneInString(s string) (decodeRuneState, int) {
+	n := len(s)
+	s0 := s[0]
+	x := first[s0]
+	if x >= as {
+		// The following code simulates an additional check for x == xx and
+		// handling the ASCII and invalid cases accordingly. This mask-and-or
+		// approach prevents an additional branch.
+		mask := rune(x) << 31 >> 31 // Create 0x0000 or 0xFFFF.
+		if rune(s[0])&^mask|utf8.RuneError&mask == utf8.RuneError {
+			return runeErrorState, 1
+		}
+		return validUTF8State, 1
+	}
+	sz := int(x & 7)
+	if n < sz {
+		return runeErrorState, 1
+	}
+	s1 := s[1]
+	switch x >> 4 {
+	case 0:
+		if s1 < locb || hicb < s1 {
+			return runeErrorState, 1
+		}
+	case 1:
+		if s1 < 0xA0 || hicb < s1 {
+			return runeErrorState, 1
+		}
+	case 2:
+		if s1 < locb || 0x9F < s1 {
+			return runeErrorState, 1
+		}
+	case 3:
+		if s1 < 0x90 || hicb < s1 {
+			return runeErrorState, 1
+		}
+	case 4:
+		if s1 < locb || 0x8F < s1 {
+			return runeErrorState, 1
+		}
+	}
+	if sz <= 2 {
+		return validUTF8State, 2
+	}
+	s2 := s[2]
+	if s2 < locb || hicb < s2 {
+		return runeErrorState, 1
+	}
+	if sz <= 3 {
+		// separator character prefixes: [2]byte{226, 128}
+		if s0 == 226 && s1 == 128 {
+			switch s2 {
+			case lineSep:
+				return lineSepState, 3
+			case paragraphSep:
+				return paragraphSepState, 3
+			}
+		}
+		return validUTF8State, 3
+	}
+	s3 := s[3]
+	if s3 < locb || hicb < s3 {
+		return runeErrorState, 1
+	}
+	return validUTF8State, 4
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go
new file mode 100644
index 0000000000..14eb6a0d64
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go
@@ -0,0 +1,596 @@
+package encoder
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/errors"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+func (t OpType) IsMultipleOpHead() bool {
+	switch t {
+	case OpStructHead:
+		return true
+	case OpStructHeadSlice:
+		return true
+	case OpStructHeadArray:
+		return true
+	case OpStructHeadMap:
+		return true
+	case OpStructHeadStruct:
+		return true
+	case OpStructHeadOmitEmpty:
+		return true
+	case OpStructHeadOmitEmptySlice:
+		return true
+	case OpStructHeadOmitEmptyArray:
+		return true
+	case OpStructHeadOmitEmptyMap:
+		return true
+	case OpStructHeadOmitEmptyStruct:
+		return true
+	case OpStructHeadSlicePtr:
+		return true
+	case OpStructHeadOmitEmptySlicePtr:
+		return true
+	case OpStructHeadArrayPtr:
+		return true
+	case OpStructHeadOmitEmptyArrayPtr:
+		return true
+	case OpStructHeadMapPtr:
+		return true
+	case OpStructHeadOmitEmptyMapPtr:
+		return true
+	}
+	return false
+}
+
+func (t OpType) IsMultipleOpField() bool {
+	switch t {
+	case OpStructField:
+		return true
+	case OpStructFieldSlice:
+		return true
+	case OpStructFieldArray:
+		return true
+	case OpStructFieldMap:
+		return true
+	case OpStructFieldStruct:
+		return true
+	case OpStructFieldOmitEmpty:
+		return true
+	case OpStructFieldOmitEmptySlice:
+		return true
+	case OpStructFieldOmitEmptyArray:
+		return true
+	case OpStructFieldOmitEmptyMap:
+		return true
+	case OpStructFieldOmitEmptyStruct:
+		return true
+	case OpStructFieldSlicePtr:
+		return true
+	case OpStructFieldOmitEmptySlicePtr:
+		return true
+	case OpStructFieldArrayPtr:
+		return true
+	case OpStructFieldOmitEmptyArrayPtr:
+		return true
+	case OpStructFieldMapPtr:
+		return true
+	case OpStructFieldOmitEmptyMapPtr:
+		return true
+	}
+	return false
+}
+
+type OpcodeSet struct {
+	Type                     *runtime.Type
+	NoescapeKeyCode          *Opcode
+	EscapeKeyCode            *Opcode
+	InterfaceNoescapeKeyCode *Opcode
+	InterfaceEscapeKeyCode   *Opcode
+	CodeLength               int
+	EndCode                  *Opcode
+	Code                     Code
+	QueryCache               map[string]*OpcodeSet
+	cacheMu                  sync.RWMutex
+}
+
+func (s *OpcodeSet) getQueryCache(hash string) *OpcodeSet {
+	s.cacheMu.RLock()
+	codeSet := s.QueryCache[hash]
+	s.cacheMu.RUnlock()
+	return codeSet
+}
+
+func (s *OpcodeSet) setQueryCache(hash string, codeSet *OpcodeSet) {
+	s.cacheMu.Lock()
+	s.QueryCache[hash] = codeSet
+	s.cacheMu.Unlock()
+}
+
+type CompiledCode struct {
+	Code    *Opcode
+	Linked  bool // whether the recursive code has already been linked
+	CurLen  uintptr
+	NextLen uintptr
+}
+
+const StartDetectingCyclesAfter = 1000
+
+func Load(base uintptr, idx uintptr) uintptr {
+	addr := base + idx
+	return **(**uintptr)(unsafe.Pointer(&addr))
+}
+
+func Store(base uintptr, idx uintptr, p uintptr) {
+	addr := base + idx
+	**(**uintptr)(unsafe.Pointer(&addr)) = p
+}
+
+func LoadNPtr(base uintptr, idx uintptr, ptrNum int) uintptr {
+	addr := base + idx
+	p := **(**uintptr)(unsafe.Pointer(&addr))
+	if p == 0 {
+		return 0
+	}
+	return PtrToPtr(p)
+	/*
+		for i := 0; i < ptrNum; i++ {
+			if p == 0 {
+				return p
+			}
+			p = PtrToPtr(p)
+		}
+		return p
+	*/
+}
+
+func PtrToUint64(p uintptr) uint64              { return **(**uint64)(unsafe.Pointer(&p)) }
+func PtrToFloat32(p uintptr) float32            { return **(**float32)(unsafe.Pointer(&p)) }
+func PtrToFloat64(p uintptr) float64            { return **(**float64)(unsafe.Pointer(&p)) }
+func PtrToBool(p uintptr) bool                  { return **(**bool)(unsafe.Pointer(&p)) }
+func PtrToBytes(p uintptr) []byte               { return **(**[]byte)(unsafe.Pointer(&p)) }
+func PtrToNumber(p uintptr) json.Number         { return **(**json.Number)(unsafe.Pointer(&p)) }
+func PtrToString(p uintptr) string              { return **(**string)(unsafe.Pointer(&p)) }
+func PtrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) }
+func PtrToPtr(p uintptr) uintptr {
+	return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p)))
+}
+func PtrToNPtr(p uintptr, ptrNum int) uintptr {
+	for i := 0; i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = PtrToPtr(p)
+	}
+	return p
+}
+
+func PtrToUnsafePtr(p uintptr) unsafe.Pointer {
+	return *(*unsafe.Pointer)(unsafe.Pointer(&p))
+}
+func PtrToInterface(code *Opcode, p uintptr) interface{} {
+	return *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: code.Type,
+		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)),
+	}))
+}
+
+func ErrUnsupportedValue(code *Opcode, ptr uintptr) *errors.UnsupportedValueError {
+	v := *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: code.Type,
+		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&ptr)),
+	}))
+	return &errors.UnsupportedValueError{
+		Value: reflect.ValueOf(v),
+		Str:   fmt.Sprintf("encountered a cycle via %s", code.Type),
+	}
+}
+
+func ErrUnsupportedFloat(v float64) *errors.UnsupportedValueError {
+	return &errors.UnsupportedValueError{
+		Value: reflect.ValueOf(v),
+		Str:   strconv.FormatFloat(v, 'g', -1, 64),
+	}
+}
+
+func ErrMarshalerWithCode(code *Opcode, err error) *errors.MarshalerError {
+	return &errors.MarshalerError{
+		Type: runtime.RType2Type(code.Type),
+		Err:  err,
+	}
+}
+
+type emptyInterface struct {
+	typ *runtime.Type
+	ptr unsafe.Pointer
+}
+
+type MapItem struct {
+	Key   []byte
+	Value []byte
+}
+
+type Mapslice struct {
+	Items []MapItem
+}
+
+func (m *Mapslice) Len() int {
+	return len(m.Items)
+}
+
+func (m *Mapslice) Less(i, j int) bool {
+	return bytes.Compare(m.Items[i].Key, m.Items[j].Key) < 0
+}
+
+func (m *Mapslice) Swap(i, j int) {
+	m.Items[i], m.Items[j] = m.Items[j], m.Items[i]
+}
+
+//nolint:structcheck,unused
+type mapIter struct {
+	key         unsafe.Pointer
+	elem        unsafe.Pointer
+	t           unsafe.Pointer
+	h           unsafe.Pointer
+	buckets     unsafe.Pointer
+	bptr        unsafe.Pointer
+	overflow    unsafe.Pointer
+	oldoverflow unsafe.Pointer
+	startBucket uintptr
+	offset      uint8
+	wrapped     bool
+	B           uint8
+	i           uint8
+	bucket      uintptr
+	checkBucket uintptr
+}
+
+type MapContext struct {
+	Start int
+	First int
+	Idx   int
+	Slice *Mapslice
+	Buf   []byte
+	Len   int
+	Iter  mapIter
+}
+
+var mapContextPool = sync.Pool{
+	New: func() interface{} {
+		return &MapContext{
+			Slice: &Mapslice{},
+		}
+	},
+}
+
+func NewMapContext(mapLen int, unorderedMap bool) *MapContext {
+	ctx := mapContextPool.Get().(*MapContext)
+	if !unorderedMap {
+		if len(ctx.Slice.Items) < mapLen {
+			ctx.Slice.Items = make([]MapItem, mapLen)
+		} else {
+			ctx.Slice.Items = ctx.Slice.Items[:mapLen]
+		}
+	}
+	ctx.Buf = ctx.Buf[:0]
+	ctx.Iter = mapIter{}
+	ctx.Idx = 0
+	ctx.Len = mapLen
+	return ctx
+}
+
+func ReleaseMapContext(c *MapContext) {
+	mapContextPool.Put(c)
+}
+
+//go:linkname MapIterInit runtime.mapiterinit
+//go:noescape
+func MapIterInit(mapType *runtime.Type, m unsafe.Pointer, it *mapIter)
+
+//go:linkname MapIterKey reflect.mapiterkey
+//go:noescape
+func MapIterKey(it *mapIter) unsafe.Pointer
+
+//go:linkname MapIterNext reflect.mapiternext
+//go:noescape
+func MapIterNext(it *mapIter)
+
+//go:linkname MapLen reflect.maplen
+//go:noescape
+func MapLen(m unsafe.Pointer) int
+
+func AppendByteSlice(_ *RuntimeContext, b []byte, src []byte) []byte {
+	if src == nil {
+		return append(b, `null`...)
+	}
+	encodedLen := base64.StdEncoding.EncodedLen(len(src))
+	b = append(b, '"')
+	pos := len(b)
+	remainLen := cap(b[pos:])
+	var buf []byte
+	if remainLen > encodedLen {
+		buf = b[pos : pos+encodedLen]
+	} else {
+		buf = make([]byte, encodedLen)
+	}
+	base64.StdEncoding.Encode(buf, src)
+	return append(append(b, buf...), '"')
+}
+
+func AppendFloat32(_ *RuntimeContext, b []byte, v float32) []byte {
+	f64 := float64(v)
+	abs := math.Abs(f64)
+	fmt := byte('f')
+	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+	if abs != 0 {
+		f32 := float32(abs)
+		if f32 < 1e-6 || f32 >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	return strconv.AppendFloat(b, f64, fmt, -1, 32)
+}
+
+func AppendFloat64(_ *RuntimeContext, b []byte, v float64) []byte {
+	abs := math.Abs(v)
+	fmt := byte('f')
+	// Note: Switch to exponent format outside the range [1e-6, 1e21) to match encoding/json behavior.
+	if abs != 0 {
+		if abs < 1e-6 || abs >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	return strconv.AppendFloat(b, v, fmt, -1, 64)
+}
+
+func AppendBool(_ *RuntimeContext, b []byte, v bool) []byte {
+	if v {
+		return append(b, "true"...)
+	}
+	return append(b, "false"...)
+}
+
+var (
+	floatTable = [256]bool{
+		'0': true,
+		'1': true,
+		'2': true,
+		'3': true,
+		'4': true,
+		'5': true,
+		'6': true,
+		'7': true,
+		'8': true,
+		'9': true,
+		'.': true,
+		'e': true,
+		'E': true,
+		'+': true,
+		'-': true,
+	}
+)
+
+func AppendNumber(_ *RuntimeContext, b []byte, n json.Number) ([]byte, error) {
+	if len(n) == 0 {
+		return append(b, '0'), nil
+	}
+	for i := 0; i < len(n); i++ {
+		if !floatTable[n[i]] {
+			return nil, fmt.Errorf("json: invalid number literal %q", n)
+		}
+	}
+	b = append(b, n...)
+	return b, nil
+}
+
+func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) {
+	rv := reflect.ValueOf(v) // convert by dynamic interface type
+	if (code.Flags & AddrForMarshalerFlags) != 0 {
+		if rv.CanAddr() {
+			rv = rv.Addr()
+		} else {
+			newV := reflect.New(rv.Type())
+			newV.Elem().Set(rv)
+			rv = newV
+		}
+	}
+	v = rv.Interface()
+	var bb []byte
+	if (code.Flags & MarshalerContextFlags) != 0 {
+		marshaler, ok := v.(marshalerContext)
+		if !ok {
+			return AppendNull(ctx, b), nil
+		}
+		stdctx := ctx.Option.Context
+		if ctx.Option.Flag&FieldQueryOption != 0 {
+			stdctx = SetFieldQueryToContext(stdctx, code.FieldQuery)
+		}
+		b, err := marshaler.MarshalJSON(stdctx)
+		if err != nil {
+			return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err}
+		}
+		bb = b
+	} else {
+		marshaler, ok := v.(json.Marshaler)
+		if !ok {
+			return AppendNull(ctx, b), nil
+		}
+		b, err := marshaler.MarshalJSON()
+		if err != nil {
+			return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err}
+		}
+		bb = b
+	}
+	marshalBuf := ctx.MarshalBuf[:0]
+	marshalBuf = append(append(marshalBuf, bb...), nul)
+	compactedBuf, err := compact(b, marshalBuf, (ctx.Option.Flag&HTMLEscapeOption) != 0)
+	if err != nil {
+		return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err}
+	}
+	ctx.MarshalBuf = marshalBuf
+	return compactedBuf, nil
+}
+
+func AppendMarshalJSONIndent(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) {
+	rv := reflect.ValueOf(v) // convert by dynamic interface type
+	if (code.Flags & AddrForMarshalerFlags) != 0 {
+		if rv.CanAddr() {
+			rv = rv.Addr()
+		} else {
+			newV := reflect.New(rv.Type())
+			newV.Elem().Set(rv)
+			rv = newV
+		}
+	}
+	v = rv.Interface()
+	var bb []byte
+	if (code.Flags & MarshalerContextFlags) != 0 {
+		marshaler, ok := v.(marshalerContext)
+		if !ok {
+			return AppendNull(ctx, b), nil
+		}
+		b, err := marshaler.MarshalJSON(ctx.Option.Context)
+		if err != nil {
+			return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err}
+		}
+		bb = b
+	} else {
+		marshaler, ok := v.(json.Marshaler)
+		if !ok {
+			return AppendNull(ctx, b), nil
+		}
+		b, err := marshaler.MarshalJSON()
+		if err != nil {
+			return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err}
+		}
+		bb = b
+	}
+	marshalBuf := ctx.MarshalBuf[:0]
+	marshalBuf = append(append(marshalBuf, bb...), nul)
+	indentedBuf, err := doIndent(
+		b,
+		marshalBuf,
+		string(ctx.Prefix)+strings.Repeat(string(ctx.IndentStr), int(ctx.BaseIndent+code.Indent)),
+		string(ctx.IndentStr),
+		(ctx.Option.Flag&HTMLEscapeOption) != 0,
+	)
+	if err != nil {
+		return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err}
+	}
+	ctx.MarshalBuf = marshalBuf
+	return indentedBuf, nil
+}
+
+func AppendMarshalText(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) {
+	rv := reflect.ValueOf(v) // convert by dynamic interface type
+	if (code.Flags & AddrForMarshalerFlags) != 0 {
+		if rv.CanAddr() {
+			rv = rv.Addr()
+		} else {
+			newV := reflect.New(rv.Type())
+			newV.Elem().Set(rv)
+			rv = newV
+		}
+	}
+	v = rv.Interface()
+	marshaler, ok := v.(encoding.TextMarshaler)
+	if !ok {
+		return AppendNull(ctx, b), nil
+	}
+	bytes, err := marshaler.MarshalText()
+	if err != nil {
+		return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err}
+	}
+	return AppendString(ctx, b, *(*string)(unsafe.Pointer(&bytes))), nil
+}
+
+func AppendMarshalTextIndent(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) {
+	rv := reflect.ValueOf(v) // convert by dynamic interface type
+	if (code.Flags & AddrForMarshalerFlags) != 0 {
+		if rv.CanAddr() {
+			rv = rv.Addr()
+		} else {
+			newV := reflect.New(rv.Type())
+			newV.Elem().Set(rv)
+			rv = newV
+		}
+	}
+	v = rv.Interface()
+	marshaler, ok := v.(encoding.TextMarshaler)
+	if !ok {
+		return AppendNull(ctx, b), nil
+	}
+	bytes, err := marshaler.MarshalText()
+	if err != nil {
+		return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err}
+	}
+	return AppendString(ctx, b, *(*string)(unsafe.Pointer(&bytes))), nil
+}
+
+func AppendNull(_ *RuntimeContext, b []byte) []byte {
+	return append(b, "null"...)
+}
+
+func AppendComma(_ *RuntimeContext, b []byte) []byte {
+	return append(b, ',')
+}
+
+func AppendCommaIndent(_ *RuntimeContext, b []byte) []byte {
+	return append(b, ',', '\n')
+}
+
+func AppendStructEnd(_ *RuntimeContext, b []byte) []byte {
+	return append(b, '}', ',')
+}
+
+func AppendStructEndIndent(ctx *RuntimeContext, code *Opcode, b []byte) []byte {
+	b = append(b, '\n')
+	b = append(b, ctx.Prefix...)
+	indentNum := ctx.BaseIndent + code.Indent - 1
+	for i := uint32(0); i < indentNum; i++ {
+		b = append(b, ctx.IndentStr...)
+	}
+	return append(b, '}', ',', '\n')
+}
+
+func AppendIndent(ctx *RuntimeContext, b []byte, indent uint32) []byte {
+	b = append(b, ctx.Prefix...)
+	indentNum := ctx.BaseIndent + indent
+	for i := uint32(0); i < indentNum; i++ {
+		b = append(b, ctx.IndentStr...)
+	}
+	return b
+}
+
+func IsNilForMarshaler(v interface{}) bool {
+	rv := reflect.ValueOf(v)
+	switch rv.Kind() {
+	case reflect.Bool:
+		return !rv.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return rv.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return rv.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return math.Float64bits(rv.Float()) == 0
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Func:
+		return rv.IsNil()
+	case reflect.Slice:
+		return rv.IsNil() || rv.Len() == 0
+	case reflect.String:
+		return rv.Len() == 0
+	}
+	return false
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/indent.go b/vendor/github.com/goccy/go-json/internal/encoder/indent.go
new file mode 100644
index 0000000000..dfe04b5e3c
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/indent.go
@@ -0,0 +1,211 @@
+package encoder
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/goccy/go-json/internal/errors"
+)
+
+func takeIndentSrcRuntimeContext(src []byte) (*RuntimeContext, []byte) {
+	ctx := TakeRuntimeContext()
+	buf := ctx.Buf[:0]
+	buf = append(append(buf, src...), nul)
+	ctx.Buf = buf
+	return ctx, buf
+}
+
+func Indent(buf *bytes.Buffer, src []byte, prefix, indentStr string) error {
+	if len(src) == 0 {
+		return errors.ErrUnexpectedEndOfJSON("", 0)
+	}
+
+	srcCtx, srcBuf := takeIndentSrcRuntimeContext(src)
+	dstCtx := TakeRuntimeContext()
+	dst := dstCtx.Buf[:0]
+
+	dst, err := indentAndWrite(buf, dst, srcBuf, prefix, indentStr)
+	if err != nil {
+		ReleaseRuntimeContext(srcCtx)
+		ReleaseRuntimeContext(dstCtx)
+		return err
+	}
+	dstCtx.Buf = dst
+	ReleaseRuntimeContext(srcCtx)
+	ReleaseRuntimeContext(dstCtx)
+	return nil
+}
+
+func indentAndWrite(buf *bytes.Buffer, dst []byte, src []byte, prefix, indentStr string) ([]byte, error) {
+	dst, err := doIndent(dst, src, prefix, indentStr, false)
+	if err != nil {
+		return nil, err
+	}
+	if _, err := buf.Write(dst); err != nil {
+		return nil, err
+	}
+	return dst, nil
+}
+
+func doIndent(dst, src []byte, prefix, indentStr string, escape bool) ([]byte, error) {
+	buf, cursor, err := indentValue(dst, src, 0, 0, []byte(prefix), []byte(indentStr), escape)
+	if err != nil {
+		return nil, err
+	}
+	if err := validateEndBuf(src, cursor); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+func indentValue(
+	dst []byte,
+	src []byte,
+	indentNum int,
+	cursor int64,
+	prefix []byte,
+	indentBytes []byte,
+	escape bool) ([]byte, int64, error) {
+	for {
+		switch src[cursor] {
+		case ' ', '\t', '\n', '\r':
+			cursor++
+			continue
+		case '{':
+			return indentObject(dst, src, indentNum, cursor, prefix, indentBytes, escape)
+		case '}':
+			return nil, 0, errors.ErrSyntax("unexpected character '}'", cursor)
+		case '[':
+			return indentArray(dst, src, indentNum, cursor, prefix, indentBytes, escape)
+		case ']':
+			return nil, 0, errors.ErrSyntax("unexpected character ']'", cursor)
+		case '"':
+			return compactString(dst, src, cursor, escape)
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			return compactNumber(dst, src, cursor)
+		case 't':
+			return compactTrue(dst, src, cursor)
+		case 'f':
+			return compactFalse(dst, src, cursor)
+		case 'n':
+			return compactNull(dst, src, cursor)
+		default:
+			return nil, 0, errors.ErrSyntax(fmt.Sprintf("unexpected character '%c'", src[cursor]), cursor)
+		}
+	}
+}
+
+func indentObject(
+	dst []byte,
+	src []byte,
+	indentNum int,
+	cursor int64,
+	prefix []byte,
+	indentBytes []byte,
+	escape bool) ([]byte, int64, error) {
+	if src[cursor] == '{' {
+		dst = append(dst, '{')
+	} else {
+		return nil, 0, errors.ErrExpected("expected { character for object value", cursor)
+	}
+	cursor = skipWhiteSpace(src, cursor+1)
+	if src[cursor] == '}' {
+		dst = append(dst, '}')
+		return dst, cursor + 1, nil
+	}
+	indentNum++
+	var err error
+	for {
+		dst = append(append(dst, '\n'), prefix...)
+		for i := 0; i < indentNum; i++ {
+			dst = append(dst, indentBytes...)
+		}
+		cursor = skipWhiteSpace(src, cursor)
+		dst, cursor, err = compactString(dst, src, cursor, escape)
+		if err != nil {
+			return nil, 0, err
+		}
+		cursor = skipWhiteSpace(src, cursor)
+		if src[cursor] != ':' {
+			return nil, 0, errors.ErrSyntax(
+				fmt.Sprintf("invalid character '%c' after object key", src[cursor]),
+				cursor+1,
+			)
+		}
+		dst = append(dst, ':', ' ')
+		dst, cursor, err = indentValue(dst, src, indentNum, cursor+1, prefix, indentBytes, escape)
+		if err != nil {
+			return nil, 0, err
+		}
+		cursor = skipWhiteSpace(src, cursor)
+		switch src[cursor] {
+		case '}':
+			dst = append(append(dst, '\n'), prefix...)
+			for i := 0; i < indentNum-1; i++ {
+				dst = append(dst, indentBytes...)
+			}
+			dst = append(dst, '}')
+			cursor++
+			return dst, cursor, nil
+		case ',':
+			dst = append(dst, ',')
+		default:
+			return nil, 0, errors.ErrSyntax(
+				fmt.Sprintf("invalid character '%c' after object key:value pair", src[cursor]),
+				cursor+1,
+			)
+		}
+		cursor++
+	}
+}
+
+func indentArray(
+	dst []byte,
+	src []byte,
+	indentNum int,
+	cursor int64,
+	prefix []byte,
+	indentBytes []byte,
+	escape bool) ([]byte, int64, error) {
+	if src[cursor] == '[' {
+		dst = append(dst, '[')
+	} else {
+		return nil, 0, errors.ErrExpected("expected [ character for array value", cursor)
+	}
+	cursor = skipWhiteSpace(src, cursor+1)
+	if src[cursor] == ']' {
+		dst = append(dst, ']')
+		return dst, cursor + 1, nil
+	}
+	indentNum++
+	var err error
+	for {
+		dst = append(append(dst, '\n'), prefix...)
+		for i := 0; i < indentNum; i++ {
+			dst = append(dst, indentBytes...)
+		}
+		dst, cursor, err = indentValue(dst, src, indentNum, cursor, prefix, indentBytes, escape)
+		if err != nil {
+			return nil, 0, err
+		}
+		cursor = skipWhiteSpace(src, cursor)
+		switch src[cursor] {
+		case ']':
+			dst = append(append(dst, '\n'), prefix...)
+			for i := 0; i < indentNum-1; i++ {
+				dst = append(dst, indentBytes...)
+			}
+			dst = append(dst, ']')
+			cursor++
+			return dst, cursor, nil
+		case ',':
+			dst = append(dst, ',')
+		default:
+			return nil, 0, errors.ErrSyntax(
+				fmt.Sprintf("invalid character '%c' after array value", src[cursor]),
+				cursor+1,
+			)
+		}
+		cursor++
+	}
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/int.go b/vendor/github.com/goccy/go-json/internal/encoder/int.go
new file mode 100644
index 0000000000..85f0796098
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/int.go
@@ -0,0 +1,152 @@
+package encoder
+
+import (
+	"unsafe"
+)
+
+var endianness int
+
+func init() {
+	var b [2]byte
+	*(*uint16)(unsafe.Pointer(&b)) = uint16(0xABCD)
+
+	switch b[0] {
+	case 0xCD:
+		endianness = 0 // LE
+	case 0xAB:
+		endianness = 1 // BE
+	default:
+		panic("could not determine endianness")
+	}
+}
+
+// "00010203...96979899" cast to []uint16
+var intLELookup = [100]uint16{
+	0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930,
+	0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931,
+	0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932,
+	0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933,
+	0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934,
+	0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935,
+	0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936,
+	0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937,
+	0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938,
+	0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939,
+}
+
+var intBELookup = [100]uint16{
+	0x3030, 0x3031, 0x3032, 0x3033, 0x3034, 0x3035, 0x3036, 0x3037, 0x3038, 0x3039,
+	0x3130, 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136, 0x3137, 0x3138, 0x3139,
+	0x3230, 0x3231, 0x3232, 0x3233, 0x3234, 0x3235, 0x3236, 0x3237, 0x3238, 0x3239,
+	0x3330, 0x3331, 0x3332, 0x3333, 0x3334, 0x3335, 0x3336, 0x3337, 0x3338, 0x3339,
+	0x3430, 0x3431, 0x3432, 0x3433, 0x3434, 0x3435, 0x3436, 0x3437, 0x3438, 0x3439,
+	0x3530, 0x3531, 0x3532, 0x3533, 0x3534, 0x3535, 0x3536, 0x3537, 0x3538, 0x3539,
+	0x3630, 0x3631, 0x3632, 0x3633, 0x3634, 0x3635, 0x3636, 0x3637, 0x3638, 0x3639,
+	0x3730, 0x3731, 0x3732, 0x3733, 0x3734, 0x3735, 0x3736, 0x3737, 0x3738, 0x3739,
+	0x3830, 0x3831, 0x3832, 0x3833, 0x3834, 0x3835, 0x3836, 0x3837, 0x3838, 0x3839,
+	0x3930, 0x3931, 0x3932, 0x3933, 0x3934, 0x3935, 0x3936, 0x3937, 0x3938, 0x3939,
+}
+
+var intLookup = [2]*[100]uint16{&intLELookup, &intBELookup}
+
+func numMask(numBitSize uint8) uint64 {
+	return 1<<numBitSize - 1
+}
+
+func AppendInt(_ *RuntimeContext, out []byte, p uintptr, code *Opcode) []byte {
+	var u64 uint64
+	switch code.NumBitSize {
+	case 8:
+		u64 = (uint64)(**(**uint8)(unsafe.Pointer(&p)))
+	case 16:
+		u64 = (uint64)(**(**uint16)(unsafe.Pointer(&p)))
+	case 32:
+		u64 = (uint64)(**(**uint32)(unsafe.Pointer(&p)))
+	case 64:
+		u64 = **(**uint64)(unsafe.Pointer(&p))
+	}
+	mask := numMask(code.NumBitSize)
+	n := u64 & mask
+	negative := (u64>>(code.NumBitSize-1))&1 == 1
+	if !negative {
+		if n < 10 {
+			return append(out, byte(n+'0'))
+		} else if n < 100 {
+			u := intLELookup[n]
+			return append(out, byte(u), byte(u>>8))
+		}
+	} else {
+		n = -n & mask
+	}
+
+	lookup := intLookup[endianness]
+
+	var b [22]byte
+	u := (*[11]uint16)(unsafe.Pointer(&b))
+	i := 11
+
+	for n >= 100 {
+		j := n % 100
+		n /= 100
+		i--
+		u[i] = lookup[j]
+	}
+
+	i--
+	u[i] = lookup[n]
+
+	i *= 2 // convert to byte index
+	if n < 10 {
+		i++ // remove leading zero
+	}
+	if negative {
+		i--
+		b[i] = '-'
+	}
+
+	return append(out, b[i:]...)
+}
+
+func AppendUint(_ *RuntimeContext, out []byte, p uintptr, code *Opcode) []byte {
+	var u64 uint64
+	switch code.NumBitSize {
+	case 8:
+		u64 = (uint64)(**(**uint8)(unsafe.Pointer(&p)))
+	case 16:
+		u64 = (uint64)(**(**uint16)(unsafe.Pointer(&p)))
+	case 32:
+		u64 = (uint64)(**(**uint32)(unsafe.Pointer(&p)))
+	case 64:
+		u64 = **(**uint64)(unsafe.Pointer(&p))
+	}
+	mask := numMask(code.NumBitSize)
+	n := u64 & mask
+	if n < 10 {
+		return append(out, byte(n+'0'))
+	} else if n < 100 {
+		u := intLELookup[n]
+		return append(out, byte(u), byte(u>>8))
+	}
+
+	lookup := intLookup[endianness]
+
+	var b [22]byte
+	u := (*[11]uint16)(unsafe.Pointer(&b))
+	i := 11
+
+	for n >= 100 {
+		j := n % 100
+		n /= 100
+		i--
+		u[i] = lookup[j]
+	}
+
+	i--
+	u[i] = lookup[n]
+
+	i *= 2 // convert to byte index
+	if n < 10 {
+		i++ // remove leading zero
+	}
+	return append(out, b[i:]...)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/map112.go b/vendor/github.com/goccy/go-json/internal/encoder/map112.go
new file mode 100644
index 0000000000..e96ffadf7a
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/map112.go
@@ -0,0 +1,9 @@
+//go:build !go1.13
+// +build !go1.13
+
+package encoder
+
+import "unsafe"
+
+//go:linkname MapIterValue reflect.mapitervalue
+func MapIterValue(it *mapIter) unsafe.Pointer
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/map113.go b/vendor/github.com/goccy/go-json/internal/encoder/map113.go
new file mode 100644
index 0000000000..9b69dcc360
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/map113.go
@@ -0,0 +1,9 @@
+//go:build go1.13
+// +build go1.13
+
+package encoder
+
+import "unsafe"
+
+//go:linkname MapIterValue reflect.mapiterelem
+func MapIterValue(it *mapIter) unsafe.Pointer
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/opcode.go b/vendor/github.com/goccy/go-json/internal/encoder/opcode.go
new file mode 100644
index 0000000000..df22f55423
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/opcode.go
@@ -0,0 +1,752 @@
+package encoder
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+const uintptrSize = 4 << (^uintptr(0) >> 63)
+
+type OpFlags uint16
+
+const (
+	AnonymousHeadFlags     OpFlags = 1 << 0
+	AnonymousKeyFlags      OpFlags = 1 << 1
+	IndirectFlags          OpFlags = 1 << 2
+	IsTaggedKeyFlags       OpFlags = 1 << 3
+	NilCheckFlags          OpFlags = 1 << 4
+	AddrForMarshalerFlags  OpFlags = 1 << 5
+	IsNextOpPtrTypeFlags   OpFlags = 1 << 6
+	IsNilableTypeFlags     OpFlags = 1 << 7
+	MarshalerContextFlags  OpFlags = 1 << 8
+	NonEmptyInterfaceFlags OpFlags = 1 << 9
+)
+
+type Opcode struct {
+	Op         OpType  // operation type
+	Idx        uint32  // offset to access ptr
+	Next       *Opcode // next opcode
+	End        *Opcode // array/slice/struct/map end
+	NextField  *Opcode // next struct field
+	Key        string  // struct field key
+	Offset     uint32  // offset size from struct header
+	PtrNum     uint8   // number of pointer indirections: e.g. a double pointer is 2.
+	NumBitSize uint8
+	Flags      OpFlags
+
+	Type       *runtime.Type // go type
+	Jmp        *CompiledCode // for recursive call
+	FieldQuery *FieldQuery   // field query for Interface / MarshalJSON / MarshalText
+	ElemIdx    uint32        // offset to access array/slice elem
+	Length     uint32        // offset to access slice length or array length
+	Indent     uint32        // indent number
+	Size       uint32        // array/slice elem size
+	DisplayIdx uint32        // opcode index
+	DisplayKey string        // key text to display
+}
+
+func (c *Opcode) Validate() error {
+	var prevIdx uint32
+	for code := c; !code.IsEnd(); {
+		if prevIdx != 0 {
+			if code.DisplayIdx != prevIdx+1 {
+				return fmt.Errorf(
+					"invalid index. previous display index is %d but next is %d. dump = %s",
+					prevIdx, code.DisplayIdx, c.Dump(),
+				)
+			}
+		}
+		prevIdx = code.DisplayIdx
+		code = code.IterNext()
+	}
+	return nil
+}
+
+func (c *Opcode) IterNext() *Opcode {
+	if c == nil {
+		return nil
+	}
+	switch c.Op.CodeType() {
+	case CodeArrayElem, CodeSliceElem, CodeMapKey:
+		return c.End
+	default:
+		return c.Next
+	}
+}
+
+func (c *Opcode) IsEnd() bool {
+	if c == nil {
+		return true
+	}
+	return c.Op == OpEnd || c.Op == OpInterfaceEnd || c.Op == OpRecursiveEnd
+}
+
+func (c *Opcode) MaxIdx() uint32 {
+	max := uint32(0)
+	for _, value := range []uint32{
+		c.Idx,
+		c.ElemIdx,
+		c.Length,
+		c.Size,
+	} {
+		if max < value {
+			max = value
+		}
+	}
+	return max
+}
+
+func (c *Opcode) ToHeaderType(isString bool) OpType {
+	switch c.Op {
+	case OpInt:
+		if isString {
+			return OpStructHeadIntString
+		}
+		return OpStructHeadInt
+	case OpIntPtr:
+		if isString {
+			return OpStructHeadIntPtrString
+		}
+		return OpStructHeadIntPtr
+	case OpUint:
+		if isString {
+			return OpStructHeadUintString
+		}
+		return OpStructHeadUint
+	case OpUintPtr:
+		if isString {
+			return OpStructHeadUintPtrString
+		}
+		return OpStructHeadUintPtr
+	case OpFloat32:
+		if isString {
+			return OpStructHeadFloat32String
+		}
+		return OpStructHeadFloat32
+	case OpFloat32Ptr:
+		if isString {
+			return OpStructHeadFloat32PtrString
+		}
+		return OpStructHeadFloat32Ptr
+	case OpFloat64:
+		if isString {
+			return OpStructHeadFloat64String
+		}
+		return OpStructHeadFloat64
+	case OpFloat64Ptr:
+		if isString {
+			return OpStructHeadFloat64PtrString
+		}
+		return OpStructHeadFloat64Ptr
+	case OpString:
+		if isString {
+			return OpStructHeadStringString
+		}
+		return OpStructHeadString
+	case OpStringPtr:
+		if isString {
+			return OpStructHeadStringPtrString
+		}
+		return OpStructHeadStringPtr
+	case OpNumber:
+		if isString {
+			return OpStructHeadNumberString
+		}
+		return OpStructHeadNumber
+	case OpNumberPtr:
+		if isString {
+			return OpStructHeadNumberPtrString
+		}
+		return OpStructHeadNumberPtr
+	case OpBool:
+		if isString {
+			return OpStructHeadBoolString
+		}
+		return OpStructHeadBool
+	case OpBoolPtr:
+		if isString {
+			return OpStructHeadBoolPtrString
+		}
+		return OpStructHeadBoolPtr
+	case OpBytes:
+		return OpStructHeadBytes
+	case OpBytesPtr:
+		return OpStructHeadBytesPtr
+	case OpMap:
+		return OpStructHeadMap
+	case OpMapPtr:
+		c.Op = OpMap
+		return OpStructHeadMapPtr
+	case OpArray:
+		return OpStructHeadArray
+	case OpArrayPtr:
+		c.Op = OpArray
+		return OpStructHeadArrayPtr
+	case OpSlice:
+		return OpStructHeadSlice
+	case OpSlicePtr:
+		c.Op = OpSlice
+		return OpStructHeadSlicePtr
+	case OpMarshalJSON:
+		return OpStructHeadMarshalJSON
+	case OpMarshalJSONPtr:
+		return OpStructHeadMarshalJSONPtr
+	case OpMarshalText:
+		return OpStructHeadMarshalText
+	case OpMarshalTextPtr:
+		return OpStructHeadMarshalTextPtr
+	}
+	return OpStructHead
+}
+
+func (c *Opcode) ToFieldType(isString bool) OpType {
+	switch c.Op {
+	case OpInt:
+		if isString {
+			return OpStructFieldIntString
+		}
+		return OpStructFieldInt
+	case OpIntPtr:
+		if isString {
+			return OpStructFieldIntPtrString
+		}
+		return OpStructFieldIntPtr
+	case OpUint:
+		if isString {
+			return OpStructFieldUintString
+		}
+		return OpStructFieldUint
+	case OpUintPtr:
+		if isString {
+			return OpStructFieldUintPtrString
+		}
+		return OpStructFieldUintPtr
+	case OpFloat32:
+		if isString {
+			return OpStructFieldFloat32String
+		}
+		return OpStructFieldFloat32
+	case OpFloat32Ptr:
+		if isString {
+			return OpStructFieldFloat32PtrString
+		}
+		return OpStructFieldFloat32Ptr
+	case OpFloat64:
+		if isString {
+			return OpStructFieldFloat64String
+		}
+		return OpStructFieldFloat64
+	case OpFloat64Ptr:
+		if isString {
+			return OpStructFieldFloat64PtrString
+		}
+		return OpStructFieldFloat64Ptr
+	case OpString:
+		if isString {
+			return OpStructFieldStringString
+		}
+		return OpStructFieldString
+	case OpStringPtr:
+		if isString {
+			return OpStructFieldStringPtrString
+		}
+		return OpStructFieldStringPtr
+	case OpNumber:
+		if isString {
+			return OpStructFieldNumberString
+		}
+		return OpStructFieldNumber
+	case OpNumberPtr:
+		if isString {
+			return OpStructFieldNumberPtrString
+		}
+		return OpStructFieldNumberPtr
+	case OpBool:
+		if isString {
+			return OpStructFieldBoolString
+		}
+		return OpStructFieldBool
+	case OpBoolPtr:
+		if isString {
+			return OpStructFieldBoolPtrString
+		}
+		return OpStructFieldBoolPtr
+	case OpBytes:
+		return OpStructFieldBytes
+	case OpBytesPtr:
+		return OpStructFieldBytesPtr
+	case OpMap:
+		return OpStructFieldMap
+	case OpMapPtr:
+		c.Op = OpMap
+		return OpStructFieldMapPtr
+	case OpArray:
+		return OpStructFieldArray
+	case OpArrayPtr:
+		c.Op = OpArray
+		return OpStructFieldArrayPtr
+	case OpSlice:
+		return OpStructFieldSlice
+	case OpSlicePtr:
+		c.Op = OpSlice
+		return OpStructFieldSlicePtr
+	case OpMarshalJSON:
+		return OpStructFieldMarshalJSON
+	case OpMarshalJSONPtr:
+		return OpStructFieldMarshalJSONPtr
+	case OpMarshalText:
+		return OpStructFieldMarshalText
+	case OpMarshalTextPtr:
+		return OpStructFieldMarshalTextPtr
+	}
+	return OpStructField
+}
+
+func newOpCode(ctx *compileContext, typ *runtime.Type, op OpType) *Opcode {
+	return newOpCodeWithNext(ctx, typ, op, newEndOp(ctx, typ))
+}
+
+func opcodeOffset(idx int) uint32 {
+	return uint32(idx) * uintptrSize
+}
+
+func getCodeAddrByIdx(head *Opcode, idx uint32) *Opcode {
+	addr := uintptr(unsafe.Pointer(head)) + uintptr(idx)*unsafe.Sizeof(Opcode{})
+	return *(**Opcode)(unsafe.Pointer(&addr))
+}
+
+func copyOpcode(code *Opcode) *Opcode {
+	codeNum := ToEndCode(code).DisplayIdx + 1
+	codeSlice := make([]Opcode, codeNum)
+	head := (*Opcode)((*runtime.SliceHeader)(unsafe.Pointer(&codeSlice)).Data)
+	ptr := head
+	c := code
+	for {
+		*ptr = Opcode{
+			Op:         c.Op,
+			Key:        c.Key,
+			PtrNum:     c.PtrNum,
+			NumBitSize: c.NumBitSize,
+			Flags:      c.Flags,
+			Idx:        c.Idx,
+			Offset:     c.Offset,
+			Type:       c.Type,
+			FieldQuery: c.FieldQuery,
+			DisplayIdx: c.DisplayIdx,
+			DisplayKey: c.DisplayKey,
+			ElemIdx:    c.ElemIdx,
+			Length:     c.Length,
+			Size:       c.Size,
+			Indent:     c.Indent,
+			Jmp:        c.Jmp,
+		}
+		if c.End != nil {
+			ptr.End = getCodeAddrByIdx(head, c.End.DisplayIdx)
+		}
+		if c.NextField != nil {
+			ptr.NextField = getCodeAddrByIdx(head, c.NextField.DisplayIdx)
+		}
+		if c.Next != nil {
+			ptr.Next = getCodeAddrByIdx(head, c.Next.DisplayIdx)
+		}
+		if c.IsEnd() {
+			break
+		}
+		ptr = getCodeAddrByIdx(head, c.DisplayIdx+1)
+		c = c.IterNext()
+	}
+	return head
+}
+
+func setTotalLengthToInterfaceOp(code *Opcode) {
+	for c := code; !c.IsEnd(); {
+		if c.Op == OpInterface || c.Op == OpInterfacePtr {
+			c.Length = uint32(code.TotalLength())
+		}
+		c = c.IterNext()
+	}
+}
+
+func ToEndCode(code *Opcode) *Opcode {
+	c := code
+	for !c.IsEnd() {
+		c = c.IterNext()
+	}
+	return c
+}
+
+func copyToInterfaceOpcode(code *Opcode) *Opcode {
+	copied := copyOpcode(code)
+	c := copied
+	c = ToEndCode(c)
+	c.Idx += uintptrSize
+	c.ElemIdx = c.Idx + uintptrSize
+	c.Length = c.Idx + 2*uintptrSize
+	c.Op = OpInterfaceEnd
+	return copied
+}
+
+func newOpCodeWithNext(ctx *compileContext, typ *runtime.Type, op OpType, next *Opcode) *Opcode {
+	return &Opcode{
+		Op:         op,
+		Idx:        opcodeOffset(ctx.ptrIndex),
+		Next:       next,
+		Type:       typ,
+		DisplayIdx: ctx.opcodeIndex,
+		Indent:     ctx.indent,
+	}
+}
+
+func newEndOp(ctx *compileContext, typ *runtime.Type) *Opcode {
+	return newOpCodeWithNext(ctx, typ, OpEnd, nil)
+}
+
+func (c *Opcode) TotalLength() int {
+	var idx int
+	code := c
+	for !code.IsEnd() {
+		maxIdx := int(code.MaxIdx() / uintptrSize)
+		if idx < maxIdx {
+			idx = maxIdx
+		}
+		if code.Op == OpRecursiveEnd {
+			break
+		}
+		code = code.IterNext()
+	}
+	maxIdx := int(code.MaxIdx() / uintptrSize)
+	if idx < maxIdx {
+		idx = maxIdx
+	}
+	return idx + 1
+}
+
+func (c *Opcode) dumpHead(code *Opcode) string {
+	var length uint32
+	if code.Op.CodeType() == CodeArrayHead {
+		length = code.Length
+	} else {
+		length = code.Length / uintptrSize
+	}
+	return fmt.Sprintf(
+		`[%03d]%s%s ([idx:%d][elemIdx:%d][length:%d])`,
+		code.DisplayIdx,
+		strings.Repeat("-", int(code.Indent)),
+		code.Op,
+		code.Idx/uintptrSize,
+		code.ElemIdx/uintptrSize,
+		length,
+	)
+}
+
+func (c *Opcode) dumpMapHead(code *Opcode) string {
+	return fmt.Sprintf(
+		`[%03d]%s%s ([idx:%d])`,
+		code.DisplayIdx,
+		strings.Repeat("-", int(code.Indent)),
+		code.Op,
+		code.Idx/uintptrSize,
+	)
+}
+
+func (c *Opcode) dumpMapEnd(code *Opcode) string {
+	return fmt.Sprintf(
+		`[%03d]%s%s ([idx:%d])`,
+		code.DisplayIdx,
+		strings.Repeat("-", int(code.Indent)),
+		code.Op,
+		code.Idx/uintptrSize,
+	)
+}
+
+func (c *Opcode) dumpElem(code *Opcode) string {
+	var length uint32
+	if code.Op.CodeType() == CodeArrayElem {
+		length = code.Length
+	} else {
+		length = code.Length / uintptrSize
+	}
+	return fmt.Sprintf(
+		`[%03d]%s%s ([idx:%d][elemIdx:%d][length:%d][size:%d])`,
+		code.DisplayIdx,
+		strings.Repeat("-", int(code.Indent)),
+		code.Op,
+		code.Idx/uintptrSize,
+		code.ElemIdx/uintptrSize,
+		length,
+		code.Size,
+	)
+}
+
+func (c *Opcode) dumpField(code *Opcode) string {
+	return fmt.Sprintf(
+		`[%03d]%s%s ([idx:%d][key:%s][offset:%d])`,
+		code.DisplayIdx,
+		strings.Repeat("-", int(code.Indent)),
+		code.Op,
+		code.Idx/uintptrSize,
+		code.DisplayKey,
+		code.Offset,
+	)
+}
+
+func (c *Opcode) dumpKey(code *Opcode) string {
+	return fmt.Sprintf(
+		`[%03d]%s%s ([idx:%d])`,
+		code.DisplayIdx,
+		strings.Repeat("-", int(code.Indent)),
+		code.Op,
+		code.Idx/uintptrSize,
+	)
+}
+
+func (c *Opcode) dumpValue(code *Opcode) string {
+	return fmt.Sprintf(
+		`[%03d]%s%s ([idx:%d])`,
+		code.DisplayIdx,
+		strings.Repeat("-", int(code.Indent)),
+		code.Op,
+		code.Idx/uintptrSize,
+	)
+}
+
+func (c *Opcode) Dump() string {
+	codes := []string{}
+	for code := c; !code.IsEnd(); {
+		switch code.Op.CodeType() {
+		case CodeSliceHead:
+			codes = append(codes, c.dumpHead(code))
+			code = code.Next
+		case CodeMapHead:
+			codes = append(codes, c.dumpMapHead(code))
+			code = code.Next
+		case CodeArrayElem, CodeSliceElem:
+			codes = append(codes, c.dumpElem(code))
+			code = code.End
+		case CodeMapKey:
+			codes = append(codes, c.dumpKey(code))
+			code = code.End
+		case CodeMapValue:
+			codes = append(codes, c.dumpValue(code))
+			code = code.Next
+		case CodeMapEnd:
+			codes = append(codes, c.dumpMapEnd(code))
+			code = code.Next
+		case CodeStructField:
+			codes = append(codes, c.dumpField(code))
+			code = code.Next
+		case CodeStructEnd:
+			codes = append(codes, c.dumpField(code))
+			code = code.Next
+		default:
+			codes = append(codes, fmt.Sprintf(
+				"[%03d]%s%s ([idx:%d])",
+				code.DisplayIdx,
+				strings.Repeat("-", int(code.Indent)),
+				code.Op,
+				code.Idx/uintptrSize,
+			))
+			code = code.Next
+		}
+	}
+	return strings.Join(codes, "\n")
+}
+
+func (c *Opcode) DumpDOT() string {
+	type edge struct {
+		from, to *Opcode
+		label    string
+		weight   int
+	}
+	var edges []edge
+
+	b := &bytes.Buffer{}
+	fmt.Fprintf(b, "digraph \"%p\" {\n", c.Type)
+	fmt.Fprintln(b, "mclimit=1.5;\nrankdir=TD;\nordering=out;\nnode[shape=box];")
+	for code := c; !code.IsEnd(); {
+		label := code.Op.String()
+		fmt.Fprintf(b, "\"%p\" [label=%q];\n", code, label)
+		if p := code.Next; p != nil {
+			edges = append(edges, edge{
+				from:   code,
+				to:     p,
+				label:  "Next",
+				weight: 10,
+			})
+		}
+		if p := code.NextField; p != nil {
+			edges = append(edges, edge{
+				from:   code,
+				to:     p,
+				label:  "NextField",
+				weight: 2,
+			})
+		}
+		if p := code.End; p != nil {
+			edges = append(edges, edge{
+				from:   code,
+				to:     p,
+				label:  "End",
+				weight: 1,
+			})
+		}
+		if p := code.Jmp; p != nil {
+			edges = append(edges, edge{
+				from:   code,
+				to:     p.Code,
+				label:  "Jmp",
+				weight: 1,
+			})
+		}
+
+		switch code.Op.CodeType() {
+		case CodeSliceHead:
+			code = code.Next
+		case CodeMapHead:
+			code = code.Next
+		case CodeArrayElem, CodeSliceElem:
+			code = code.End
+		case CodeMapKey:
+			code = code.End
+		case CodeMapValue:
+			code = code.Next
+		case CodeMapEnd:
+			code = code.Next
+		case CodeStructField:
+			code = code.Next
+		case CodeStructEnd:
+			code = code.Next
+		default:
+			code = code.Next
+		}
+		if code.IsEnd() {
+			fmt.Fprintf(b, "\"%p\" [label=%q];\n", code, code.Op.String())
+		}
+	}
+	sort.Slice(edges, func(i, j int) bool {
+		return edges[i].to.DisplayIdx < edges[j].to.DisplayIdx
+	})
+	for _, e := range edges {
+		fmt.Fprintf(b, "\"%p\" -> \"%p\" [label=%q][weight=%d];\n", e.from, e.to, e.label, e.weight)
+	}
+	fmt.Fprint(b, "}")
+	return b.String()
+}
+
+func newSliceHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode {
+	idx := opcodeOffset(ctx.ptrIndex)
+	ctx.incPtrIndex()
+	elemIdx := opcodeOffset(ctx.ptrIndex)
+	ctx.incPtrIndex()
+	length := opcodeOffset(ctx.ptrIndex)
+	return &Opcode{
+		Op:         OpSlice,
+		Type:       typ,
+		Idx:        idx,
+		DisplayIdx: ctx.opcodeIndex,
+		ElemIdx:    elemIdx,
+		Length:     length,
+		Indent:     ctx.indent,
+	}
+}
+
+func newSliceElemCode(ctx *compileContext, typ *runtime.Type, head *Opcode, size uintptr) *Opcode {
+	return &Opcode{
+		Op:         OpSliceElem,
+		Type:       typ,
+		Idx:        head.Idx,
+		DisplayIdx: ctx.opcodeIndex,
+		ElemIdx:    head.ElemIdx,
+		Length:     head.Length,
+		Indent:     ctx.indent,
+		Size:       uint32(size),
+	}
+}
+
+func newArrayHeaderCode(ctx *compileContext, typ *runtime.Type, alen int) *Opcode {
+	idx := opcodeOffset(ctx.ptrIndex)
+	ctx.incPtrIndex()
+	elemIdx := opcodeOffset(ctx.ptrIndex)
+	return &Opcode{
+		Op:         OpArray,
+		Type:       typ,
+		Idx:        idx,
+		DisplayIdx: ctx.opcodeIndex,
+		ElemIdx:    elemIdx,
+		Indent:     ctx.indent,
+		Length:     uint32(alen),
+	}
+}
+
+func newArrayElemCode(ctx *compileContext, typ *runtime.Type, head *Opcode, length int, size uintptr) *Opcode {
+	return &Opcode{
+		Op:         OpArrayElem,
+		Type:       typ,
+		Idx:        head.Idx,
+		DisplayIdx: ctx.opcodeIndex,
+		ElemIdx:    head.ElemIdx,
+		Length:     uint32(length),
+		Indent:     ctx.indent,
+		Size:       uint32(size),
+	}
+}
+
+func newMapHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode {
+	idx := opcodeOffset(ctx.ptrIndex)
+	ctx.incPtrIndex()
+	return &Opcode{
+		Op:         OpMap,
+		Type:       typ,
+		Idx:        idx,
+		DisplayIdx: ctx.opcodeIndex,
+		Indent:     ctx.indent,
+	}
+}
+
+func newMapKeyCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode {
+	return &Opcode{
+		Op:         OpMapKey,
+		Type:       typ,
+		Idx:        head.Idx,
+		DisplayIdx: ctx.opcodeIndex,
+		Indent:     ctx.indent,
+	}
+}
+
+func newMapValueCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode {
+	return &Opcode{
+		Op:         OpMapValue,
+		Type:       typ,
+		Idx:        head.Idx,
+		DisplayIdx: ctx.opcodeIndex,
+		Indent:     ctx.indent,
+	}
+}
+
+func newMapEndCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode {
+	return &Opcode{
+		Op:         OpMapEnd,
+		Type:       typ,
+		Idx:        head.Idx,
+		DisplayIdx: ctx.opcodeIndex,
+		Indent:     ctx.indent,
+		Next:       newEndOp(ctx, typ),
+	}
+}
+
+func newRecursiveCode(ctx *compileContext, typ *runtime.Type, jmp *CompiledCode) *Opcode {
+	return &Opcode{
+		Op:         OpRecursive,
+		Type:       typ,
+		Idx:        opcodeOffset(ctx.ptrIndex),
+		Next:       newEndOp(ctx, typ),
+		DisplayIdx: ctx.opcodeIndex,
+		Indent:     ctx.indent,
+		Jmp:        jmp,
+	}
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/option.go b/vendor/github.com/goccy/go-json/internal/encoder/option.go
new file mode 100644
index 0000000000..12c58e46c0
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/option.go
@@ -0,0 +1,48 @@
+package encoder
+
+import (
+	"context"
+	"io"
+)
+
+type OptionFlag uint8
+
+const (
+	HTMLEscapeOption OptionFlag = 1 << iota
+	IndentOption
+	UnorderedMapOption
+	DebugOption
+	ColorizeOption
+	ContextOption
+	NormalizeUTF8Option
+	FieldQueryOption
+)
+
+type Option struct {
+	Flag        OptionFlag
+	ColorScheme *ColorScheme
+	Context     context.Context
+	DebugOut    io.Writer
+	DebugDOTOut io.WriteCloser
+}
+
+type EncodeFormat struct {
+	Header string
+	Footer string
+}
+
+type EncodeFormatScheme struct {
+	Int       EncodeFormat
+	Uint      EncodeFormat
+	Float     EncodeFormat
+	Bool      EncodeFormat
+	String    EncodeFormat
+	Binary    EncodeFormat
+	ObjectKey EncodeFormat
+	Null      EncodeFormat
+}
+
+type (
+	ColorScheme = EncodeFormatScheme
+	ColorFormat = EncodeFormat
+)
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/optype.go b/vendor/github.com/goccy/go-json/internal/encoder/optype.go
new file mode 100644
index 0000000000..5c1241b47d
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/optype.go
@@ -0,0 +1,932 @@
+// Code generated by internal/cmd/generator. DO NOT EDIT!
+package encoder
+
+import (
+	"strings"
+)
+
+type CodeType int
+
+const (
+	CodeOp          CodeType = 0
+	CodeArrayHead   CodeType = 1
+	CodeArrayElem   CodeType = 2
+	CodeSliceHead   CodeType = 3
+	CodeSliceElem   CodeType = 4
+	CodeMapHead     CodeType = 5
+	CodeMapKey      CodeType = 6
+	CodeMapValue    CodeType = 7
+	CodeMapEnd      CodeType = 8
+	CodeRecursive   CodeType = 9
+	CodeStructField CodeType = 10
+	CodeStructEnd   CodeType = 11
+)
+
+var opTypeStrings = [400]string{
+	"End",
+	"Interface",
+	"Ptr",
+	"SliceElem",
+	"SliceEnd",
+	"ArrayElem",
+	"ArrayEnd",
+	"MapKey",
+	"MapValue",
+	"MapEnd",
+	"Recursive",
+	"RecursivePtr",
+	"RecursiveEnd",
+	"InterfaceEnd",
+	"Int",
+	"Uint",
+	"Float32",
+	"Float64",
+	"Bool",
+	"String",
+	"Bytes",
+	"Number",
+	"Array",
+	"Map",
+	"Slice",
+	"Struct",
+	"MarshalJSON",
+	"MarshalText",
+	"IntString",
+	"UintString",
+	"Float32String",
+	"Float64String",
+	"BoolString",
+	"StringString",
+	"NumberString",
+	"IntPtr",
+	"UintPtr",
+	"Float32Ptr",
+	"Float64Ptr",
+	"BoolPtr",
+	"StringPtr",
+	"BytesPtr",
+	"NumberPtr",
+	"ArrayPtr",
+	"MapPtr",
+	"SlicePtr",
+	"MarshalJSONPtr",
+	"MarshalTextPtr",
+	"InterfacePtr",
+	"IntPtrString",
+	"UintPtrString",
+	"Float32PtrString",
+	"Float64PtrString",
+	"BoolPtrString",
+	"StringPtrString",
+	"NumberPtrString",
+	"StructHeadInt",
+	"StructHeadOmitEmptyInt",
+	"StructPtrHeadInt",
+	"StructPtrHeadOmitEmptyInt",
+	"StructHeadUint",
+	"StructHeadOmitEmptyUint",
+	"StructPtrHeadUint",
+	"StructPtrHeadOmitEmptyUint",
+	"StructHeadFloat32",
+	"StructHeadOmitEmptyFloat32",
+	"StructPtrHeadFloat32",
+	"StructPtrHeadOmitEmptyFloat32",
+	"StructHeadFloat64",
+	"StructHeadOmitEmptyFloat64",
+	"StructPtrHeadFloat64",
+	"StructPtrHeadOmitEmptyFloat64",
+	"StructHeadBool",
+	"StructHeadOmitEmptyBool",
+	"StructPtrHeadBool",
+	"StructPtrHeadOmitEmptyBool",
+	"StructHeadString",
+	"StructHeadOmitEmptyString",
+	"StructPtrHeadString",
+	"StructPtrHeadOmitEmptyString",
+	"StructHeadBytes",
+	"StructHeadOmitEmptyBytes",
+	"StructPtrHeadBytes",
+	"StructPtrHeadOmitEmptyBytes",
+	"StructHeadNumber",
+	"StructHeadOmitEmptyNumber",
+	"StructPtrHeadNumber",
+	"StructPtrHeadOmitEmptyNumber",
+	"StructHeadArray",
+	"StructHeadOmitEmptyArray",
+	"StructPtrHeadArray",
+	"StructPtrHeadOmitEmptyArray",
+	"StructHeadMap",
+	"StructHeadOmitEmptyMap",
+	"StructPtrHeadMap",
+	"StructPtrHeadOmitEmptyMap",
+	"StructHeadSlice",
+	"StructHeadOmitEmptySlice",
+	"StructPtrHeadSlice",
+	"StructPtrHeadOmitEmptySlice",
+	"StructHeadStruct",
+	"StructHeadOmitEmptyStruct",
+	"StructPtrHeadStruct",
+	"StructPtrHeadOmitEmptyStruct",
+	"StructHeadMarshalJSON",
+	"StructHeadOmitEmptyMarshalJSON",
+	"StructPtrHeadMarshalJSON",
+	"StructPtrHeadOmitEmptyMarshalJSON",
+	"StructHeadMarshalText",
+	"StructHeadOmitEmptyMarshalText",
+	"StructPtrHeadMarshalText",
+	"StructPtrHeadOmitEmptyMarshalText",
+	"StructHeadIntString",
+	"StructHeadOmitEmptyIntString",
+	"StructPtrHeadIntString",
+	"StructPtrHeadOmitEmptyIntString",
+	"StructHeadUintString",
+	"StructHeadOmitEmptyUintString",
+	"StructPtrHeadUintString",
+	"StructPtrHeadOmitEmptyUintString",
+	"StructHeadFloat32String",
+	"StructHeadOmitEmptyFloat32String",
+	"StructPtrHeadFloat32String",
+	"StructPtrHeadOmitEmptyFloat32String",
+	"StructHeadFloat64String",
+	"StructHeadOmitEmptyFloat64String",
+	"StructPtrHeadFloat64String",
+	"StructPtrHeadOmitEmptyFloat64String",
+	"StructHeadBoolString",
+	"StructHeadOmitEmptyBoolString",
+	"StructPtrHeadBoolString",
+	"StructPtrHeadOmitEmptyBoolString",
+	"StructHeadStringString",
+	"StructHeadOmitEmptyStringString",
+	"StructPtrHeadStringString",
+	"StructPtrHeadOmitEmptyStringString",
+	"StructHeadNumberString",
+	"StructHeadOmitEmptyNumberString",
+	"StructPtrHeadNumberString",
+	"StructPtrHeadOmitEmptyNumberString",
+	"StructHeadIntPtr",
+	"StructHeadOmitEmptyIntPtr",
+	"StructPtrHeadIntPtr",
+	"StructPtrHeadOmitEmptyIntPtr",
+	"StructHeadUintPtr",
+	"StructHeadOmitEmptyUintPtr",
+	"StructPtrHeadUintPtr",
+	"StructPtrHeadOmitEmptyUintPtr",
+	"StructHeadFloat32Ptr",
+	"StructHeadOmitEmptyFloat32Ptr",
+	"StructPtrHeadFloat32Ptr",
+	"StructPtrHeadOmitEmptyFloat32Ptr",
+	"StructHeadFloat64Ptr",
+	"StructHeadOmitEmptyFloat64Ptr",
+	"StructPtrHeadFloat64Ptr",
+	"StructPtrHeadOmitEmptyFloat64Ptr",
+	"StructHeadBoolPtr",
+	"StructHeadOmitEmptyBoolPtr",
+	"StructPtrHeadBoolPtr",
+	"StructPtrHeadOmitEmptyBoolPtr",
+	"StructHeadStringPtr",
+	"StructHeadOmitEmptyStringPtr",
+	"StructPtrHeadStringPtr",
+	"StructPtrHeadOmitEmptyStringPtr",
+	"StructHeadBytesPtr",
+	"StructHeadOmitEmptyBytesPtr",
+	"StructPtrHeadBytesPtr",
+	"StructPtrHeadOmitEmptyBytesPtr",
+	"StructHeadNumberPtr",
+	"StructHeadOmitEmptyNumberPtr",
+	"StructPtrHeadNumberPtr",
+	"StructPtrHeadOmitEmptyNumberPtr",
+	"StructHeadArrayPtr",
+	"StructHeadOmitEmptyArrayPtr",
+	"StructPtrHeadArrayPtr",
+	"StructPtrHeadOmitEmptyArrayPtr",
+	"StructHeadMapPtr",
+	"StructHeadOmitEmptyMapPtr",
+	"StructPtrHeadMapPtr",
+	"StructPtrHeadOmitEmptyMapPtr",
+	"StructHeadSlicePtr",
+	"StructHeadOmitEmptySlicePtr",
+	"StructPtrHeadSlicePtr",
+	"StructPtrHeadOmitEmptySlicePtr",
+	"StructHeadMarshalJSONPtr",
+	"StructHeadOmitEmptyMarshalJSONPtr",
+	"StructPtrHeadMarshalJSONPtr",
+	"StructPtrHeadOmitEmptyMarshalJSONPtr",
+	"StructHeadMarshalTextPtr",
+	"StructHeadOmitEmptyMarshalTextPtr",
+	"StructPtrHeadMarshalTextPtr",
+	"StructPtrHeadOmitEmptyMarshalTextPtr",
+	"StructHeadInterfacePtr",
+	"StructHeadOmitEmptyInterfacePtr",
+	"StructPtrHeadInterfacePtr",
+	"StructPtrHeadOmitEmptyInterfacePtr",
+	"StructHeadIntPtrString",
+	"StructHeadOmitEmptyIntPtrString",
+	"StructPtrHeadIntPtrString",
+	"StructPtrHeadOmitEmptyIntPtrString",
+	"StructHeadUintPtrString",
+	"StructHeadOmitEmptyUintPtrString",
+	"StructPtrHeadUintPtrString",
+	"StructPtrHeadOmitEmptyUintPtrString",
+	"StructHeadFloat32PtrString",
+	"StructHeadOmitEmptyFloat32PtrString",
+	"StructPtrHeadFloat32PtrString",
+	"StructPtrHeadOmitEmptyFloat32PtrString",
+	"StructHeadFloat64PtrString",
+	"StructHeadOmitEmptyFloat64PtrString",
+	"StructPtrHeadFloat64PtrString",
+	"StructPtrHeadOmitEmptyFloat64PtrString",
+	"StructHeadBoolPtrString",
+	"StructHeadOmitEmptyBoolPtrString",
+	"StructPtrHeadBoolPtrString",
+	"StructPtrHeadOmitEmptyBoolPtrString",
+	"StructHeadStringPtrString",
+	"StructHeadOmitEmptyStringPtrString",
+	"StructPtrHeadStringPtrString",
+	"StructPtrHeadOmitEmptyStringPtrString",
+	"StructHeadNumberPtrString",
+	"StructHeadOmitEmptyNumberPtrString",
+	"StructPtrHeadNumberPtrString",
+	"StructPtrHeadOmitEmptyNumberPtrString",
+	"StructHead",
+	"StructHeadOmitEmpty",
+	"StructPtrHead",
+	"StructPtrHeadOmitEmpty",
+	"StructFieldInt",
+	"StructFieldOmitEmptyInt",
+	"StructEndInt",
+	"StructEndOmitEmptyInt",
+	"StructFieldUint",
+	"StructFieldOmitEmptyUint",
+	"StructEndUint",
+	"StructEndOmitEmptyUint",
+	"StructFieldFloat32",
+	"StructFieldOmitEmptyFloat32",
+	"StructEndFloat32",
+	"StructEndOmitEmptyFloat32",
+	"StructFieldFloat64",
+	"StructFieldOmitEmptyFloat64",
+	"StructEndFloat64",
+	"StructEndOmitEmptyFloat64",
+	"StructFieldBool",
+	"StructFieldOmitEmptyBool",
+	"StructEndBool",
+	"StructEndOmitEmptyBool",
+	"StructFieldString",
+	"StructFieldOmitEmptyString",
+	"StructEndString",
+	"StructEndOmitEmptyString",
+	"StructFieldBytes",
+	"StructFieldOmitEmptyBytes",
+	"StructEndBytes",
+	"StructEndOmitEmptyBytes",
+	"StructFieldNumber",
+	"StructFieldOmitEmptyNumber",
+	"StructEndNumber",
+	"StructEndOmitEmptyNumber",
+	"StructFieldArray",
+	"StructFieldOmitEmptyArray",
+	"StructEndArray",
+	"StructEndOmitEmptyArray",
+	"StructFieldMap",
+	"StructFieldOmitEmptyMap",
+	"StructEndMap",
+	"StructEndOmitEmptyMap",
+	"StructFieldSlice",
+	"StructFieldOmitEmptySlice",
+	"StructEndSlice",
+	"StructEndOmitEmptySlice",
+	"StructFieldStruct",
+	"StructFieldOmitEmptyStruct",
+	"StructEndStruct",
+	"StructEndOmitEmptyStruct",
+	"StructFieldMarshalJSON",
+	"StructFieldOmitEmptyMarshalJSON",
+	"StructEndMarshalJSON",
+	"StructEndOmitEmptyMarshalJSON",
+	"StructFieldMarshalText",
+	"StructFieldOmitEmptyMarshalText",
+	"StructEndMarshalText",
+	"StructEndOmitEmptyMarshalText",
+	"StructFieldIntString",
+	"StructFieldOmitEmptyIntString",
+	"StructEndIntString",
+	"StructEndOmitEmptyIntString",
+	"StructFieldUintString",
+	"StructFieldOmitEmptyUintString",
+	"StructEndUintString",
+	"StructEndOmitEmptyUintString",
+	"StructFieldFloat32String",
+	"StructFieldOmitEmptyFloat32String",
+	"StructEndFloat32String",
+	"StructEndOmitEmptyFloat32String",
+	"StructFieldFloat64String",
+	"StructFieldOmitEmptyFloat64String",
+	"StructEndFloat64String",
+	"StructEndOmitEmptyFloat64String",
+	"StructFieldBoolString",
+	"StructFieldOmitEmptyBoolString",
+	"StructEndBoolString",
+	"StructEndOmitEmptyBoolString",
+	"StructFieldStringString",
+	"StructFieldOmitEmptyStringString",
+	"StructEndStringString",
+	"StructEndOmitEmptyStringString",
+	"StructFieldNumberString",
+	"StructFieldOmitEmptyNumberString",
+	"StructEndNumberString",
+	"StructEndOmitEmptyNumberString",
+	"StructFieldIntPtr",
+	"StructFieldOmitEmptyIntPtr",
+	"StructEndIntPtr",
+	"StructEndOmitEmptyIntPtr",
+	"StructFieldUintPtr",
+	"StructFieldOmitEmptyUintPtr",
+	"StructEndUintPtr",
+	"StructEndOmitEmptyUintPtr",
+	"StructFieldFloat32Ptr",
+	"StructFieldOmitEmptyFloat32Ptr",
+	"StructEndFloat32Ptr",
+	"StructEndOmitEmptyFloat32Ptr",
+	"StructFieldFloat64Ptr",
+	"StructFieldOmitEmptyFloat64Ptr",
+	"StructEndFloat64Ptr",
+	"StructEndOmitEmptyFloat64Ptr",
+	"StructFieldBoolPtr",
+	"StructFieldOmitEmptyBoolPtr",
+	"StructEndBoolPtr",
+	"StructEndOmitEmptyBoolPtr",
+	"StructFieldStringPtr",
+	"StructFieldOmitEmptyStringPtr",
+	"StructEndStringPtr",
+	"StructEndOmitEmptyStringPtr",
+	"StructFieldBytesPtr",
+	"StructFieldOmitEmptyBytesPtr",
+	"StructEndBytesPtr",
+	"StructEndOmitEmptyBytesPtr",
+	"StructFieldNumberPtr",
+	"StructFieldOmitEmptyNumberPtr",
+	"StructEndNumberPtr",
+	"StructEndOmitEmptyNumberPtr",
+	"StructFieldArrayPtr",
+	"StructFieldOmitEmptyArrayPtr",
+	"StructEndArrayPtr",
+	"StructEndOmitEmptyArrayPtr",
+	"StructFieldMapPtr",
+	"StructFieldOmitEmptyMapPtr",
+	"StructEndMapPtr",
+	"StructEndOmitEmptyMapPtr",
+	"StructFieldSlicePtr",
+	"StructFieldOmitEmptySlicePtr",
+	"StructEndSlicePtr",
+	"StructEndOmitEmptySlicePtr",
+	"StructFieldMarshalJSONPtr",
+	"StructFieldOmitEmptyMarshalJSONPtr",
+	"StructEndMarshalJSONPtr",
+	"StructEndOmitEmptyMarshalJSONPtr",
+	"StructFieldMarshalTextPtr",
+	"StructFieldOmitEmptyMarshalTextPtr",
+	"StructEndMarshalTextPtr",
+	"StructEndOmitEmptyMarshalTextPtr",
+	"StructFieldInterfacePtr",
+	"StructFieldOmitEmptyInterfacePtr",
+	"StructEndInterfacePtr",
+	"StructEndOmitEmptyInterfacePtr",
+	"StructFieldIntPtrString",
+	"StructFieldOmitEmptyIntPtrString",
+	"StructEndIntPtrString",
+	"StructEndOmitEmptyIntPtrString",
+	"StructFieldUintPtrString",
+	"StructFieldOmitEmptyUintPtrString",
+	"StructEndUintPtrString",
+	"StructEndOmitEmptyUintPtrString",
+	"StructFieldFloat32PtrString",
+	"StructFieldOmitEmptyFloat32PtrString",
+	"StructEndFloat32PtrString",
+	"StructEndOmitEmptyFloat32PtrString",
+	"StructFieldFloat64PtrString",
+	"StructFieldOmitEmptyFloat64PtrString",
+	"StructEndFloat64PtrString",
+	"StructEndOmitEmptyFloat64PtrString",
+	"StructFieldBoolPtrString",
+	"StructFieldOmitEmptyBoolPtrString",
+	"StructEndBoolPtrString",
+	"StructEndOmitEmptyBoolPtrString",
+	"StructFieldStringPtrString",
+	"StructFieldOmitEmptyStringPtrString",
+	"StructEndStringPtrString",
+	"StructEndOmitEmptyStringPtrString",
+	"StructFieldNumberPtrString",
+	"StructFieldOmitEmptyNumberPtrString",
+	"StructEndNumberPtrString",
+	"StructEndOmitEmptyNumberPtrString",
+	"StructField",
+	"StructFieldOmitEmpty",
+	"StructEnd",
+	"StructEndOmitEmpty",
+}
+
+type OpType uint16
+
+const (
+	OpEnd                                    OpType = 0
+	OpInterface                              OpType = 1
+	OpPtr                                    OpType = 2
+	OpSliceElem                              OpType = 3
+	OpSliceEnd                               OpType = 4
+	OpArrayElem                              OpType = 5
+	OpArrayEnd                               OpType = 6
+	OpMapKey                                 OpType = 7
+	OpMapValue                               OpType = 8
+	OpMapEnd                                 OpType = 9
+	OpRecursive                              OpType = 10
+	OpRecursivePtr                           OpType = 11
+	OpRecursiveEnd                           OpType = 12
+	OpInterfaceEnd                           OpType = 13
+	OpInt                                    OpType = 14
+	OpUint                                   OpType = 15
+	OpFloat32                                OpType = 16
+	OpFloat64                                OpType = 17
+	OpBool                                   OpType = 18
+	OpString                                 OpType = 19
+	OpBytes                                  OpType = 20
+	OpNumber                                 OpType = 21
+	OpArray                                  OpType = 22
+	OpMap                                    OpType = 23
+	OpSlice                                  OpType = 24
+	OpStruct                                 OpType = 25
+	OpMarshalJSON                            OpType = 26
+	OpMarshalText                            OpType = 27
+	OpIntString                              OpType = 28
+	OpUintString                             OpType = 29
+	OpFloat32String                          OpType = 30
+	OpFloat64String                          OpType = 31
+	OpBoolString                             OpType = 32
+	OpStringString                           OpType = 33
+	OpNumberString                           OpType = 34
+	OpIntPtr                                 OpType = 35
+	OpUintPtr                                OpType = 36
+	OpFloat32Ptr                             OpType = 37
+	OpFloat64Ptr                             OpType = 38
+	OpBoolPtr                                OpType = 39
+	OpStringPtr                              OpType = 40
+	OpBytesPtr                               OpType = 41
+	OpNumberPtr                              OpType = 42
+	OpArrayPtr                               OpType = 43
+	OpMapPtr                                 OpType = 44
+	OpSlicePtr                               OpType = 45
+	OpMarshalJSONPtr                         OpType = 46
+	OpMarshalTextPtr                         OpType = 47
+	OpInterfacePtr                           OpType = 48
+	OpIntPtrString                           OpType = 49
+	OpUintPtrString                          OpType = 50
+	OpFloat32PtrString                       OpType = 51
+	OpFloat64PtrString                       OpType = 52
+	OpBoolPtrString                          OpType = 53
+	OpStringPtrString                        OpType = 54
+	OpNumberPtrString                        OpType = 55
+	OpStructHeadInt                          OpType = 56
+	OpStructHeadOmitEmptyInt                 OpType = 57
+	OpStructPtrHeadInt                       OpType = 58
+	OpStructPtrHeadOmitEmptyInt              OpType = 59
+	OpStructHeadUint                         OpType = 60
+	OpStructHeadOmitEmptyUint                OpType = 61
+	OpStructPtrHeadUint                      OpType = 62
+	OpStructPtrHeadOmitEmptyUint             OpType = 63
+	OpStructHeadFloat32                      OpType = 64
+	OpStructHeadOmitEmptyFloat32             OpType = 65
+	OpStructPtrHeadFloat32                   OpType = 66
+	OpStructPtrHeadOmitEmptyFloat32          OpType = 67
+	OpStructHeadFloat64                      OpType = 68
+	OpStructHeadOmitEmptyFloat64             OpType = 69
+	OpStructPtrHeadFloat64                   OpType = 70
+	OpStructPtrHeadOmitEmptyFloat64          OpType = 71
+	OpStructHeadBool                         OpType = 72
+	OpStructHeadOmitEmptyBool                OpType = 73
+	OpStructPtrHeadBool                      OpType = 74
+	OpStructPtrHeadOmitEmptyBool             OpType = 75
+	OpStructHeadString                       OpType = 76
+	OpStructHeadOmitEmptyString              OpType = 77
+	OpStructPtrHeadString                    OpType = 78
+	OpStructPtrHeadOmitEmptyString           OpType = 79
+	OpStructHeadBytes                        OpType = 80
+	OpStructHeadOmitEmptyBytes               OpType = 81
+	OpStructPtrHeadBytes                     OpType = 82
+	OpStructPtrHeadOmitEmptyBytes            OpType = 83
+	OpStructHeadNumber                       OpType = 84
+	OpStructHeadOmitEmptyNumber              OpType = 85
+	OpStructPtrHeadNumber                    OpType = 86
+	OpStructPtrHeadOmitEmptyNumber           OpType = 87
+	OpStructHeadArray                        OpType = 88
+	OpStructHeadOmitEmptyArray               OpType = 89
+	OpStructPtrHeadArray                     OpType = 90
+	OpStructPtrHeadOmitEmptyArray            OpType = 91
+	OpStructHeadMap                          OpType = 92
+	OpStructHeadOmitEmptyMap                 OpType = 93
+	OpStructPtrHeadMap                       OpType = 94
+	OpStructPtrHeadOmitEmptyMap              OpType = 95
+	OpStructHeadSlice                        OpType = 96
+	OpStructHeadOmitEmptySlice               OpType = 97
+	OpStructPtrHeadSlice                     OpType = 98
+	OpStructPtrHeadOmitEmptySlice            OpType = 99
+	OpStructHeadStruct                       OpType = 100
+	OpStructHeadOmitEmptyStruct              OpType = 101
+	OpStructPtrHeadStruct                    OpType = 102
+	OpStructPtrHeadOmitEmptyStruct           OpType = 103
+	OpStructHeadMarshalJSON                  OpType = 104
+	OpStructHeadOmitEmptyMarshalJSON         OpType = 105
+	OpStructPtrHeadMarshalJSON               OpType = 106
+	OpStructPtrHeadOmitEmptyMarshalJSON      OpType = 107
+	OpStructHeadMarshalText                  OpType = 108
+	OpStructHeadOmitEmptyMarshalText         OpType = 109
+	OpStructPtrHeadMarshalText               OpType = 110
+	OpStructPtrHeadOmitEmptyMarshalText      OpType = 111
+	OpStructHeadIntString                    OpType = 112
+	OpStructHeadOmitEmptyIntString           OpType = 113
+	OpStructPtrHeadIntString                 OpType = 114
+	OpStructPtrHeadOmitEmptyIntString        OpType = 115
+	OpStructHeadUintString                   OpType = 116
+	OpStructHeadOmitEmptyUintString          OpType = 117
+	OpStructPtrHeadUintString                OpType = 118
+	OpStructPtrHeadOmitEmptyUintString       OpType = 119
+	OpStructHeadFloat32String                OpType = 120
+	OpStructHeadOmitEmptyFloat32String       OpType = 121
+	OpStructPtrHeadFloat32String             OpType = 122
+	OpStructPtrHeadOmitEmptyFloat32String    OpType = 123
+	OpStructHeadFloat64String                OpType = 124
+	OpStructHeadOmitEmptyFloat64String       OpType = 125
+	OpStructPtrHeadFloat64String             OpType = 126
+	OpStructPtrHeadOmitEmptyFloat64String    OpType = 127
+	OpStructHeadBoolString                   OpType = 128
+	OpStructHeadOmitEmptyBoolString          OpType = 129
+	OpStructPtrHeadBoolString                OpType = 130
+	OpStructPtrHeadOmitEmptyBoolString       OpType = 131
+	OpStructHeadStringString                 OpType = 132
+	OpStructHeadOmitEmptyStringString        OpType = 133
+	OpStructPtrHeadStringString              OpType = 134
+	OpStructPtrHeadOmitEmptyStringString     OpType = 135
+	OpStructHeadNumberString                 OpType = 136
+	OpStructHeadOmitEmptyNumberString        OpType = 137
+	OpStructPtrHeadNumberString              OpType = 138
+	OpStructPtrHeadOmitEmptyNumberString     OpType = 139
+	OpStructHeadIntPtr                       OpType = 140
+	OpStructHeadOmitEmptyIntPtr              OpType = 141
+	OpStructPtrHeadIntPtr                    OpType = 142
+	OpStructPtrHeadOmitEmptyIntPtr           OpType = 143
+	OpStructHeadUintPtr                      OpType = 144
+	OpStructHeadOmitEmptyUintPtr             OpType = 145
+	OpStructPtrHeadUintPtr                   OpType = 146
+	OpStructPtrHeadOmitEmptyUintPtr          OpType = 147
+	OpStructHeadFloat32Ptr                   OpType = 148
+	OpStructHeadOmitEmptyFloat32Ptr          OpType = 149
+	OpStructPtrHeadFloat32Ptr                OpType = 150
+	OpStructPtrHeadOmitEmptyFloat32Ptr       OpType = 151
+	OpStructHeadFloat64Ptr                   OpType = 152
+	OpStructHeadOmitEmptyFloat64Ptr          OpType = 153
+	OpStructPtrHeadFloat64Ptr                OpType = 154
+	OpStructPtrHeadOmitEmptyFloat64Ptr       OpType = 155
+	OpStructHeadBoolPtr                      OpType = 156
+	OpStructHeadOmitEmptyBoolPtr             OpType = 157
+	OpStructPtrHeadBoolPtr                   OpType = 158
+	OpStructPtrHeadOmitEmptyBoolPtr          OpType = 159
+	OpStructHeadStringPtr                    OpType = 160
+	OpStructHeadOmitEmptyStringPtr           OpType = 161
+	OpStructPtrHeadStringPtr                 OpType = 162
+	OpStructPtrHeadOmitEmptyStringPtr        OpType = 163
+	OpStructHeadBytesPtr                     OpType = 164
+	OpStructHeadOmitEmptyBytesPtr            OpType = 165
+	OpStructPtrHeadBytesPtr                  OpType = 166
+	OpStructPtrHeadOmitEmptyBytesPtr         OpType = 167
+	OpStructHeadNumberPtr                    OpType = 168
+	OpStructHeadOmitEmptyNumberPtr           OpType = 169
+	OpStructPtrHeadNumberPtr                 OpType = 170
+	OpStructPtrHeadOmitEmptyNumberPtr        OpType = 171
+	OpStructHeadArrayPtr                     OpType = 172
+	OpStructHeadOmitEmptyArrayPtr            OpType = 173
+	OpStructPtrHeadArrayPtr                  OpType = 174
+	OpStructPtrHeadOmitEmptyArrayPtr         OpType = 175
+	OpStructHeadMapPtr                       OpType = 176
+	OpStructHeadOmitEmptyMapPtr              OpType = 177
+	OpStructPtrHeadMapPtr                    OpType = 178
+	OpStructPtrHeadOmitEmptyMapPtr           OpType = 179
+	OpStructHeadSlicePtr                     OpType = 180
+	OpStructHeadOmitEmptySlicePtr            OpType = 181
+	OpStructPtrHeadSlicePtr                  OpType = 182
+	OpStructPtrHeadOmitEmptySlicePtr         OpType = 183
+	OpStructHeadMarshalJSONPtr               OpType = 184
+	OpStructHeadOmitEmptyMarshalJSONPtr      OpType = 185
+	OpStructPtrHeadMarshalJSONPtr            OpType = 186
+	OpStructPtrHeadOmitEmptyMarshalJSONPtr   OpType = 187
+	OpStructHeadMarshalTextPtr               OpType = 188
+	OpStructHeadOmitEmptyMarshalTextPtr      OpType = 189
+	OpStructPtrHeadMarshalTextPtr            OpType = 190
+	OpStructPtrHeadOmitEmptyMarshalTextPtr   OpType = 191
+	OpStructHeadInterfacePtr                 OpType = 192
+	OpStructHeadOmitEmptyInterfacePtr        OpType = 193
+	OpStructPtrHeadInterfacePtr              OpType = 194
+	OpStructPtrHeadOmitEmptyInterfacePtr     OpType = 195
+	OpStructHeadIntPtrString                 OpType = 196
+	OpStructHeadOmitEmptyIntPtrString        OpType = 197
+	OpStructPtrHeadIntPtrString              OpType = 198
+	OpStructPtrHeadOmitEmptyIntPtrString     OpType = 199
+	OpStructHeadUintPtrString                OpType = 200
+	OpStructHeadOmitEmptyUintPtrString       OpType = 201
+	OpStructPtrHeadUintPtrString             OpType = 202
+	OpStructPtrHeadOmitEmptyUintPtrString    OpType = 203
+	OpStructHeadFloat32PtrString             OpType = 204
+	OpStructHeadOmitEmptyFloat32PtrString    OpType = 205
+	OpStructPtrHeadFloat32PtrString          OpType = 206
+	OpStructPtrHeadOmitEmptyFloat32PtrString OpType = 207
+	OpStructHeadFloat64PtrString             OpType = 208
+	OpStructHeadOmitEmptyFloat64PtrString    OpType = 209
+	OpStructPtrHeadFloat64PtrString          OpType = 210
+	OpStructPtrHeadOmitEmptyFloat64PtrString OpType = 211
+	OpStructHeadBoolPtrString                OpType = 212
+	OpStructHeadOmitEmptyBoolPtrString       OpType = 213
+	OpStructPtrHeadBoolPtrString             OpType = 214
+	OpStructPtrHeadOmitEmptyBoolPtrString    OpType = 215
+	OpStructHeadStringPtrString              OpType = 216
+	OpStructHeadOmitEmptyStringPtrString     OpType = 217
+	OpStructPtrHeadStringPtrString           OpType = 218
+	OpStructPtrHeadOmitEmptyStringPtrString  OpType = 219
+	OpStructHeadNumberPtrString              OpType = 220
+	OpStructHeadOmitEmptyNumberPtrString     OpType = 221
+	OpStructPtrHeadNumberPtrString           OpType = 222
+	OpStructPtrHeadOmitEmptyNumberPtrString  OpType = 223
+	OpStructHead                             OpType = 224
+	OpStructHeadOmitEmpty                    OpType = 225
+	OpStructPtrHead                          OpType = 226
+	OpStructPtrHeadOmitEmpty                 OpType = 227
+	OpStructFieldInt                         OpType = 228
+	OpStructFieldOmitEmptyInt                OpType = 229
+	OpStructEndInt                           OpType = 230
+	OpStructEndOmitEmptyInt                  OpType = 231
+	OpStructFieldUint                        OpType = 232
+	OpStructFieldOmitEmptyUint               OpType = 233
+	OpStructEndUint                          OpType = 234
+	OpStructEndOmitEmptyUint                 OpType = 235
+	OpStructFieldFloat32                     OpType = 236
+	OpStructFieldOmitEmptyFloat32            OpType = 237
+	OpStructEndFloat32                       OpType = 238
+	OpStructEndOmitEmptyFloat32              OpType = 239
+	OpStructFieldFloat64                     OpType = 240
+	OpStructFieldOmitEmptyFloat64            OpType = 241
+	OpStructEndFloat64                       OpType = 242
+	OpStructEndOmitEmptyFloat64              OpType = 243
+	OpStructFieldBool                        OpType = 244
+	OpStructFieldOmitEmptyBool               OpType = 245
+	OpStructEndBool                          OpType = 246
+	OpStructEndOmitEmptyBool                 OpType = 247
+	OpStructFieldString                      OpType = 248
+	OpStructFieldOmitEmptyString             OpType = 249
+	OpStructEndString                        OpType = 250
+	OpStructEndOmitEmptyString               OpType = 251
+	OpStructFieldBytes                       OpType = 252
+	OpStructFieldOmitEmptyBytes              OpType = 253
+	OpStructEndBytes                         OpType = 254
+	OpStructEndOmitEmptyBytes                OpType = 255
+	OpStructFieldNumber                      OpType = 256
+	OpStructFieldOmitEmptyNumber             OpType = 257
+	OpStructEndNumber                        OpType = 258
+	OpStructEndOmitEmptyNumber               OpType = 259
+	OpStructFieldArray                       OpType = 260
+	OpStructFieldOmitEmptyArray              OpType = 261
+	OpStructEndArray                         OpType = 262
+	OpStructEndOmitEmptyArray                OpType = 263
+	OpStructFieldMap                         OpType = 264
+	OpStructFieldOmitEmptyMap                OpType = 265
+	OpStructEndMap                           OpType = 266
+	OpStructEndOmitEmptyMap                  OpType = 267
+	OpStructFieldSlice                       OpType = 268
+	OpStructFieldOmitEmptySlice              OpType = 269
+	OpStructEndSlice                         OpType = 270
+	OpStructEndOmitEmptySlice                OpType = 271
+	OpStructFieldStruct                      OpType = 272
+	OpStructFieldOmitEmptyStruct             OpType = 273
+	OpStructEndStruct                        OpType = 274
+	OpStructEndOmitEmptyStruct               OpType = 275
+	OpStructFieldMarshalJSON                 OpType = 276
+	OpStructFieldOmitEmptyMarshalJSON        OpType = 277
+	OpStructEndMarshalJSON                   OpType = 278
+	OpStructEndOmitEmptyMarshalJSON          OpType = 279
+	OpStructFieldMarshalText                 OpType = 280
+	OpStructFieldOmitEmptyMarshalText        OpType = 281
+	OpStructEndMarshalText                   OpType = 282
+	OpStructEndOmitEmptyMarshalText          OpType = 283
+	OpStructFieldIntString                   OpType = 284
+	OpStructFieldOmitEmptyIntString          OpType = 285
+	OpStructEndIntString                     OpType = 286
+	OpStructEndOmitEmptyIntString            OpType = 287
+	OpStructFieldUintString                  OpType = 288
+	OpStructFieldOmitEmptyUintString         OpType = 289
+	OpStructEndUintString                    OpType = 290
+	OpStructEndOmitEmptyUintString           OpType = 291
+	OpStructFieldFloat32String               OpType = 292
+	OpStructFieldOmitEmptyFloat32String      OpType = 293
+	OpStructEndFloat32String                 OpType = 294
+	OpStructEndOmitEmptyFloat32String        OpType = 295
+	OpStructFieldFloat64String               OpType = 296
+	OpStructFieldOmitEmptyFloat64String      OpType = 297
+	OpStructEndFloat64String                 OpType = 298
+	OpStructEndOmitEmptyFloat64String        OpType = 299
+	OpStructFieldBoolString                  OpType = 300
+	OpStructFieldOmitEmptyBoolString         OpType = 301
+	OpStructEndBoolString                    OpType = 302
+	OpStructEndOmitEmptyBoolString           OpType = 303
+	OpStructFieldStringString                OpType = 304
+	OpStructFieldOmitEmptyStringString       OpType = 305
+	OpStructEndStringString                  OpType = 306
+	OpStructEndOmitEmptyStringString         OpType = 307
+	OpStructFieldNumberString                OpType = 308
+	OpStructFieldOmitEmptyNumberString       OpType = 309
+	OpStructEndNumberString                  OpType = 310
+	OpStructEndOmitEmptyNumberString         OpType = 311
+	OpStructFieldIntPtr                      OpType = 312
+	OpStructFieldOmitEmptyIntPtr             OpType = 313
+	OpStructEndIntPtr                        OpType = 314
+	OpStructEndOmitEmptyIntPtr               OpType = 315
+	OpStructFieldUintPtr                     OpType = 316
+	OpStructFieldOmitEmptyUintPtr            OpType = 317
+	OpStructEndUintPtr                       OpType = 318
+	OpStructEndOmitEmptyUintPtr              OpType = 319
+	OpStructFieldFloat32Ptr                  OpType = 320
+	OpStructFieldOmitEmptyFloat32Ptr         OpType = 321
+	OpStructEndFloat32Ptr                    OpType = 322
+	OpStructEndOmitEmptyFloat32Ptr           OpType = 323
+	OpStructFieldFloat64Ptr                  OpType = 324
+	OpStructFieldOmitEmptyFloat64Ptr         OpType = 325
+	OpStructEndFloat64Ptr                    OpType = 326
+	OpStructEndOmitEmptyFloat64Ptr           OpType = 327
+	OpStructFieldBoolPtr                     OpType = 328
+	OpStructFieldOmitEmptyBoolPtr            OpType = 329
+	OpStructEndBoolPtr                       OpType = 330
+	OpStructEndOmitEmptyBoolPtr              OpType = 331
+	OpStructFieldStringPtr                   OpType = 332
+	OpStructFieldOmitEmptyStringPtr          OpType = 333
+	OpStructEndStringPtr                     OpType = 334
+	OpStructEndOmitEmptyStringPtr            OpType = 335
+	OpStructFieldBytesPtr                    OpType = 336
+	OpStructFieldOmitEmptyBytesPtr           OpType = 337
+	OpStructEndBytesPtr                      OpType = 338
+	OpStructEndOmitEmptyBytesPtr             OpType = 339
+	OpStructFieldNumberPtr                   OpType = 340
+	OpStructFieldOmitEmptyNumberPtr          OpType = 341
+	OpStructEndNumberPtr                     OpType = 342
+	OpStructEndOmitEmptyNumberPtr            OpType = 343
+	OpStructFieldArrayPtr                    OpType = 344
+	OpStructFieldOmitEmptyArrayPtr           OpType = 345
+	OpStructEndArrayPtr                      OpType = 346
+	OpStructEndOmitEmptyArrayPtr             OpType = 347
+	OpStructFieldMapPtr                      OpType = 348
+	OpStructFieldOmitEmptyMapPtr             OpType = 349
+	OpStructEndMapPtr                        OpType = 350
+	OpStructEndOmitEmptyMapPtr               OpType = 351
+	OpStructFieldSlicePtr                    OpType = 352
+	OpStructFieldOmitEmptySlicePtr           OpType = 353
+	OpStructEndSlicePtr                      OpType = 354
+	OpStructEndOmitEmptySlicePtr             OpType = 355
+	OpStructFieldMarshalJSONPtr              OpType = 356
+	OpStructFieldOmitEmptyMarshalJSONPtr     OpType = 357
+	OpStructEndMarshalJSONPtr                OpType = 358
+	OpStructEndOmitEmptyMarshalJSONPtr       OpType = 359
+	OpStructFieldMarshalTextPtr              OpType = 360
+	OpStructFieldOmitEmptyMarshalTextPtr     OpType = 361
+	OpStructEndMarshalTextPtr                OpType = 362
+	OpStructEndOmitEmptyMarshalTextPtr       OpType = 363
+	OpStructFieldInterfacePtr                OpType = 364
+	OpStructFieldOmitEmptyInterfacePtr       OpType = 365
+	OpStructEndInterfacePtr                  OpType = 366
+	OpStructEndOmitEmptyInterfacePtr         OpType = 367
+	OpStructFieldIntPtrString                OpType = 368
+	OpStructFieldOmitEmptyIntPtrString       OpType = 369
+	OpStructEndIntPtrString                  OpType = 370
+	OpStructEndOmitEmptyIntPtrString         OpType = 371
+	OpStructFieldUintPtrString               OpType = 372
+	OpStructFieldOmitEmptyUintPtrString      OpType = 373
+	OpStructEndUintPtrString                 OpType = 374
+	OpStructEndOmitEmptyUintPtrString        OpType = 375
+	OpStructFieldFloat32PtrString            OpType = 376
+	OpStructFieldOmitEmptyFloat32PtrString   OpType = 377
+	OpStructEndFloat32PtrString              OpType = 378
+	OpStructEndOmitEmptyFloat32PtrString     OpType = 379
+	OpStructFieldFloat64PtrString            OpType = 380
+	OpStructFieldOmitEmptyFloat64PtrString   OpType = 381
+	OpStructEndFloat64PtrString              OpType = 382
+	OpStructEndOmitEmptyFloat64PtrString     OpType = 383
+	OpStructFieldBoolPtrString               OpType = 384
+	OpStructFieldOmitEmptyBoolPtrString      OpType = 385
+	OpStructEndBoolPtrString                 OpType = 386
+	OpStructEndOmitEmptyBoolPtrString        OpType = 387
+	OpStructFieldStringPtrString             OpType = 388
+	OpStructFieldOmitEmptyStringPtrString    OpType = 389
+	OpStructEndStringPtrString               OpType = 390
+	OpStructEndOmitEmptyStringPtrString      OpType = 391
+	OpStructFieldNumberPtrString             OpType = 392
+	OpStructFieldOmitEmptyNumberPtrString    OpType = 393
+	OpStructEndNumberPtrString               OpType = 394
+	OpStructEndOmitEmptyNumberPtrString      OpType = 395
+	OpStructField                            OpType = 396
+	OpStructFieldOmitEmpty                   OpType = 397
+	OpStructEnd                              OpType = 398
+	OpStructEndOmitEmpty                     OpType = 399
+)
+
+func (t OpType) String() string {
+	if int(t) >= 400 {
+		return ""
+	}
+	return opTypeStrings[int(t)]
+}
+
+func (t OpType) CodeType() CodeType {
+	if strings.Contains(t.String(), "Struct") {
+		if strings.Contains(t.String(), "End") {
+			return CodeStructEnd
+		}
+		return CodeStructField
+	}
+	switch t {
+	case OpArray, OpArrayPtr:
+		return CodeArrayHead
+	case OpArrayElem:
+		return CodeArrayElem
+	case OpSlice, OpSlicePtr:
+		return CodeSliceHead
+	case OpSliceElem:
+		return CodeSliceElem
+	case OpMap, OpMapPtr:
+		return CodeMapHead
+	case OpMapKey:
+		return CodeMapKey
+	case OpMapValue:
+		return CodeMapValue
+	case OpMapEnd:
+		return CodeMapEnd
+	}
+
+	return CodeOp
+}
+
+func (t OpType) HeadToPtrHead() OpType {
+	if strings.Index(t.String(), "PtrHead") > 0 {
+		return t
+	}
+
+	idx := strings.Index(t.String(), "Head")
+	if idx == -1 {
+		return t
+	}
+	suffix := "PtrHead" + t.String()[idx+len("Head"):]
+
+	const toPtrOffset = 2
+	if strings.Contains(OpType(int(t)+toPtrOffset).String(), suffix) {
+		return OpType(int(t) + toPtrOffset)
+	}
+	return t
+}
+
+func (t OpType) HeadToOmitEmptyHead() OpType {
+	const toOmitEmptyOffset = 1
+	if strings.Contains(OpType(int(t)+toOmitEmptyOffset).String(), "OmitEmpty") {
+		return OpType(int(t) + toOmitEmptyOffset)
+	}
+
+	return t
+}
+
+func (t OpType) PtrHeadToHead() OpType {
+	idx := strings.Index(t.String(), "PtrHead")
+	if idx == -1 {
+		return t
+	}
+	suffix := t.String()[idx+len("Ptr"):]
+
+	const toPtrOffset = 2
+	if strings.Contains(OpType(int(t)-toPtrOffset).String(), suffix) {
+		return OpType(int(t) - toPtrOffset)
+	}
+	return t
+}
+
+func (t OpType) FieldToEnd() OpType {
+	idx := strings.Index(t.String(), "Field")
+	if idx == -1 {
+		return t
+	}
+	suffix := t.String()[idx+len("Field"):]
+	if suffix == "" || suffix == "OmitEmpty" {
+		return t
+	}
+	const toEndOffset = 2
+	if strings.Contains(OpType(int(t)+toEndOffset).String(), "End"+suffix) {
+		return OpType(int(t) + toEndOffset)
+	}
+	return t
+}
+
+func (t OpType) FieldToOmitEmptyField() OpType {
+	const toOmitEmptyOffset = 1
+	if strings.Contains(OpType(int(t)+toOmitEmptyOffset).String(), "OmitEmpty") {
+		return OpType(int(t) + toOmitEmptyOffset)
+	}
+	return t
+}
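
The conversion helpers above depend on the generated table layout: each base op is immediately followed by its OmitEmpty variant (+1), and the matching PtrHead or End variant sits a fixed two slots away, so a conversion is plain integer arithmetic guarded by a name check. A minimal standalone sketch of that convention, using a hypothetical four-entry table rather than the real generated one:

package main

import (
	"fmt"
	"strings"
)

// op mimics OpType; the table below mimics the generated layout in which the
// OmitEmpty variant sits one slot after its base op and the PtrHead variant
// two slots after the corresponding Head op.
type op uint8

const (
	opHeadInt             op = 0
	opHeadOmitEmptyInt    op = 1 // +1 from the base Head op
	opPtrHeadInt          op = 2 // +2 from the base Head op
	opPtrHeadOmitEmptyInt op = 3
)

var opNames = []string{"HeadInt", "HeadOmitEmptyInt", "PtrHeadInt", "PtrHeadOmitEmptyInt"}

func (o op) String() string {
	if int(o) >= len(opNames) {
		return ""
	}
	return opNames[o]
}

// headToOmitEmpty mirrors HeadToOmitEmptyHead: step +1 only if the neighbour
// really is the OmitEmpty variant, otherwise leave the op unchanged.
func headToOmitEmpty(o op) op {
	if strings.Contains((o + 1).String(), "OmitEmpty") {
		return o + 1
	}
	return o
}

func main() {
	fmt.Println(headToOmitEmpty(opHeadInt))    // HeadOmitEmptyInt
	fmt.Println(headToOmitEmpty(opPtrHeadInt)) // PtrHeadOmitEmptyInt
}
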
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/query.go b/vendor/github.com/goccy/go-json/internal/encoder/query.go
new file mode 100644
index 0000000000..1e1850cc15
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/query.go
@@ -0,0 +1,135 @@
+package encoder
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+)
+
+var (
+	Marshal   func(interface{}) ([]byte, error)
+	Unmarshal func([]byte, interface{}) error
+)
+
+type FieldQuery struct {
+	Name   string
+	Fields []*FieldQuery
+	hash   string
+}
+
+func (q *FieldQuery) Hash() string {
+	if q.hash != "" {
+		return q.hash
+	}
+	b, _ := Marshal(q)
+	q.hash = string(b)
+	return q.hash
+}
+
+func (q *FieldQuery) MarshalJSON() ([]byte, error) {
+	if q.Name != "" {
+		if len(q.Fields) > 0 {
+			return Marshal(map[string][]*FieldQuery{q.Name: q.Fields})
+		}
+		return Marshal(q.Name)
+	}
+	return Marshal(q.Fields)
+}
+
+func (q *FieldQuery) QueryString() (FieldQueryString, error) {
+	b, err := Marshal(q)
+	if err != nil {
+		return "", err
+	}
+	return FieldQueryString(b), nil
+}
+
+type FieldQueryString string
+
+func (s FieldQueryString) Build() (*FieldQuery, error) {
+	var query interface{}
+	if err := Unmarshal([]byte(s), &query); err != nil {
+		return nil, err
+	}
+	return s.build(reflect.ValueOf(query))
+}
+
+func (s FieldQueryString) build(v reflect.Value) (*FieldQuery, error) {
+	switch v.Type().Kind() {
+	case reflect.String:
+		return s.buildString(v)
+	case reflect.Map:
+		return s.buildMap(v)
+	case reflect.Slice:
+		return s.buildSlice(v)
+	case reflect.Interface:
+		return s.build(reflect.ValueOf(v.Interface()))
+	}
+	return nil, fmt.Errorf("failed to build field query")
+}
+
+func (s FieldQueryString) buildString(v reflect.Value) (*FieldQuery, error) {
+	b := []byte(v.String())
+	switch b[0] {
+	case '[', '{':
+		var query interface{}
+		if err := Unmarshal(b, &query); err != nil {
+			return nil, err
+		}
+		if str, ok := query.(string); ok {
+			return &FieldQuery{Name: str}, nil
+		}
+		return s.build(reflect.ValueOf(query))
+	}
+	return &FieldQuery{Name: string(b)}, nil
+}
+
+func (s FieldQueryString) buildSlice(v reflect.Value) (*FieldQuery, error) {
+	fields := make([]*FieldQuery, 0, v.Len())
+	for i := 0; i < v.Len(); i++ {
+		def, err := s.build(v.Index(i))
+		if err != nil {
+			return nil, err
+		}
+		fields = append(fields, def)
+	}
+	return &FieldQuery{Fields: fields}, nil
+}
+
+func (s FieldQueryString) buildMap(v reflect.Value) (*FieldQuery, error) {
+	keys := v.MapKeys()
+	if len(keys) != 1 {
+		return nil, fmt.Errorf("failed to build field query object")
+	}
+	key := keys[0]
+	if key.Type().Kind() != reflect.String {
+		return nil, fmt.Errorf("failed to build field query. invalid object key type")
+	}
+	name := key.String()
+	def, err := s.build(v.MapIndex(key))
+	if err != nil {
+		return nil, err
+	}
+	return &FieldQuery{
+		Name:   name,
+		Fields: def.Fields,
+	}, nil
+}
+
+type queryKey struct{}
+
+func FieldQueryFromContext(ctx context.Context) *FieldQuery {
+	query := ctx.Value(queryKey{})
+	if query == nil {
+		return nil
+	}
+	q, ok := query.(*FieldQuery)
+	if !ok {
+		return nil
+	}
+	return q
+}
+
+func SetFieldQueryToContext(ctx context.Context, query *FieldQuery) context.Context {
+	return context.WithValue(ctx, queryKey{}, query)
+}
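
The query format handled above is itself JSON: a bare string selects a field, an array groups selections, and a single-key object selects a field together with the sub-fields to keep. A standalone sketch of that shape and of carrying a parsed query on a context, using encoding/json and local stand-ins for the internal FieldQuery type and Marshal/Unmarshal hooks:

package main

import (
	"context"
	"encoding/json"
	"fmt"
)

// fieldQuery mirrors the FieldQuery shape above: Name selects a field and
// Fields restricts which of its sub-fields are encoded.
type fieldQuery struct {
	Name   string
	Fields []*fieldQuery
}

// build mirrors FieldQueryString.build: strings select a field, arrays group
// selections, and a single-key object selects a field plus its sub-fields.
func build(v interface{}) (*fieldQuery, error) {
	switch t := v.(type) {
	case string:
		return &fieldQuery{Name: t}, nil
	case []interface{}:
		q := &fieldQuery{}
		for _, e := range t {
			sub, err := build(e)
			if err != nil {
				return nil, err
			}
			q.Fields = append(q.Fields, sub)
		}
		return q, nil
	case map[string]interface{}:
		if len(t) != 1 {
			return nil, fmt.Errorf("field query object must have exactly one key")
		}
		for k, e := range t {
			sub, err := build(e)
			if err != nil {
				return nil, err
			}
			return &fieldQuery{Name: k, Fields: sub.Fields}, nil
		}
	}
	return nil, fmt.Errorf("unsupported field query element %T", v)
}

type queryKey struct{}

func main() {
	var raw interface{}
	// Select "name" and, under "address", only "city".
	_ = json.Unmarshal([]byte(`["name",{"address":["city"]}]`), &raw)
	q, _ := build(raw)
	ctx := context.WithValue(context.Background(), queryKey{}, q)
	got, _ := ctx.Value(queryKey{}).(*fieldQuery)
	fmt.Println(got.Fields[0].Name, got.Fields[1].Name) // name address
}
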
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/string.go b/vendor/github.com/goccy/go-json/internal/encoder/string.go
new file mode 100644
index 0000000000..e4152b27c7
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/string.go
@@ -0,0 +1,459 @@
+package encoder
+
+import (
+	"math/bits"
+	"reflect"
+	"unsafe"
+)
+
+const (
+	lsb = 0x0101010101010101
+	msb = 0x8080808080808080
+)
+
+var hex = "0123456789abcdef"
+
+//nolint:govet
+func stringToUint64Slice(s string) []uint64 {
+	return *(*[]uint64)(unsafe.Pointer(&reflect.SliceHeader{
+		Data: ((*reflect.StringHeader)(unsafe.Pointer(&s))).Data,
+		Len:  len(s) / 8,
+		Cap:  len(s) / 8,
+	}))
+}
+
+func AppendString(ctx *RuntimeContext, buf []byte, s string) []byte {
+	if ctx.Option.Flag&HTMLEscapeOption != 0 {
+		if ctx.Option.Flag&NormalizeUTF8Option != 0 {
+			return appendNormalizedHTMLString(buf, s)
+		}
+		return appendHTMLString(buf, s)
+	}
+	if ctx.Option.Flag&NormalizeUTF8Option != 0 {
+		return appendNormalizedString(buf, s)
+	}
+	return appendString(buf, s)
+}
+
+func appendNormalizedHTMLString(buf []byte, s string) []byte {
+	valLen := len(s)
+	if valLen == 0 {
+		return append(buf, `""`...)
+	}
+	buf = append(buf, '"')
+	var (
+		i, j int
+	)
+	if valLen >= 8 {
+		chunks := stringToUint64Slice(s)
+		for _, n := range chunks {
+			// combine masks before checking for the MSB of each byte. We include
+			// `n` in the mask to check whether any of the *input* byte MSBs were
+			// set (i.e. the byte was outside the ASCII range).
+			mask := n | (n - (lsb * 0x20)) |
+				((n ^ (lsb * '"')) - lsb) |
+				((n ^ (lsb * '\\')) - lsb) |
+				((n ^ (lsb * '<')) - lsb) |
+				((n ^ (lsb * '>')) - lsb) |
+				((n ^ (lsb * '&')) - lsb)
+			if (mask & msb) != 0 {
+				j = bits.TrailingZeros64(mask&msb) / 8
+				goto ESCAPE_END
+			}
+		}
+		for i := len(chunks) * 8; i < valLen; i++ {
+			if needEscapeHTMLNormalizeUTF8[s[i]] {
+				j = i
+				goto ESCAPE_END
+			}
+		}
+		// no escape characters found; the whole string can be copied as-is.
+		return append(append(buf, s...), '"')
+	}
+ESCAPE_END:
+	for j < valLen {
+		c := s[j]
+
+		if !needEscapeHTMLNormalizeUTF8[c] {
+			// fast path: most of the time, printable ascii characters are used
+			j++
+			continue
+		}
+
+		switch c {
+		case '\\', '"':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', c)
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '\n':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', 'n')
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '\r':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', 'r')
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '\t':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', 't')
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '<', '>', '&':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, `\u00`...)
+			buf = append(buf, hex[c>>4], hex[c&0xF])
+			i = j + 1
+			j = j + 1
+			continue
+
+		case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F
+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, `\u00`...)
+			buf = append(buf, hex[c>>4], hex[c&0xF])
+			i = j + 1
+			j = j + 1
+			continue
+		}
+		state, size := decodeRuneInString(s[j:])
+		switch state {
+		case runeErrorState:
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, `\ufffd`...)
+			i = j + 1
+			j = j + 1
+			continue
+			// U+2028 is LINE SEPARATOR.
+			// U+2029 is PARAGRAPH SEPARATOR.
+			// They are both technically valid characters in JSON strings,
+			// but don't work in JSONP, which has to be evaluated as JavaScript,
+			// and can lead to security holes there. It is valid JSON to
+			// escape them, so we do so unconditionally.
+			// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		case lineSepState:
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, `\u2028`...)
+			i = j + 3
+			j = j + 3
+			continue
+		case paragraphSepState:
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, `\u2029`...)
+			i = j + 3
+			j = j + 3
+			continue
+		}
+		j += size
+	}
+
+	return append(append(buf, s[i:]...), '"')
+}
+
+func appendHTMLString(buf []byte, s string) []byte {
+	valLen := len(s)
+	if valLen == 0 {
+		return append(buf, `""`...)
+	}
+	buf = append(buf, '"')
+	var (
+		i, j int
+	)
+	if valLen >= 8 {
+		chunks := stringToUint64Slice(s)
+		for _, n := range chunks {
+			// combine masks before checking for the MSB of each byte. We include
+			// `n` in the mask to check whether any of the *input* byte MSBs were
+			// set (i.e. the byte was outside the ASCII range).
+			mask := n | (n - (lsb * 0x20)) |
+				((n ^ (lsb * '"')) - lsb) |
+				((n ^ (lsb * '\\')) - lsb) |
+				((n ^ (lsb * '<')) - lsb) |
+				((n ^ (lsb * '>')) - lsb) |
+				((n ^ (lsb * '&')) - lsb)
+			if (mask & msb) != 0 {
+				j = bits.TrailingZeros64(mask&msb) / 8
+				goto ESCAPE_END
+			}
+		}
+		for i := len(chunks) * 8; i < valLen; i++ {
+			if needEscapeHTML[s[i]] {
+				j = i
+				goto ESCAPE_END
+			}
+		}
+		// no escape characters found; the whole string can be copied as-is.
+		return append(append(buf, s...), '"')
+	}
+ESCAPE_END:
+	for j < valLen {
+		c := s[j]
+
+		if !needEscapeHTML[c] {
+			// fast path: most of the time, printable ascii characters are used
+			j++
+			continue
+		}
+
+		switch c {
+		case '\\', '"':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', c)
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '\n':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', 'n')
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '\r':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', 'r')
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '\t':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', 't')
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '<', '>', '&':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, `\u00`...)
+			buf = append(buf, hex[c>>4], hex[c&0xF])
+			i = j + 1
+			j = j + 1
+			continue
+
+		case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F
+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, `\u00`...)
+			buf = append(buf, hex[c>>4], hex[c&0xF])
+			i = j + 1
+			j = j + 1
+			continue
+		}
+		j++
+	}
+
+	return append(append(buf, s[i:]...), '"')
+}
+
+func appendNormalizedString(buf []byte, s string) []byte {
+	valLen := len(s)
+	if valLen == 0 {
+		return append(buf, `""`...)
+	}
+	buf = append(buf, '"')
+	var (
+		i, j int
+	)
+	if valLen >= 8 {
+		chunks := stringToUint64Slice(s)
+		for _, n := range chunks {
+			// combine masks before checking for the MSB of each byte. We include
+			// `n` in the mask to check whether any of the *input* byte MSBs were
+			// set (i.e. the byte was outside the ASCII range).
+			mask := n | (n - (lsb * 0x20)) |
+				((n ^ (lsb * '"')) - lsb) |
+				((n ^ (lsb * '\\')) - lsb)
+			if (mask & msb) != 0 {
+				j = bits.TrailingZeros64(mask&msb) / 8
+				goto ESCAPE_END
+			}
+		}
+		valLen := len(s)
+		for i := len(chunks) * 8; i < valLen; i++ {
+			if needEscapeNormalizeUTF8[s[i]] {
+				j = i
+				goto ESCAPE_END
+			}
+		}
+		return append(append(buf, s...), '"')
+	}
+ESCAPE_END:
+	for j < valLen {
+		c := s[j]
+
+		if !needEscapeNormalizeUTF8[c] {
+			// fast path: most of the time, printable ascii characters are used
+			j++
+			continue
+		}
+
+		switch c {
+		case '\\', '"':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', c)
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '\n':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', 'n')
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '\r':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', 'r')
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '\t':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', 't')
+			i = j + 1
+			j = j + 1
+			continue
+
+		case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F
+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, `\u00`...)
+			buf = append(buf, hex[c>>4], hex[c&0xF])
+			i = j + 1
+			j = j + 1
+			continue
+		}
+
+		state, size := decodeRuneInString(s[j:])
+		switch state {
+		case runeErrorState:
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, `\ufffd`...)
+			i = j + 1
+			j = j + 1
+			continue
+			// U+2028 is LINE SEPARATOR.
+			// U+2029 is PARAGRAPH SEPARATOR.
+			// They are both technically valid characters in JSON strings,
+			// but don't work in JSONP, which has to be evaluated as JavaScript,
+			// and can lead to security holes there. It is valid JSON to
+			// escape them, so we do so unconditionally.
+			// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		case lineSepState:
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, `\u2028`...)
+			i = j + 3
+			j = j + 3
+			continue
+		case paragraphSepState:
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, `\u2029`...)
+			i = j + 3
+			j = j + 3
+			continue
+		}
+		j += size
+	}
+
+	return append(append(buf, s[i:]...), '"')
+}
+
+func appendString(buf []byte, s string) []byte {
+	valLen := len(s)
+	if valLen == 0 {
+		return append(buf, `""`...)
+	}
+	buf = append(buf, '"')
+	var (
+		i, j int
+	)
+	if valLen >= 8 {
+		chunks := stringToUint64Slice(s)
+		for _, n := range chunks {
+			// combine masks before checking for the MSB of each byte. We include
+			// `n` in the mask to check whether any of the *input* byte MSBs were
+			// set (i.e. the byte was outside the ASCII range).
+			mask := n | (n - (lsb * 0x20)) |
+				((n ^ (lsb * '"')) - lsb) |
+				((n ^ (lsb * '\\')) - lsb)
+			if (mask & msb) != 0 {
+				j = bits.TrailingZeros64(mask&msb) / 8
+				goto ESCAPE_END
+			}
+		}
+		valLen := len(s)
+		for i := len(chunks) * 8; i < valLen; i++ {
+			if needEscape[s[i]] {
+				j = i
+				goto ESCAPE_END
+			}
+		}
+		return append(append(buf, s...), '"')
+	}
+ESCAPE_END:
+	for j < valLen {
+		c := s[j]
+
+		if !needEscape[c] {
+			// fast path: most of the time, printable ascii characters are used
+			j++
+			continue
+		}
+
+		switch c {
+		case '\\', '"':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', c)
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '\n':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', 'n')
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '\r':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', 'r')
+			i = j + 1
+			j = j + 1
+			continue
+
+		case '\t':
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, '\\', 't')
+			i = j + 1
+			j = j + 1
+			continue
+
+		case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F
+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F
+			buf = append(buf, s[i:j]...)
+			buf = append(buf, `\u00`...)
+			buf = append(buf, hex[c>>4], hex[c&0xF])
+			i = j + 1
+			j = j + 1
+			continue
+		}
+		j++
+	}
+
+	return append(append(buf, s[i:]...), '"')
+}
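
All four variants above share the same chunked scan: eight bytes are loaded as a uint64 and a combined mask flags any byte that is a quote, a backslash, a control character, or (via the raw value's own MSBs) non-ASCII, with bits.TrailingZeros64 locating the first flagged byte. A standalone sketch of that scan, assuming a little-endian load via encoding/binary in place of the unsafe slice-header conversion used above:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

const (
	lsb = 0x0101010101010101
	msb = 0x8080808080808080
)

// firstEscapeIdx mirrors the chunked scan above. It returns the index of the
// first byte that cannot be copied verbatim on the ASCII fast path
// ('"', '\\', a control character, or any byte >= 0x80), or len(s) if none.
func firstEscapeIdx(s string) int {
	i := 0
	for ; i+8 <= len(s); i += 8 {
		n := binary.LittleEndian.Uint64([]byte(s[i : i+8]))
		mask := n | (n - (lsb * 0x20)) | // byte < 0x20, or its own MSB already set
			((n ^ (lsb * '"')) - lsb) | // byte == '"'
			((n ^ (lsb * '\\')) - lsb) // byte == '\\'
		if mask&msb != 0 {
			// lowest set MSB identifies the first offending byte in the chunk
			return i + bits.TrailingZeros64(mask&msb)/8
		}
	}
	for ; i < len(s); i++ {
		c := s[i]
		if c < 0x20 || c == '"' || c == '\\' || c >= 0x80 {
			return i
		}
	}
	return len(s)
}

func main() {
	fmt.Println(firstEscapeIdx(`plain ascii text`))   // 16 (nothing to escape)
	fmt.Println(firstEscapeIdx(`say "hi" to them`))   // 4 (the first quote)
	fmt.Println(firstEscapeIdx("tab\there and more")) // 3 (the tab)
}
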
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/string_table.go b/vendor/github.com/goccy/go-json/internal/encoder/string_table.go
new file mode 100644
index 0000000000..ebe42c92df
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/string_table.go
@@ -0,0 +1,415 @@
+package encoder
+
+var needEscapeHTMLNormalizeUTF8 = [256]bool{
+	'"':  true,
+	'&':  true,
+	'<':  true,
+	'>':  true,
+	'\\': true,
+	0x00: true,
+	0x01: true,
+	0x02: true,
+	0x03: true,
+	0x04: true,
+	0x05: true,
+	0x06: true,
+	0x07: true,
+	0x08: true,
+	0x09: true,
+	0x0a: true,
+	0x0b: true,
+	0x0c: true,
+	0x0d: true,
+	0x0e: true,
+	0x0f: true,
+	0x10: true,
+	0x11: true,
+	0x12: true,
+	0x13: true,
+	0x14: true,
+	0x15: true,
+	0x16: true,
+	0x17: true,
+	0x18: true,
+	0x19: true,
+	0x1a: true,
+	0x1b: true,
+	0x1c: true,
+	0x1d: true,
+	0x1e: true,
+	0x1f: true,
+	/* 0x20 - 0x7f */
+	0x80: true,
+	0x81: true,
+	0x82: true,
+	0x83: true,
+	0x84: true,
+	0x85: true,
+	0x86: true,
+	0x87: true,
+	0x88: true,
+	0x89: true,
+	0x8a: true,
+	0x8b: true,
+	0x8c: true,
+	0x8d: true,
+	0x8e: true,
+	0x8f: true,
+	0x90: true,
+	0x91: true,
+	0x92: true,
+	0x93: true,
+	0x94: true,
+	0x95: true,
+	0x96: true,
+	0x97: true,
+	0x98: true,
+	0x99: true,
+	0x9a: true,
+	0x9b: true,
+	0x9c: true,
+	0x9d: true,
+	0x9e: true,
+	0x9f: true,
+	0xa0: true,
+	0xa1: true,
+	0xa2: true,
+	0xa3: true,
+	0xa4: true,
+	0xa5: true,
+	0xa6: true,
+	0xa7: true,
+	0xa8: true,
+	0xa9: true,
+	0xaa: true,
+	0xab: true,
+	0xac: true,
+	0xad: true,
+	0xae: true,
+	0xaf: true,
+	0xb0: true,
+	0xb1: true,
+	0xb2: true,
+	0xb3: true,
+	0xb4: true,
+	0xb5: true,
+	0xb6: true,
+	0xb7: true,
+	0xb8: true,
+	0xb9: true,
+	0xba: true,
+	0xbb: true,
+	0xbc: true,
+	0xbd: true,
+	0xbe: true,
+	0xbf: true,
+	0xc0: true,
+	0xc1: true,
+	0xc2: true,
+	0xc3: true,
+	0xc4: true,
+	0xc5: true,
+	0xc6: true,
+	0xc7: true,
+	0xc8: true,
+	0xc9: true,
+	0xca: true,
+	0xcb: true,
+	0xcc: true,
+	0xcd: true,
+	0xce: true,
+	0xcf: true,
+	0xd0: true,
+	0xd1: true,
+	0xd2: true,
+	0xd3: true,
+	0xd4: true,
+	0xd5: true,
+	0xd6: true,
+	0xd7: true,
+	0xd8: true,
+	0xd9: true,
+	0xda: true,
+	0xdb: true,
+	0xdc: true,
+	0xdd: true,
+	0xde: true,
+	0xdf: true,
+	0xe0: true,
+	0xe1: true,
+	0xe2: true,
+	0xe3: true,
+	0xe4: true,
+	0xe5: true,
+	0xe6: true,
+	0xe7: true,
+	0xe8: true,
+	0xe9: true,
+	0xea: true,
+	0xeb: true,
+	0xec: true,
+	0xed: true,
+	0xee: true,
+	0xef: true,
+	0xf0: true,
+	0xf1: true,
+	0xf2: true,
+	0xf3: true,
+	0xf4: true,
+	0xf5: true,
+	0xf6: true,
+	0xf7: true,
+	0xf8: true,
+	0xf9: true,
+	0xfa: true,
+	0xfb: true,
+	0xfc: true,
+	0xfd: true,
+	0xfe: true,
+	0xff: true,
+}
+
+var needEscapeNormalizeUTF8 = [256]bool{
+	'"':  true,
+	'\\': true,
+	0x00: true,
+	0x01: true,
+	0x02: true,
+	0x03: true,
+	0x04: true,
+	0x05: true,
+	0x06: true,
+	0x07: true,
+	0x08: true,
+	0x09: true,
+	0x0a: true,
+	0x0b: true,
+	0x0c: true,
+	0x0d: true,
+	0x0e: true,
+	0x0f: true,
+	0x10: true,
+	0x11: true,
+	0x12: true,
+	0x13: true,
+	0x14: true,
+	0x15: true,
+	0x16: true,
+	0x17: true,
+	0x18: true,
+	0x19: true,
+	0x1a: true,
+	0x1b: true,
+	0x1c: true,
+	0x1d: true,
+	0x1e: true,
+	0x1f: true,
+	/* 0x20 - 0x7f */
+	0x80: true,
+	0x81: true,
+	0x82: true,
+	0x83: true,
+	0x84: true,
+	0x85: true,
+	0x86: true,
+	0x87: true,
+	0x88: true,
+	0x89: true,
+	0x8a: true,
+	0x8b: true,
+	0x8c: true,
+	0x8d: true,
+	0x8e: true,
+	0x8f: true,
+	0x90: true,
+	0x91: true,
+	0x92: true,
+	0x93: true,
+	0x94: true,
+	0x95: true,
+	0x96: true,
+	0x97: true,
+	0x98: true,
+	0x99: true,
+	0x9a: true,
+	0x9b: true,
+	0x9c: true,
+	0x9d: true,
+	0x9e: true,
+	0x9f: true,
+	0xa0: true,
+	0xa1: true,
+	0xa2: true,
+	0xa3: true,
+	0xa4: true,
+	0xa5: true,
+	0xa6: true,
+	0xa7: true,
+	0xa8: true,
+	0xa9: true,
+	0xaa: true,
+	0xab: true,
+	0xac: true,
+	0xad: true,
+	0xae: true,
+	0xaf: true,
+	0xb0: true,
+	0xb1: true,
+	0xb2: true,
+	0xb3: true,
+	0xb4: true,
+	0xb5: true,
+	0xb6: true,
+	0xb7: true,
+	0xb8: true,
+	0xb9: true,
+	0xba: true,
+	0xbb: true,
+	0xbc: true,
+	0xbd: true,
+	0xbe: true,
+	0xbf: true,
+	0xc0: true,
+	0xc1: true,
+	0xc2: true,
+	0xc3: true,
+	0xc4: true,
+	0xc5: true,
+	0xc6: true,
+	0xc7: true,
+	0xc8: true,
+	0xc9: true,
+	0xca: true,
+	0xcb: true,
+	0xcc: true,
+	0xcd: true,
+	0xce: true,
+	0xcf: true,
+	0xd0: true,
+	0xd1: true,
+	0xd2: true,
+	0xd3: true,
+	0xd4: true,
+	0xd5: true,
+	0xd6: true,
+	0xd7: true,
+	0xd8: true,
+	0xd9: true,
+	0xda: true,
+	0xdb: true,
+	0xdc: true,
+	0xdd: true,
+	0xde: true,
+	0xdf: true,
+	0xe0: true,
+	0xe1: true,
+	0xe2: true,
+	0xe3: true,
+	0xe4: true,
+	0xe5: true,
+	0xe6: true,
+	0xe7: true,
+	0xe8: true,
+	0xe9: true,
+	0xea: true,
+	0xeb: true,
+	0xec: true,
+	0xed: true,
+	0xee: true,
+	0xef: true,
+	0xf0: true,
+	0xf1: true,
+	0xf2: true,
+	0xf3: true,
+	0xf4: true,
+	0xf5: true,
+	0xf6: true,
+	0xf7: true,
+	0xf8: true,
+	0xf9: true,
+	0xfa: true,
+	0xfb: true,
+	0xfc: true,
+	0xfd: true,
+	0xfe: true,
+	0xff: true,
+}
+
+var needEscapeHTML = [256]bool{
+	'"':  true,
+	'&':  true,
+	'<':  true,
+	'>':  true,
+	'\\': true,
+	0x00: true,
+	0x01: true,
+	0x02: true,
+	0x03: true,
+	0x04: true,
+	0x05: true,
+	0x06: true,
+	0x07: true,
+	0x08: true,
+	0x09: true,
+	0x0a: true,
+	0x0b: true,
+	0x0c: true,
+	0x0d: true,
+	0x0e: true,
+	0x0f: true,
+	0x10: true,
+	0x11: true,
+	0x12: true,
+	0x13: true,
+	0x14: true,
+	0x15: true,
+	0x16: true,
+	0x17: true,
+	0x18: true,
+	0x19: true,
+	0x1a: true,
+	0x1b: true,
+	0x1c: true,
+	0x1d: true,
+	0x1e: true,
+	0x1f: true,
+	/* 0x20 - 0xff */
+}
+
+var needEscape = [256]bool{
+	'"':  true,
+	'\\': true,
+	0x00: true,
+	0x01: true,
+	0x02: true,
+	0x03: true,
+	0x04: true,
+	0x05: true,
+	0x06: true,
+	0x07: true,
+	0x08: true,
+	0x09: true,
+	0x0a: true,
+	0x0b: true,
+	0x0c: true,
+	0x0d: true,
+	0x0e: true,
+	0x0f: true,
+	0x10: true,
+	0x11: true,
+	0x12: true,
+	0x13: true,
+	0x14: true,
+	0x15: true,
+	0x16: true,
+	0x17: true,
+	0x18: true,
+	0x19: true,
+	0x1a: true,
+	0x1b: true,
+	0x1c: true,
+	0x1d: true,
+	0x1e: true,
+	0x1f: true,
+	/* 0x20 - 0xff */
+}
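
These tables drive the scalar fallback loops above: one table load per byte decides whether it can be copied verbatim. The NormalizeUTF8 variants additionally flag every byte >= 0x80 so multi-byte sequences are re-validated with decodeRuneInString, while the plain tables let non-ASCII bytes pass through untouched. A small sketch of that difference, using stand-in tables populated only with the entries exercised below:

package main

import "fmt"

// classify returns the indices of bytes that the given table marks as
// needing the slow path.
func classify(table *[256]bool, s string) []int {
	var idx []int
	for i := 0; i < len(s); i++ {
		if table[s[i]] {
			idx = append(idx, i)
		}
	}
	return idx
}

func main() {
	var plain, normalize [256]bool
	plain['"'], plain['\\'] = true, true
	normalize['"'], normalize['\\'] = true, true
	for b := 0x80; b <= 0xff; b++ {
		normalize[b] = true // NormalizeUTF8 variants also trap non-ASCII bytes
	}

	s := `caf` + "\u00e9" + ` "x"` // é is the two bytes 0xc3 0xa9 in UTF-8
	fmt.Println(classify(&plain, s))     // [6 8] – only the quotes
	fmt.Println(classify(&normalize, s)) // [3 4 6 8] – quotes plus the é bytes
}
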
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go
new file mode 100644
index 0000000000..82b6dd47f8
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go
@@ -0,0 +1,41 @@
+package vm
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/goccy/go-json/internal/encoder"
+)
+
+func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
+	defer func() {
+		var code *encoder.Opcode
+		if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+			code = codeSet.EscapeKeyCode
+		} else {
+			code = codeSet.NoescapeKeyCode
+		}
+		if wc := ctx.Option.DebugDOTOut; wc != nil {
+			_, _ = io.WriteString(wc, code.DumpDOT())
+			wc.Close()
+			ctx.Option.DebugDOTOut = nil
+		}
+
+		if err := recover(); err != nil {
+			w := ctx.Option.DebugOut
+			fmt.Fprintln(w, "=============[DEBUG]===============")
+			fmt.Fprintln(w, "* [TYPE]")
+			fmt.Fprintln(w, codeSet.Type)
+			fmt.Fprintf(w, "\n")
+			fmt.Fprintln(w, "* [ALL OPCODE]")
+			fmt.Fprintln(w, code.Dump())
+			fmt.Fprintf(w, "\n")
+			fmt.Fprintln(w, "* [CONTEXT]")
+			fmt.Fprintf(w, "%+v\n", ctx)
+			fmt.Fprintln(w, "===================================")
+			panic(err)
+		}
+	}()
+
+	return Run(ctx, b, codeSet)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go
new file mode 100644
index 0000000000..65252b4a5c
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go
@@ -0,0 +1,9 @@
+package vm
+
+import (
+	// HACK: compile order
+	// The `vm`, `vm_indent`, `vm_color`, and `vm_color_indent` packages use a lot of memory to compile,
+	// so we forcibly introduce dependencies between them to avoid compiling them concurrently.
+	// dependency order: vm => vm_indent => vm_color => vm_color_indent
+	_ "github.com/goccy/go-json/internal/encoder/vm_indent"
+)
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go
new file mode 100644
index 0000000000..86291d7bb3
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go
@@ -0,0 +1,207 @@
+package vm
+
+import (
+	"encoding/json"
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/encoder"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+const uintptrSize = 4 << (^uintptr(0) >> 63)
+
+var (
+	appendInt           = encoder.AppendInt
+	appendUint          = encoder.AppendUint
+	appendFloat32       = encoder.AppendFloat32
+	appendFloat64       = encoder.AppendFloat64
+	appendString        = encoder.AppendString
+	appendByteSlice     = encoder.AppendByteSlice
+	appendNumber        = encoder.AppendNumber
+	errUnsupportedValue = encoder.ErrUnsupportedValue
+	errUnsupportedFloat = encoder.ErrUnsupportedFloat
+	mapiterinit         = encoder.MapIterInit
+	mapiterkey          = encoder.MapIterKey
+	mapitervalue        = encoder.MapIterValue
+	mapiternext         = encoder.MapIterNext
+	maplen              = encoder.MapLen
+)
+
+type emptyInterface struct {
+	typ *runtime.Type
+	ptr unsafe.Pointer
+}
+
+type nonEmptyInterface struct {
+	itab *struct {
+		ityp *runtime.Type // static interface type
+		typ  *runtime.Type // dynamic concrete type
+		// unused fields...
+	}
+	ptr unsafe.Pointer
+}
+
+func errUnimplementedOp(op encoder.OpType) error {
+	return fmt.Errorf("encoder: opcode %s has not been implemented", op)
+}
+
+func load(base uintptr, idx uint32) uintptr {
+	addr := base + uintptr(idx)
+	return **(**uintptr)(unsafe.Pointer(&addr))
+}
+
+func store(base uintptr, idx uint32, p uintptr) {
+	addr := base + uintptr(idx)
+	**(**uintptr)(unsafe.Pointer(&addr)) = p
+}
+
+func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr {
+	addr := base + uintptr(idx)
+	p := **(**uintptr)(unsafe.Pointer(&addr))
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUint64(p uintptr, bitSize uint8) uint64 {
+	switch bitSize {
+	case 8:
+		return (uint64)(**(**uint8)(unsafe.Pointer(&p)))
+	case 16:
+		return (uint64)(**(**uint16)(unsafe.Pointer(&p)))
+	case 32:
+		return (uint64)(**(**uint32)(unsafe.Pointer(&p)))
+	case 64:
+		return **(**uint64)(unsafe.Pointer(&p))
+	}
+	return 0
+}
+func ptrToFloat32(p uintptr) float32            { return **(**float32)(unsafe.Pointer(&p)) }
+func ptrToFloat64(p uintptr) float64            { return **(**float64)(unsafe.Pointer(&p)) }
+func ptrToBool(p uintptr) bool                  { return **(**bool)(unsafe.Pointer(&p)) }
+func ptrToBytes(p uintptr) []byte               { return **(**[]byte)(unsafe.Pointer(&p)) }
+func ptrToNumber(p uintptr) json.Number         { return **(**json.Number)(unsafe.Pointer(&p)) }
+func ptrToString(p uintptr) string              { return **(**string)(unsafe.Pointer(&p)) }
+func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) }
+func ptrToPtr(p uintptr) uintptr {
+	return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p)))
+}
+func ptrToNPtr(p uintptr, ptrNum uint8) uintptr {
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUnsafePtr(p uintptr) unsafe.Pointer {
+	return *(*unsafe.Pointer)(unsafe.Pointer(&p))
+}
+func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} {
+	return *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: code.Type,
+		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)),
+	}))
+}
+
+func appendBool(_ *encoder.RuntimeContext, b []byte, v bool) []byte {
+	if v {
+		return append(b, "true"...)
+	}
+	return append(b, "false"...)
+}
+
+func appendNull(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, "null"...)
+}
+
+func appendComma(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, ',')
+}
+
+func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, "null,"...)
+}
+
+func appendColon(_ *encoder.RuntimeContext, b []byte) []byte {
+	last := len(b) - 1
+	b[last] = ':'
+	return b
+}
+
+func appendMapKeyValue(_ *encoder.RuntimeContext, _ *encoder.Opcode, b, key, value []byte) []byte {
+	b = append(b, key...)
+	b[len(b)-1] = ':'
+	return append(b, value...)
+}
+
+func appendMapEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
+	b[len(b)-1] = '}'
+	b = append(b, ',')
+	return b
+}
+
+func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	return encoder.AppendMarshalJSON(ctx, code, b, v)
+}
+
+func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	return encoder.AppendMarshalText(ctx, code, b, v)
+}
+
+func appendArrayHead(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
+	return append(b, '[')
+}
+
+func appendArrayEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	b[last] = ']'
+	return append(b, ',')
+}
+
+func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '[', ']', ',')
+}
+
+func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '{', '}', ',')
+}
+
+func appendObjectEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	b[last] = '}'
+	return append(b, ',')
+}
+
+func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '{')
+}
+
+func appendStructKey(_ *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	return append(b, code.Key...)
+}
+
+func appendStructEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
+	return append(b, '}', ',')
+}
+
+func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	if b[last] == ',' {
+		b[last] = '}'
+		return appendComma(ctx, b)
+	}
+	return appendStructEnd(ctx, code, b)
+}
+
+func restoreIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, _ uintptr)               {}
+func storeIndent(_ uintptr, _ *encoder.Opcode, _ uintptr)                                 {}
+func appendMapKeyIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte    { return b }
+func appendArrayElemIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b }
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go
new file mode 100644
index 0000000000..645d20f9fb
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go
@@ -0,0 +1,4859 @@
+// Code generated by internal/cmd/generator. DO NOT EDIT!
+package vm
+
+import (
+	"math"
+	"reflect"
+	"sort"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/encoder"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
+	recursiveLevel := 0
+	ptrOffset := uintptr(0)
+	ctxptr := ctx.Ptr()
+	var code *encoder.Opcode
+	if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+		code = codeSet.EscapeKeyCode
+	} else {
+		code = codeSet.NoescapeKeyCode
+	}
+
+	for {
+		switch code.Op {
+		default:
+			return nil, errUnimplementedOp(code.Op)
+		case encoder.OpPtr:
+			p := load(ctxptr, code.Idx)
+			code = code.Next
+			store(ctxptr, code.Idx, ptrToPtr(p))
+		case encoder.OpIntPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpInt:
+			b = appendInt(ctx, b, load(ctxptr, code.Idx), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpUintPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpUint:
+			b = appendUint(ctx, b, load(ctxptr, code.Idx), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpIntString:
+			b = append(b, '"')
+			b = appendInt(ctx, b, load(ctxptr, code.Idx), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpUintString:
+			b = append(b, '"')
+			b = appendUint(ctx, b, load(ctxptr, code.Idx), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpFloat32Ptr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+				b = appendComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpFloat32:
+			b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpFloat64Ptr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpFloat64:
+			v := ptrToFloat64(load(ctxptr, code.Idx))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStringPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpString:
+			b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpBoolPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpBool:
+			b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpBytesPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpBytes:
+			b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpNumberPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpNumber:
+			bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpInterfacePtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpInterface:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			if recursiveLevel > encoder.StartDetectingCyclesAfter {
+				for _, seen := range ctx.SeenPtr {
+					if p == seen {
+						return nil, errUnsupportedValue(code, p)
+					}
+				}
+			}
+			ctx.SeenPtr = append(ctx.SeenPtr, p)
+			var (
+				typ      *runtime.Type
+				ifacePtr unsafe.Pointer
+			)
+			up := ptrToUnsafePtr(p)
+			if code.Flags&encoder.NonEmptyInterfaceFlags != 0 {
+				iface := (*nonEmptyInterface)(up)
+				ifacePtr = iface.ptr
+				if iface.itab != nil {
+					typ = iface.itab.typ
+				}
+			} else {
+				iface := (*emptyInterface)(up)
+				ifacePtr = iface.ptr
+				typ = iface.typ
+			}
+			if ifacePtr == nil {
+				isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ)
+				if !isDirectedNil {
+					b = appendNullComma(ctx, b)
+					code = code.Next
+					break
+				}
+			}
+			ctx.KeepRefs = append(ctx.KeepRefs, up)
+			ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ)))
+			if err != nil {
+				return nil, err
+			}
+
+			totalLength := uintptr(code.Length) + 3
+			nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3
+
+			var c *encoder.Opcode
+			if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+				c = ifaceCodeSet.InterfaceEscapeKeyCode
+			} else {
+				c = ifaceCodeSet.InterfaceNoescapeKeyCode
+			}
+			curlen := uintptr(len(ctx.Ptrs))
+			offsetNum := ptrOffset / uintptrSize
+			oldOffset := ptrOffset
+			ptrOffset += totalLength * uintptrSize
+			oldBaseIndent := ctx.BaseIndent
+			ctx.BaseIndent += code.Indent
+
+			newLen := offsetNum + totalLength + nextTotalLength
+			if curlen < newLen {
+				ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...)
+			}
+			ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr
+
+			end := ifaceCodeSet.EndCode
+			store(ctxptr, c.Idx, uintptr(ifacePtr))
+			store(ctxptr, end.Idx, oldOffset)
+			store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next)))
+			storeIndent(ctxptr, end, uintptr(oldBaseIndent))
+			code = c
+			recursiveLevel++
+		case encoder.OpInterfaceEnd:
+			recursiveLevel--
+
+			// restore ctxptr
+			offset := load(ctxptr, code.Idx)
+			restoreIndent(ctx, code, ctxptr)
+			ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1]
+
+			codePtr := load(ctxptr, code.ElemIdx)
+			code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr))
+			ctxptr = ctx.Ptr() + offset
+			ptrOffset = offset
+		case encoder.OpMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToPtr(p))
+			fallthrough
+		case encoder.OpMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToPtr(p))
+			fallthrough
+		case encoder.OpMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = append(b, `""`...)
+				b = appendComma(ctx, b)
+				code = code.Next
+				break
+			}
+			if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpSlicePtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpSlice:
+			p := load(ctxptr, code.Idx)
+			slice := ptrToSlice(p)
+			if p == 0 || slice.Data == nil {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.ElemIdx, 0)
+			store(ctxptr, code.Length, uintptr(slice.Len))
+			store(ctxptr, code.Idx, uintptr(slice.Data))
+			if slice.Len > 0 {
+				b = appendArrayHead(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, uintptr(slice.Data))
+			} else {
+				b = appendEmptyArray(ctx, b)
+				code = code.End.Next
+			}
+		case encoder.OpSliceElem:
+			idx := load(ctxptr, code.ElemIdx)
+			length := load(ctxptr, code.Length)
+			idx++
+			if idx < length {
+				b = appendArrayElemIndent(ctx, code, b)
+				store(ctxptr, code.ElemIdx, idx)
+				data := load(ctxptr, code.Idx)
+				size := uintptr(code.Size)
+				code = code.Next
+				store(ctxptr, code.Idx, data+idx*size)
+			} else {
+				b = appendArrayEnd(ctx, code, b)
+				code = code.End.Next
+			}
+		case encoder.OpArrayPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpArray:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			if code.Length > 0 {
+				b = appendArrayHead(ctx, code, b)
+				store(ctxptr, code.ElemIdx, 0)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				b = appendEmptyArray(ctx, b)
+				code = code.End.Next
+			}
+		case encoder.OpArrayElem:
+			idx := load(ctxptr, code.ElemIdx)
+			idx++
+			if idx < uintptr(code.Length) {
+				b = appendArrayElemIndent(ctx, code, b)
+				store(ctxptr, code.ElemIdx, idx)
+				p := load(ctxptr, code.Idx)
+				size := uintptr(code.Size)
+				code = code.Next
+				store(ctxptr, code.Idx, p+idx*size)
+			} else {
+				b = appendArrayEnd(ctx, code, b)
+				code = code.End.Next
+			}
+		case encoder.OpMapPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			uptr := ptrToUnsafePtr(p)
+			mlen := maplen(uptr)
+			if mlen <= 0 {
+				b = appendEmptyObject(ctx, b)
+				code = code.End.Next
+				break
+			}
+			b = appendStructHead(ctx, b)
+			unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0
+			mapCtx := encoder.NewMapContext(mlen, unorderedMap)
+			mapiterinit(code.Type, uptr, &mapCtx.Iter)
+			store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx)))
+			ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx))
+			if unorderedMap {
+				b = appendMapKeyIndent(ctx, code.Next, b)
+			} else {
+				mapCtx.Start = len(b)
+				mapCtx.First = len(b)
+			}
+			key := mapiterkey(&mapCtx.Iter)
+			store(ctxptr, code.Next.Idx, uintptr(key))
+			code = code.Next
+		case encoder.OpMapKey:
+			mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx)))
+			idx := mapCtx.Idx
+			idx++
+			if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 {
+				if idx < mapCtx.Len {
+					b = appendMapKeyIndent(ctx, code, b)
+					mapCtx.Idx = int(idx)
+					key := mapiterkey(&mapCtx.Iter)
+					store(ctxptr, code.Next.Idx, uintptr(key))
+					code = code.Next
+				} else {
+					b = appendObjectEnd(ctx, code, b)
+					encoder.ReleaseMapContext(mapCtx)
+					code = code.End.Next
+				}
+			} else {
+				mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)]
+				if idx < mapCtx.Len {
+					mapCtx.Idx = int(idx)
+					mapCtx.Start = len(b)
+					key := mapiterkey(&mapCtx.Iter)
+					store(ctxptr, code.Next.Idx, uintptr(key))
+					code = code.Next
+				} else {
+					code = code.End
+				}
+			}
+		case encoder.OpMapValue:
+			mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx)))
+			if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 {
+				b = appendColon(ctx, b)
+			} else {
+				mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)]
+				mapCtx.Start = len(b)
+			}
+			value := mapitervalue(&mapCtx.Iter)
+			store(ctxptr, code.Next.Idx, uintptr(value))
+			mapiternext(&mapCtx.Iter)
+			code = code.Next
+		case encoder.OpMapEnd:
+			// this operation is only used by the sorted (ordered) map encoding path.
+			mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx)))
+			sort.Sort(mapCtx.Slice)
+			buf := mapCtx.Buf
+			for _, item := range mapCtx.Slice.Items {
+				buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value)
+			}
+			buf = appendMapEnd(ctx, code, buf)
+			b = b[:mapCtx.First]
+			b = append(b, buf...)
+			mapCtx.Buf = buf
+			encoder.ReleaseMapContext(mapCtx)
+			code = code.Next
+		case encoder.OpRecursivePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpRecursive:
+			ptr := load(ctxptr, code.Idx)
+			if ptr != 0 {
+				if recursiveLevel > encoder.StartDetectingCyclesAfter {
+					for _, seen := range ctx.SeenPtr {
+						if ptr == seen {
+							return nil, errUnsupportedValue(code, ptr)
+						}
+					}
+				}
+			}
+			ctx.SeenPtr = append(ctx.SeenPtr, ptr)
+			c := code.Jmp.Code
+			curlen := uintptr(len(ctx.Ptrs))
+			offsetNum := ptrOffset / uintptrSize
+			oldOffset := ptrOffset
+			ptrOffset += code.Jmp.CurLen * uintptrSize
+			oldBaseIndent := ctx.BaseIndent
+			indentDiffFromTop := c.Indent - 1
+			ctx.BaseIndent += code.Indent - indentDiffFromTop
+
+			newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen
+			if curlen < newLen {
+				ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...)
+			}
+			ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr
+
+			store(ctxptr, c.Idx, ptr)
+			store(ctxptr, c.End.Next.Idx, oldOffset)
+			store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next)))
+			storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent))
+			code = c
+			recursiveLevel++
+		case encoder.OpRecursiveEnd:
+			recursiveLevel--
+
+			// restore ctxptr
+			restoreIndent(ctx, code, ctxptr)
+			offset := load(ctxptr, code.Idx)
+			ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1]
+
+			codePtr := load(ctxptr, code.ElemIdx)
+			code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr))
+			ctxptr = ctx.Ptr() + offset
+			ptrOffset = offset
+		case encoder.OpStructPtrHead:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHead:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if len(code.Key) > 0 {
+				if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 {
+					b = appendStructKey(ctx, code, b)
+				}
+			}
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadInt:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadInt:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyInt:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadIntString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadIntString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyIntString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			u64 := ptrToUint64(p, code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadUint:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadUint:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUint:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUint:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadUintString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadUintString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUintString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUintString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendUint(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat32:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat32:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat32String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat32String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat64:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat64:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat64String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat64String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat64(ctx, b, v)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNull(ctx, b)
+					b = appendComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToString(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadStringString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadStringString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset)))))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyStringString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyStringString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToString(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, v)))
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, ptrToString(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, ptrToString(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadBool:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadBool:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBool:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBool:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructPtrHeadBoolString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadBoolString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBoolString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBoolString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructPtrHeadBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendBool(ctx, b, ptrToBool(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadBytes:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadBytes:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBytes:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBytes:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToBytes(p + uintptr(code.Offset))
+			if len(v) == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadNumber:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadNumber:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumber:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumber:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadNumberString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadNumberString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = append(bb, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumberString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumberString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadArray, encoder.OpStructHeadSlice:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmptyArray:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyArray:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			b = appendStructKey(ctx, code, b)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmptySlice:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptySlice:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			slice := ptrToSlice(p)
+			if slice.Len == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.NextField
+			} else {
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p + uintptr(code.Offset))
+			}
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmptyMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p + uintptr(code.Offset))
+			}
+			if maplen(ptrToUnsafePtr(p)) == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.NextField
+				break
+			}
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.NextField
+			} else {
+				if (code.Flags & encoder.IndirectFlags) != 0 {
+					p = ptrToNPtr(p, code.PtrNum)
+				}
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadOmitEmptyMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+				break
+			}
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p == 0 {
+				code = code.NextField
+			} else {
+				if (code.Flags & encoder.IndirectFlags) != 0 {
+					p = ptrToNPtr(p, code.PtrNum)
+				}
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON {
+					p = ptrToPtr(p)
+				}
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON {
+					p = ptrToPtr(p)
+				}
+			}
+			iface := ptrToInterface(code, p)
+			if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalJSON(ctx, code, b, iface)
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText {
+					p = ptrToPtr(p)
+				}
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText {
+					p = ptrToPtr(p)
+				}
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructField:
+			if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 {
+				b = appendStructKey(ctx, code, b)
+			}
+			p := load(ctxptr, code.Idx) + uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldInt:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUint:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUint:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUintString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUintString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUintPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendUint(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUintPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32String:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat64(ctx, b, v)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			v := ptrToFloat64(p)
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldStringString:
+			p := load(ctxptr, code.Idx)
+			s := ptrToString(p + uintptr(code.Offset))
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, string(appendString(ctx, []byte{}, s)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyStringString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, v)))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldStringPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, ptrToString(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, ptrToString(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldStringPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBool:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBool:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBoolString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBoolString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBoolPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendBool(ctx, b, ptrToBool(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBytes:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBytes:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBytes(p + uintptr(code.Offset))
+			if len(v) > 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBytesPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumber:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumber:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumberString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = append(bb, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumberString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumberPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				code = code.NextField
+				break
+			}
+			iface := ptrToInterface(code, p)
+			if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) {
+				code = code.NextField
+				break
+			}
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendMarshalJSON(ctx, code, b, iface)
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructFieldMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldMarshalText:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalText:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				code = code.NextField
+				break
+			}
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructFieldMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldArray:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyArray:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldArrayPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyArrayPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructFieldSlice:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptySlice:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			slice := ptrToSlice(p)
+			if slice.Len == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldSlicePtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptySlicePtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructFieldMap:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyMap:
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldMapPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p != 0 {
+				p = ptrToNPtr(p, code.PtrNum)
+			}
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyMapPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p != 0 {
+				p = ptrToNPtr(p, code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructFieldStruct:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyStruct:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructEnd:
+			b = appendStructEndSkipLast(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndInt:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndIntString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndIntPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p, code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndIntPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUint:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUint:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUintString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUintString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUintPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendUint(ctx, b, p, code)
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p, code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUintPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32String:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32Ptr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32PtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat64(ctx, b, v)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat64(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat64(ctx, b, v)
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64Ptr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+				b = appendStructEnd(ctx, code, b)
+				code = code.Next
+				break
+			}
+			v := ptrToFloat64(p)
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64PtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndStringString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			s := ptrToString(p + uintptr(code.Offset))
+			b = appendString(ctx, b, string(appendString(ctx, []byte{}, s)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyStringString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, v)))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndStringPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, ptrToString(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, ptrToString(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndStringPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBool:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBool:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBoolString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBoolString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBoolPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendBool(ctx, b, ptrToBool(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBoolPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBytes:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBytes:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBytes(p + uintptr(code.Offset))
+			if len(v) > 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBytesPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumber:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendStructEnd(ctx, code, bb)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumber:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendStructEnd(ctx, code, bb)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = append(bb, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendStructEnd(ctx, code, bb)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpEnd:
+			goto END
+		}
+	}
+END:
+	return b, nil
+}
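
Note for reviewers skimming this vendored file: the VM loop that ends above is a flat opcode interpreter. The encoder compiles a Go type into a linked chain of `Opcode`s, and each `case` appends bytes to `b` and then advances to `code.Next` (or jumps via `code.NextField` / `code.End.Next` when an omit-empty field or an empty collection is skipped). A toy sketch of that dispatch shape, using made-up types rather than the real `encoder` ones:

```go
package main

import "fmt"

// op is a minimal stand-in for encoder.Opcode: each opcode emits some bytes
// and points at the opcode to execute next, so the VM is a loop over a list.
type op struct {
	emit string
	next *op
}

// run mirrors the `for { switch code.Op { ... } }` shape of the loop above:
// append output for the current opcode, then follow the chain.
func run(code *op) []byte {
	var b []byte
	for code != nil {
		b = append(b, code.emit...)
		code = code.next
	}
	return b
}

func main() {
	end := &op{emit: "}"}
	field := &op{emit: `"a":1`, next: end}
	head := &op{emit: "{", next: field}
	fmt.Println(string(run(head))) // prints {"a":1}
}
```
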
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go
new file mode 100644
index 0000000000..925f61ed8e
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go
@@ -0,0 +1,35 @@
+package vm_color
+
+import (
+	"fmt"
+
+	"github.com/goccy/go-json/internal/encoder"
+)
+
+func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
+	var code *encoder.Opcode
+	if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+		code = codeSet.EscapeKeyCode
+	} else {
+		code = codeSet.NoescapeKeyCode
+	}
+
+	defer func() {
+		if err := recover(); err != nil {
+			w := ctx.Option.DebugOut
+			fmt.Fprintln(w, "=============[DEBUG]===============")
+			fmt.Fprintln(w, "* [TYPE]")
+			fmt.Fprintln(w, codeSet.Type)
+			fmt.Fprintf(w, "\n")
+			fmt.Fprintln(w, "* [ALL OPCODE]")
+			fmt.Fprintln(w, code.Dump())
+			fmt.Fprintf(w, "\n")
+			fmt.Fprintln(w, "* [CONTEXT]")
+			fmt.Fprintf(w, "%+v\n", ctx)
+			fmt.Fprintln(w, "===================================")
+			panic(err)
+		}
+	}()
+
+	return Run(ctx, b, codeSet)
+}
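
`DebugRun` wraps the normal `Run` with a deferred `recover` that dumps the compiled opcode program and runtime context to `ctx.Option.DebugOut` and then re-raises the panic, so the original failure is still visible to the caller. The wrap-dump-repanic pattern in isolation (the helper and its names below are illustrative, not part of go-json):

```go
package main

import "fmt"

// withPanicDump runs fn; if fn panics, it prints the supplied diagnostic
// state and then re-panics so the caller still sees the original failure.
func withPanicDump(state string, fn func() []byte) []byte {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("=============[DEBUG]===============")
			fmt.Println(state)
			fmt.Println("===================================")
			panic(r) // re-raise after dumping
		}
	}()
	return fn()
}

func main() {
	out := withPanicDump("opcode dump would go here", func() []byte {
		return []byte(`{"ok":true}`)
	})
	fmt.Println(string(out))
}
```
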
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go
new file mode 100644
index 0000000000..12ec56c5bb
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go
@@ -0,0 +1,9 @@
+package vm_color
+
+import (
+	// HACK: compile order
+	// The `vm`, `vm_indent`, `vm_color`, and `vm_color_indent` packages use a lot of memory to compile,
+	// so we forcibly add dependencies between them to avoid compiling them concurrently.
+	// dependency order: vm => vm_indent => vm_color => vm_color_indent
+	_ "github.com/goccy/go-json/internal/encoder/vm_color_indent"
+)
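
The blank import in `hack.go` exists only to add a build-dependency edge: the four generated VM packages are each expensive to compile, so each one blank-imports the next in the chain, forcing the toolchain to build them sequentially rather than all at once. The same trick in a trivially buildable form, with `encoding/json` standing in for the heavy sibling package:

```go
// Package ordering shows the blank-import trick used by hack.go: nothing from
// the imported package is referenced, but the import still forces it to be
// compiled before this package, creating an explicit build-order edge.
package ordering

import (
	_ "encoding/json" // blank import: dependency edge only
)
```
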
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go
new file mode 100644
index 0000000000..33f29aee44
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go
@@ -0,0 +1,274 @@
+package vm_color
+
+import (
+	"encoding/json"
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/encoder"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+const uintptrSize = 4 << (^uintptr(0) >> 63)
+
+var (
+	errUnsupportedValue = encoder.ErrUnsupportedValue
+	errUnsupportedFloat = encoder.ErrUnsupportedFloat
+	mapiterinit         = encoder.MapIterInit
+	mapiterkey          = encoder.MapIterKey
+	mapitervalue        = encoder.MapIterValue
+	mapiternext         = encoder.MapIterNext
+	maplen              = encoder.MapLen
+)
+
+type emptyInterface struct {
+	typ *runtime.Type
+	ptr unsafe.Pointer
+}
+
+type nonEmptyInterface struct {
+	itab *struct {
+		ityp *runtime.Type // static interface type
+		typ  *runtime.Type // dynamic concrete type
+		// unused fields...
+	}
+	ptr unsafe.Pointer
+}
+
+func errUnimplementedOp(op encoder.OpType) error {
+	return fmt.Errorf("encoder: opcode %s has not been implemented", op)
+}
+
+func load(base uintptr, idx uint32) uintptr {
+	addr := base + uintptr(idx)
+	return **(**uintptr)(unsafe.Pointer(&addr))
+}
+
+func store(base uintptr, idx uint32, p uintptr) {
+	addr := base + uintptr(idx)
+	**(**uintptr)(unsafe.Pointer(&addr)) = p
+}
+
+func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr {
+	addr := base + uintptr(idx)
+	p := **(**uintptr)(unsafe.Pointer(&addr))
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUint64(p uintptr, bitSize uint8) uint64 {
+	switch bitSize {
+	case 8:
+		return (uint64)(**(**uint8)(unsafe.Pointer(&p)))
+	case 16:
+		return (uint64)(**(**uint16)(unsafe.Pointer(&p)))
+	case 32:
+		return (uint64)(**(**uint32)(unsafe.Pointer(&p)))
+	case 64:
+		return **(**uint64)(unsafe.Pointer(&p))
+	}
+	return 0
+}
+func ptrToFloat32(p uintptr) float32            { return **(**float32)(unsafe.Pointer(&p)) }
+func ptrToFloat64(p uintptr) float64            { return **(**float64)(unsafe.Pointer(&p)) }
+func ptrToBool(p uintptr) bool                  { return **(**bool)(unsafe.Pointer(&p)) }
+func ptrToBytes(p uintptr) []byte               { return **(**[]byte)(unsafe.Pointer(&p)) }
+func ptrToNumber(p uintptr) json.Number         { return **(**json.Number)(unsafe.Pointer(&p)) }
+func ptrToString(p uintptr) string              { return **(**string)(unsafe.Pointer(&p)) }
+func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) }
+func ptrToPtr(p uintptr) uintptr {
+	return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p)))
+}
+func ptrToNPtr(p uintptr, ptrNum uint8) uintptr {
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUnsafePtr(p uintptr) unsafe.Pointer {
+	return *(*unsafe.Pointer)(unsafe.Pointer(&p))
+}
+func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} {
+	return *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: code.Type,
+		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)),
+	}))
+}
+
+func appendInt(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte {
+	format := ctx.Option.ColorScheme.Int
+	b = append(b, format.Header...)
+	b = encoder.AppendInt(ctx, b, p, code)
+	return append(b, format.Footer...)
+}
+
+func appendUint(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte {
+	format := ctx.Option.ColorScheme.Uint
+	b = append(b, format.Header...)
+	b = encoder.AppendUint(ctx, b, p, code)
+	return append(b, format.Footer...)
+}
+
+func appendFloat32(ctx *encoder.RuntimeContext, b []byte, v float32) []byte {
+	format := ctx.Option.ColorScheme.Float
+	b = append(b, format.Header...)
+	b = encoder.AppendFloat32(ctx, b, v)
+	return append(b, format.Footer...)
+}
+
+func appendFloat64(ctx *encoder.RuntimeContext, b []byte, v float64) []byte {
+	format := ctx.Option.ColorScheme.Float
+	b = append(b, format.Header...)
+	b = encoder.AppendFloat64(ctx, b, v)
+	return append(b, format.Footer...)
+}
+
+func appendString(ctx *encoder.RuntimeContext, b []byte, v string) []byte {
+	format := ctx.Option.ColorScheme.String
+	b = append(b, format.Header...)
+	b = encoder.AppendString(ctx, b, v)
+	return append(b, format.Footer...)
+}
+
+func appendByteSlice(ctx *encoder.RuntimeContext, b []byte, src []byte) []byte {
+	format := ctx.Option.ColorScheme.Binary
+	b = append(b, format.Header...)
+	b = encoder.AppendByteSlice(ctx, b, src)
+	return append(b, format.Footer...)
+}
+
+func appendNumber(ctx *encoder.RuntimeContext, b []byte, n json.Number) ([]byte, error) {
+	format := ctx.Option.ColorScheme.Int
+	b = append(b, format.Header...)
+	bb, err := encoder.AppendNumber(ctx, b, n)
+	if err != nil {
+		return nil, err
+	}
+	return append(bb, format.Footer...), nil
+}
+
+func appendBool(ctx *encoder.RuntimeContext, b []byte, v bool) []byte {
+	format := ctx.Option.ColorScheme.Bool
+	b = append(b, format.Header...)
+	if v {
+		b = append(b, "true"...)
+	} else {
+		b = append(b, "false"...)
+	}
+	return append(b, format.Footer...)
+}
+
+func appendNull(ctx *encoder.RuntimeContext, b []byte) []byte {
+	format := ctx.Option.ColorScheme.Null
+	b = append(b, format.Header...)
+	b = append(b, "null"...)
+	return append(b, format.Footer...)
+}
+
+func appendComma(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, ',')
+}
+
+func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte {
+	format := ctx.Option.ColorScheme.Null
+	b = append(b, format.Header...)
+	b = append(b, "null"...)
+	return append(append(b, format.Footer...), ',')
+}
+
+func appendColon(_ *encoder.RuntimeContext, b []byte) []byte {
+	last := len(b) - 1
+	b[last] = ':'
+	return b
+}
+
+func appendMapKeyValue(_ *encoder.RuntimeContext, _ *encoder.Opcode, b, key, value []byte) []byte {
+	b = append(b, key[:len(key)-1]...)
+	b = append(b, ':')
+	return append(b, value...)
+}
+
+func appendMapEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	b[last] = '}'
+	b = append(b, ',')
+	return b
+}
+
+func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	return encoder.AppendMarshalJSON(ctx, code, b, v)
+}
+
+func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	format := ctx.Option.ColorScheme.String
+	b = append(b, format.Header...)
+	bb, err := encoder.AppendMarshalText(ctx, code, b, v)
+	if err != nil {
+		return nil, err
+	}
+	return append(bb, format.Footer...), nil
+}
+
+func appendArrayHead(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
+	return append(b, '[')
+}
+
+func appendArrayEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	b[last] = ']'
+	return append(b, ',')
+}
+
+func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '[', ']', ',')
+}
+
+func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '{', '}', ',')
+}
+
+func appendObjectEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	b[last] = '}'
+	return append(b, ',')
+}
+
+func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '{')
+}
+
+func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	format := ctx.Option.ColorScheme.ObjectKey
+	b = append(b, format.Header...)
+	b = append(b, code.Key[:len(code.Key)-1]...)
+	b = append(b, format.Footer...)
+
+	return append(b, ':')
+}
+
+func appendStructEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
+	return append(b, '}', ',')
+}
+
+func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	if b[last] == ',' {
+		b[last] = '}'
+		return appendComma(ctx, b)
+	}
+	return appendStructEnd(ctx, code, b)
+}
+
+func restoreIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, _ uintptr)               {}
+func storeIndent(_ uintptr, _ *encoder.Opcode, _ uintptr)                                 {}
+func appendMapKeyIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte    { return b }
+func appendArrayElemIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b }
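
All the scalar `append*` helpers above share one shape: emit the color scheme's `Header` bytes, then the encoded value, then the `Footer` bytes, while purely structural output (`appendComma`, `appendColon`, object and array delimiters) stays uncolored. A standalone sketch of that Header/Footer wrapping with plain ANSI escapes — the `colorFormat` type here is illustrative, not the go-json `ColorScheme`:

```go
package main

import "fmt"

// colorFormat mirrors the Header/Footer idea: byte sequences written
// immediately before and after an encoded value.
type colorFormat struct {
	Header, Footer string
}

// appendColored wraps an already-encoded value in the format's escape codes,
// the way appendString/appendInt/etc. do above.
func appendColored(b []byte, f colorFormat, encoded string) []byte {
	b = append(b, f.Header...)
	b = append(b, encoded...)
	return append(b, f.Footer...)
}

func main() {
	green := colorFormat{Header: "\x1b[32m", Footer: "\x1b[0m"}
	out := appendColored(nil, green, `"hello"`)
	out = append(out, ',') // structural characters stay uncolored
	fmt.Println(string(out))
}
```
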
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go
new file mode 100644
index 0000000000..a63e83e550
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go
@@ -0,0 +1,4859 @@
+// Code generated by internal/cmd/generator. DO NOT EDIT!
+package vm_color
+
+import (
+	"math"
+	"reflect"
+	"sort"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/encoder"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
+	recursiveLevel := 0
+	ptrOffset := uintptr(0)
+	ctxptr := ctx.Ptr()
+	var code *encoder.Opcode
+	if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+		code = codeSet.EscapeKeyCode
+	} else {
+		code = codeSet.NoescapeKeyCode
+	}
+
+	for {
+		switch code.Op {
+		default:
+			return nil, errUnimplementedOp(code.Op)
+		case encoder.OpPtr:
+			p := load(ctxptr, code.Idx)
+			code = code.Next
+			store(ctxptr, code.Idx, ptrToPtr(p))
+		case encoder.OpIntPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpInt:
+			b = appendInt(ctx, b, load(ctxptr, code.Idx), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpUintPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpUint:
+			b = appendUint(ctx, b, load(ctxptr, code.Idx), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpIntString:
+			b = append(b, '"')
+			b = appendInt(ctx, b, load(ctxptr, code.Idx), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpUintString:
+			b = append(b, '"')
+			b = appendUint(ctx, b, load(ctxptr, code.Idx), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpFloat32Ptr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+				b = appendComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpFloat32:
+			b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpFloat64Ptr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpFloat64:
+			v := ptrToFloat64(load(ctxptr, code.Idx))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStringPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpString:
+			b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpBoolPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpBool:
+			b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpBytesPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpBytes:
+			b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpNumberPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpNumber:
+			bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpInterfacePtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpInterface:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			if recursiveLevel > encoder.StartDetectingCyclesAfter {
+				for _, seen := range ctx.SeenPtr {
+					if p == seen {
+						return nil, errUnsupportedValue(code, p)
+					}
+				}
+			}
+			ctx.SeenPtr = append(ctx.SeenPtr, p)
+			var (
+				typ      *runtime.Type
+				ifacePtr unsafe.Pointer
+			)
+			up := ptrToUnsafePtr(p)
+			if code.Flags&encoder.NonEmptyInterfaceFlags != 0 {
+				iface := (*nonEmptyInterface)(up)
+				ifacePtr = iface.ptr
+				if iface.itab != nil {
+					typ = iface.itab.typ
+				}
+			} else {
+				iface := (*emptyInterface)(up)
+				ifacePtr = iface.ptr
+				typ = iface.typ
+			}
+			if ifacePtr == nil {
+				isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ)
+				if !isDirectedNil {
+					b = appendNullComma(ctx, b)
+					code = code.Next
+					break
+				}
+			}
+			ctx.KeepRefs = append(ctx.KeepRefs, up)
+			ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ)))
+			if err != nil {
+				return nil, err
+			}
+
+			totalLength := uintptr(code.Length) + 3
+			nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3
+
+			var c *encoder.Opcode
+			if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+				c = ifaceCodeSet.InterfaceEscapeKeyCode
+			} else {
+				c = ifaceCodeSet.InterfaceNoescapeKeyCode
+			}
+			curlen := uintptr(len(ctx.Ptrs))
+			offsetNum := ptrOffset / uintptrSize
+			oldOffset := ptrOffset
+			ptrOffset += totalLength * uintptrSize
+			oldBaseIndent := ctx.BaseIndent
+			ctx.BaseIndent += code.Indent
+
+			newLen := offsetNum + totalLength + nextTotalLength
+			if curlen < newLen {
+				ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...)
+			}
+			ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr
+
+			end := ifaceCodeSet.EndCode
+			store(ctxptr, c.Idx, uintptr(ifacePtr))
+			store(ctxptr, end.Idx, oldOffset)
+			store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next)))
+			storeIndent(ctxptr, end, uintptr(oldBaseIndent))
+			code = c
+			recursiveLevel++
+		case encoder.OpInterfaceEnd:
+			recursiveLevel--
+
+			// restore ctxptr
+			offset := load(ctxptr, code.Idx)
+			restoreIndent(ctx, code, ctxptr)
+			ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1]
+
+			codePtr := load(ctxptr, code.ElemIdx)
+			code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr))
+			ctxptr = ctx.Ptr() + offset
+			ptrOffset = offset
+		case encoder.OpMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToPtr(p))
+			fallthrough
+		case encoder.OpMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToPtr(p))
+			fallthrough
+		case encoder.OpMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = append(b, `""`...)
+				b = appendComma(ctx, b)
+				code = code.Next
+				break
+			}
+			if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpSlicePtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpSlice:
+			p := load(ctxptr, code.Idx)
+			slice := ptrToSlice(p)
+			if p == 0 || slice.Data == nil {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.ElemIdx, 0)
+			store(ctxptr, code.Length, uintptr(slice.Len))
+			store(ctxptr, code.Idx, uintptr(slice.Data))
+			if slice.Len > 0 {
+				b = appendArrayHead(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, uintptr(slice.Data))
+			} else {
+				b = appendEmptyArray(ctx, b)
+				code = code.End.Next
+			}
+		case encoder.OpSliceElem:
+			idx := load(ctxptr, code.ElemIdx)
+			length := load(ctxptr, code.Length)
+			idx++
+			if idx < length {
+				b = appendArrayElemIndent(ctx, code, b)
+				store(ctxptr, code.ElemIdx, idx)
+				data := load(ctxptr, code.Idx)
+				size := uintptr(code.Size)
+				code = code.Next
+				store(ctxptr, code.Idx, data+idx*size)
+			} else {
+				b = appendArrayEnd(ctx, code, b)
+				code = code.End.Next
+			}
+		case encoder.OpArrayPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpArray:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			if code.Length > 0 {
+				b = appendArrayHead(ctx, code, b)
+				store(ctxptr, code.ElemIdx, 0)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				b = appendEmptyArray(ctx, b)
+				code = code.End.Next
+			}
+		case encoder.OpArrayElem:
+			idx := load(ctxptr, code.ElemIdx)
+			idx++
+			if idx < uintptr(code.Length) {
+				b = appendArrayElemIndent(ctx, code, b)
+				store(ctxptr, code.ElemIdx, idx)
+				p := load(ctxptr, code.Idx)
+				size := uintptr(code.Size)
+				code = code.Next
+				store(ctxptr, code.Idx, p+idx*size)
+			} else {
+				b = appendArrayEnd(ctx, code, b)
+				code = code.End.Next
+			}
+		case encoder.OpMapPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			uptr := ptrToUnsafePtr(p)
+			mlen := maplen(uptr)
+			if mlen <= 0 {
+				b = appendEmptyObject(ctx, b)
+				code = code.End.Next
+				break
+			}
+			b = appendStructHead(ctx, b)
+			unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0
+			mapCtx := encoder.NewMapContext(mlen, unorderedMap)
+			mapiterinit(code.Type, uptr, &mapCtx.Iter)
+			store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx)))
+			ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx))
+			if unorderedMap {
+				b = appendMapKeyIndent(ctx, code.Next, b)
+			} else {
+				mapCtx.Start = len(b)
+				mapCtx.First = len(b)
+			}
+			key := mapiterkey(&mapCtx.Iter)
+			store(ctxptr, code.Next.Idx, uintptr(key))
+			code = code.Next
+		case encoder.OpMapKey:
+			mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx)))
+			idx := mapCtx.Idx
+			idx++
+			if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 {
+				if idx < mapCtx.Len {
+					b = appendMapKeyIndent(ctx, code, b)
+					mapCtx.Idx = int(idx)
+					key := mapiterkey(&mapCtx.Iter)
+					store(ctxptr, code.Next.Idx, uintptr(key))
+					code = code.Next
+				} else {
+					b = appendObjectEnd(ctx, code, b)
+					encoder.ReleaseMapContext(mapCtx)
+					code = code.End.Next
+				}
+			} else {
+				mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)]
+				if idx < mapCtx.Len {
+					mapCtx.Idx = int(idx)
+					mapCtx.Start = len(b)
+					key := mapiterkey(&mapCtx.Iter)
+					store(ctxptr, code.Next.Idx, uintptr(key))
+					code = code.Next
+				} else {
+					code = code.End
+				}
+			}
+		case encoder.OpMapValue:
+			mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx)))
+			if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 {
+				b = appendColon(ctx, b)
+			} else {
+				mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)]
+				mapCtx.Start = len(b)
+			}
+			value := mapitervalue(&mapCtx.Iter)
+			store(ctxptr, code.Next.Idx, uintptr(value))
+			mapiternext(&mapCtx.Iter)
+			code = code.Next
+		case encoder.OpMapEnd:
+			// this operation is only used when map keys are sorted.
+			mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx)))
+			sort.Sort(mapCtx.Slice)
+			buf := mapCtx.Buf
+			for _, item := range mapCtx.Slice.Items {
+				buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value)
+			}
+			buf = appendMapEnd(ctx, code, buf)
+			b = b[:mapCtx.First]
+			b = append(b, buf...)
+			mapCtx.Buf = buf
+			encoder.ReleaseMapContext(mapCtx)
+			code = code.Next
+		case encoder.OpRecursivePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpRecursive:
+			ptr := load(ctxptr, code.Idx)
+			if ptr != 0 {
+				if recursiveLevel > encoder.StartDetectingCyclesAfter {
+					for _, seen := range ctx.SeenPtr {
+						if ptr == seen {
+							return nil, errUnsupportedValue(code, ptr)
+						}
+					}
+				}
+			}
+			ctx.SeenPtr = append(ctx.SeenPtr, ptr)
+			c := code.Jmp.Code
+			curlen := uintptr(len(ctx.Ptrs))
+			offsetNum := ptrOffset / uintptrSize
+			oldOffset := ptrOffset
+			ptrOffset += code.Jmp.CurLen * uintptrSize
+			oldBaseIndent := ctx.BaseIndent
+			indentDiffFromTop := c.Indent - 1
+			ctx.BaseIndent += code.Indent - indentDiffFromTop
+
+			newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen
+			if curlen < newLen {
+				ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...)
+			}
+			ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr
+
+			store(ctxptr, c.Idx, ptr)
+			store(ctxptr, c.End.Next.Idx, oldOffset)
+			store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next)))
+			storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent))
+			code = c
+			recursiveLevel++
+		case encoder.OpRecursiveEnd:
+			recursiveLevel--
+
+			// restore ctxptr
+			restoreIndent(ctx, code, ctxptr)
+			offset := load(ctxptr, code.Idx)
+			ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1]
+
+			codePtr := load(ctxptr, code.ElemIdx)
+			code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr))
+			ctxptr = ctx.Ptr() + offset
+			ptrOffset = offset
+		case encoder.OpStructPtrHead:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHead:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if len(code.Key) > 0 {
+				if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 {
+					b = appendStructKey(ctx, code, b)
+				}
+			}
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadInt:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadInt:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyInt:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadIntString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadIntString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyIntString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			u64 := ptrToUint64(p, code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadUint:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadUint:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUint:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUint:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadUintString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadUintString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUintString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUintString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendUint(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat32:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat32:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat32String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat32String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat64:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat64:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat64String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat64String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat64(ctx, b, v)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNull(ctx, b)
+					b = appendComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToString(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadStringString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadStringString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset)))))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyStringString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyStringString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToString(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, v)))
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, ptrToString(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, ptrToString(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadBool:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadBool:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBool:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBool:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructPtrHeadBoolString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadBoolString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBoolString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBoolString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructPtrHeadBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendBool(ctx, b, ptrToBool(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadBytes:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadBytes:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBytes:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBytes:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToBytes(p + uintptr(code.Offset))
+			if len(v) == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadNumber:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadNumber:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumber:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumber:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadNumberString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadNumberString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = append(bb, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumberString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumberString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadArray, encoder.OpStructHeadSlice:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmptyArray:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyArray:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			b = appendStructKey(ctx, code, b)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmptySlice:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptySlice:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			slice := ptrToSlice(p)
+			if slice.Len == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.NextField
+			} else {
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p + uintptr(code.Offset))
+			}
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmptyMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p + uintptr(code.Offset))
+			}
+			if maplen(ptrToUnsafePtr(p)) == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.NextField
+				break
+			}
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.NextField
+			} else {
+				if (code.Flags & encoder.IndirectFlags) != 0 {
+					p = ptrToNPtr(p, code.PtrNum)
+				}
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadOmitEmptyMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+				break
+			}
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p == 0 {
+				code = code.NextField
+			} else {
+				if (code.Flags & encoder.IndirectFlags) != 0 {
+					p = ptrToNPtr(p, code.PtrNum)
+				}
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON {
+					p = ptrToPtr(p)
+				}
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON {
+					p = ptrToPtr(p)
+				}
+			}
+			iface := ptrToInterface(code, p)
+			if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalJSON(ctx, code, b, iface)
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText {
+					p = ptrToPtr(p)
+				}
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText {
+					p = ptrToPtr(p)
+				}
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructField:
+			if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 {
+				b = appendStructKey(ctx, code, b)
+			}
+			p := load(ctxptr, code.Idx) + uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldInt:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUint:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUint:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUintString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUintString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUintPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendUint(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUintPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32String:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat64(ctx, b, v)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			v := ptrToFloat64(p)
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldStringString:
+			p := load(ctxptr, code.Idx)
+			s := ptrToString(p + uintptr(code.Offset))
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, string(appendString(ctx, []byte{}, s)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyStringString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, v)))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldStringPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, ptrToString(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, ptrToString(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldStringPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBool:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBool:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBoolString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBoolString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBoolPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendBool(ctx, b, ptrToBool(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBytes:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBytes:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBytes(p + uintptr(code.Offset))
+			if len(v) > 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBytesPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumber:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumber:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumberString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = append(bb, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumberString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumberPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				code = code.NextField
+				break
+			}
+			iface := ptrToInterface(code, p)
+			if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) {
+				code = code.NextField
+				break
+			}
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendMarshalJSON(ctx, code, b, iface)
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructFieldMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldMarshalText:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalText:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				code = code.NextField
+				break
+			}
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructFieldMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldArray:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyArray:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldArrayPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyArrayPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructFieldSlice:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptySlice:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			slice := ptrToSlice(p)
+			if slice.Len == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldSlicePtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptySlicePtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructFieldMap:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyMap:
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldMapPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p != 0 {
+				p = ptrToNPtr(p, code.PtrNum)
+			}
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyMapPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p != 0 {
+				p = ptrToNPtr(p, code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructFieldStruct:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyStruct:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructEnd:
+			b = appendStructEndSkipLast(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndInt:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndIntString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndIntPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p, code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndIntPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUint:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUint:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUintString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUintString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUintPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendUint(ctx, b, p, code)
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p, code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUintPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32String:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32Ptr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32PtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat64(ctx, b, v)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat64(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat64(ctx, b, v)
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64Ptr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+				b = appendStructEnd(ctx, code, b)
+				code = code.Next
+				break
+			}
+			v := ptrToFloat64(p)
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64PtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndStringString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			s := ptrToString(p + uintptr(code.Offset))
+			b = appendString(ctx, b, string(appendString(ctx, []byte{}, s)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyStringString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, v)))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndStringPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, ptrToString(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, ptrToString(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndStringPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBool:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBool:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBoolString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBoolString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBoolPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendBool(ctx, b, ptrToBool(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBoolPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBytes:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBytes:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBytes(p + uintptr(code.Offset))
+			if len(v) > 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBytesPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumber:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendStructEnd(ctx, code, bb)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumber:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendStructEnd(ctx, code, bb)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = append(bb, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendStructEnd(ctx, code, bb)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpEnd:
+			goto END
+		}
+	}
+END:
+	return b, nil
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go
new file mode 100644
index 0000000000..dd4cd489e0
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go
@@ -0,0 +1,35 @@
+package vm_color_indent
+
+import (
+	"fmt"
+
+	"github.com/goccy/go-json/internal/encoder"
+)
+
+func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
+	var code *encoder.Opcode
+	if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+		code = codeSet.EscapeKeyCode
+	} else {
+		code = codeSet.NoescapeKeyCode
+	}
+
+	defer func() {
+		if err := recover(); err != nil {
+			w := ctx.Option.DebugOut
+			fmt.Fprintln(w, "=============[DEBUG]===============")
+			fmt.Fprintln(w, "* [TYPE]")
+			fmt.Fprintln(w, codeSet.Type)
+			fmt.Fprintf(w, "\n")
+			fmt.Fprintln(w, "* [ALL OPCODE]")
+			fmt.Fprintln(w, code.Dump())
+			fmt.Fprintf(w, "\n")
+			fmt.Fprintln(w, "* [CONTEXT]")
+			fmt.Fprintf(w, "%+v\n", ctx)
+			fmt.Fprintln(w, "===================================")
+			panic(err)
+		}
+	}()
+
+	return Run(ctx, b, codeSet)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go
new file mode 100644
index 0000000000..2395abec97
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go
@@ -0,0 +1,297 @@
+package vm_color_indent
+
+import (
+	"encoding/json"
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/encoder"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+const uintptrSize = 4 << (^uintptr(0) >> 63)
+
+var (
+	appendIndent        = encoder.AppendIndent
+	appendStructEnd     = encoder.AppendStructEndIndent
+	errUnsupportedValue = encoder.ErrUnsupportedValue
+	errUnsupportedFloat = encoder.ErrUnsupportedFloat
+	mapiterinit         = encoder.MapIterInit
+	mapiterkey          = encoder.MapIterKey
+	mapitervalue        = encoder.MapIterValue
+	mapiternext         = encoder.MapIterNext
+	maplen              = encoder.MapLen
+)
+
+type emptyInterface struct {
+	typ *runtime.Type
+	ptr unsafe.Pointer
+}
+
+type nonEmptyInterface struct {
+	itab *struct {
+		ityp *runtime.Type // static interface type
+		typ  *runtime.Type // dynamic concrete type
+		// unused fields...
+	}
+	ptr unsafe.Pointer
+}
+
+func errUnimplementedOp(op encoder.OpType) error {
+	return fmt.Errorf("encoder (indent): opcode %s has not been implemented", op)
+}
+
+func load(base uintptr, idx uint32) uintptr {
+	addr := base + uintptr(idx)
+	return **(**uintptr)(unsafe.Pointer(&addr))
+}
+
+func store(base uintptr, idx uint32, p uintptr) {
+	addr := base + uintptr(idx)
+	**(**uintptr)(unsafe.Pointer(&addr)) = p
+}
+
+func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr {
+	addr := base + uintptr(idx)
+	p := **(**uintptr)(unsafe.Pointer(&addr))
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUint64(p uintptr, bitSize uint8) uint64 {
+	switch bitSize {
+	case 8:
+		return (uint64)(**(**uint8)(unsafe.Pointer(&p)))
+	case 16:
+		return (uint64)(**(**uint16)(unsafe.Pointer(&p)))
+	case 32:
+		return (uint64)(**(**uint32)(unsafe.Pointer(&p)))
+	case 64:
+		return **(**uint64)(unsafe.Pointer(&p))
+	}
+	return 0
+}
+
+func ptrToFloat32(p uintptr) float32            { return **(**float32)(unsafe.Pointer(&p)) }
+func ptrToFloat64(p uintptr) float64            { return **(**float64)(unsafe.Pointer(&p)) }
+func ptrToBool(p uintptr) bool                  { return **(**bool)(unsafe.Pointer(&p)) }
+func ptrToBytes(p uintptr) []byte               { return **(**[]byte)(unsafe.Pointer(&p)) }
+func ptrToNumber(p uintptr) json.Number         { return **(**json.Number)(unsafe.Pointer(&p)) }
+func ptrToString(p uintptr) string              { return **(**string)(unsafe.Pointer(&p)) }
+func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) }
+func ptrToPtr(p uintptr) uintptr {
+	return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p)))
+}
+func ptrToNPtr(p uintptr, ptrNum uint8) uintptr {
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUnsafePtr(p uintptr) unsafe.Pointer {
+	return *(*unsafe.Pointer)(unsafe.Pointer(&p))
+}
+func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} {
+	return *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: code.Type,
+		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)),
+	}))
+}
+
+func appendInt(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte {
+	format := ctx.Option.ColorScheme.Int
+	b = append(b, format.Header...)
+	b = encoder.AppendInt(ctx, b, p, code)
+	return append(b, format.Footer...)
+}
+
+func appendUint(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte {
+	format := ctx.Option.ColorScheme.Uint
+	b = append(b, format.Header...)
+	b = encoder.AppendUint(ctx, b, p, code)
+	return append(b, format.Footer...)
+}
+
+func appendFloat32(ctx *encoder.RuntimeContext, b []byte, v float32) []byte {
+	format := ctx.Option.ColorScheme.Float
+	b = append(b, format.Header...)
+	b = encoder.AppendFloat32(ctx, b, v)
+	return append(b, format.Footer...)
+}
+
+func appendFloat64(ctx *encoder.RuntimeContext, b []byte, v float64) []byte {
+	format := ctx.Option.ColorScheme.Float
+	b = append(b, format.Header...)
+	b = encoder.AppendFloat64(ctx, b, v)
+	return append(b, format.Footer...)
+}
+
+func appendString(ctx *encoder.RuntimeContext, b []byte, v string) []byte {
+	format := ctx.Option.ColorScheme.String
+	b = append(b, format.Header...)
+	b = encoder.AppendString(ctx, b, v)
+	return append(b, format.Footer...)
+}
+
+func appendByteSlice(ctx *encoder.RuntimeContext, b []byte, src []byte) []byte {
+	format := ctx.Option.ColorScheme.Binary
+	b = append(b, format.Header...)
+	b = encoder.AppendByteSlice(ctx, b, src)
+	return append(b, format.Footer...)
+}
+
+func appendNumber(ctx *encoder.RuntimeContext, b []byte, n json.Number) ([]byte, error) {
+	format := ctx.Option.ColorScheme.Int
+	b = append(b, format.Header...)
+	bb, err := encoder.AppendNumber(ctx, b, n)
+	if err != nil {
+		return nil, err
+	}
+	return append(bb, format.Footer...), nil
+}
+
+func appendBool(ctx *encoder.RuntimeContext, b []byte, v bool) []byte {
+	format := ctx.Option.ColorScheme.Bool
+	b = append(b, format.Header...)
+	if v {
+		b = append(b, "true"...)
+	} else {
+		b = append(b, "false"...)
+	}
+	return append(b, format.Footer...)
+}
+
+func appendNull(ctx *encoder.RuntimeContext, b []byte) []byte {
+	format := ctx.Option.ColorScheme.Null
+	b = append(b, format.Header...)
+	b = append(b, "null"...)
+	return append(b, format.Footer...)
+}
+
+func appendComma(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, ',', '\n')
+}
+
+func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte {
+	format := ctx.Option.ColorScheme.Null
+	b = append(b, format.Header...)
+	b = append(b, "null"...)
+	return append(append(b, format.Footer...), ',', '\n')
+}
+
+func appendColon(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b[:len(b)-2], ':', ' ')
+}
+
+func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte {
+	b = appendIndent(ctx, b, code.Indent+1)
+	b = append(b, key...)
+	b[len(b)-2] = ':'
+	b[len(b)-1] = ' '
+	return append(b, value...)
+}
+
+func appendMapEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = b[:len(b)-2]
+	b = append(b, '\n')
+	b = appendIndent(ctx, b, code.Indent)
+	return append(b, '}', ',', '\n')
+}
+
+func appendArrayHead(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = append(b, '[', '\n')
+	return appendIndent(ctx, b, code.Indent+1)
+}
+
+func appendArrayEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = b[:len(b)-2]
+	b = append(b, '\n')
+	b = appendIndent(ctx, b, code.Indent)
+	return append(b, ']', ',', '\n')
+}
+
+func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '[', ']', ',', '\n')
+}
+
+func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '{', '}', ',', '\n')
+}
+
+func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	// replace the trailing comma with a newline
+	b[last-1] = '\n'
+	b = appendIndent(ctx, b[:last], code.Indent)
+	return append(b, '}', ',', '\n')
+}
+
+func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	return encoder.AppendMarshalJSONIndent(ctx, code, b, v)
+}
+
+func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	format := ctx.Option.ColorScheme.String
+	b = append(b, format.Header...)
+	bb, err := encoder.AppendMarshalTextIndent(ctx, code, b, v)
+	if err != nil {
+		return nil, err
+	}
+	return append(bb, format.Footer...), nil
+}
+
+func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '{', '\n')
+}
+
+func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = appendIndent(ctx, b, code.Indent)
+
+	format := ctx.Option.ColorScheme.ObjectKey
+	b = append(b, format.Header...)
+	b = append(b, code.Key[:len(code.Key)-1]...)
+	b = append(b, format.Footer...)
+
+	return append(b, ':', ' ')
+}
+
+func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	if b[last-1] == '{' {
+		b[last] = '}'
+	} else {
+		if b[last] == '\n' {
+			// to remove ',' and '\n' characters
+			b = b[:len(b)-2]
+		}
+		b = append(b, '\n')
+		b = appendIndent(ctx, b, code.Indent-1)
+		b = append(b, '}')
+	}
+	return appendComma(ctx, b)
+}
+
+func restoreIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, ctxptr uintptr) {
+	ctx.BaseIndent = uint32(load(ctxptr, code.Length))
+}
+
+func storeIndent(ctxptr uintptr, code *encoder.Opcode, indent uintptr) {
+	store(ctxptr, code.Length, indent)
+}
+
+func appendArrayElemIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	return appendIndent(ctx, b, code.Indent+1)
+}
+
+func appendMapKeyIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	return appendIndent(ctx, b, code.Indent)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go
new file mode 100644
index 0000000000..3b4e22e5d4
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go
@@ -0,0 +1,4859 @@
+// Code generated by internal/cmd/generator. DO NOT EDIT!
+package vm_color_indent
+
+import (
+	"math"
+	"reflect"
+	"sort"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/encoder"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
+	recursiveLevel := 0
+	ptrOffset := uintptr(0)
+	ctxptr := ctx.Ptr()
+	var code *encoder.Opcode
+	if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+		code = codeSet.EscapeKeyCode
+	} else {
+		code = codeSet.NoescapeKeyCode
+	}
+
+	for {
+		switch code.Op {
+		default:
+			return nil, errUnimplementedOp(code.Op)
+		case encoder.OpPtr:
+			p := load(ctxptr, code.Idx)
+			code = code.Next
+			store(ctxptr, code.Idx, ptrToPtr(p))
+		case encoder.OpIntPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpInt:
+			b = appendInt(ctx, b, load(ctxptr, code.Idx), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpUintPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpUint:
+			b = appendUint(ctx, b, load(ctxptr, code.Idx), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpIntString:
+			b = append(b, '"')
+			b = appendInt(ctx, b, load(ctxptr, code.Idx), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpUintString:
+			b = append(b, '"')
+			b = appendUint(ctx, b, load(ctxptr, code.Idx), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpFloat32Ptr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+				b = appendComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpFloat32:
+			b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpFloat64Ptr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpFloat64:
+			v := ptrToFloat64(load(ctxptr, code.Idx))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStringPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpString:
+			b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpBoolPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpBool:
+			b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpBytesPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpBytes:
+			b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpNumberPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpNumber:
+			bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpInterfacePtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpInterface:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			if recursiveLevel > encoder.StartDetectingCyclesAfter {
+				for _, seen := range ctx.SeenPtr {
+					if p == seen {
+						return nil, errUnsupportedValue(code, p)
+					}
+				}
+			}
+			ctx.SeenPtr = append(ctx.SeenPtr, p)
+			var (
+				typ      *runtime.Type
+				ifacePtr unsafe.Pointer
+			)
+			up := ptrToUnsafePtr(p)
+			if code.Flags&encoder.NonEmptyInterfaceFlags != 0 {
+				iface := (*nonEmptyInterface)(up)
+				ifacePtr = iface.ptr
+				if iface.itab != nil {
+					typ = iface.itab.typ
+				}
+			} else {
+				iface := (*emptyInterface)(up)
+				ifacePtr = iface.ptr
+				typ = iface.typ
+			}
+			if ifacePtr == nil {
+				isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ)
+				if !isDirectedNil {
+					b = appendNullComma(ctx, b)
+					code = code.Next
+					break
+				}
+			}
+			ctx.KeepRefs = append(ctx.KeepRefs, up)
+			ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ)))
+			if err != nil {
+				return nil, err
+			}
+
+			totalLength := uintptr(code.Length) + 3
+			nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3
+
+			var c *encoder.Opcode
+			if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+				c = ifaceCodeSet.InterfaceEscapeKeyCode
+			} else {
+				c = ifaceCodeSet.InterfaceNoescapeKeyCode
+			}
+			curlen := uintptr(len(ctx.Ptrs))
+			offsetNum := ptrOffset / uintptrSize
+			oldOffset := ptrOffset
+			ptrOffset += totalLength * uintptrSize
+			oldBaseIndent := ctx.BaseIndent
+			ctx.BaseIndent += code.Indent
+
+			newLen := offsetNum + totalLength + nextTotalLength
+			if curlen < newLen {
+				ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...)
+			}
+			ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr
+
+			end := ifaceCodeSet.EndCode
+			store(ctxptr, c.Idx, uintptr(ifacePtr))
+			store(ctxptr, end.Idx, oldOffset)
+			store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next)))
+			storeIndent(ctxptr, end, uintptr(oldBaseIndent))
+			code = c
+			recursiveLevel++
+		case encoder.OpInterfaceEnd:
+			recursiveLevel--
+
+			// restore ctxptr
+			offset := load(ctxptr, code.Idx)
+			restoreIndent(ctx, code, ctxptr)
+			ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1]
+
+			codePtr := load(ctxptr, code.ElemIdx)
+			code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr))
+			ctxptr = ctx.Ptr() + offset
+			ptrOffset = offset
+		case encoder.OpMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToPtr(p))
+			fallthrough
+		case encoder.OpMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToPtr(p))
+			fallthrough
+		case encoder.OpMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = append(b, `""`...)
+				b = appendComma(ctx, b)
+				code = code.Next
+				break
+			}
+			if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpSlicePtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpSlice:
+			p := load(ctxptr, code.Idx)
+			slice := ptrToSlice(p)
+			if p == 0 || slice.Data == nil {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.ElemIdx, 0)
+			store(ctxptr, code.Length, uintptr(slice.Len))
+			store(ctxptr, code.Idx, uintptr(slice.Data))
+			if slice.Len > 0 {
+				b = appendArrayHead(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, uintptr(slice.Data))
+			} else {
+				b = appendEmptyArray(ctx, b)
+				code = code.End.Next
+			}
+		case encoder.OpSliceElem:
+			idx := load(ctxptr, code.ElemIdx)
+			length := load(ctxptr, code.Length)
+			idx++
+			if idx < length {
+				b = appendArrayElemIndent(ctx, code, b)
+				store(ctxptr, code.ElemIdx, idx)
+				data := load(ctxptr, code.Idx)
+				size := uintptr(code.Size)
+				code = code.Next
+				store(ctxptr, code.Idx, data+idx*size)
+			} else {
+				b = appendArrayEnd(ctx, code, b)
+				code = code.End.Next
+			}
+		case encoder.OpArrayPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpArray:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			if code.Length > 0 {
+				b = appendArrayHead(ctx, code, b)
+				store(ctxptr, code.ElemIdx, 0)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				b = appendEmptyArray(ctx, b)
+				code = code.End.Next
+			}
+		case encoder.OpArrayElem:
+			idx := load(ctxptr, code.ElemIdx)
+			idx++
+			if idx < uintptr(code.Length) {
+				b = appendArrayElemIndent(ctx, code, b)
+				store(ctxptr, code.ElemIdx, idx)
+				p := load(ctxptr, code.Idx)
+				size := uintptr(code.Size)
+				code = code.Next
+				store(ctxptr, code.Idx, p+idx*size)
+			} else {
+				b = appendArrayEnd(ctx, code, b)
+				code = code.End.Next
+			}
+		case encoder.OpMapPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			uptr := ptrToUnsafePtr(p)
+			mlen := maplen(uptr)
+			if mlen <= 0 {
+				b = appendEmptyObject(ctx, b)
+				code = code.End.Next
+				break
+			}
+			b = appendStructHead(ctx, b)
+			unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0
+			mapCtx := encoder.NewMapContext(mlen, unorderedMap)
+			mapiterinit(code.Type, uptr, &mapCtx.Iter)
+			store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx)))
+			ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx))
+			if unorderedMap {
+				b = appendMapKeyIndent(ctx, code.Next, b)
+			} else {
+				mapCtx.Start = len(b)
+				mapCtx.First = len(b)
+			}
+			key := mapiterkey(&mapCtx.Iter)
+			store(ctxptr, code.Next.Idx, uintptr(key))
+			code = code.Next
+		case encoder.OpMapKey:
+			mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx)))
+			idx := mapCtx.Idx
+			idx++
+			if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 {
+				if idx < mapCtx.Len {
+					b = appendMapKeyIndent(ctx, code, b)
+					mapCtx.Idx = int(idx)
+					key := mapiterkey(&mapCtx.Iter)
+					store(ctxptr, code.Next.Idx, uintptr(key))
+					code = code.Next
+				} else {
+					b = appendObjectEnd(ctx, code, b)
+					encoder.ReleaseMapContext(mapCtx)
+					code = code.End.Next
+				}
+			} else {
+				mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)]
+				if idx < mapCtx.Len {
+					mapCtx.Idx = int(idx)
+					mapCtx.Start = len(b)
+					key := mapiterkey(&mapCtx.Iter)
+					store(ctxptr, code.Next.Idx, uintptr(key))
+					code = code.Next
+				} else {
+					code = code.End
+				}
+			}
+		case encoder.OpMapValue:
+			mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx)))
+			if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 {
+				b = appendColon(ctx, b)
+			} else {
+				mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)]
+				mapCtx.Start = len(b)
+			}
+			value := mapitervalue(&mapCtx.Iter)
+			store(ctxptr, code.Next.Idx, uintptr(value))
+			mapiternext(&mapCtx.Iter)
+			code = code.Next
+		case encoder.OpMapEnd:
+			// this operation is only used by the sorted map.
+			mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx)))
+			sort.Sort(mapCtx.Slice)
+			buf := mapCtx.Buf
+			for _, item := range mapCtx.Slice.Items {
+				buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value)
+			}
+			buf = appendMapEnd(ctx, code, buf)
+			b = b[:mapCtx.First]
+			b = append(b, buf...)
+			mapCtx.Buf = buf
+			encoder.ReleaseMapContext(mapCtx)
+			code = code.Next
+		case encoder.OpRecursivePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpRecursive:
+			ptr := load(ctxptr, code.Idx)
+			if ptr != 0 {
+				if recursiveLevel > encoder.StartDetectingCyclesAfter {
+					for _, seen := range ctx.SeenPtr {
+						if ptr == seen {
+							return nil, errUnsupportedValue(code, ptr)
+						}
+					}
+				}
+			}
+			ctx.SeenPtr = append(ctx.SeenPtr, ptr)
+			c := code.Jmp.Code
+			curlen := uintptr(len(ctx.Ptrs))
+			offsetNum := ptrOffset / uintptrSize
+			oldOffset := ptrOffset
+			ptrOffset += code.Jmp.CurLen * uintptrSize
+			oldBaseIndent := ctx.BaseIndent
+			indentDiffFromTop := c.Indent - 1
+			ctx.BaseIndent += code.Indent - indentDiffFromTop
+
+			newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen
+			if curlen < newLen {
+				ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...)
+			}
+			ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr
+
+			store(ctxptr, c.Idx, ptr)
+			store(ctxptr, c.End.Next.Idx, oldOffset)
+			store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next)))
+			storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent))
+			code = c
+			recursiveLevel++
+		case encoder.OpRecursiveEnd:
+			recursiveLevel--
+
+			// restore ctxptr
+			restoreIndent(ctx, code, ctxptr)
+			offset := load(ctxptr, code.Idx)
+			ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1]
+
+			codePtr := load(ctxptr, code.ElemIdx)
+			code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr))
+			ctxptr = ctx.Ptr() + offset
+			ptrOffset = offset
+		case encoder.OpStructPtrHead:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHead:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if len(code.Key) > 0 {
+				if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 {
+					b = appendStructKey(ctx, code, b)
+				}
+			}
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadInt:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadInt:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyInt:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadIntString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadIntString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyIntString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			u64 := ptrToUint64(p, code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadUint:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadUint:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUint:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUint:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadUintString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadUintString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUintString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUintString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendUint(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat32:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat32:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat32String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat32String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat64:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat64:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat64String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat64String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat64(ctx, b, v)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNull(ctx, b)
+					b = appendComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToString(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadStringString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadStringString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset)))))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyStringString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyStringString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToString(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, v)))
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, ptrToString(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, ptrToString(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadBool:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadBool:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBool:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBool:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructPtrHeadBoolString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadBoolString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBoolString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBoolString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructPtrHeadBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendBool(ctx, b, ptrToBool(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadBytes:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadBytes:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBytes:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBytes:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToBytes(p + uintptr(code.Offset))
+			if len(v) == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadNumber:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadNumber:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumber:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumber:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadNumberString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadNumberString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = append(bb, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumberString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumberString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadArray, encoder.OpStructHeadSlice:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmptyArray:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyArray:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			b = appendStructKey(ctx, code, b)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmptySlice:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptySlice:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			slice := ptrToSlice(p)
+			if slice.Len == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.NextField
+			} else {
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p + uintptr(code.Offset))
+			}
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmptyMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p + uintptr(code.Offset))
+			}
+			if maplen(ptrToUnsafePtr(p)) == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.NextField
+				break
+			}
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.NextField
+			} else {
+				if (code.Flags & encoder.IndirectFlags) != 0 {
+					p = ptrToNPtr(p, code.PtrNum)
+				}
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadOmitEmptyMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+				break
+			}
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p == 0 {
+				code = code.NextField
+			} else {
+				if (code.Flags & encoder.IndirectFlags) != 0 {
+					p = ptrToNPtr(p, code.PtrNum)
+				}
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON {
+					p = ptrToPtr(p)
+				}
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON {
+					p = ptrToPtr(p)
+				}
+			}
+			iface := ptrToInterface(code, p)
+			if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalJSON(ctx, code, b, iface)
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText {
+					p = ptrToPtr(p)
+				}
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText {
+					p = ptrToPtr(p)
+				}
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructField:
+			if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 {
+				b = appendStructKey(ctx, code, b)
+			}
+			p := load(ctxptr, code.Idx) + uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldInt:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUint:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUint:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUintString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUintString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUintPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendUint(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUintPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32String:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat64(ctx, b, v)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			v := ptrToFloat64(p)
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldStringString:
+			p := load(ctxptr, code.Idx)
+			s := ptrToString(p + uintptr(code.Offset))
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, string(appendString(ctx, []byte{}, s)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyStringString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, v)))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldStringPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, ptrToString(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, ptrToString(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldStringPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBool:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBool:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBoolString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBoolString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBoolPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendBool(ctx, b, ptrToBool(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBytes:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBytes:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBytes(p + uintptr(code.Offset))
+			if len(v) > 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBytesPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumber:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumber:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumberString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = append(bb, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumberString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumberPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				code = code.NextField
+				break
+			}
+			iface := ptrToInterface(code, p)
+			if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) {
+				code = code.NextField
+				break
+			}
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendMarshalJSON(ctx, code, b, iface)
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructFieldMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldMarshalText:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalText:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				code = code.NextField
+				break
+			}
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructFieldMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldArray:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyArray:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldArrayPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyArrayPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructFieldSlice:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptySlice:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			slice := ptrToSlice(p)
+			if slice.Len == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldSlicePtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptySlicePtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructFieldMap:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyMap:
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldMapPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p != 0 {
+				p = ptrToNPtr(p, code.PtrNum)
+			}
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyMapPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p != 0 {
+				p = ptrToNPtr(p, code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructFieldStruct:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyStruct:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructEnd:
+			b = appendStructEndSkipLast(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndInt:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndIntString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndIntPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p, code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndIntPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUint:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUint:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUintString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUintString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUintPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendUint(ctx, b, p, code)
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p, code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUintPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32String:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32Ptr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32PtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat64(ctx, b, v)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat64(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat64(ctx, b, v)
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64Ptr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+				b = appendStructEnd(ctx, code, b)
+				code = code.Next
+				break
+			}
+			v := ptrToFloat64(p)
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64PtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndStringString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			s := ptrToString(p + uintptr(code.Offset))
+			b = appendString(ctx, b, string(appendString(ctx, []byte{}, s)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyStringString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, v)))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndStringPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, ptrToString(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, ptrToString(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndStringPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBool:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBool:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBoolString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBoolString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBoolPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendBool(ctx, b, ptrToBool(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBoolPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBytes:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBytes:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBytes(p + uintptr(code.Offset))
+			if len(v) > 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBytesPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumber:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendStructEnd(ctx, code, bb)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumber:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendStructEnd(ctx, code, bb)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = append(bb, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendStructEnd(ctx, code, bb)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpEnd:
+			goto END
+		}
+	}
+END:
+	return b, nil
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go
new file mode 100644
index 0000000000..99395388c1
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go
@@ -0,0 +1,35 @@
+package vm_indent
+
+import (
+	"fmt"
+
+	"github.com/goccy/go-json/internal/encoder"
+)
+
+func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
+	var code *encoder.Opcode
+	if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+		code = codeSet.EscapeKeyCode
+	} else {
+		code = codeSet.NoescapeKeyCode
+	}
+
+	defer func() {
+		if err := recover(); err != nil {
+			w := ctx.Option.DebugOut
+			fmt.Fprintln(w, "=============[DEBUG]===============")
+			fmt.Fprintln(w, "* [TYPE]")
+			fmt.Fprintln(w, codeSet.Type)
+			fmt.Fprintf(w, "\n")
+			fmt.Fprintln(w, "* [ALL OPCODE]")
+			fmt.Fprintln(w, code.Dump())
+			fmt.Fprintf(w, "\n")
+			fmt.Fprintln(w, "* [CONTEXT]")
+			fmt.Fprintf(w, "%+v\n", ctx)
+			fmt.Fprintln(w, "===================================")
+			panic(err)
+		}
+	}()
+
+	return Run(ctx, b, codeSet)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go
new file mode 100644
index 0000000000..9e245bfe57
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go
@@ -0,0 +1,9 @@
+package vm_indent
+
+import (
+	// HACK: compile order
+	// The `vm`, `vm_indent`, `vm_color`, and `vm_color_indent` packages use a lot of memory to compile,
+	// so we forcibly introduce dependencies between them to avoid compiling them concurrently.
+	// dependency order: vm => vm_indent => vm_color => vm_color_indent
+	_ "github.com/goccy/go-json/internal/encoder/vm_color"
+)
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go
new file mode 100644
index 0000000000..6cb745e393
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go
@@ -0,0 +1,230 @@
+package vm_indent
+
+import (
+	"encoding/json"
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/encoder"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+const uintptrSize = 4 << (^uintptr(0) >> 63)
+
+var (
+	appendInt           = encoder.AppendInt
+	appendUint          = encoder.AppendUint
+	appendFloat32       = encoder.AppendFloat32
+	appendFloat64       = encoder.AppendFloat64
+	appendString        = encoder.AppendString
+	appendByteSlice     = encoder.AppendByteSlice
+	appendNumber        = encoder.AppendNumber
+	appendStructEnd     = encoder.AppendStructEndIndent
+	appendIndent        = encoder.AppendIndent
+	errUnsupportedValue = encoder.ErrUnsupportedValue
+	errUnsupportedFloat = encoder.ErrUnsupportedFloat
+	mapiterinit         = encoder.MapIterInit
+	mapiterkey          = encoder.MapIterKey
+	mapitervalue        = encoder.MapIterValue
+	mapiternext         = encoder.MapIterNext
+	maplen              = encoder.MapLen
+)
+
+type emptyInterface struct {
+	typ *runtime.Type
+	ptr unsafe.Pointer
+}
+
+type nonEmptyInterface struct {
+	itab *struct {
+		ityp *runtime.Type // static interface type
+		typ  *runtime.Type // dynamic concrete type
+		// unused fields...
+	}
+	ptr unsafe.Pointer
+}
+
+func errUnimplementedOp(op encoder.OpType) error {
+	return fmt.Errorf("encoder (indent): opcode %s has not been implemented", op)
+}
+
+func load(base uintptr, idx uint32) uintptr {
+	addr := base + uintptr(idx)
+	return **(**uintptr)(unsafe.Pointer(&addr))
+}
+
+func store(base uintptr, idx uint32, p uintptr) {
+	addr := base + uintptr(idx)
+	**(**uintptr)(unsafe.Pointer(&addr)) = p
+}
+
+func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr {
+	addr := base + uintptr(idx)
+	p := **(**uintptr)(unsafe.Pointer(&addr))
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUint64(p uintptr, bitSize uint8) uint64 {
+	switch bitSize {
+	case 8:
+		return (uint64)(**(**uint8)(unsafe.Pointer(&p)))
+	case 16:
+		return (uint64)(**(**uint16)(unsafe.Pointer(&p)))
+	case 32:
+		return (uint64)(**(**uint32)(unsafe.Pointer(&p)))
+	case 64:
+		return **(**uint64)(unsafe.Pointer(&p))
+	}
+	return 0
+}
+func ptrToFloat32(p uintptr) float32            { return **(**float32)(unsafe.Pointer(&p)) }
+func ptrToFloat64(p uintptr) float64            { return **(**float64)(unsafe.Pointer(&p)) }
+func ptrToBool(p uintptr) bool                  { return **(**bool)(unsafe.Pointer(&p)) }
+func ptrToBytes(p uintptr) []byte               { return **(**[]byte)(unsafe.Pointer(&p)) }
+func ptrToNumber(p uintptr) json.Number         { return **(**json.Number)(unsafe.Pointer(&p)) }
+func ptrToString(p uintptr) string              { return **(**string)(unsafe.Pointer(&p)) }
+func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) }
+func ptrToPtr(p uintptr) uintptr {
+	return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p)))
+}
+func ptrToNPtr(p uintptr, ptrNum uint8) uintptr {
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUnsafePtr(p uintptr) unsafe.Pointer {
+	return *(*unsafe.Pointer)(unsafe.Pointer(&p))
+}
+func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} {
+	return *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: code.Type,
+		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)),
+	}))
+}
+
+func appendBool(_ *encoder.RuntimeContext, b []byte, v bool) []byte {
+	if v {
+		return append(b, "true"...)
+	}
+	return append(b, "false"...)
+}
+
+func appendNull(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, "null"...)
+}
+
+func appendComma(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, ',', '\n')
+}
+
+func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, "null,\n"...)
+}
+
+func appendColon(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b[:len(b)-2], ':', ' ')
+}
+
+func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte {
+	b = appendIndent(ctx, b, code.Indent+1)
+	b = append(b, key...)
+	b[len(b)-2] = ':'
+	b[len(b)-1] = ' '
+	return append(b, value...)
+}
+
+func appendMapEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = b[:len(b)-2]
+	b = append(b, '\n')
+	b = appendIndent(ctx, b, code.Indent)
+	return append(b, '}', ',', '\n')
+}
+
+func appendArrayHead(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = append(b, '[', '\n')
+	return appendIndent(ctx, b, code.Indent+1)
+}
+
+func appendArrayEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = b[:len(b)-2]
+	b = append(b, '\n')
+	b = appendIndent(ctx, b, code.Indent)
+	return append(b, ']', ',', '\n')
+}
+
+func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '[', ']', ',', '\n')
+}
+
+func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '{', '}', ',', '\n')
+}
+
+func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	// replace the trailing comma with a newline
+	b[last-1] = '\n'
+	b = appendIndent(ctx, b[:last], code.Indent)
+	return append(b, '}', ',', '\n')
+}
+
+func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	return encoder.AppendMarshalJSONIndent(ctx, code, b, v)
+}
+
+func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	return encoder.AppendMarshalTextIndent(ctx, code, b, v)
+}
+
+func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '{', '\n')
+}
+
+func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = appendIndent(ctx, b, code.Indent)
+	b = append(b, code.Key...)
+	return append(b, ' ')
+}
+
+func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	if b[last-1] == '{' {
+		b[last] = '}'
+	} else {
+		if b[last] == '\n' {
+			// to remove ',' and '\n' characters
+			b = b[:len(b)-2]
+		}
+		b = append(b, '\n')
+		b = appendIndent(ctx, b, code.Indent-1)
+		b = append(b, '}')
+	}
+	return appendComma(ctx, b)
+}
+
+func restoreIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, ctxptr uintptr) {
+	ctx.BaseIndent = uint32(load(ctxptr, code.Length))
+}
+
+func storeIndent(ctxptr uintptr, code *encoder.Opcode, indent uintptr) {
+	store(ctxptr, code.Length, indent)
+}
+
+func appendArrayElemIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	return appendIndent(ctx, b, code.Indent+1)
+}
+
+func appendMapKeyIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	return appendIndent(ctx, b, code.Indent)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go
new file mode 100644
index 0000000000..836c5c8a85
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go
@@ -0,0 +1,4859 @@
+// Code generated by internal/cmd/generator. DO NOT EDIT!
+package vm_indent
+
+import (
+	"math"
+	"reflect"
+	"sort"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/encoder"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
+	recursiveLevel := 0
+	ptrOffset := uintptr(0)
+	ctxptr := ctx.Ptr()
+	var code *encoder.Opcode
+	if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+		code = codeSet.EscapeKeyCode
+	} else {
+		code = codeSet.NoescapeKeyCode
+	}
+
+	for {
+		switch code.Op {
+		default:
+			return nil, errUnimplementedOp(code.Op)
+		case encoder.OpPtr:
+			p := load(ctxptr, code.Idx)
+			code = code.Next
+			store(ctxptr, code.Idx, ptrToPtr(p))
+		case encoder.OpIntPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpInt:
+			b = appendInt(ctx, b, load(ctxptr, code.Idx), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpUintPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpUint:
+			b = appendUint(ctx, b, load(ctxptr, code.Idx), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpIntString:
+			b = append(b, '"')
+			b = appendInt(ctx, b, load(ctxptr, code.Idx), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpUintString:
+			b = append(b, '"')
+			b = appendUint(ctx, b, load(ctxptr, code.Idx), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpFloat32Ptr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+				b = appendComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpFloat32:
+			b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpFloat64Ptr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpFloat64:
+			v := ptrToFloat64(load(ctxptr, code.Idx))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStringPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpString:
+			b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpBoolPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpBool:
+			b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpBytesPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpBytes:
+			b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpNumberPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpNumber:
+			bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpInterfacePtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpInterface:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			if recursiveLevel > encoder.StartDetectingCyclesAfter {
+				for _, seen := range ctx.SeenPtr {
+					if p == seen {
+						return nil, errUnsupportedValue(code, p)
+					}
+				}
+			}
+			ctx.SeenPtr = append(ctx.SeenPtr, p)
+			var (
+				typ      *runtime.Type
+				ifacePtr unsafe.Pointer
+			)
+			up := ptrToUnsafePtr(p)
+			if code.Flags&encoder.NonEmptyInterfaceFlags != 0 {
+				iface := (*nonEmptyInterface)(up)
+				ifacePtr = iface.ptr
+				if iface.itab != nil {
+					typ = iface.itab.typ
+				}
+			} else {
+				iface := (*emptyInterface)(up)
+				ifacePtr = iface.ptr
+				typ = iface.typ
+			}
+			if ifacePtr == nil {
+				isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ)
+				if !isDirectedNil {
+					b = appendNullComma(ctx, b)
+					code = code.Next
+					break
+				}
+			}
+			ctx.KeepRefs = append(ctx.KeepRefs, up)
+			ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ)))
+			if err != nil {
+				return nil, err
+			}
+
+			totalLength := uintptr(code.Length) + 3
+			nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3
+
+			var c *encoder.Opcode
+			if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+				c = ifaceCodeSet.InterfaceEscapeKeyCode
+			} else {
+				c = ifaceCodeSet.InterfaceNoescapeKeyCode
+			}
+			curlen := uintptr(len(ctx.Ptrs))
+			offsetNum := ptrOffset / uintptrSize
+			oldOffset := ptrOffset
+			ptrOffset += totalLength * uintptrSize
+			oldBaseIndent := ctx.BaseIndent
+			ctx.BaseIndent += code.Indent
+
+			newLen := offsetNum + totalLength + nextTotalLength
+			if curlen < newLen {
+				ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...)
+			}
+			ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr
+
+			end := ifaceCodeSet.EndCode
+			store(ctxptr, c.Idx, uintptr(ifacePtr))
+			store(ctxptr, end.Idx, oldOffset)
+			store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next)))
+			storeIndent(ctxptr, end, uintptr(oldBaseIndent))
+			code = c
+			recursiveLevel++
+		case encoder.OpInterfaceEnd:
+			recursiveLevel--
+
+			// restore ctxptr
+			offset := load(ctxptr, code.Idx)
+			restoreIndent(ctx, code, ctxptr)
+			ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1]
+
+			codePtr := load(ctxptr, code.ElemIdx)
+			code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr))
+			ctxptr = ctx.Ptr() + offset
+			ptrOffset = offset
+		case encoder.OpMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToPtr(p))
+			fallthrough
+		case encoder.OpMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToPtr(p))
+			fallthrough
+		case encoder.OpMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = append(b, `""`...)
+				b = appendComma(ctx, b)
+				code = code.Next
+				break
+			}
+			if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpSlicePtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpSlice:
+			p := load(ctxptr, code.Idx)
+			slice := ptrToSlice(p)
+			if p == 0 || slice.Data == nil {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.ElemIdx, 0)
+			store(ctxptr, code.Length, uintptr(slice.Len))
+			store(ctxptr, code.Idx, uintptr(slice.Data))
+			if slice.Len > 0 {
+				b = appendArrayHead(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, uintptr(slice.Data))
+			} else {
+				b = appendEmptyArray(ctx, b)
+				code = code.End.Next
+			}
+		case encoder.OpSliceElem:
+			idx := load(ctxptr, code.ElemIdx)
+			length := load(ctxptr, code.Length)
+			idx++
+			if idx < length {
+				b = appendArrayElemIndent(ctx, code, b)
+				store(ctxptr, code.ElemIdx, idx)
+				data := load(ctxptr, code.Idx)
+				size := uintptr(code.Size)
+				code = code.Next
+				store(ctxptr, code.Idx, data+idx*size)
+			} else {
+				b = appendArrayEnd(ctx, code, b)
+				code = code.End.Next
+			}
+		case encoder.OpArrayPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpArray:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			if code.Length > 0 {
+				b = appendArrayHead(ctx, code, b)
+				store(ctxptr, code.ElemIdx, 0)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				b = appendEmptyArray(ctx, b)
+				code = code.End.Next
+			}
+		case encoder.OpArrayElem:
+			idx := load(ctxptr, code.ElemIdx)
+			idx++
+			if idx < uintptr(code.Length) {
+				b = appendArrayElemIndent(ctx, code, b)
+				store(ctxptr, code.ElemIdx, idx)
+				p := load(ctxptr, code.Idx)
+				size := uintptr(code.Size)
+				code = code.Next
+				store(ctxptr, code.Idx, p+idx*size)
+			} else {
+				b = appendArrayEnd(ctx, code, b)
+				code = code.End.Next
+			}
+		case encoder.OpMapPtr:
+			p := loadNPtr(ctxptr, code.Idx, code.PtrNum)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, p)
+			fallthrough
+		case encoder.OpMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.End.Next
+				break
+			}
+			uptr := ptrToUnsafePtr(p)
+			mlen := maplen(uptr)
+			if mlen <= 0 {
+				b = appendEmptyObject(ctx, b)
+				code = code.End.Next
+				break
+			}
+			b = appendStructHead(ctx, b)
+			unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0
+			mapCtx := encoder.NewMapContext(mlen, unorderedMap)
+			mapiterinit(code.Type, uptr, &mapCtx.Iter)
+			store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx)))
+			ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx))
+			if unorderedMap {
+				b = appendMapKeyIndent(ctx, code.Next, b)
+			} else {
+				mapCtx.Start = len(b)
+				mapCtx.First = len(b)
+			}
+			key := mapiterkey(&mapCtx.Iter)
+			store(ctxptr, code.Next.Idx, uintptr(key))
+			code = code.Next
+		case encoder.OpMapKey:
+			mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx)))
+			idx := mapCtx.Idx
+			idx++
+			if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 {
+				if idx < mapCtx.Len {
+					b = appendMapKeyIndent(ctx, code, b)
+					mapCtx.Idx = int(idx)
+					key := mapiterkey(&mapCtx.Iter)
+					store(ctxptr, code.Next.Idx, uintptr(key))
+					code = code.Next
+				} else {
+					b = appendObjectEnd(ctx, code, b)
+					encoder.ReleaseMapContext(mapCtx)
+					code = code.End.Next
+				}
+			} else {
+				mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)]
+				if idx < mapCtx.Len {
+					mapCtx.Idx = int(idx)
+					mapCtx.Start = len(b)
+					key := mapiterkey(&mapCtx.Iter)
+					store(ctxptr, code.Next.Idx, uintptr(key))
+					code = code.Next
+				} else {
+					code = code.End
+				}
+			}
+		case encoder.OpMapValue:
+			mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx)))
+			if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 {
+				b = appendColon(ctx, b)
+			} else {
+				mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)]
+				mapCtx.Start = len(b)
+			}
+			value := mapitervalue(&mapCtx.Iter)
+			store(ctxptr, code.Next.Idx, uintptr(value))
+			mapiternext(&mapCtx.Iter)
+			code = code.Next
+		case encoder.OpMapEnd:
+			// this operation is only used by the sorted map.
+			mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx)))
+			sort.Sort(mapCtx.Slice)
+			buf := mapCtx.Buf
+			for _, item := range mapCtx.Slice.Items {
+				buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value)
+			}
+			buf = appendMapEnd(ctx, code, buf)
+			b = b[:mapCtx.First]
+			b = append(b, buf...)
+			mapCtx.Buf = buf
+			encoder.ReleaseMapContext(mapCtx)
+			code = code.Next
+		case encoder.OpRecursivePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				code = code.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpRecursive:
+			ptr := load(ctxptr, code.Idx)
+			if ptr != 0 {
+				if recursiveLevel > encoder.StartDetectingCyclesAfter {
+					for _, seen := range ctx.SeenPtr {
+						if ptr == seen {
+							return nil, errUnsupportedValue(code, ptr)
+						}
+					}
+				}
+			}
+			ctx.SeenPtr = append(ctx.SeenPtr, ptr)
+			c := code.Jmp.Code
+			curlen := uintptr(len(ctx.Ptrs))
+			offsetNum := ptrOffset / uintptrSize
+			oldOffset := ptrOffset
+			ptrOffset += code.Jmp.CurLen * uintptrSize
+			oldBaseIndent := ctx.BaseIndent
+			indentDiffFromTop := c.Indent - 1
+			ctx.BaseIndent += code.Indent - indentDiffFromTop
+
+			newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen
+			if curlen < newLen {
+				ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...)
+			}
+			ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr
+
+			store(ctxptr, c.Idx, ptr)
+			store(ctxptr, c.End.Next.Idx, oldOffset)
+			store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next)))
+			storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent))
+			code = c
+			recursiveLevel++
+		case encoder.OpRecursiveEnd:
+			recursiveLevel--
+
+			// restore ctxptr
+			restoreIndent(ctx, code, ctxptr)
+			offset := load(ctxptr, code.Idx)
+			ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1]
+
+			codePtr := load(ctxptr, code.ElemIdx)
+			code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr))
+			ctxptr = ctx.Ptr() + offset
+			ptrOffset = offset
+		case encoder.OpStructPtrHead:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHead:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if len(code.Key) > 0 {
+				if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 {
+					b = appendStructKey(ctx, code, b)
+				}
+			}
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadInt:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadInt:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyInt:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadIntString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadIntString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyIntString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			u64 := ptrToUint64(p, code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadUint:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadUint:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUint:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUint:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadUintString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadUintString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUintString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUintString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendUint(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat32:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat32:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat32String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat32String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat64:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat64:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat64String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadFloat64String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat64(ctx, b, v)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64String:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64String:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v == 0 {
+				code = code.NextField
+			} else {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNull(ctx, b)
+					b = appendComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToString(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadStringString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadStringString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset)))))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyStringString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyStringString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToString(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, v)))
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, ptrToString(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, ptrToString(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadBool:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadBool:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBool:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBool:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructPtrHeadBoolString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadBoolString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBoolString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBoolString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructPtrHeadBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendBool(ctx, b, ptrToBool(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadBytes:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadBytes:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBytes:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBytes:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToBytes(p + uintptr(code.Offset))
+			if len(v) == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, v)
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadNumber:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadNumber:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumber:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumber:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadNumberString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadNumberString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = append(bb, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumberString:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumberString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v == "" {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadArray, encoder.OpStructHeadSlice:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmptyArray:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyArray:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			b = appendStructKey(ctx, code, b)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmptySlice:
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p := load(ctxptr, code.Idx)
+				if p == 0 {
+					if code.Flags&encoder.AnonymousHeadFlags == 0 {
+						b = appendNullComma(ctx, b)
+					}
+					code = code.End.Next
+					break
+				}
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptySlice:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			slice := ptrToSlice(p)
+			if slice.Len == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.NextField
+			} else {
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p + uintptr(code.Offset))
+			}
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructPtrHeadOmitEmptyMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMap:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				p = ptrToPtr(p + uintptr(code.Offset))
+			}
+			if maplen(ptrToUnsafePtr(p)) == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.NextField
+				break
+			}
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.NextField
+			} else {
+				if (code.Flags & encoder.IndirectFlags) != 0 {
+					p = ptrToNPtr(p, code.PtrNum)
+				}
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadOmitEmptyMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMapPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+				break
+			}
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p == 0 {
+				code = code.NextField
+			} else {
+				if (code.Flags & encoder.IndirectFlags) != 0 {
+					p = ptrToNPtr(p, code.PtrNum)
+				}
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructPtrHeadMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON {
+					p = ptrToPtr(p)
+				}
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON {
+					p = ptrToPtr(p)
+				}
+			}
+			iface := ptrToInterface(code, p)
+			if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalJSON(ctx, code, b, iface)
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText {
+					p = ptrToPtr(p)
+				}
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			}
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalText:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText {
+					p = ptrToPtr(p)
+				}
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructPtrHeadMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructField:
+			if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 {
+				b = appendStructKey(ctx, code, b)
+			}
+			p := load(ctxptr, code.Idx) + uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldInt:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUint:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUint:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUintString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUintString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUintPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendUint(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p, code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldUintPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32String:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat64(ctx, b, v)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNullComma(ctx, b)
+				code = code.Next
+				break
+			}
+			v := ptrToFloat64(p)
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldStringString:
+			p := load(ctxptr, code.Idx)
+			s := ptrToString(p + uintptr(code.Offset))
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, string(appendString(ctx, []byte{}, s)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyStringString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, v)))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldStringPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, ptrToString(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, ptrToString(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldStringPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBool:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBool:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBoolString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBoolString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, v)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBoolPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendBool(ctx, b, ptrToBool(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBytes:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset)))
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBytes:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBytes(p + uintptr(code.Offset))
+			if len(v) > 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, v)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldBytesPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumber:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumber:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumberString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = append(bb, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumberString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumberPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalJSON:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				code = code.NextField
+				break
+			}
+			iface := ptrToInterface(code, p)
+			if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) {
+				code = code.NextField
+				break
+			}
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendMarshalJSON(ctx, code, b, iface)
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructFieldMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalJSONPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldMarshalText:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalText:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if (code.Flags & encoder.IsNilableTypeFlags) != 0 {
+				p = ptrToPtr(p)
+			}
+			if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 {
+				code = code.NextField
+				break
+			}
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+			if err != nil {
+				return nil, err
+			}
+			b = appendComma(ctx, bb)
+			code = code.Next
+		case encoder.OpStructFieldMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendComma(ctx, bb)
+			}
+			code = code.Next
+		case encoder.OpStructFieldArray:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyArray:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldArrayPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyArrayPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructFieldSlice:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptySlice:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			slice := ptrToSlice(p)
+			if slice.Len == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldSlicePtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptySlicePtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructFieldMap:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyMap:
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldMapPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p != 0 {
+				p = ptrToNPtr(p, code.PtrNum)
+			}
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyMapPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToPtr(p + uintptr(code.Offset))
+			if p != 0 {
+				p = ptrToNPtr(p, code.PtrNum)
+			}
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			} else {
+				code = code.NextField
+			}
+		case encoder.OpStructFieldStruct:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmptyStruct:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructEnd:
+			b = appendStructEndSkipLast(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndInt:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndIntString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndIntPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p, code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndIntPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyIntPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUint:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUint:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUintString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUintString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUintPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendUint(ctx, b, p, code)
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUintPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendUint(ctx, b, p, code)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndUintPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyUintPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendUint(ctx, b, p, code)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32String:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat32(p + uintptr(code.Offset))
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32Ptr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat32PtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat32PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat32(ctx, b, ptrToFloat32(p))
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = appendFloat64(ctx, b, v)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = appendFloat64(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendFloat64(ctx, b, v)
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64String:
+			p := load(ctxptr, code.Idx)
+			v := ptrToFloat64(p + uintptr(code.Offset))
+			if v != 0 {
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64Ptr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+				b = appendStructEnd(ctx, code, b)
+				code = code.Next
+				break
+			}
+			v := ptrToFloat64(p)
+			if math.IsInf(v, 0) || math.IsNaN(v) {
+				return nil, errUnsupportedFloat(v)
+			}
+			b = appendFloat64(ctx, b, v)
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64Ptr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndFloat64PtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyFloat64PtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				v := ptrToFloat64(p)
+				if math.IsInf(v, 0) || math.IsNaN(v) {
+					return nil, errUnsupportedFloat(v)
+				}
+				b = append(b, '"')
+				b = appendFloat64(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndStringString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			s := ptrToString(p + uintptr(code.Offset))
+			b = appendString(ctx, b, string(appendString(ctx, []byte{}, s)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyStringString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToString(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, v)))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndStringPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, ptrToString(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyStringPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, ptrToString(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndStringPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyStringPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p))))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBool:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBool:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBoolString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset)))
+			b = append(b, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBoolString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBool(p + uintptr(code.Offset))
+			if v {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, v)
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBoolPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendBool(ctx, b, ptrToBool(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBoolPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBoolPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBoolPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendBool(ctx, b, ptrToBool(p))
+				b = append(b, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBytes:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset)))
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBytes:
+			p := load(ctxptr, code.Idx)
+			v := ptrToBytes(p + uintptr(code.Offset))
+			if len(v) > 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, v)
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndBytesPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyBytesPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendByteSlice(ctx, b, ptrToBytes(p))
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumber:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = appendStructEnd(ctx, code, bb)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumber:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendStructEnd(ctx, code, bb)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset)))
+			if err != nil {
+				return nil, err
+			}
+			b = append(bb, '"')
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberString:
+			p := load(ctxptr, code.Idx)
+			v := ptrToNumber(p + uintptr(code.Offset))
+			if v != "" {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, v)
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberPtr:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendStructEnd(ctx, code, bb)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpEnd:
+			goto END
+		}
+	}
+END:
+	return b, nil
+}
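The loop that ends above is the tail of the encoder's bytecode interpreter: each compiled type is a linked list of opcodes, and every case loads a base pointer from the context, adds the field offset, appends bytes to the buffer, and advances to code.Next (or code.NextField when an omitempty check skips the key). What follows is a minimal, self-contained sketch of that pattern; the op, opKind, run, and User names are invented for illustration, and the real encoder adds pointer indirection, string/omitempty variants, and buffer pooling.

package main

import (
	"fmt"
	"strconv"
	"unsafe"
)

type opKind int

const (
	opStructFieldInt opKind = iota
	opStructFieldString
	opStructEnd
	opEnd
)

// op mirrors the idea of an encoder opcode: what to emit, the JSON key,
// the field offset inside the struct, and the next instruction.
type op struct {
	kind   opKind
	key    string
	offset uintptr
	next   *op
}

type User struct {
	ID   int
	Name string
}

// run walks the instruction list, reading fields straight off the struct
// pointer with unsafe offsets, and appends JSON into b.
func run(b []byte, base unsafe.Pointer, code *op) []byte {
	b = append(b, '{')
	for code != nil {
		switch code.kind {
		case opStructFieldInt:
			b = append(b, '"')
			b = append(b, code.key...)
			b = append(b, '"', ':')
			v := *(*int)(unsafe.Pointer(uintptr(base) + code.offset))
			b = strconv.AppendInt(b, int64(v), 10)
			b = append(b, ',')
		case opStructFieldString:
			b = append(b, '"')
			b = append(b, code.key...)
			b = append(b, '"', ':')
			b = strconv.AppendQuote(b, *(*string)(unsafe.Pointer(uintptr(base)+code.offset)))
			b = append(b, ',')
		case opStructEnd:
			// Replace the trailing comma with the closing brace, in the
			// spirit of appendStructEnd / appendStructEndSkipLast above.
			b[len(b)-1] = '}'
		case opEnd:
			return b
		}
		code = code.next
	}
	return b
}

func main() {
	end := &op{kind: opEnd}
	structEnd := &op{kind: opStructEnd, next: end}
	name := &op{kind: opStructFieldString, key: "name", offset: unsafe.Offsetof(User{}.Name), next: structEnd}
	id := &op{kind: opStructFieldInt, key: "id", offset: unsafe.Offsetof(User{}.ID), next: name}

	u := User{ID: 7, Name: "gopher"}
	fmt.Println(string(run(nil, unsafe.Pointer(&u), id))) // {"id":7,"name":"gopher"}
}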
diff --git a/vendor/github.com/goccy/go-json/internal/errors/error.go b/vendor/github.com/goccy/go-json/internal/errors/error.go
new file mode 100644
index 0000000000..9207d0ff25
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/errors/error.go
@@ -0,0 +1,183 @@
+package errors
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+type InvalidUTF8Error struct {
+	S string // the whole string value that caused the error
+}
+
+func (e *InvalidUTF8Error) Error() string {
+	return fmt.Sprintf("json: invalid UTF-8 in string: %s", strconv.Quote(e.S))
+}
+
+type InvalidUnmarshalError struct {
+	Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+	if e.Type == nil {
+		return "json: Unmarshal(nil)"
+	}
+
+	if e.Type.Kind() != reflect.Ptr {
+		return fmt.Sprintf("json: Unmarshal(non-pointer %s)", e.Type)
+	}
+	return fmt.Sprintf("json: Unmarshal(nil %s)", e.Type)
+}
+
+// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method.
+type MarshalerError struct {
+	Type       reflect.Type
+	Err        error
+	sourceFunc string
+}
+
+func (e *MarshalerError) Error() string {
+	srcFunc := e.sourceFunc
+	if srcFunc == "" {
+		srcFunc = "MarshalJSON"
+	}
+	return fmt.Sprintf("json: error calling %s for type %s: %s", srcFunc, e.Type, e.Err.Error())
+}
+
+// Unwrap returns the underlying error.
+func (e *MarshalerError) Unwrap() error { return e.Err }
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+	msg    string // description of error
+	Offset int64  // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+//
+// Deprecated: No longer used; kept for compatibility.
+type UnmarshalFieldError struct {
+	Key   string
+	Type  reflect.Type
+	Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+	return fmt.Sprintf("json: cannot unmarshal object key %s into unexported field %s of type %s",
+		strconv.Quote(e.Key), e.Field.Name, e.Type.String(),
+	)
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+	Value  string       // description of JSON value - "bool", "array", "number -5"
+	Type   reflect.Type // type of Go value it could not be assigned to
+	Offset int64        // error occurred after reading Offset bytes
+	Struct string       // name of the struct type containing the field
+	Field  string       // the full path from root node to the field
+}
+
+func (e *UnmarshalTypeError) Error() string {
+	if e.Struct != "" || e.Field != "" {
+		return fmt.Sprintf("json: cannot unmarshal %s into Go struct field %s.%s of type %s",
+			e.Value, e.Struct, e.Field, e.Type,
+		)
+	}
+	return fmt.Sprintf("json: cannot unmarshal %s into Go value of type %s", e.Value, e.Type)
+}
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+	return fmt.Sprintf("json: unsupported type: %s", e.Type)
+}
+
+type UnsupportedValueError struct {
+	Value reflect.Value
+	Str   string
+}
+
+func (e *UnsupportedValueError) Error() string {
+	return fmt.Sprintf("json: unsupported value: %s", e.Str)
+}
+
+func ErrSyntax(msg string, offset int64) *SyntaxError {
+	return &SyntaxError{msg: msg, Offset: offset}
+}
+
+func ErrMarshaler(typ reflect.Type, err error, msg string) *MarshalerError {
+	return &MarshalerError{
+		Type:       typ,
+		Err:        err,
+		sourceFunc: msg,
+	}
+}
+
+func ErrExceededMaxDepth(c byte, cursor int64) *SyntaxError {
+	return &SyntaxError{
+		msg:    fmt.Sprintf(`invalid character "%c" exceeded max depth`, c),
+		Offset: cursor,
+	}
+}
+
+func ErrNotAtBeginningOfValue(cursor int64) *SyntaxError {
+	return &SyntaxError{msg: "not at beginning of value", Offset: cursor}
+}
+
+func ErrUnexpectedEndOfJSON(msg string, cursor int64) *SyntaxError {
+	return &SyntaxError{
+		msg:    fmt.Sprintf("json: %s unexpected end of JSON input", msg),
+		Offset: cursor,
+	}
+}
+
+func ErrExpected(msg string, cursor int64) *SyntaxError {
+	return &SyntaxError{msg: fmt.Sprintf("expected %s", msg), Offset: cursor}
+}
+
+func ErrInvalidCharacter(c byte, context string, cursor int64) *SyntaxError {
+	if c == 0 {
+		return &SyntaxError{
+			msg:    fmt.Sprintf("json: invalid character as %s", context),
+			Offset: cursor,
+		}
+	}
+	return &SyntaxError{
+		msg:    fmt.Sprintf("json: invalid character %c as %s", c, context),
+		Offset: cursor,
+	}
+}
+
+func ErrInvalidBeginningOfValue(c byte, cursor int64) *SyntaxError {
+	return &SyntaxError{
+		msg:    fmt.Sprintf("invalid character '%c' looking for beginning of value", c),
+		Offset: cursor,
+	}
+}
+
+type PathError struct {
+	msg string
+}
+
+func (e *PathError) Error() string {
+	return fmt.Sprintf("json: invalid path format: %s", e.msg)
+}
+
+func ErrInvalidPath(msg string, args ...interface{}) *PathError {
+	if len(args) != 0 {
+		return &PathError{msg: fmt.Sprintf(msg, args...)}
+	}
+	return &PathError{msg: msg}
+}
+
+func ErrEmptyPath() *PathError {
+	return &PathError{msg: "path is empty"}
+}
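The constructors above mirror the error taxonomy of encoding/json (SyntaxError, UnmarshalTypeError, MarshalerError, and so on). Assuming the root go-json package re-exports these types as aliases, which is how they normally reach callers, a hedged usage sketch looks like this:

package main

import (
	"errors"
	"fmt"

	json "github.com/goccy/go-json"
)

func main() {
	var v struct {
		Count int `json:"count"`
	}

	// Malformed input: surfaced as a *json.SyntaxError carrying a byte offset.
	if err := json.Unmarshal([]byte(`{"count": }`), &v); err != nil {
		var syn *json.SyntaxError
		if errors.As(err, &syn) {
			fmt.Printf("syntax error at offset %d: %v\n", syn.Offset, syn)
		}
	}

	// Well-formed JSON of the wrong type: typically a *json.UnmarshalTypeError.
	if err := json.Unmarshal([]byte(`{"count": "ten"}`), &v); err != nil {
		var typ *json.UnmarshalTypeError
		if errors.As(err, &typ) {
			fmt.Printf("cannot decode %s into %s\n", typ.Value, typ.Type)
		}
	}
}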
diff --git a/vendor/github.com/goccy/go-json/internal/runtime/rtype.go b/vendor/github.com/goccy/go-json/internal/runtime/rtype.go
new file mode 100644
index 0000000000..4db10debe1
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/runtime/rtype.go
@@ -0,0 +1,263 @@
+package runtime
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// Type representing reflect.rtype for noescape trick
+type Type struct{}
+
+//go:linkname rtype_Align reflect.(*rtype).Align
+//go:noescape
+func rtype_Align(*Type) int
+
+func (t *Type) Align() int {
+	return rtype_Align(t)
+}
+
+//go:linkname rtype_FieldAlign reflect.(*rtype).FieldAlign
+//go:noescape
+func rtype_FieldAlign(*Type) int
+
+func (t *Type) FieldAlign() int {
+	return rtype_FieldAlign(t)
+}
+
+//go:linkname rtype_Method reflect.(*rtype).Method
+//go:noescape
+func rtype_Method(*Type, int) reflect.Method
+
+func (t *Type) Method(a0 int) reflect.Method {
+	return rtype_Method(t, a0)
+}
+
+//go:linkname rtype_MethodByName reflect.(*rtype).MethodByName
+//go:noescape
+func rtype_MethodByName(*Type, string) (reflect.Method, bool)
+
+func (t *Type) MethodByName(a0 string) (reflect.Method, bool) {
+	return rtype_MethodByName(t, a0)
+}
+
+//go:linkname rtype_NumMethod reflect.(*rtype).NumMethod
+//go:noescape
+func rtype_NumMethod(*Type) int
+
+func (t *Type) NumMethod() int {
+	return rtype_NumMethod(t)
+}
+
+//go:linkname rtype_Name reflect.(*rtype).Name
+//go:noescape
+func rtype_Name(*Type) string
+
+func (t *Type) Name() string {
+	return rtype_Name(t)
+}
+
+//go:linkname rtype_PkgPath reflect.(*rtype).PkgPath
+//go:noescape
+func rtype_PkgPath(*Type) string
+
+func (t *Type) PkgPath() string {
+	return rtype_PkgPath(t)
+}
+
+//go:linkname rtype_Size reflect.(*rtype).Size
+//go:noescape
+func rtype_Size(*Type) uintptr
+
+func (t *Type) Size() uintptr {
+	return rtype_Size(t)
+}
+
+//go:linkname rtype_String reflect.(*rtype).String
+//go:noescape
+func rtype_String(*Type) string
+
+func (t *Type) String() string {
+	return rtype_String(t)
+}
+
+//go:linkname rtype_Kind reflect.(*rtype).Kind
+//go:noescape
+func rtype_Kind(*Type) reflect.Kind
+
+func (t *Type) Kind() reflect.Kind {
+	return rtype_Kind(t)
+}
+
+//go:linkname rtype_Implements reflect.(*rtype).Implements
+//go:noescape
+func rtype_Implements(*Type, reflect.Type) bool
+
+func (t *Type) Implements(u reflect.Type) bool {
+	return rtype_Implements(t, u)
+}
+
+//go:linkname rtype_AssignableTo reflect.(*rtype).AssignableTo
+//go:noescape
+func rtype_AssignableTo(*Type, reflect.Type) bool
+
+func (t *Type) AssignableTo(u reflect.Type) bool {
+	return rtype_AssignableTo(t, u)
+}
+
+//go:linkname rtype_ConvertibleTo reflect.(*rtype).ConvertibleTo
+//go:noescape
+func rtype_ConvertibleTo(*Type, reflect.Type) bool
+
+func (t *Type) ConvertibleTo(u reflect.Type) bool {
+	return rtype_ConvertibleTo(t, u)
+}
+
+//go:linkname rtype_Comparable reflect.(*rtype).Comparable
+//go:noescape
+func rtype_Comparable(*Type) bool
+
+func (t *Type) Comparable() bool {
+	return rtype_Comparable(t)
+}
+
+//go:linkname rtype_Bits reflect.(*rtype).Bits
+//go:noescape
+func rtype_Bits(*Type) int
+
+func (t *Type) Bits() int {
+	return rtype_Bits(t)
+}
+
+//go:linkname rtype_ChanDir reflect.(*rtype).ChanDir
+//go:noescape
+func rtype_ChanDir(*Type) reflect.ChanDir
+
+func (t *Type) ChanDir() reflect.ChanDir {
+	return rtype_ChanDir(t)
+}
+
+//go:linkname rtype_IsVariadic reflect.(*rtype).IsVariadic
+//go:noescape
+func rtype_IsVariadic(*Type) bool
+
+func (t *Type) IsVariadic() bool {
+	return rtype_IsVariadic(t)
+}
+
+//go:linkname rtype_Elem reflect.(*rtype).Elem
+//go:noescape
+func rtype_Elem(*Type) reflect.Type
+
+func (t *Type) Elem() *Type {
+	return Type2RType(rtype_Elem(t))
+}
+
+//go:linkname rtype_Field reflect.(*rtype).Field
+//go:noescape
+func rtype_Field(*Type, int) reflect.StructField
+
+func (t *Type) Field(i int) reflect.StructField {
+	return rtype_Field(t, i)
+}
+
+//go:linkname rtype_FieldByIndex reflect.(*rtype).FieldByIndex
+//go:noescape
+func rtype_FieldByIndex(*Type, []int) reflect.StructField
+
+func (t *Type) FieldByIndex(index []int) reflect.StructField {
+	return rtype_FieldByIndex(t, index)
+}
+
+//go:linkname rtype_FieldByName reflect.(*rtype).FieldByName
+//go:noescape
+func rtype_FieldByName(*Type, string) (reflect.StructField, bool)
+
+func (t *Type) FieldByName(name string) (reflect.StructField, bool) {
+	return rtype_FieldByName(t, name)
+}
+
+//go:linkname rtype_FieldByNameFunc reflect.(*rtype).FieldByNameFunc
+//go:noescape
+func rtype_FieldByNameFunc(*Type, func(string) bool) (reflect.StructField, bool)
+
+func (t *Type) FieldByNameFunc(match func(string) bool) (reflect.StructField, bool) {
+	return rtype_FieldByNameFunc(t, match)
+}
+
+//go:linkname rtype_In reflect.(*rtype).In
+//go:noescape
+func rtype_In(*Type, int) reflect.Type
+
+func (t *Type) In(i int) reflect.Type {
+	return rtype_In(t, i)
+}
+
+//go:linkname rtype_Key reflect.(*rtype).Key
+//go:noescape
+func rtype_Key(*Type) reflect.Type
+
+func (t *Type) Key() *Type {
+	return Type2RType(rtype_Key(t))
+}
+
+//go:linkname rtype_Len reflect.(*rtype).Len
+//go:noescape
+func rtype_Len(*Type) int
+
+func (t *Type) Len() int {
+	return rtype_Len(t)
+}
+
+//go:linkname rtype_NumField reflect.(*rtype).NumField
+//go:noescape
+func rtype_NumField(*Type) int
+
+func (t *Type) NumField() int {
+	return rtype_NumField(t)
+}
+
+//go:linkname rtype_NumIn reflect.(*rtype).NumIn
+//go:noescape
+func rtype_NumIn(*Type) int
+
+func (t *Type) NumIn() int {
+	return rtype_NumIn(t)
+}
+
+//go:linkname rtype_NumOut reflect.(*rtype).NumOut
+//go:noescape
+func rtype_NumOut(*Type) int
+
+func (t *Type) NumOut() int {
+	return rtype_NumOut(t)
+}
+
+//go:linkname rtype_Out reflect.(*rtype).Out
+//go:noescape
+func rtype_Out(*Type, int) reflect.Type
+
+//go:linkname PtrTo reflect.(*rtype).ptrTo
+//go:noescape
+func PtrTo(*Type) *Type
+
+func (t *Type) Out(i int) reflect.Type {
+	return rtype_Out(t, i)
+}
+
+//go:linkname IfaceIndir reflect.ifaceIndir
+//go:noescape
+func IfaceIndir(*Type) bool
+
+//go:linkname RType2Type reflect.toType
+//go:noescape
+func RType2Type(t *Type) reflect.Type
+
+//go:nolint structcheck
+type emptyInterface struct {
+	_   *Type
+	ptr unsafe.Pointer
+}
+
+func Type2RType(t reflect.Type) *Type {
+	return (*Type)(((*emptyInterface)(unsafe.Pointer(&t))).ptr)
+}
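Type2RType works by reinterpreting the two-word interface header behind a reflect.Type so that its data word, the pointer to the runtime type descriptor, can be used directly without an allocation. A small illustration of the same idea is below; the eface struct is our own stand-in (not the library's type), it assumes the gc compiler's interface layout, and it shows the property the encoder relies on: identical Go types share one descriptor address.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type eface struct {
	typ unsafe.Pointer // pointer to the dynamic type's descriptor
	ptr unsafe.Pointer // pointer to the value; for reflect.Type, the *rtype
}

// rtypePtr extracts the data word of the interface backing a reflect.Type.
func rtypePtr(t reflect.Type) unsafe.Pointer {
	return (*eface)(unsafe.Pointer(&t)).ptr
}

func main() {
	a := reflect.TypeOf(struct{ N int }{})
	b := reflect.TypeOf(struct{ N int }{})
	c := reflect.TypeOf("")

	// Identical types resolve to the same runtime descriptor, so the
	// addresses match; this stable address is what type caches key on.
	fmt.Println(rtypePtr(a) == rtypePtr(b)) // true
	fmt.Println(rtypePtr(a) == rtypePtr(c)) // false
}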
diff --git a/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go b/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go
new file mode 100644
index 0000000000..baab0c5978
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go
@@ -0,0 +1,91 @@
+package runtime
+
+import (
+	"reflect"
+	"strings"
+	"unicode"
+)
+
+func getTag(field reflect.StructField) string {
+	return field.Tag.Get("json")
+}
+
+func IsIgnoredStructField(field reflect.StructField) bool {
+	if field.PkgPath != "" {
+		if field.Anonymous {
+			t := field.Type
+			if t.Kind() == reflect.Ptr {
+				t = t.Elem()
+			}
+			if t.Kind() != reflect.Struct {
+				return true
+			}
+		} else {
+			// private field
+			return true
+		}
+	}
+	tag := getTag(field)
+	return tag == "-"
+}
+
+type StructTag struct {
+	Key         string
+	IsTaggedKey bool
+	IsOmitEmpty bool
+	IsString    bool
+	Field       reflect.StructField
+}
+
+type StructTags []*StructTag
+
+func (t StructTags) ExistsKey(key string) bool {
+	for _, tt := range t {
+		if tt.Key == key {
+			return true
+		}
+	}
+	return false
+}
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		case !unicode.IsLetter(c) && !unicode.IsDigit(c):
+			return false
+		}
+	}
+	return true
+}
+
+func StructTagFromField(field reflect.StructField) *StructTag {
+	keyName := field.Name
+	tag := getTag(field)
+	st := &StructTag{Field: field}
+	opts := strings.Split(tag, ",")
+	if len(opts) > 0 {
+		if opts[0] != "" && isValidTag(opts[0]) {
+			keyName = opts[0]
+			st.IsTaggedKey = true
+		}
+	}
+	st.Key = keyName
+	if len(opts) > 1 {
+		for _, opt := range opts[1:] {
+			switch opt {
+			case "omitempty":
+				st.IsOmitEmpty = true
+			case "string":
+				st.IsString = true
+			}
+		}
+	}
+	return st
+}
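StructTagFromField splits the json tag into the key and its options (omitempty, string), and IsIgnoredStructField drops unexported fields and "-" tags. Because internal/runtime cannot be imported from outside the module, here is a sketch of the same parsing written against the standard library only; the Payload type and its fields are made up for the example.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type Payload struct {
	ID      int    `json:"id"`
	Note    string `json:"note,omitempty"`
	Retries int    `json:",string"`
	secret  string // unexported: skipped, as IsIgnoredStructField reports
	Skip    string `json:"-"`
}

func main() {
	t := reflect.TypeOf(Payload{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.PkgPath != "" { // unexported, non-embedded field
			fmt.Printf("%-8s ignored (unexported)\n", f.Name)
			continue
		}
		tag := f.Tag.Get("json")
		if tag == "-" {
			fmt.Printf("%-8s ignored (tag \"-\")\n", f.Name)
			continue
		}
		opts := strings.Split(tag, ",")
		key := f.Name
		if opts[0] != "" {
			key = opts[0] // tagged key, like StructTag.IsTaggedKey
		}
		omitEmpty, asString := false, false
		for _, o := range opts[1:] {
			switch o {
			case "omitempty":
				omitEmpty = true
			case "string":
				asString = true
			}
		}
		fmt.Printf("%-8s key=%q omitempty=%v string=%v\n", f.Name, key, omitEmpty, asString)
	}
}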
diff --git a/vendor/github.com/goccy/go-json/internal/runtime/type.go b/vendor/github.com/goccy/go-json/internal/runtime/type.go
new file mode 100644
index 0000000000..0167cd2c01
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/runtime/type.go
@@ -0,0 +1,100 @@
+package runtime
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type SliceHeader struct {
+	Data unsafe.Pointer
+	Len  int
+	Cap  int
+}
+
+const (
+	maxAcceptableTypeAddrRange = 1024 * 1024 * 2 // 2 Mib
+)
+
+type TypeAddr struct {
+	BaseTypeAddr uintptr
+	MaxTypeAddr  uintptr
+	AddrRange    uintptr
+	AddrShift    uintptr
+}
+
+var (
+	typeAddr        *TypeAddr
+	alreadyAnalyzed bool
+)
+
+//go:linkname typelinks reflect.typelinks
+func typelinks() ([]unsafe.Pointer, [][]int32)
+
+//go:linkname rtypeOff reflect.rtypeOff
+func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer
+
+func AnalyzeTypeAddr() *TypeAddr {
+	defer func() {
+		alreadyAnalyzed = true
+	}()
+	if alreadyAnalyzed {
+		return typeAddr
+	}
+	sections, offsets := typelinks()
+	if len(sections) != 1 {
+		return nil
+	}
+	if len(offsets) != 1 {
+		return nil
+	}
+	section := sections[0]
+	offset := offsets[0]
+	var (
+		min         uintptr = uintptr(^uint(0))
+		max         uintptr = 0
+		isAligned64         = true
+		isAligned32         = true
+	)
+	for i := 0; i < len(offset); i++ {
+		typ := (*Type)(rtypeOff(section, offset[i]))
+		addr := uintptr(unsafe.Pointer(typ))
+		if min > addr {
+			min = addr
+		}
+		if max < addr {
+			max = addr
+		}
+		if typ.Kind() == reflect.Ptr {
+			addr = uintptr(unsafe.Pointer(typ.Elem()))
+			if min > addr {
+				min = addr
+			}
+			if max < addr {
+				max = addr
+			}
+		}
+		isAligned64 = isAligned64 && (addr-min)&63 == 0
+		isAligned32 = isAligned32 && (addr-min)&31 == 0
+	}
+	addrRange := max - min
+	if addrRange == 0 {
+		return nil
+	}
+	var addrShift uintptr
+	if isAligned64 {
+		addrShift = 6
+	} else if isAligned32 {
+		addrShift = 5
+	}
+	cacheSize := addrRange >> addrShift
+	if cacheSize > maxAcceptableTypeAddrRange {
+		return nil
+	}
+	typeAddr = &TypeAddr{
+		BaseTypeAddr: min,
+		MaxTypeAddr:  max,
+		AddrRange:    addrRange,
+		AddrShift:    addrShift,
+	}
+	return typeAddr
+}
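AnalyzeTypeAddr scans every runtime type descriptor once, records the lowest and highest addresses plus a common alignment shift, and returns nil when the resulting window is too large. The payoff is that the encoder can swap a map keyed by type pointer for a flat slice indexed by (addr - base) >> shift. The sketch below illustrates that indexing with invented numbers and a placeholder codeSet type; it is not the library's cache implementation.

package main

import "fmt"

type codeSet struct{ name string }

type typeAddr struct {
	baseTypeAddr uintptr
	addrRange    uintptr
	addrShift    uintptr
}

func main() {
	// Pretend AnalyzeTypeAddr reported a 64-byte-aligned window of type addresses.
	ta := typeAddr{baseTypeAddr: 0x4a0000, addrRange: 0x8000, addrShift: 6}
	cache := make([]*codeSet, (ta.addrRange>>ta.addrShift)+1)

	// index maps a type descriptor address to its slot in the flat cache.
	index := func(typeptr uintptr) uintptr { return (typeptr - ta.baseTypeAddr) >> ta.addrShift }

	// Store and retrieve a compiled codeset for a (fake) descriptor address.
	p := uintptr(0x4a0940)
	cache[index(p)] = &codeSet{name: "encoder for T"}
	fmt.Println(cache[index(p)].name, "at slot", index(p))
}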
diff --git a/vendor/github.com/goccy/go-json/json.go b/vendor/github.com/goccy/go-json/json.go
new file mode 100644
index 0000000000..413cb20bf3
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/json.go
@@ -0,0 +1,371 @@
+package json
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+
+	"github.com/goccy/go-json/internal/encoder"
+)
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+	MarshalJSON() ([]byte, error)
+}
+
+// MarshalerContext is the interface implemented by types that
+// can marshal themselves into valid JSON with context.Context.
+type MarshalerContext interface {
+	MarshalJSON(context.Context) ([]byte, error)
+}
+
+// Unmarshaler is the interface implemented by types
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+//
+// By convention, to approximate the behavior of Unmarshal itself,
+// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
+type Unmarshaler interface {
+	UnmarshalJSON([]byte) error
+}
+
+// UnmarshalerContext is the interface implemented by types
+// that can unmarshal with context.Context a JSON description of themselves.
+type UnmarshalerContext interface {
+	UnmarshalJSON(context.Context, []byte) error
+}
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method and encodes the result as a JSON string.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+// This escaping can be disabled using an Encoder that had SetEscapeHTML(false)
+// called on it.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON value.
+//
+// Struct values encode as JSON objects.
+// Each exported struct field becomes a member of the object, using the
+// field name as the object key, unless the field is omitted for one of the
+// reasons given below.
+//
+// The encoding of each struct field can be customized by the format string
+// stored under the "json" key in the struct field's tag.
+// The format string gives the name of the field, possibly followed by a
+// comma-separated list of options. The name may be empty in order to
+// specify options without overriding the default field name.
+//
+// The "omitempty" option specifies that the field should be omitted
+// from the encoding if the field has an empty value, defined as
+// false, 0, a nil pointer, a nil interface value, and any empty array,
+// slice, map, or string.
+//
+// As a special case, if the field tag is "-", the field is always omitted.
+// Note that a field with name "-" can still be generated using the tag "-,".
+//
+// Examples of struct field tags and their meanings:
+//
+//   // Field appears in JSON as key "myName".
+//   Field int `json:"myName"`
+//
+//   // Field appears in JSON as key "myName" and
+//   // the field is omitted from the object if its value is empty,
+//   // as defined above.
+//   Field int `json:"myName,omitempty"`
+//
+//   // Field appears in JSON as key "Field" (the default), but
+//   // the field is skipped if empty.
+//   // Note the leading comma.
+//   Field int `json:",omitempty"`
+//
+//   // Field is ignored by this package.
+//   Field int `json:"-"`
+//
+//   // Field appears in JSON as key "-".
+//   Field int `json:"-,"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+//    Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, and ASCII punctuation except quotation
+// marks, backslash, and comma.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+//
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+//
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects. The map's key type must either be a
+// string, an integer type, or implement encoding.TextMarshaler. The map keys
+// are sorted and used as JSON object keys by applying the following rules,
+// subject to the UTF-8 coercion described for string values above:
+//   - string keys are used directly
+//   - encoding.TextMarshalers are marshaled
+//   - integer keys are converted to strings
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON value.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON value.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+	return MarshalWithOption(v)
+}
+
+// MarshalNoEscape returns the JSON encoding of v and doesn't escape v.
+func MarshalNoEscape(v interface{}) ([]byte, error) {
+	return marshalNoEscape(v)
+}
+
+// MarshalContext returns the JSON encoding of v with context.Context and EncodeOption.
+func MarshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) {
+	return marshalContext(ctx, v, optFuncs...)
+}
+
+// MarshalWithOption returns the JSON encoding of v with EncodeOption.
+func MarshalWithOption(v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) {
+	return marshal(v, optFuncs...)
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+// Each JSON element in the output will begin on a new line beginning with prefix
+// followed by one or more copies of indent according to the indentation nesting.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	return MarshalIndentWithOption(v, prefix, indent)
+}
+
+// MarshalIndentWithOption is like Marshal but applies Indent to format the output with EncodeOption.
+func MarshalIndentWithOption(v interface{}, prefix, indent string, optFuncs ...EncodeOptionFunc) ([]byte, error) {
+	return marshalIndent(v, prefix, indent, optFuncs...)
+}
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v. If v is nil or not a pointer,
+// Unmarshal returns an InvalidUnmarshalError.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a value implementing the Unmarshaler interface,
+// Unmarshal calls that value's UnmarshalJSON method, including
+// when the input is a JSON null.
+// Otherwise, if the value implements encoding.TextUnmarshaler
+// and the input is a JSON quoted string, Unmarshal calls that value's
+// UnmarshalText method with the unquoted form of the string.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match. By
+// default, object keys which don't have a corresponding struct field are
+// ignored (see Decoder.DisallowUnknownFields for an alternative).
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+//	bool, for JSON booleans
+//	float64, for JSON numbers
+//	string, for JSON strings
+//	[]interface{}, for JSON arrays
+//	map[string]interface{}, for JSON objects
+//	nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
+// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
+// reuses the existing map, keeping existing entries. Unmarshal then stores
+// key-value pairs from the JSON object into the map. The map's key type must
+// either be any string type, an integer, implement json.Unmarshaler, or
+// implement encoding.TextUnmarshaler.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error. In any
+// case, it's not guaranteed that all the remaining fields following
+// the problematic one will be unmarshaled into the target object.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+	return unmarshal(data, v)
+}
+
+// UnmarshalContext parses the JSON-encoded data and stores the result
+// in the value pointed to by v. If v implements the UnmarshalerContext interface,
+// Unmarshal calls it with ctx as an argument.
+func UnmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
+	return unmarshalContext(ctx, data, v)
+}
+
+// UnmarshalWithOption parses the JSON-encoded data with DecodeOption and stores the result
+// in the value pointed to by v.
+func UnmarshalWithOption(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
+	return unmarshal(data, v, optFuncs...)
+}
+
+// UnmarshalNoEscape is like Unmarshal but, mirroring MarshalNoEscape, avoids letting the value
+// passed as v escape to the heap.
+func UnmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
+	return unmarshalNoEscape(data, v, optFuncs...)
+}
+
+// A Token holds a value of one of these types:
+//
+//	Delim, for the four JSON delimiters [ ] { }
+//	bool, for JSON booleans
+//	float64, for JSON numbers
+//	Number, for JSON numbers
+//	string, for JSON string literals
+//	nil, for JSON null
+//
+type Token = json.Token
+
+// A Number represents a JSON number literal.
+type Number = json.Number
+
+// RawMessage is a raw encoded JSON value.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage = json.RawMessage
+
+// A Delim is a JSON array or object delimiter, one of [ ] { or }.
+type Delim = json.Delim
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+	return encoder.Compact(dst, src, false)
+}
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, to make it easier to embed inside other formatted JSON data.
+// Although leading space characters (space, tab, carriage return, newline)
+// at the beginning of src are dropped, trailing space characters
+// at the end of src are preserved and copied to dst.
+// For example, if src has no trailing spaces, neither will dst;
+// if src ends in a trailing newline, so will dst.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+	return encoder.Indent(dst, src, prefix, indent)
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
+func HTMLEscape(dst *bytes.Buffer, src []byte) {
+	var v interface{}
+	dec := NewDecoder(bytes.NewBuffer(src))
+	dec.UseNumber()
+	if err := dec.Decode(&v); err != nil {
+		return
+	}
+	buf, _ := marshal(v)
+	dst.Write(buf)
+}
+
+// Valid reports whether data is a valid JSON encoding.
+func Valid(data []byte) bool {
+	var v interface{}
+	decoder := NewDecoder(bytes.NewReader(data))
+	err := decoder.Decode(&v)
+	if err != nil {
+		return false
+	}
+	if !decoder.More() {
+		return true
+	}
+	return decoder.InputOffset() >= int64(len(data))
+}
+
+func init() {
+	encoder.Marshal = Marshal
+	encoder.Unmarshal = Unmarshal
+}
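Since the API surface above mirrors encoding/json, a minimal sketch of using the vendored go-json package as a drop-in replacement; the Payload struct and its values are hypothetical, used only to illustrate the tag options documented above:

package main

import (
	"fmt"

	json "github.com/goccy/go-json"
)

// Payload is a hypothetical struct illustrating the "json" tag options.
type Payload struct {
	Name  string `json:"myName"`
	Count int    `json:",omitempty"`
	Skip  string `json:"-"`
}

func main() {
	// Marshal behaves like encoding/json.Marshal and honors the struct tags.
	b, err := json.Marshal(Payload{Name: "demo"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"myName":"demo"} — Count is omitted (empty), Skip is ignored

	// Unmarshal is the inverse operation.
	var p Payload
	if err := json.Unmarshal(b, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Name)
}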
diff --git a/vendor/github.com/goccy/go-json/option.go b/vendor/github.com/goccy/go-json/option.go
new file mode 100644
index 0000000000..378031a080
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/option.go
@@ -0,0 +1,79 @@
+package json
+
+import (
+	"io"
+
+	"github.com/goccy/go-json/internal/decoder"
+	"github.com/goccy/go-json/internal/encoder"
+)
+
+type EncodeOption = encoder.Option
+type EncodeOptionFunc func(*EncodeOption)
+
+// UnorderedMap disables sorting of map keys when encoding a map type.
+func UnorderedMap() EncodeOptionFunc {
+	return func(opt *EncodeOption) {
+		opt.Flag |= encoder.UnorderedMapOption
+	}
+}
+
+// DisableHTMLEscape disables escaping of the HTML characters '&', '<', and '>' when encoding strings.
+func DisableHTMLEscape() EncodeOptionFunc {
+	return func(opt *EncodeOption) {
+		opt.Flag &= ^encoder.HTMLEscapeOption
+	}
+}
+
+// DisableNormalizeUTF8 disables UTF-8 normalization when encoding strings.
+// By default, characters in the 0x80 - 0xFF range are checked: invalid UTF-8 sequences are replaced
+// with \ufffd, and \u2028 and \u2029 are escaped, matching encoding/json:
+// https://github.com/golang/go/blob/6178d25fc0b28724b1b5aec2b1b74fc06d9294c7/src/encoding/json/encode.go#L1067-L1093
+// Disabling this normalization can make encoding faster, but use it with care.
+func DisableNormalizeUTF8() EncodeOptionFunc {
+	return func(opt *EncodeOption) {
+		opt.Flag &= ^encoder.NormalizeUTF8Option
+	}
+}
+
+// Debug outputs debug information when a panic occurs during encoding.
+func Debug() EncodeOptionFunc {
+	return func(opt *EncodeOption) {
+		opt.Flag |= encoder.DebugOption
+	}
+}
+
+// DebugWith sets the destination to write debug messages.
+func DebugWith(w io.Writer) EncodeOptionFunc {
+	return func(opt *EncodeOption) {
+		opt.DebugOut = w
+	}
+}
+
+// DebugDOT sets the destination to which the opcode graph is written.
+func DebugDOT(w io.WriteCloser) EncodeOptionFunc {
+	return func(opt *EncodeOption) {
+		opt.DebugDOTOut = w
+	}
+}
+
+// Colorize adds color identifiers to the encoded result string.
+func Colorize(scheme *ColorScheme) EncodeOptionFunc {
+	return func(opt *EncodeOption) {
+		opt.Flag |= encoder.ColorizeOption
+		opt.ColorScheme = scheme
+	}
+}
+
+type DecodeOption = decoder.Option
+type DecodeOptionFunc func(*DecodeOption)
+
+// DecodeFieldPriorityFirstWin changes how duplicate field names are resolved while decoding.
+// By default go-json, like encoding/json, keeps the result of the last evaluation when a field
+// with the same name appears more than once. With this option the result of the first evaluation
+// wins instead. This has a performance advantage, because the remaining input can be skipped
+// once all fields have been evaluated.
+func DecodeFieldPriorityFirstWin() DecodeOptionFunc {
+	return func(opt *DecodeOption) {
+		opt.Flags |= decoder.FirstWinOption
+	}
+}
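A short sketch of composing these option functions with the Marshal/Unmarshal variants defined earlier in the package; the values being encoded and decoded are made up for illustration:

package main

import (
	"fmt"

	json "github.com/goccy/go-json"
)

func main() {
	m := map[string]string{"b": "2", "a": "<1>"}

	// Keep map iteration order (no key sorting) and skip HTML escaping of <, >, &.
	b, err := json.MarshalWithOption(m, json.UnorderedMap(), json.DisableHTMLEscape())
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	// On the decode side, let the first occurrence of a duplicate field win.
	type item struct {
		A string `json:"a"`
	}
	var out item
	if err := json.UnmarshalWithOption([]byte(`{"a":"x","a":"y"}`), &out, json.DecodeFieldPriorityFirstWin()); err != nil {
		panic(err)
	}
	fmt.Println(out.A) // "x" with first-win; "y" would be kept by default
}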
diff --git a/vendor/github.com/goccy/go-json/path.go b/vendor/github.com/goccy/go-json/path.go
new file mode 100644
index 0000000000..38abce78f3
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/path.go
@@ -0,0 +1,84 @@
+package json
+
+import (
+	"reflect"
+
+	"github.com/goccy/go-json/internal/decoder"
+)
+
+// CreatePath creates a JSON Path.
+//
+// JSON Path rules:
+//   $   : root object or element. The JSON Path format must start with this operator, which refers to the outermost level of the JSON-formatted string.
+//   .   : child operator. You can identify child values using dot-notation.
+//   ..  : recursive descent.
+//   []  : subscript operator. If the JSON object is an array, you can use brackets to specify the array index.
+//   [*] : all objects/elements in the array.
+//
+// Reserved words must be properly escaped when included in a Path.
+//
+// Escape rules:
+//   single-quote style escape: e.g. `$['a.b'].c`
+//   double-quote style escape: e.g. `$."a.b".c`
+func CreatePath(p string) (*Path, error) {
+	path, err := decoder.PathString(p).Build()
+	if err != nil {
+		return nil, err
+	}
+	return &Path{path: path}, nil
+}
+
+// Path represents JSON Path.
+type Path struct {
+	path *decoder.Path
+}
+
+// RootSelectorOnly reports whether only the root selector ($) is used.
+func (p *Path) RootSelectorOnly() bool {
+	return p.path.RootSelectorOnly
+}
+
+// UsedSingleQuotePathSelector reports whether single-quote escaping was used when building the JSON Path.
+func (p *Path) UsedSingleQuotePathSelector() bool {
+	return p.path.SingleQuotePathSelector
+}
+
+// UsedDoubleQuotePathSelector reports whether double-quote escaping was used when building the JSON Path.
+func (p *Path) UsedDoubleQuotePathSelector() bool {
+	return p.path.DoubleQuotePathSelector
+}
+
+// Extract extracts the raw JSON fragments that match the path from data.
+func (p *Path) Extract(data []byte, optFuncs ...DecodeOptionFunc) ([][]byte, error) {
+	return extractFromPath(p, data, optFuncs...)
+}
+
+// PathString returns the original JSON Path string.
+func (p *Path) PathString() string {
+	return p.path.String()
+}
+
+// Unmarshal extracts the parts of data matching the JSON Path, decodes them, and stores the result in v.
+func (p *Path) Unmarshal(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
+	contents, err := extractFromPath(p, data, optFuncs...)
+	if err != nil {
+		return err
+	}
+	results := make([]interface{}, 0, len(contents))
+	for _, content := range contents {
+		var result interface{}
+		if err := Unmarshal(content, &result); err != nil {
+			return err
+		}
+		results = append(results, result)
+	}
+	if err := decoder.AssignValue(reflect.ValueOf(results), reflect.ValueOf(v)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Get extracts the value matching the JSON Path from src and stores it in dst.
+func (p *Path) Get(src, dst interface{}) error {
+	return p.path.Get(reflect.ValueOf(src), reflect.ValueOf(dst))
+}
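A minimal sketch of the Path API above; the document and path here are invented for illustration. Path.Unmarshal and Path.Get follow the same pattern, decoding the matched values directly into a Go value or copying between Go values:

package main

import (
	"fmt"

	json "github.com/goccy/go-json"
)

func main() {
	data := []byte(`{"user":{"name":"alice","langs":["go","rust"]}}`)

	// Build a path down to the "name" field.
	path, err := json.CreatePath("$.user.name")
	if err != nil {
		panic(err)
	}

	// Extract returns the raw JSON fragments matching the path.
	fragments, err := path.Extract(data)
	if err != nil {
		panic(err)
	}
	for _, f := range fragments {
		fmt.Println(string(f)) // "alice"
	}
}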
diff --git a/vendor/github.com/goccy/go-json/query.go b/vendor/github.com/goccy/go-json/query.go
new file mode 100644
index 0000000000..4b11cf20df
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/query.go
@@ -0,0 +1,47 @@
+package json
+
+import (
+	"github.com/goccy/go-json/internal/encoder"
+)
+
+type (
+	// FieldQuery lets you dynamically filter the fields of a struct during encoding: create a FieldQuery,
+	// attach it to a context.Context with SetFieldQueryToContext, and pass that context to MarshalContext.
+	// Because this is type-safe, it is faster than filtering through map[string]interface{}.
+	FieldQuery       = encoder.FieldQuery
+	FieldQueryString = encoder.FieldQueryString
+)
+
+var (
+	// FieldQueryFromContext gets the current FieldQuery from a context.Context.
+	FieldQueryFromContext = encoder.FieldQueryFromContext
+	// SetFieldQueryToContext sets the given FieldQuery on a context.Context.
+	SetFieldQueryToContext = encoder.SetFieldQueryToContext
+)
+
+// BuildFieldQuery builds a FieldQuery from field names or sub field queries.
+// Specify the names of the fields you want to keep in the struct type.
+// If a field you want to keep is itself a struct, create a sub field query for it with BuildSubFieldQuery
+// to select which of its fields to keep. This description can be nested recursively.
+func BuildFieldQuery(fields ...FieldQueryString) (*FieldQuery, error) {
+	query, err := Marshal(fields)
+	if err != nil {
+		return nil, err
+	}
+	return FieldQueryString(query).Build()
+}
+
+// BuildSubFieldQuery builds sub field query.
+func BuildSubFieldQuery(name string) *SubFieldQuery {
+	return &SubFieldQuery{name: name}
+}
+
+type SubFieldQuery struct {
+	name string
+}
+
+func (q *SubFieldQuery) Fields(fields ...FieldQueryString) FieldQueryString {
+	query, _ := Marshal(map[string][]FieldQueryString{q.name: fields})
+	return FieldQueryString(query)
+}
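A sketch of the field-query flow described above. The User and Address types are hypothetical, and this assumes SetFieldQueryToContext returns the derived context, as in the upstream go-json documentation:

package main

import (
	"context"
	"fmt"

	json "github.com/goccy/go-json"
)

type Address struct {
	City string
	Zip  string
}

type User struct {
	Name string
	Age  int
	Addr Address
}

func main() {
	// Keep only Name at the top level and City inside Addr.
	query, err := json.BuildFieldQuery(
		"Name",
		json.BuildSubFieldQuery("Addr").Fields("City"),
	)
	if err != nil {
		panic(err)
	}

	// Assumption: SetFieldQueryToContext attaches the query and returns the new context.
	ctx := json.SetFieldQueryToContext(context.Background(), query)

	u := User{Name: "alice", Age: 30, Addr: Address{City: "Kyoto", Zip: "600"}}
	b, err := json.MarshalContext(ctx, u)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // only the queried fields are emitted
}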
diff --git a/vendor/github.com/google/go-github/v50/github/orgs_audit_log.go b/vendor/github.com/google/go-github/v50/github/orgs_audit_log.go
deleted file mode 100644
index 700c233c80..0000000000
--- a/vendor/github.com/google/go-github/v50/github/orgs_audit_log.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2021 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// GetAuditLogOptions sets up optional parameters to query audit-log endpoint.
-type GetAuditLogOptions struct {
-	Phrase  *string `url:"phrase,omitempty"`  // A search phrase. (Optional.)
-	Include *string `url:"include,omitempty"` // Event type includes. Can be one of "web", "git", "all". Default: "web". (Optional.)
-	Order   *string `url:"order,omitempty"`   // The order of audit log events. Can be one of "asc" or "desc". Default: "desc". (Optional.)
-
-	ListCursorOptions
-}
-
-// HookConfig describes metadata about a webhook configuration.
-type HookConfig struct {
-	ContentType *string `json:"content_type,omitempty"`
-	InsecureSSL *string `json:"insecure_ssl,omitempty"`
-	URL         *string `json:"url,omitempty"`
-
-	// Secret is returned obfuscated by GitHub, but it can be set for outgoing requests.
-	Secret *string `json:"secret,omitempty"`
-}
-
-// AuditEntry describes the fields that may be represented by various audit-log "action" entries.
-// For a list of actions see - https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/reviewing-the-audit-log-for-your-organization#audit-log-actions
-type AuditEntry struct {
-	Action                *string     `json:"action,omitempty"` // The name of the action that was performed, for example `user.login` or `repo.create`.
-	Active                *bool       `json:"active,omitempty"`
-	ActiveWas             *bool       `json:"active_was,omitempty"`
-	Actor                 *string     `json:"actor,omitempty"` // The actor who performed the action.
-	BlockedUser           *string     `json:"blocked_user,omitempty"`
-	Business              *string     `json:"business,omitempty"`
-	CancelledAt           *Timestamp  `json:"cancelled_at,omitempty"`
-	CompletedAt           *Timestamp  `json:"completed_at,omitempty"`
-	Conclusion            *string     `json:"conclusion,omitempty"`
-	Config                *HookConfig `json:"config,omitempty"`
-	ConfigWas             *HookConfig `json:"config_was,omitempty"`
-	ContentType           *string     `json:"content_type,omitempty"`
-	CreatedAt             *Timestamp  `json:"created_at,omitempty"`
-	DeployKeyFingerprint  *string     `json:"deploy_key_fingerprint,omitempty"`
-	DocumentID            *string     `json:"_document_id,omitempty"`
-	Emoji                 *string     `json:"emoji,omitempty"`
-	EnvironmentName       *string     `json:"environment_name,omitempty"`
-	Event                 *string     `json:"event,omitempty"`
-	Events                []string    `json:"events,omitempty"`
-	EventsWere            []string    `json:"events_were,omitempty"`
-	Explanation           *string     `json:"explanation,omitempty"`
-	Fingerprint           *string     `json:"fingerprint,omitempty"`
-	HeadBranch            *string     `json:"head_branch,omitempty"`
-	HeadSHA               *string     `json:"head_sha,omitempty"`
-	HookID                *int64      `json:"hook_id,omitempty"`
-	IsHostedRunner        *bool       `json:"is_hosted_runner,omitempty"`
-	JobName               *string     `json:"job_name,omitempty"`
-	LimitedAvailability   *bool       `json:"limited_availability,omitempty"`
-	Message               *string     `json:"message,omitempty"`
-	Name                  *string     `json:"name,omitempty"`
-	OldUser               *string     `json:"old_user,omitempty"`
-	OldPermission         *string     `json:"old_permission,omitempty"` // The permission level for membership changes, for example `admin` or `read`.
-	OpenSSHPublicKey      *string     `json:"openssh_public_key,omitempty"`
-	Org                   *string     `json:"org,omitempty"`
-	Permission            *string     `json:"permission,omitempty"` // The permission level for membership changes, for example `admin` or `read`.
-	PreviousVisibility    *string     `json:"previous_visibility,omitempty"`
-	ReadOnly              *string     `json:"read_only,omitempty"`
-	Repo                  *string     `json:"repo,omitempty"`
-	Repository            *string     `json:"repository,omitempty"`
-	RepositoryPublic      *bool       `json:"repository_public,omitempty"`
-	RunAttempt            *int64      `json:"run_attempt,omitempty"`
-	RunnerGroupID         *int64      `json:"runner_group_id,omitempty"`
-	RunnerGroupName       *string     `json:"runner_group_name,omitempty"`
-	RunnerID              *int64      `json:"runner_id,omitempty"`
-	RunnerLabels          []string    `json:"runner_labels,omitempty"`
-	RunnerName            *string     `json:"runner_name,omitempty"`
-	SecretsPassed         []string    `json:"secrets_passed,omitempty"`
-	SourceVersion         *string     `json:"source_version,omitempty"`
-	StartedAt             *Timestamp  `json:"started_at,omitempty"`
-	TargetLogin           *string     `json:"target_login,omitempty"`
-	TargetVersion         *string     `json:"target_version,omitempty"`
-	Team                  *string     `json:"team,omitempty"`
-	Timestamp             *Timestamp  `json:"@timestamp,omitempty"`              // The time the audit log event occurred, given as a [Unix timestamp](http://en.wikipedia.org/wiki/Unix_time).
-	TransportProtocolName *string     `json:"transport_protocol_name,omitempty"` // A human readable name for the protocol (for example, HTTP or SSH) used to transfer Git data.
-	TransportProtocol     *int        `json:"transport_protocol,omitempty"`      // The type of protocol (for example, HTTP=1 or SSH=2) used to transfer Git data.
-	TriggerID             *int64      `json:"trigger_id,omitempty"`
-	User                  *string     `json:"user,omitempty"`       // The user that was affected by the action performed (if available).
-	Visibility            *string     `json:"visibility,omitempty"` // The repository visibility, for example `public` or `private`.
-	WorkflowID            *int64      `json:"workflow_id,omitempty"`
-	WorkflowRunID         *int64      `json:"workflow_run_id,omitempty"`
-}
-
-// GetAuditLog gets the audit-log entries for an organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#get-the-audit-log-for-an-organization
-func (s *OrganizationsService) GetAuditLog(ctx context.Context, org string, opts *GetAuditLogOptions) ([]*AuditEntry, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/audit-log", org)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var auditEntries []*AuditEntry
-	resp, err := s.client.Do(ctx, req, &auditEntries)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return auditEntries, resp, nil
-}
diff --git a/vendor/github.com/google/go-github/v50/AUTHORS b/vendor/github.com/google/go-github/v53/AUTHORS
similarity index 100%
rename from vendor/github.com/google/go-github/v50/AUTHORS
rename to vendor/github.com/google/go-github/v53/AUTHORS
diff --git a/vendor/github.com/google/go-github/v50/LICENSE b/vendor/github.com/google/go-github/v53/LICENSE
similarity index 100%
rename from vendor/github.com/google/go-github/v50/LICENSE
rename to vendor/github.com/google/go-github/v53/LICENSE
diff --git a/vendor/github.com/google/go-github/v50/github/actions.go b/vendor/github.com/google/go-github/v53/github/actions.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/actions.go
rename to vendor/github.com/google/go-github/v53/github/actions.go
diff --git a/vendor/github.com/google/go-github/v50/github/actions_artifacts.go b/vendor/github.com/google/go-github/v53/github/actions_artifacts.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/actions_artifacts.go
rename to vendor/github.com/google/go-github/v53/github/actions_artifacts.go
diff --git a/vendor/github.com/google/go-github/v50/github/actions_cache.go b/vendor/github.com/google/go-github/v53/github/actions_cache.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/actions_cache.go
rename to vendor/github.com/google/go-github/v53/github/actions_cache.go
diff --git a/vendor/github.com/google/go-github/v50/github/actions_oidc.go b/vendor/github.com/google/go-github/v53/github/actions_oidc.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/actions_oidc.go
rename to vendor/github.com/google/go-github/v53/github/actions_oidc.go
diff --git a/vendor/github.com/google/go-github/v53/github/actions_required_workflows.go b/vendor/github.com/google/go-github/v53/github/actions_required_workflows.go
new file mode 100644
index 0000000000..3566eb9d20
--- /dev/null
+++ b/vendor/github.com/google/go-github/v53/github/actions_required_workflows.go
@@ -0,0 +1,247 @@
+// Copyright 2023 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+	"context"
+	"fmt"
+)
+
+// OrgRequiredWorkflow represents a required workflow object at the org level.
+type OrgRequiredWorkflow struct {
+	ID                      *int64      `json:"id,omitempty"`
+	Name                    *string     `json:"name,omitempty"`
+	Path                    *string     `json:"path,omitempty"`
+	Scope                   *string     `json:"scope,omitempty"`
+	Ref                     *string     `json:"ref,omitempty"`
+	State                   *string     `json:"state,omitempty"`
+	SelectedRepositoriesURL *string     `json:"selected_repositories_url,omitempty"`
+	CreatedAt               *Timestamp  `json:"created_at,omitempty"`
+	UpdatedAt               *Timestamp  `json:"updated_at,omitempty"`
+	Repository              *Repository `json:"repository,omitempty"`
+}
+
+// OrgRequiredWorkflows represents the required workflows for the org.
+type OrgRequiredWorkflows struct {
+	TotalCount        *int                   `json:"total_count,omitempty"`
+	RequiredWorkflows []*OrgRequiredWorkflow `json:"required_workflows,omitempty"`
+}
+
+// CreateUpdateRequiredWorkflowOptions represents the input object used to create or update required workflows.
+type CreateUpdateRequiredWorkflowOptions struct {
+	WorkflowFilePath      *string          `json:"workflow_file_path,omitempty"`
+	RepositoryID          *int64           `json:"repository_id,omitempty"`
+	Scope                 *string          `json:"scope,omitempty"`
+	SelectedRepositoryIDs *SelectedRepoIDs `json:"selected_repository_ids,omitempty"`
+}
+
+// RequiredWorkflowSelectedRepos represents the repos that a required workflow is applied to.
+type RequiredWorkflowSelectedRepos struct {
+	TotalCount   *int          `json:"total_count,omitempty"`
+	Repositories []*Repository `json:"repositories,omitempty"`
+}
+
+// RepoRequiredWorkflow represents a required workflow object at the repo level.
+type RepoRequiredWorkflow struct {
+	ID               *int64      `json:"id,omitempty"`
+	NodeID           *string     `json:"node_id,omitempty"`
+	Name             *string     `json:"name,omitempty"`
+	Path             *string     `json:"path,omitempty"`
+	State            *string     `json:"state,omitempty"`
+	URL              *string     `json:"url,omitempty"`
+	HTMLURL          *string     `json:"html_url,omitempty"`
+	BadgeURL         *string     `json:"badge_url,omitempty"`
+	CreatedAt        *Timestamp  `json:"created_at,omitempty"`
+	UpdatedAt        *Timestamp  `json:"updated_at,omitempty"`
+	SourceRepository *Repository `json:"source_repository,omitempty"`
+}
+
+// RepoRequiredWorkflows represents the required workflows for a repo.
+type RepoRequiredWorkflows struct {
+	TotalCount        *int                    `json:"total_count,omitempty"`
+	RequiredWorkflows []*RepoRequiredWorkflow `json:"required_workflows,omitempty"`
+}
+
+// ListOrgRequiredWorkflows lists the RequiredWorkflows for an org.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-required-workflows
+func (s *ActionsService) ListOrgRequiredWorkflows(ctx context.Context, org string, opts *ListOptions) (*OrgRequiredWorkflows, *Response, error) {
+	url := fmt.Sprintf("orgs/%v/actions/required_workflows", org)
+	u, err := addOptions(url, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	requiredWorkflows := new(OrgRequiredWorkflows)
+	resp, err := s.client.Do(ctx, req, &requiredWorkflows)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return requiredWorkflows, resp, nil
+}
+
+// CreateRequiredWorkflow creates the required workflow in an org.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#create-a-required-workflow
+func (s *ActionsService) CreateRequiredWorkflow(ctx context.Context, org string, createRequiredWorkflowOptions *CreateUpdateRequiredWorkflowOptions) (*OrgRequiredWorkflow, *Response, error) {
+	url := fmt.Sprintf("orgs/%v/actions/required_workflows", org)
+	req, err := s.client.NewRequest("POST", url, createRequiredWorkflowOptions)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	orgRequiredWorkflow := new(OrgRequiredWorkflow)
+	resp, err := s.client.Do(ctx, req, orgRequiredWorkflow)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return orgRequiredWorkflow, resp, nil
+}
+
+// GetRequiredWorkflowByID gets a required workflow for an org by its ID.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-required-workflows
+func (s *ActionsService) GetRequiredWorkflowByID(ctx context.Context, owner string, requiredWorkflowID int64) (*OrgRequiredWorkflow, *Response, error) {
+	u := fmt.Sprintf("orgs/%v/actions/required_workflows/%v", owner, requiredWorkflowID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	requiredWorkflow := new(OrgRequiredWorkflow)
+	resp, err := s.client.Do(ctx, req, &requiredWorkflow)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return requiredWorkflow, resp, nil
+}
+
+// UpdateRequiredWorkflow updates a required workflow in an org.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#update-a-required-workflow
+func (s *ActionsService) UpdateRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID int64, updateRequiredWorkflowOptions *CreateUpdateRequiredWorkflowOptions) (*OrgRequiredWorkflow, *Response, error) {
+	url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v", org, requiredWorkflowID)
+	req, err := s.client.NewRequest("PATCH", url, updateRequiredWorkflowOptions)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	orgRequiredWorkflow := new(OrgRequiredWorkflow)
+	resp, err := s.client.Do(ctx, req, orgRequiredWorkflow)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return orgRequiredWorkflow, resp, nil
+}
+
+// DeleteRequiredWorkflow deletes a required workflow in an org.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#update-a-required-workflow
+func (s *ActionsService) DeleteRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID int64) (*Response, error) {
+	url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v", org, requiredWorkflowID)
+	req, err := s.client.NewRequest("DELETE", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return s.client.Do(ctx, req, nil)
+}
+
+// ListRequiredWorkflowSelectedRepos lists the Repositories selected for a workflow.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-selected-repositories-for-a-required-workflow
+func (s *ActionsService) ListRequiredWorkflowSelectedRepos(ctx context.Context, org string, requiredWorkflowID int64, opts *ListOptions) (*RequiredWorkflowSelectedRepos, *Response, error) {
+	url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories", org, requiredWorkflowID)
+	u, err := addOptions(url, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	requiredWorkflowRepos := new(RequiredWorkflowSelectedRepos)
+	resp, err := s.client.Do(ctx, req, &requiredWorkflowRepos)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return requiredWorkflowRepos, resp, nil
+}
+
+// SetRequiredWorkflowSelectedRepos sets the Repositories selected for a workflow.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#sets-repositories-for-a-required-workflow
+func (s *ActionsService) SetRequiredWorkflowSelectedRepos(ctx context.Context, org string, requiredWorkflowID int64, ids SelectedRepoIDs) (*Response, error) {
+	type repoIDs struct {
+		SelectedIDs SelectedRepoIDs `json:"selected_repository_ids"`
+	}
+	url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories", org, requiredWorkflowID)
+	req, err := s.client.NewRequest("PUT", url, repoIDs{SelectedIDs: ids})
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(ctx, req, nil)
+}
+
+// AddRepoToRequiredWorkflow adds the Repository to a required workflow.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#add-a-repository-to-a-required-workflow
+func (s *ActionsService) AddRepoToRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID, repoID int64) (*Response, error) {
+	url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories/%v", org, requiredWorkflowID, repoID)
+	req, err := s.client.NewRequest("PUT", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return s.client.Do(ctx, req, nil)
+}
+
+// RemoveRepoFromRequiredWorkflow removes the Repository from a required workflow.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#add-a-repository-to-a-required-workflow
+func (s *ActionsService) RemoveRepoFromRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID, repoID int64) (*Response, error) {
+	url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories/%v", org, requiredWorkflowID, repoID)
+	req, err := s.client.NewRequest("DELETE", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return s.client.Do(ctx, req, nil)
+}
+
+// ListRepoRequiredWorkflows lists the RequiredWorkflows for a repo.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-repository-required-workflows
+func (s *ActionsService) ListRepoRequiredWorkflows(ctx context.Context, owner, repo string, opts *ListOptions) (*RepoRequiredWorkflows, *Response, error) {
+	url := fmt.Sprintf("repos/%v/%v/actions/required_workflows", owner, repo)
+	u, err := addOptions(url, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	requiredWorkflows := new(RepoRequiredWorkflows)
+	resp, err := s.client.Do(ctx, req, &requiredWorkflows)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return requiredWorkflows, resp, nil
+}
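A hedged sketch of calling the new required-workflows endpoints from a go-github client; the org name and pagination values are placeholders, and authentication setup is elided:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	// In real use the http.Client would carry credentials; nil is for illustration only.
	client := github.NewClient(nil)

	workflows, _, err := client.Actions.ListOrgRequiredWorkflows(ctx, "example-org", &github.ListOptions{PerPage: 10})
	if err != nil {
		panic(err)
	}
	for _, w := range workflows.RequiredWorkflows {
		// Fields are pointers, so nil-check before dereferencing.
		if w.Name != nil && w.State != nil {
			fmt.Println(*w.Name, *w.State)
		}
	}
}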
diff --git a/vendor/github.com/google/go-github/v50/github/actions_runner_groups.go b/vendor/github.com/google/go-github/v53/github/actions_runner_groups.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/actions_runner_groups.go
rename to vendor/github.com/google/go-github/v53/github/actions_runner_groups.go
diff --git a/vendor/github.com/google/go-github/v50/github/actions_runners.go b/vendor/github.com/google/go-github/v53/github/actions_runners.go
similarity index 86%
rename from vendor/github.com/google/go-github/v50/github/actions_runners.go
rename to vendor/github.com/google/go-github/v53/github/actions_runners.go
index 40c6be3a92..3990a5a90f 100644
--- a/vendor/github.com/google/go-github/v50/github/actions_runners.go
+++ b/vendor/github.com/google/go-github/v53/github/actions_runners.go
@@ -45,6 +45,60 @@ func (s *ActionsService) ListRunnerApplicationDownloads(ctx context.Context, own
 	return rads, resp, nil
 }
 
+// GenerateJITConfigRequest specifies body parameters to GenerateOrgJITConfig and GenerateRepoJITConfig.
+type GenerateJITConfigRequest struct {
+	Name          string  `json:"name"`
+	RunnerGroupID int64   `json:"runner_group_id"`
+	WorkFolder    *string `json:"work_folder,omitempty"`
+
+	// Labels represents the names of the custom labels to add to the runner.
+	// Minimum items: 1. Maximum items: 100.
+	Labels []string `json:"labels"`
+}
+
+// JITRunnerConfig represents encoded JIT configuration that can be used to bootstrap a self-hosted runner.
+type JITRunnerConfig struct {
+	EncodedJITConfig *string `json:"encoded_jit_config,omitempty"`
+}
+
+// GenerateOrgJITConfig generates a just-in-time configuration for an organization.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners?apiVersion=2022-11-28#create-configuration-for-a-just-in-time-runner-for-an-organization
+func (s *ActionsService) GenerateOrgJITConfig(ctx context.Context, owner string, request *GenerateJITConfigRequest) (*JITRunnerConfig, *Response, error) {
+	u := fmt.Sprintf("orgs/%v/actions/runners/generate-jitconfig", owner)
+	req, err := s.client.NewRequest("POST", u, request)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	jitConfig := new(JITRunnerConfig)
+	resp, err := s.client.Do(ctx, req, jitConfig)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return jitConfig, resp, nil
+}
+
+// GenerateRepoJITConfig generates a just-in-time configuration for a repository.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners?apiVersion=2022-11-28#create-configuration-for-a-just-in-time-runner-for-a-repository
+func (s *ActionsService) GenerateRepoJITConfig(ctx context.Context, owner, repo string, request *GenerateJITConfigRequest) (*JITRunnerConfig, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/actions/runners/generate-jitconfig", owner, repo)
+	req, err := s.client.NewRequest("POST", u, request)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	jitConfig := new(JITRunnerConfig)
+	resp, err := s.client.Do(ctx, req, jitConfig)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return jitConfig, resp, nil
+}
+
 // RegistrationToken represents a token that can be used to add a self-hosted runner to a repository.
 type RegistrationToken struct {
 	Token     *string    `json:"token,omitempty"`
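A sketch of requesting a just-in-time runner configuration for a repository with the request type added above; the owner, repo, runner group ID, and labels are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // credentials elided for brevity

	req := &github.GenerateJITConfigRequest{
		Name:          "ephemeral-runner-1",
		RunnerGroupID: 1,
		Labels:        []string{"self-hosted", "linux"},
	}
	cfg, _, err := client.Actions.GenerateRepoJITConfig(ctx, "example-owner", "example-repo", req)
	if err != nil {
		panic(err)
	}
	if cfg.EncodedJITConfig != nil {
		// The encoded blob is handed to the runner binary to register it just-in-time.
		fmt.Println("received encoded JIT config of", len(*cfg.EncodedJITConfig), "bytes")
	}
}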
diff --git a/vendor/github.com/google/go-github/v50/github/actions_secrets.go b/vendor/github.com/google/go-github/v53/github/actions_secrets.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/actions_secrets.go
rename to vendor/github.com/google/go-github/v53/github/actions_secrets.go
diff --git a/vendor/github.com/google/go-github/v50/github/actions_variables.go b/vendor/github.com/google/go-github/v53/github/actions_variables.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/actions_variables.go
rename to vendor/github.com/google/go-github/v53/github/actions_variables.go
diff --git a/vendor/github.com/google/go-github/v50/github/actions_workflow_jobs.go b/vendor/github.com/google/go-github/v53/github/actions_workflow_jobs.go
similarity index 98%
rename from vendor/github.com/google/go-github/v50/github/actions_workflow_jobs.go
rename to vendor/github.com/google/go-github/v53/github/actions_workflow_jobs.go
index b7130916fe..1f018b3e48 100644
--- a/vendor/github.com/google/go-github/v50/github/actions_workflow_jobs.go
+++ b/vendor/github.com/google/go-github/v53/github/actions_workflow_jobs.go
@@ -28,6 +28,7 @@ type WorkflowJob struct {
 	RunID       *int64      `json:"run_id,omitempty"`
 	RunURL      *string     `json:"run_url,omitempty"`
 	NodeID      *string     `json:"node_id,omitempty"`
+	HeadBranch  *string     `json:"head_branch,omitempty"`
 	HeadSHA     *string     `json:"head_sha,omitempty"`
 	URL         *string     `json:"url,omitempty"`
 	HTMLURL     *string     `json:"html_url,omitempty"`
diff --git a/vendor/github.com/google/go-github/v50/github/actions_workflow_runs.go b/vendor/github.com/google/go-github/v53/github/actions_workflow_runs.go
similarity index 90%
rename from vendor/github.com/google/go-github/v50/github/actions_workflow_runs.go
rename to vendor/github.com/google/go-github/v53/github/actions_workflow_runs.go
index 0cda90d41b..61f736be4f 100644
--- a/vendor/github.com/google/go-github/v50/github/actions_workflow_runs.go
+++ b/vendor/github.com/google/go-github/v53/github/actions_workflow_runs.go
@@ -22,6 +22,7 @@ type WorkflowRun struct {
 	RunNumber          *int           `json:"run_number,omitempty"`
 	RunAttempt         *int           `json:"run_attempt,omitempty"`
 	Event              *string        `json:"event,omitempty"`
+	DisplayTitle       *string        `json:"display_title,omitempty"`
 	Status             *string        `json:"status,omitempty"`
 	Conclusion         *string        `json:"conclusion,omitempty"`
 	WorkflowID         *int64         `json:"workflow_id,omitempty"`
@@ -55,12 +56,14 @@ type WorkflowRuns struct {
 
 // ListWorkflowRunsOptions specifies optional parameters to ListWorkflowRuns.
 type ListWorkflowRunsOptions struct {
-	Actor   string `url:"actor,omitempty"`
-	Branch  string `url:"branch,omitempty"`
-	Event   string `url:"event,omitempty"`
-	Status  string `url:"status,omitempty"`
-	Created string `url:"created,omitempty"`
-	HeadSHA string `url:"head_sha,omitempty"`
+	Actor               string `url:"actor,omitempty"`
+	Branch              string `url:"branch,omitempty"`
+	Event               string `url:"event,omitempty"`
+	Status              string `url:"status,omitempty"`
+	Created             string `url:"created,omitempty"`
+	HeadSHA             string `url:"head_sha,omitempty"`
+	ExcludePullRequests bool   `url:"exclude_pull_requests,omitempty"`
+	CheckSuiteID        int64  `url:"check_suite_id,omitempty"`
 	ListOptions
 }
 
@@ -204,6 +207,26 @@ func (s *ActionsService) GetWorkflowRunAttempt(ctx context.Context, owner, repo
 	return run, resp, nil
 }
 
+// GetWorkflowRunAttemptLogs gets a redirect URL to download a plain text file of logs for a specific attempt of a workflow run.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#download-workflow-run-attempt-logs
+func (s *ActionsService) GetWorkflowRunAttemptLogs(ctx context.Context, owner, repo string, runID int64, attemptNumber int, followRedirects bool) (*url.URL, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/attempts/%v/logs", owner, repo, runID, attemptNumber)
+
+	resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusFound {
+		return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
+	}
+
+	parsedURL, err := url.Parse(resp.Header.Get("Location"))
+	return parsedURL, newResponse(resp), err
+}
+
 // RerunWorkflowByID re-runs a workflow by ID.
 //
 // GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#re-run-a-workflow
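A sketch of using the new ListWorkflowRunsOptions fields; this assumes the existing ActionsService.ListRepositoryWorkflowRuns helper from go-github, with placeholder owner/repo values:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // unauthenticated, for illustration only

	opts := &github.ListWorkflowRunsOptions{
		Branch:              "main",
		ExcludePullRequests: true, // new field: drop PR-triggered runs from the listing
		ListOptions:         github.ListOptions{PerPage: 5},
	}
	runs, _, err := client.Actions.ListRepositoryWorkflowRuns(ctx, "example-owner", "example-repo", opts)
	if err != nil {
		panic(err)
	}
	for _, r := range runs.WorkflowRuns {
		if r.DisplayTitle != nil {
			fmt.Println(*r.DisplayTitle) // DisplayTitle is new in this upgrade
		}
	}
}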
diff --git a/vendor/github.com/google/go-github/v50/github/actions_workflows.go b/vendor/github.com/google/go-github/v53/github/actions_workflows.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/actions_workflows.go
rename to vendor/github.com/google/go-github/v53/github/actions_workflows.go
diff --git a/vendor/github.com/google/go-github/v50/github/activity.go b/vendor/github.com/google/go-github/v53/github/activity.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/activity.go
rename to vendor/github.com/google/go-github/v53/github/activity.go
diff --git a/vendor/github.com/google/go-github/v50/github/activity_events.go b/vendor/github.com/google/go-github/v53/github/activity_events.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/activity_events.go
rename to vendor/github.com/google/go-github/v53/github/activity_events.go
diff --git a/vendor/github.com/google/go-github/v50/github/activity_notifications.go b/vendor/github.com/google/go-github/v53/github/activity_notifications.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/activity_notifications.go
rename to vendor/github.com/google/go-github/v53/github/activity_notifications.go
diff --git a/vendor/github.com/google/go-github/v50/github/activity_star.go b/vendor/github.com/google/go-github/v53/github/activity_star.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/activity_star.go
rename to vendor/github.com/google/go-github/v53/github/activity_star.go
diff --git a/vendor/github.com/google/go-github/v50/github/activity_watching.go b/vendor/github.com/google/go-github/v53/github/activity_watching.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/activity_watching.go
rename to vendor/github.com/google/go-github/v53/github/activity_watching.go
diff --git a/vendor/github.com/google/go-github/v50/github/admin.go b/vendor/github.com/google/go-github/v53/github/admin.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/admin.go
rename to vendor/github.com/google/go-github/v53/github/admin.go
diff --git a/vendor/github.com/google/go-github/v50/github/admin_orgs.go b/vendor/github.com/google/go-github/v53/github/admin_orgs.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/admin_orgs.go
rename to vendor/github.com/google/go-github/v53/github/admin_orgs.go
diff --git a/vendor/github.com/google/go-github/v50/github/admin_stats.go b/vendor/github.com/google/go-github/v53/github/admin_stats.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/admin_stats.go
rename to vendor/github.com/google/go-github/v53/github/admin_stats.go
diff --git a/vendor/github.com/google/go-github/v50/github/admin_users.go b/vendor/github.com/google/go-github/v53/github/admin_users.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/admin_users.go
rename to vendor/github.com/google/go-github/v53/github/admin_users.go
diff --git a/vendor/github.com/google/go-github/v50/github/apps.go b/vendor/github.com/google/go-github/v53/github/apps.go
similarity index 93%
rename from vendor/github.com/google/go-github/v50/github/apps.go
rename to vendor/github.com/google/go-github/v53/github/apps.go
index e1d9aadaf5..ab83d59ab2 100644
--- a/vendor/github.com/google/go-github/v50/github/apps.go
+++ b/vendor/github.com/google/go-github/v53/github/apps.go
@@ -18,18 +18,19 @@ type AppsService service
 
 // App represents a GitHub App.
 type App struct {
-	ID          *int64                   `json:"id,omitempty"`
-	Slug        *string                  `json:"slug,omitempty"`
-	NodeID      *string                  `json:"node_id,omitempty"`
-	Owner       *User                    `json:"owner,omitempty"`
-	Name        *string                  `json:"name,omitempty"`
-	Description *string                  `json:"description,omitempty"`
-	ExternalURL *string                  `json:"external_url,omitempty"`
-	HTMLURL     *string                  `json:"html_url,omitempty"`
-	CreatedAt   *Timestamp               `json:"created_at,omitempty"`
-	UpdatedAt   *Timestamp               `json:"updated_at,omitempty"`
-	Permissions *InstallationPermissions `json:"permissions,omitempty"`
-	Events      []string                 `json:"events,omitempty"`
+	ID                 *int64                   `json:"id,omitempty"`
+	Slug               *string                  `json:"slug,omitempty"`
+	NodeID             *string                  `json:"node_id,omitempty"`
+	Owner              *User                    `json:"owner,omitempty"`
+	Name               *string                  `json:"name,omitempty"`
+	Description        *string                  `json:"description,omitempty"`
+	ExternalURL        *string                  `json:"external_url,omitempty"`
+	HTMLURL            *string                  `json:"html_url,omitempty"`
+	CreatedAt          *Timestamp               `json:"created_at,omitempty"`
+	UpdatedAt          *Timestamp               `json:"updated_at,omitempty"`
+	Permissions        *InstallationPermissions `json:"permissions,omitempty"`
+	Events             []string                 `json:"events,omitempty"`
+	InstallationsCount *int                     `json:"installations_count,omitempty"`
 }
 
 // InstallationToken represents an installation token.
diff --git a/vendor/github.com/google/go-github/v50/github/apps_hooks.go b/vendor/github.com/google/go-github/v53/github/apps_hooks.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/apps_hooks.go
rename to vendor/github.com/google/go-github/v53/github/apps_hooks.go
diff --git a/vendor/github.com/google/go-github/v50/github/apps_hooks_deliveries.go b/vendor/github.com/google/go-github/v53/github/apps_hooks_deliveries.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/apps_hooks_deliveries.go
rename to vendor/github.com/google/go-github/v53/github/apps_hooks_deliveries.go
diff --git a/vendor/github.com/google/go-github/v50/github/apps_installation.go b/vendor/github.com/google/go-github/v53/github/apps_installation.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/apps_installation.go
rename to vendor/github.com/google/go-github/v53/github/apps_installation.go
diff --git a/vendor/github.com/google/go-github/v50/github/apps_manifest.go b/vendor/github.com/google/go-github/v53/github/apps_manifest.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/apps_manifest.go
rename to vendor/github.com/google/go-github/v53/github/apps_manifest.go
diff --git a/vendor/github.com/google/go-github/v50/github/apps_marketplace.go b/vendor/github.com/google/go-github/v53/github/apps_marketplace.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/apps_marketplace.go
rename to vendor/github.com/google/go-github/v53/github/apps_marketplace.go
diff --git a/vendor/github.com/google/go-github/v50/github/authorizations.go b/vendor/github.com/google/go-github/v53/github/authorizations.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/authorizations.go
rename to vendor/github.com/google/go-github/v53/github/authorizations.go
diff --git a/vendor/github.com/google/go-github/v50/github/billing.go b/vendor/github.com/google/go-github/v53/github/billing.go
similarity index 95%
rename from vendor/github.com/google/go-github/v50/github/billing.go
rename to vendor/github.com/google/go-github/v53/github/billing.go
index 2900a01670..7a76bf86fd 100644
--- a/vendor/github.com/google/go-github/v50/github/billing.go
+++ b/vendor/github.com/google/go-github/v53/github/billing.go
@@ -120,9 +120,14 @@ func (s *BillingService) GetStorageBillingOrg(ctx context.Context, org string) (
 
 // GetAdvancedSecurityActiveCommittersOrg returns the GitHub Advanced Security active committers for an organization per repository.
 //
-// GitHub API docs: https://docs.github.com/en/rest/billing#get-github-advanced-security-active-committers-for-an-organization
-func (s *BillingService) GetAdvancedSecurityActiveCommittersOrg(ctx context.Context, org string) (*ActiveCommitters, *Response, error) {
+// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/billing?apiVersion=2022-11-28#get-github-advanced-security-active-committers-for-an-organization
+func (s *BillingService) GetAdvancedSecurityActiveCommittersOrg(ctx context.Context, org string, opts *ListOptions) (*ActiveCommitters, *Response, error) {
 	u := fmt.Sprintf("orgs/%v/settings/billing/advanced-security", org)
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
 	req, err := s.client.NewRequest("GET", u, nil)
 	if err != nil {
 		return nil, nil, err
diff --git a/vendor/github.com/google/go-github/v50/github/checks.go b/vendor/github.com/google/go-github/v53/github/checks.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/checks.go
rename to vendor/github.com/google/go-github/v53/github/checks.go
diff --git a/vendor/github.com/google/go-github/v50/github/code-scanning.go b/vendor/github.com/google/go-github/v53/github/code-scanning.go
similarity index 83%
rename from vendor/github.com/google/go-github/v50/github/code-scanning.go
rename to vendor/github.com/google/go-github/v53/github/code-scanning.go
index 6717348ed7..e4a6abeba3 100644
--- a/vendor/github.com/google/go-github/v50/github/code-scanning.go
+++ b/vendor/github.com/google/go-github/v53/github/code-scanning.go
@@ -378,3 +378,76 @@ func (s *CodeScanningService) GetAnalysis(ctx context.Context, owner, repo strin
 
 	return analysis, resp, nil
 }
+
+// DefaultSetupConfiguration represents a code scanning default setup configuration.
+type DefaultSetupConfiguration struct {
+	State      *string    `json:"state,omitempty"`
+	Languages  []string   `json:"languages,omitempty"`
+	QuerySuite *string    `json:"query_suite,omitempty"`
+	UpdatedAt  *Timestamp `json:"updated_at,omitempty"`
+}
+
+// GetDefaultSetupConfiguration gets a code scanning default setup configuration.
+//
+// You must use an access token with the repo scope to use this
+// endpoint with private repos or the public_repo scope for public repos. GitHub Apps must have the repo write
+// permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/code-scanning#get-a-code-scanning-default-setup-configuration
+func (s *CodeScanningService) GetDefaultSetupConfiguration(ctx context.Context, owner, repo string) (*DefaultSetupConfiguration, *Response, error) {
+	u := fmt.Sprintf("repos/%s/%s/code-scanning/default-setup", owner, repo)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	cfg := new(DefaultSetupConfiguration)
+	resp, err := s.client.Do(ctx, req, cfg)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return cfg, resp, nil
+}
+
+// UpdateDefaultSetupConfigurationOptions specifies parameters to the CodeScanningService.UpdateDefaultSetupConfiguration
+// method.
+type UpdateDefaultSetupConfigurationOptions struct {
+	State      string   `json:"state"`
+	QuerySuite *string  `json:"query_suite,omitempty"`
+	Languages  []string `json:"languages,omitempty"`
+}
+
+// UpdateDefaultSetupConfigurationResponse represents a response from updating a code scanning default setup configuration.
+type UpdateDefaultSetupConfigurationResponse struct {
+	RunID  *int64  `json:"run_id,omitempty"`
+	RunURL *string `json:"run_url,omitempty"`
+}
+
+// UpdateDefaultSetupConfiguration updates a code scanning default setup configuration.
+//
+// You must use an access token with the repo scope to use this
+// endpoint with private repos or the public_repo scope for public repos. GitHub Apps must have the repo write
+// permission to use this endpoint.
+//
+// This method might return an AcceptedError and a status code of 202. This is the status that GitHub
+// returns to signify that it has scheduled the update of the default setup configuration in a background task.
+//
+// GitHub API docs: https://docs.github.com/en/rest/code-scanning#update-a-code-scanning-default-setup-configuration
+func (s *CodeScanningService) UpdateDefaultSetupConfiguration(ctx context.Context, owner, repo string, options *UpdateDefaultSetupConfigurationOptions) (*UpdateDefaultSetupConfigurationResponse, *Response, error) {
+	u := fmt.Sprintf("repos/%s/%s/code-scanning/default-setup", owner, repo)
+
+	req, err := s.client.NewRequest("PATCH", u, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	a := new(UpdateDefaultSetupConfigurationResponse)
+	resp, err := s.client.Do(ctx, req, a)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return a, resp, nil
+}
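A minimal sketch of driving the two new default-setup endpoints added above: update the configuration, treat a 202 as the scheduled-in-background case the doc comment describes, then read the configuration back. The token, owner "my-org", repo "my-repo", and language list are placeholders; the AcceptedError handling assumes go-github's usual behavior of surfacing a 202 as *github.AcceptedError.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	opts := &github.UpdateDefaultSetupConfigurationOptions{
		State:      "configured",
		QuerySuite: github.String("default"),
		Languages:  []string{"go"},
	}
	res, _, err := client.CodeScanning.UpdateDefaultSetupConfiguration(ctx, "my-org", "my-repo", opts)
	var accepted *github.AcceptedError
	switch {
	case errors.As(err, &accepted):
		// GitHub queued the change in a background task; poll the configuration later.
		fmt.Println("update accepted and scheduled")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Println("started analysis run:", res.GetRunURL())
	}

	cfg, _, err := client.CodeScanning.GetDefaultSetupConfiguration(ctx, "my-org", "my-repo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("default setup state:", cfg.GetState())
}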
diff --git a/vendor/github.com/google/go-github/v53/github/codespaces.go b/vendor/github.com/google/go-github/v53/github/codespaces.go
new file mode 100644
index 0000000000..a260c227de
--- /dev/null
+++ b/vendor/github.com/google/go-github/v53/github/codespaces.go
@@ -0,0 +1,254 @@
+// Copyright 2023 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+	"context"
+	"fmt"
+)
+
+// CodespacesService handles communication with the Codespaces related
+// methods of the GitHub API.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/
+type CodespacesService service
+
+// Codespace represents a codespace.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces
+type Codespace struct {
+	ID                             *int64                        `json:"id,omitempty"`
+	Name                           *string                       `json:"name,omitempty"`
+	DisplayName                    *string                       `json:"display_name,omitempty"`
+	EnvironmentID                  *string                       `json:"environment_id,omitempty"`
+	Owner                          *User                         `json:"owner,omitempty"`
+	BillableOwner                  *User                         `json:"billable_owner,omitempty"`
+	Repository                     *Repository                   `json:"repository,omitempty"`
+	Machine                        *CodespacesMachine            `json:"machine,omitempty"`
+	DevcontainerPath               *string                       `json:"devcontainer_path,omitempty"`
+	Prebuild                       *bool                         `json:"prebuild,omitempty"`
+	CreatedAt                      *Timestamp                    `json:"created_at,omitempty"`
+	UpdatedAt                      *Timestamp                    `json:"updated_at,omitempty"`
+	LastUsedAt                     *Timestamp                    `json:"last_used_at,omitempty"`
+	State                          *string                       `json:"state,omitempty"`
+	URL                            *string                       `json:"url,omitempty"`
+	GitStatus                      *CodespacesGitStatus          `json:"git_status,omitempty"`
+	Location                       *string                       `json:"location,omitempty"`
+	IdleTimeoutMinutes             *int                          `json:"idle_timeout_minutes,omitempty"`
+	WebURL                         *string                       `json:"web_url,omitempty"`
+	MachinesURL                    *string                       `json:"machines_url,omitempty"`
+	StartURL                       *string                       `json:"start_url,omitempty"`
+	StopURL                        *string                       `json:"stop_url,omitempty"`
+	PullsURL                       *string                       `json:"pulls_url,omitempty"`
+	RecentFolders                  []string                      `json:"recent_folders,omitempty"`
+	RuntimeConstraints             *CodespacesRuntimeConstraints `json:"runtime_constraints,omitempty"`
+	PendingOperation               *bool                         `json:"pending_operation,omitempty"`
+	PendingOperationDisabledReason *string                       `json:"pending_operation_disabled_reason,omitempty"`
+	IdleTimeoutNotice              *string                       `json:"idle_timeout_notice,omitempty"`
+	RetentionPeriodMinutes         *int                          `json:"retention_period_minutes,omitempty"`
+	RetentionExpiresAt             *Timestamp                    `json:"retention_expires_at,omitempty"`
+	LastKnownStopNotice            *string                       `json:"last_known_stop_notice,omitempty"`
+}
+
+// CodespacesGitStatus represents the git status of a codespace.
+type CodespacesGitStatus struct {
+	Ahead                 *int    `json:"ahead,omitempty"`
+	Behind                *int    `json:"behind,omitempty"`
+	HasUnpushedChanges    *bool   `json:"has_unpushed_changes,omitempty"`
+	HasUncommittedChanges *bool   `json:"has_uncommitted_changes,omitempty"`
+	Ref                   *string `json:"ref,omitempty"`
+}
+
+// CodespacesMachine represents the machine type of a codespace.
+type CodespacesMachine struct {
+	Name                 *string `json:"name,omitempty"`
+	DisplayName          *string `json:"display_name,omitempty"`
+	OperatingSystem      *string `json:"operating_system,omitempty"`
+	StorageInBytes       *int64  `json:"storage_in_bytes,omitempty"`
+	MemoryInBytes        *int64  `json:"memory_in_bytes,omitempty"`
+	CPUs                 *int    `json:"cpus,omitempty"`
+	PrebuildAvailability *string `json:"prebuild_availability,omitempty"`
+}
+
+// CodespacesRuntimeConstraints represents the runtime constraints of a codespace.
+type CodespacesRuntimeConstraints struct {
+	AllowedPortPrivacySettings []string `json:"allowed_port_privacy_settings,omitempty"`
+}
+
+// ListCodespaces represents the response from the list codespaces endpoints.
+type ListCodespaces struct {
+	TotalCount *int         `json:"total_count,omitempty"`
+	Codespaces []*Codespace `json:"codespaces"`
+}
+
+// ListInRepo lists codespaces for a user in a repository.
+//
+// Lists the codespaces associated with a specified repository and the authenticated user.
+// You must authenticate using an access token with the codespace scope to use this endpoint.
+// GitHub Apps must have read access to the codespaces repository permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#list-codespaces-in-a-repository-for-the-authenticated-user
+func (s *CodespacesService) ListInRepo(ctx context.Context, owner, repo string, opts *ListOptions) (*ListCodespaces, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/codespaces", owner, repo)
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var codespaces *ListCodespaces
+	resp, err := s.client.Do(ctx, req, &codespaces)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return codespaces, resp, nil
+}
+
+// ListCodespacesOptions represents the options for listing codespaces for a user.
+type ListCodespacesOptions struct {
+	ListOptions
+	RepositoryID int64 `url:"repository_id,omitempty"`
+}
+
+// List lists codespaces for an authenticated user.
+//
+// Lists the authenticated user's codespaces.
+// You must authenticate using an access token with the codespace scope to use this endpoint.
+// GitHub Apps must have read access to the codespaces repository permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#list-codespaces-for-the-authenticated-user
+func (s *CodespacesService) List(ctx context.Context, opts *ListCodespacesOptions) (*ListCodespaces, *Response, error) {
+	u := "user/codespaces"
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var codespaces *ListCodespaces
+	resp, err := s.client.Do(ctx, req, &codespaces)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return codespaces, resp, nil
+}
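A minimal sketch of listing the authenticated user's codespaces with the options type defined above, optionally filtered to one repository. The token, the repository ID, and the page size are placeholders.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	opts := &github.ListCodespacesOptions{
		// RepositoryID is optional; leaving it zero lists codespaces across all repositories.
		RepositoryID: 1234567, // hypothetical repository ID
		ListOptions:  github.ListOptions{PerPage: 30},
	}
	for {
		list, resp, err := client.Codespaces.List(ctx, opts)
		if err != nil {
			log.Fatal(err)
		}
		for _, cs := range list.Codespaces {
			fmt.Printf("%s (%s) state=%s\n", cs.GetName(), cs.GetDisplayName(), cs.GetState())
		}
		if resp.NextPage == 0 {
			break
		}
		opts.Page = resp.NextPage
	}
}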
+
+// CreateCodespaceOptions represents options for the creation of a codespace in a repository.
+type CreateCodespaceOptions struct {
+	Ref *string `json:"ref,omitempty"`
+	// Geo represents the geographic area for this codespace.
+	// If not specified, the value is assigned by IP.
+	// This property replaces location, which is being deprecated.
+	// Geo can be one of: `EuropeWest`, `SoutheastAsia`, `UsEast`, `UsWest`.
+	Geo                        *string `json:"geo,omitempty"`
+	ClientIP                   *string `json:"client_ip,omitempty"`
+	Machine                    *string `json:"machine,omitempty"`
+	DevcontainerPath           *string `json:"devcontainer_path,omitempty"`
+	MultiRepoPermissionsOptOut *bool   `json:"multi_repo_permissions_opt_out,omitempty"`
+	WorkingDirectory           *string `json:"working_directory,omitempty"`
+	IdleTimeoutMinutes         *int    `json:"idle_timeout_minutes,omitempty"`
+	DisplayName                *string `json:"display_name,omitempty"`
+	// RetentionPeriodMinutes represents the duration in minutes after codespace has gone idle in which it will be deleted.
+	// Must be integer minutes between 0 and 43200 (30 days).
+	RetentionPeriodMinutes *int `json:"retention_period_minutes,omitempty"`
+}
+
+// CreateInRepo creates a codespace in a repository.
+//
+// Creates a codespace owned by the authenticated user in the specified repository.
+// You must authenticate using an access token with the codespace scope to use this endpoint.
+// GitHub Apps must have write access to the codespaces repository permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#create-a-codespace-in-a-repository
+func (s *CodespacesService) CreateInRepo(ctx context.Context, owner, repo string, request *CreateCodespaceOptions) (*Codespace, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/codespaces", owner, repo)
+
+	req, err := s.client.NewRequest("POST", u, request)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var codespace *Codespace
+	resp, err := s.client.Do(ctx, req, &codespace)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return codespace, resp, nil
+}
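A minimal sketch of creating a codespace with CreateInRepo. The owner, repository, ref, and machine name are placeholders; in practice the available machine types would be looked up first.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	req := &github.CreateCodespaceOptions{
		Ref:                github.String("main"),
		Machine:            github.String("basicLinux32gb"), // hypothetical machine name
		IdleTimeoutMinutes: github.Int(30),
		DisplayName:        github.String("chains-dev"),
	}
	cs, _, err := client.Codespaces.CreateInRepo(ctx, "my-org", "my-repo", req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created codespace:", cs.GetName(), "web URL:", cs.GetWebURL())
}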
+
+// Start starts a codespace.
+//
+// You must authenticate using an access token with the codespace scope to use this endpoint.
+// GitHub Apps must have write access to the codespaces_lifecycle_admin repository permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#start-a-codespace-for-the-authenticated-user
+func (s *CodespacesService) Start(ctx context.Context, codespaceName string) (*Codespace, *Response, error) {
+	u := fmt.Sprintf("user/codespaces/%v/start", codespaceName)
+
+	req, err := s.client.NewRequest("POST", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var codespace *Codespace
+	resp, err := s.client.Do(ctx, req, &codespace)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return codespace, resp, nil
+}
+
+// Stop stops a codespace.
+//
+// You must authenticate using an access token with the codespace scope to use this endpoint.
+// GitHub Apps must have write access to the codespaces_lifecycle_admin repository permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#stop-a-codespace-for-the-authenticated-user
+func (s *CodespacesService) Stop(ctx context.Context, codespaceName string) (*Codespace, *Response, error) {
+	u := fmt.Sprintf("user/codespaces/%v/stop", codespaceName)
+
+	req, err := s.client.NewRequest("POST", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var codespace *Codespace
+	resp, err := s.client.Do(ctx, req, &codespace)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return codespace, resp, nil
+}
+
+// Delete deletes a codespace.
+//
+// You must authenticate using an access token with the codespace scope to use this endpoint.
+// GitHub Apps must have write access to the codespaces repository permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#delete-a-codespace-for-the-authenticated-user
+func (s *CodespacesService) Delete(ctx context.Context, codespaceName string) (*Response, error) {
+	u := fmt.Sprintf("user/codespaces/%v", codespaceName)
+
+	req, err := s.client.NewRequest("DELETE", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(ctx, req, nil)
+}
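The three lifecycle methods above all key off the codespace name. A small sketch, assuming the name came from List or CreateInRepo:

package codespacesdemo

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

// Lifecycle starts, stops, and finally deletes a codespace by name.
func Lifecycle(ctx context.Context, client *github.Client, name string) error {
	if _, _, err := client.Codespaces.Start(ctx, name); err != nil {
		return fmt.Errorf("start: %w", err)
	}
	if _, _, err := client.Codespaces.Stop(ctx, name); err != nil {
		return fmt.Errorf("stop: %w", err)
	}
	if _, err := client.Codespaces.Delete(ctx, name); err != nil {
		return fmt.Errorf("delete: %w", err)
	}
	return nil
}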
diff --git a/vendor/github.com/google/go-github/v53/github/codespaces_secrets.go b/vendor/github.com/google/go-github/v53/github/codespaces_secrets.go
new file mode 100644
index 0000000000..e11c679c66
--- /dev/null
+++ b/vendor/github.com/google/go-github/v53/github/codespaces_secrets.go
@@ -0,0 +1,405 @@
+// Copyright 2023 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+	"context"
+	"fmt"
+)
+
+// ListUserSecrets lists all secrets available for a user's codespaces.
+//
+// Lists all secrets available for a user's Codespaces without revealing their encrypted values.
+// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint.
+// GitHub Apps must have read access to the codespaces_user_secrets user permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#list-secrets-for-the-authenticated-user
+func (s *CodespacesService) ListUserSecrets(ctx context.Context, opts *ListOptions) (*Secrets, *Response, error) {
+	u, err := addOptions("user/codespaces/secrets", opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	return s.listSecrets(ctx, u)
+}
+
+// ListOrgSecrets lists all secrets available to an organization.
+//
+// Lists all Codespaces secrets available at the organization-level without revealing their encrypted values. You must authenticate using an access token with the admin:org scope to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#list-organization-secrets
+func (s *CodespacesService) ListOrgSecrets(ctx context.Context, org string, opts *ListOptions) (*Secrets, *Response, error) {
+	u := fmt.Sprintf("orgs/%v/codespaces/secrets", org)
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	return s.listSecrets(ctx, u)
+}
+
+// ListRepoSecrets lists all secrets available to a repository.
+//
+// Lists all secrets available in a repository without revealing their encrypted values. You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#list-repository-secrets
+func (s *CodespacesService) ListRepoSecrets(ctx context.Context, owner, repo string, opts *ListOptions) (*Secrets, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/codespaces/secrets", owner, repo)
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	return s.listSecrets(ctx, u)
+}
+
+func (s *CodespacesService) listSecrets(ctx context.Context, url string) (*Secrets, *Response, error) {
+	req, err := s.client.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var secrets *Secrets
+	resp, err := s.client.Do(ctx, req, &secrets)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return secrets, resp, nil
+}
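A minimal sketch of the repository-level listing above; the same pattern applies to the user- and org-level variants. The owner and repository are placeholders, and the Secret field names assumed here (Name, UpdatedAt) are the ones go-github uses for its shared Secret type.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	secrets, _, err := client.Codespaces.ListRepoSecrets(ctx, "my-org", "my-repo", &github.ListOptions{PerPage: 100})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range secrets.Secrets {
		// Only names and metadata come back; encrypted values are never exposed.
		fmt.Println(s.Name, "updated:", s.UpdatedAt)
	}
}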
+
+// GetUserPublicKey gets the user's public key for encrypting codespace secrets.
+//
+// Gets your public key, which you need to encrypt secrets. You need to encrypt a secret before you can create or update secrets.
+// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint.
+// GitHub Apps must have read access to the codespaces_user_secrets user permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#get-public-key-for-the-authenticated-user
+func (s *CodespacesService) GetUserPublicKey(ctx context.Context) (*PublicKey, *Response, error) {
+	return s.getPublicKey(ctx, "user/codespaces/secrets/public-key")
+}
+
+// GetOrgPublicKey gets the organization's public key for encrypting codespace secrets.
+//
+// Gets a public key for an organization, which is required in order to encrypt secrets. You need to encrypt the value of a secret before you can create or update secrets. You must authenticate using an access token with the admin:org scope to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#get-an-organization-public-key
+func (s *CodespacesService) GetOrgPublicKey(ctx context.Context, org string) (*PublicKey, *Response, error) {
+	return s.getPublicKey(ctx, fmt.Sprintf("orgs/%v/codespaces/secrets/public-key", org))
+}
+
+// GetRepoPublicKey gets the repository's public key for encrypting codespace secrets.
+//
+// Gets your public key, which you need to encrypt secrets. You need to encrypt a secret before you can create or update secrets. Anyone with read access to the repository can use this endpoint. If the repository is private you must use an access token with the repo scope. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#get-a-repository-public-key
+func (s *CodespacesService) GetRepoPublicKey(ctx context.Context, owner, repo string) (*PublicKey, *Response, error) {
+	return s.getPublicKey(ctx, fmt.Sprintf("repos/%v/%v/codespaces/secrets/public-key", owner, repo))
+}
+
+func (s *CodespacesService) getPublicKey(ctx context.Context, url string) (*PublicKey, *Response, error) {
+	req, err := s.client.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var publicKey *PublicKey
+	resp, err := s.client.Do(ctx, req, &publicKey)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return publicKey, resp, nil
+}
+
+// GetUserSecret gets a user's codespace secret.
+//
+// Gets a secret available to a user's codespaces without revealing its encrypted value.
+// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint.
+// GitHub Apps must have read access to the codespaces_user_secrets user permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#get-a-secret-for-the-authenticated-user
+func (s *CodespacesService) GetUserSecret(ctx context.Context, name string) (*Secret, *Response, error) {
+	u := fmt.Sprintf("user/codespaces/secrets/%v", name)
+	return s.getSecret(ctx, u)
+}
+
+// GetOrgSecret gets an organization's codespace secret.
+//
+// Gets an organization secret without revealing its encrypted value. You must authenticate using an access token with the admin:org scope to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#get-an-organization-secret
+func (s *CodespacesService) GetOrgSecret(ctx context.Context, org, name string) (*Secret, *Response, error) {
+	u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v", org, name)
+	return s.getSecret(ctx, u)
+}
+
+// GetRepoSecret gets a repository's codespace secret.
+//
+// Gets a single repository secret without revealing its encrypted value. You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#get-a-repository-secret
+func (s *CodespacesService) GetRepoSecret(ctx context.Context, owner, repo, name string) (*Secret, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/codespaces/secrets/%v", owner, repo, name)
+	return s.getSecret(ctx, u)
+}
+
+func (s *CodespacesService) getSecret(ctx context.Context, url string) (*Secret, *Response, error) {
+	req, err := s.client.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var secret *Secret
+	resp, err := s.client.Do(ctx, req, &secret)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return secret, resp, nil
+}
+
+// CreateOrUpdateUserSecret creates or updates a user's codespace secret.
+//
+// Creates or updates a secret for a user's codespace with an encrypted value. Encrypt your secret using LibSodium.
+// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must also have Codespaces access to use this endpoint.
+// GitHub Apps must have write access to the codespaces_user_secrets user permission and codespaces_secrets repository permission on all referenced repositories to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#create-or-update-a-secret-for-the-authenticated-user
+func (s *CodespacesService) CreateOrUpdateUserSecret(ctx context.Context, eSecret *EncryptedSecret) (*Response, error) {
+	u := fmt.Sprintf("user/codespaces/secrets/%v", eSecret.Name)
+	return s.createOrUpdateSecret(ctx, u, eSecret)
+}
+
+// CreateOrUpdateOrgSecret creates or updates an organization's codespace secret.
+//
+// Creates or updates an organization secret with an encrypted value. Encrypt your secret using LibSodium. You must authenticate using an access token with the admin:org scope to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#create-or-update-an-organization-secret
+func (s *CodespacesService) CreateOrUpdateOrgSecret(ctx context.Context, org string, eSecret *EncryptedSecret) (*Response, error) {
+	u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v", org, eSecret.Name)
+	return s.createOrUpdateSecret(ctx, u, eSecret)
+}
+
+// CreateOrUpdateRepoSecret creates or updates a repository's codespace secret.
+//
+// Creates or updates a repository secret with an encrypted value. Encrypt your secret using LibSodium. You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#create-or-update-a-repository-secret
+func (s *CodespacesService) CreateOrUpdateRepoSecret(ctx context.Context, owner, repo string, eSecret *EncryptedSecret) (*Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/codespaces/secrets/%v", owner, repo, eSecret.Name)
+	return s.createOrUpdateSecret(ctx, u, eSecret)
+}
+
+func (s *CodespacesService) createOrUpdateSecret(ctx context.Context, url string, eSecret *EncryptedSecret) (*Response, error) {
+	req, err := s.client.NewRequest("PUT", url, eSecret)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+
+	return resp, nil
+}
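A minimal sketch tying the public-key and create-or-update endpoints together for a repository secret. The owner, repository, and secret name are placeholders, and the libsodium sealed-box encryption is out of scope here, so the encrypted value is a stand-in string rather than real ciphertext.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	key, _, err := client.Codespaces.GetRepoPublicKey(ctx, "my-org", "my-repo")
	if err != nil {
		log.Fatal(err)
	}

	// The value must be encrypted against key.GetKey() with a libsodium sealed box
	// before sending; "BASE64_SEALED_BOX" is a placeholder for that ciphertext.
	secret := &github.EncryptedSecret{
		Name:           "DEPLOY_TOKEN",
		KeyID:          key.GetKeyID(),
		EncryptedValue: "BASE64_SEALED_BOX",
	}
	if _, err := client.Codespaces.CreateOrUpdateRepoSecret(ctx, "my-org", "my-repo", secret); err != nil {
		log.Fatal(err)
	}
	fmt.Println("secret created or updated")
}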
+
+// DeleteUserSecret deletes a user's codespace secret.
+//
+// Deletes a secret from a user's codespaces using the secret name. Deleting the secret will remove access from all codespaces that were allowed to access the secret.
+// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint.
+// GitHub Apps must have write access to the codespaces_user_secrets user permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#delete-a-secret-for-the-authenticated-user
+func (s *CodespacesService) DeleteUserSecret(ctx context.Context, name string) (*Response, error) {
+	u := fmt.Sprintf("user/codespaces/secrets/%v", name)
+	return s.deleteSecret(ctx, u)
+}
+
+// DeleteOrgSecret deletes an organization's codespace secret.
+//
+// Deletes an organization secret using the secret name. You must authenticate using an access token with the admin:org scope to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#delete-an-organization-secret
+func (s *CodespacesService) DeleteOrgSecret(ctx context.Context, org, name string) (*Response, error) {
+	u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v", org, name)
+	return s.deleteSecret(ctx, u)
+}
+
+// DeleteRepoSecret deletes a repository's codespace secret.
+//
+// Deletes a secret in a repository using the secret name. You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#delete-a-repository-secret
+func (s *CodespacesService) DeleteRepoSecret(ctx context.Context, owner, repo, name string) (*Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/codespaces/secrets/%v", owner, repo, name)
+	return s.deleteSecret(ctx, u)
+}
+
+func (s *CodespacesService) deleteSecret(ctx context.Context, url string) (*Response, error) {
+	req, err := s.client.NewRequest("DELETE", url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+
+	return resp, nil
+}
+
+// ListSelectedReposForUserSecret lists the repositories that have been granted the ability to use a user's codespace secret.
+//
+// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint.
+// GitHub Apps must have read access to the codespaces_user_secrets user permission and write access to the codespaces_secrets repository permission on all referenced repositories to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#list-selected-repositories-for-a-user-secret
+func (s *CodespacesService) ListSelectedReposForUserSecret(ctx context.Context, name string, opts *ListOptions) (*SelectedReposList, *Response, error) {
+	u := fmt.Sprintf("user/codespaces/secrets/%v/repositories", name)
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return s.listSelectedReposForSecret(ctx, u)
+}
+
+// ListSelectedReposForOrgSecret lists the repositories that have been granted the ability to use an organization's codespace secret.
+//
+// Lists all repositories that have been selected when the visibility for repository access to a secret is set to selected. You must authenticate using an access token with the admin:org scope to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#list-selected-repositories-for-an-organization-secret
+func (s *CodespacesService) ListSelectedReposForOrgSecret(ctx context.Context, org, name string, opts *ListOptions) (*SelectedReposList, *Response, error) {
+	u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v/repositories", org, name)
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return s.listSelectedReposForSecret(ctx, u)
+}
+
+func (s *CodespacesService) listSelectedReposForSecret(ctx context.Context, url string) (*SelectedReposList, *Response, error) {
+	req, err := s.client.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var repositories *SelectedReposList
+	resp, err := s.client.Do(ctx, req, &repositories)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return repositories, resp, nil
+}
+
+// SetSelectedReposForUserSecret sets the repositories that have been granted the ability to use a user's codespace secret.
+//
+// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint.
+// GitHub Apps must have write access to the codespaces_user_secrets user permission and write access to the codespaces_secrets repository permission on all referenced repositories to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#set-selected-repositories-for-a-user-secret
+func (s *CodespacesService) SetSelectedReposForUserSecret(ctx context.Context, name string, ids SelectedRepoIDs) (*Response, error) {
+	u := fmt.Sprintf("user/codespaces/secrets/%v/repositories", name)
+	return s.setSelectedRepoForSecret(ctx, u, ids)
+}
+
+// SetSelectedReposForOrgSecret sets the repositories that have been granted the ability to use an organization's codespace secret.
+//
+// Replaces all repositories for an organization secret when the visibility for repository access is set to selected. The visibility is set when you Create or update an organization secret. You must authenticate using an access token with the admin:org scope to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#set-selected-repositories-for-an-organization-secret
+func (s *CodespacesService) SetSelectedReposForOrgSecret(ctx context.Context, org, name string, ids SelectedRepoIDs) (*Response, error) {
+	u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v/repositories", org, name)
+	return s.setSelectedRepoForSecret(ctx, u, ids)
+}
+
+func (s *CodespacesService) setSelectedRepoForSecret(ctx context.Context, url string, ids SelectedRepoIDs) (*Response, error) {
+	type repoIDs struct {
+		SelectedIDs SelectedRepoIDs `json:"selected_repository_ids"`
+	}
+
+	req, err := s.client.NewRequest("PUT", url, repoIDs{SelectedIDs: ids})
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+
+	return resp, nil
+}
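A minimal sketch of the set operation above for a user-level secret. Note that it replaces the whole selected-repository set rather than appending to it; the secret name and repository IDs are placeholders.

package main

import (
	"context"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	// Replaces (does not append to) the set of repositories allowed to use the secret.
	ids := github.SelectedRepoIDs{1234567, 7654321} // hypothetical repository IDs
	if _, err := client.Codespaces.SetSelectedReposForUserSecret(ctx, "DEPLOY_TOKEN", ids); err != nil {
		log.Fatal(err)
	}
}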
+
+// AddSelectedRepoToUserSecret adds a repository to the list of repositories that have been granted the ability to use a user's codespace secret.
+//
+// Adds a repository to the selected repositories for a user's codespace secret. You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. GitHub Apps must have write access to the codespaces_user_secrets user permission and write access to the codespaces_secrets repository permission on the referenced repository to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#add-a-selected-repository-to-a-user-secret
+func (s *CodespacesService) AddSelectedRepoToUserSecret(ctx context.Context, name string, repo *Repository) (*Response, error) {
+	u := fmt.Sprintf("user/codespaces/secrets/%v/repositories/%v", name, *repo.ID)
+	return s.addSelectedRepoToSecret(ctx, u)
+}
+
+// AddSelectedRepoToOrgSecret adds a repository to the list of repositories that have been granted the ability to use an organization's codespace secret.
+//
+// Adds a repository to an organization secret when the visibility for repository access is set to selected. The visibility is set when you Create or update an organization secret. You must authenticate using an access token with the admin:org scope to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#add-selected-repository-to-an-organization-secret
+func (s *CodespacesService) AddSelectedRepoToOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) {
+	u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v/repositories/%v", org, name, *repo.ID)
+	return s.addSelectedRepoToSecret(ctx, u)
+}
+
+func (s *CodespacesService) addSelectedRepoToSecret(ctx context.Context, url string) (*Response, error) {
+	req, err := s.client.NewRequest("PUT", url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+
+	return resp, nil
+}
+
+// RemoveSelectedRepoFromUserSecret removes a repository from the list of repositories that have been granted the ability to use a user's codespace secret.
+//
+// Removes a repository from the selected repositories for a user's codespace secret. You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. GitHub Apps must have write access to the codespaces_user_secrets user permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#remove-a-selected-repository-from-a-user-secret
+func (s *CodespacesService) RemoveSelectedRepoFromUserSecret(ctx context.Context, name string, repo *Repository) (*Response, error) {
+	u := fmt.Sprintf("user/codespaces/secrets/%v/repositories/%v", name, *repo.ID)
+	return s.removeSelectedRepoFromSecret(ctx, u)
+}
+
+// RemoveSelectedRepoFromOrgSecret removes a repository from the list of repositories that have been granted the ability to use an organization's codespace secret.
+//
+// Removes a repository from an organization secret when the visibility for repository access is set to selected. The visibility is set when you Create or update an organization secret. You must authenticate using an access token with the admin:org scope to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#remove-selected-repository-from-an-organization-secret
+func (s *CodespacesService) RemoveSelectedRepoFromOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) {
+	u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v/repositories/%v", org, name, *repo.ID)
+	return s.removeSelectedRepoFromSecret(ctx, u)
+}
+
+func (s *CodespacesService) removeSelectedRepoFromSecret(ctx context.Context, url string) (*Response, error) {
+	req, err := s.client.NewRequest("DELETE", url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+
+	return resp, nil
+}
diff --git a/vendor/github.com/google/go-github/v50/github/dependabot.go b/vendor/github.com/google/go-github/v53/github/dependabot.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/dependabot.go
rename to vendor/github.com/google/go-github/v53/github/dependabot.go
diff --git a/vendor/github.com/google/go-github/v50/github/dependabot_alerts.go b/vendor/github.com/google/go-github/v53/github/dependabot_alerts.go
similarity index 98%
rename from vendor/github.com/google/go-github/v50/github/dependabot_alerts.go
rename to vendor/github.com/google/go-github/v53/github/dependabot_alerts.go
index e00aebc804..7b5d53b393 100644
--- a/vendor/github.com/google/go-github/v50/github/dependabot_alerts.go
+++ b/vendor/github.com/google/go-github/v53/github/dependabot_alerts.go
@@ -62,6 +62,7 @@ type DependabotAlert struct {
 	DismissedReason       *string                     `json:"dismissed_reason,omitempty"`
 	DismissedComment      *string                     `json:"dismissed_comment,omitempty"`
 	FixedAt               *Timestamp                  `json:"fixed_at,omitempty"`
+	Repository            *Repository                 `json:"repository,omitempty"`
 }
 
 // ListAlertsOptions specifies the optional parameters to the DependabotService.ListRepoAlerts
diff --git a/vendor/github.com/google/go-github/v50/github/dependabot_secrets.go b/vendor/github.com/google/go-github/v53/github/dependabot_secrets.go
similarity index 99%
rename from vendor/github.com/google/go-github/v50/github/dependabot_secrets.go
rename to vendor/github.com/google/go-github/v53/github/dependabot_secrets.go
index 8318cd812c..f87ab42c39 100644
--- a/vendor/github.com/google/go-github/v50/github/dependabot_secrets.go
+++ b/vendor/github.com/google/go-github/v53/github/dependabot_secrets.go
@@ -198,7 +198,7 @@ func (s *DependabotService) ListSelectedReposForOrgSecret(ctx context.Context, o
 }
 
 // DependabotSecretsSelectedRepoIDs are the repository IDs that have access to the dependabot secrets.
-type DependabotSecretsSelectedRepoIDs []string
+type DependabotSecretsSelectedRepoIDs []int64
 
 // SetSelectedReposForOrgSecret sets the repositories that have access to a Dependabot secret.
 //
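The type change above (from []string to []int64) is a small breaking change for callers of the Dependabot selected-repositories API. A hedged sketch of the adjusted call, assuming the SetSelectedReposForOrgSecret signature takes this ID slice as in prior releases; the org, secret name, and IDs are placeholders.

package main

import (
	"context"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	// With v53 the repository IDs are numeric; passing strings no longer compiles.
	ids := github.DependabotSecretsSelectedRepoIDs{1234567, 7654321} // hypothetical repository IDs
	if _, err := client.Dependabot.SetSelectedReposForOrgSecret(ctx, "my-org", "MY_SECRET", ids); err != nil {
		log.Fatal(err)
	}
}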
diff --git a/vendor/github.com/google/go-github/v50/github/doc.go b/vendor/github.com/google/go-github/v53/github/doc.go
similarity index 99%
rename from vendor/github.com/google/go-github/v50/github/doc.go
rename to vendor/github.com/google/go-github/v53/github/doc.go
index 9adfea8fe9..af2847111a 100644
--- a/vendor/github.com/google/go-github/v50/github/doc.go
+++ b/vendor/github.com/google/go-github/v53/github/doc.go
@@ -8,7 +8,7 @@ Package github provides a client for using the GitHub API.
 
 Usage:
 
-	import "github.com/google/go-github/v50/github"	// with go modules enabled (GO111MODULE=on or outside GOPATH)
+	import "github.com/google/go-github/v53/github"	// with go modules enabled (GO111MODULE=on or outside GOPATH)
 	import "github.com/google/go-github/github"     // with go modules disabled
 
 Construct a new GitHub client, then use the various services on the client to
diff --git a/vendor/github.com/google/go-github/v50/github/enterprise.go b/vendor/github.com/google/go-github/v53/github/enterprise.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/enterprise.go
rename to vendor/github.com/google/go-github/v53/github/enterprise.go
diff --git a/vendor/github.com/google/go-github/v50/github/enterprise_actions_runners.go b/vendor/github.com/google/go-github/v53/github/enterprise_actions_runners.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/enterprise_actions_runners.go
rename to vendor/github.com/google/go-github/v53/github/enterprise_actions_runners.go
diff --git a/vendor/github.com/google/go-github/v50/github/enterprise_audit_log.go b/vendor/github.com/google/go-github/v53/github/enterprise_audit_log.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/enterprise_audit_log.go
rename to vendor/github.com/google/go-github/v53/github/enterprise_audit_log.go
diff --git a/vendor/github.com/google/go-github/v50/github/enterprise_code_security_and_analysis.go b/vendor/github.com/google/go-github/v53/github/enterprise_code_security_and_analysis.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/enterprise_code_security_and_analysis.go
rename to vendor/github.com/google/go-github/v53/github/enterprise_code_security_and_analysis.go
diff --git a/vendor/github.com/google/go-github/v50/github/event.go b/vendor/github.com/google/go-github/v53/github/event.go
similarity index 96%
rename from vendor/github.com/google/go-github/v50/github/event.go
rename to vendor/github.com/google/go-github/v53/github/event.go
index 1aabf13bad..4ee25603a8 100644
--- a/vendor/github.com/google/go-github/v50/github/event.go
+++ b/vendor/github.com/google/go-github/v53/github/event.go
@@ -49,6 +49,8 @@ func (e *Event) ParsePayload() (payload interface{}, err error) {
 		payload = &DeployKeyEvent{}
 	case "DeploymentEvent":
 		payload = &DeploymentEvent{}
+	case "DeploymentProtectionRuleEvent":
+		payload = &DeploymentProtectionRuleEvent{}
 	case "DeploymentStatusEvent":
 		payload = &DeploymentStatusEvent{}
 	case "DiscussionEvent":
@@ -125,6 +127,8 @@ func (e *Event) ParsePayload() (payload interface{}, err error) {
 		payload = &RepositoryVulnerabilityAlertEvent{}
 	case "SecretScanningAlertEvent":
 		payload = &SecretScanningAlertEvent{}
+	case "SecurityAdvisoryEvent":
+		payload = &SecurityAdvisoryEvent{}
 	case "StarEvent":
 		payload = &StarEvent{}
 	case "StatusEvent":
diff --git a/vendor/github.com/google/go-github/v50/github/event_types.go b/vendor/github.com/google/go-github/v53/github/event_types.go
similarity index 97%
rename from vendor/github.com/google/go-github/v50/github/event_types.go
rename to vendor/github.com/google/go-github/v53/github/event_types.go
index 598d98d48c..6a13b286bd 100644
--- a/vendor/github.com/google/go-github/v50/github/event_types.go
+++ b/vendor/github.com/google/go-github/v53/github/event_types.go
@@ -173,6 +173,25 @@ type DeploymentEvent struct {
 	Installation *Installation `json:"installation,omitempty"`
 }
 
+// DeploymentProtectionRuleEvent represents a deployment protection rule event.
+// The Webhook event name is "deployment_protection_rule".
+//
+// GitHub API docs: https://docs.github.com/webhooks-and-events/webhooks/webhook-events-and-payloads#deployment_protection_rule
+type DeploymentProtectionRuleEvent struct {
+	Action      *string `json:"action,omitempty"`
+	Environment *string `json:"environment,omitempty"`
+	Event       *string `json:"event,omitempty"`
+
+	// The URL GitHub provides for a third party to use in order to pass or fail a deployment gate.
+	DeploymentCallbackURL *string        `json:"deployment_callback_url,omitempty"`
+	Deployment            *Deployment    `json:"deployment,omitempty"`
+	Repo                  *Repository    `json:"repository,omitempty"`
+	Organization          *Organization  `json:"organization,omitempty"`
+	PullRequests          []*PullRequest `json:"pull_requests,omitempty"`
+	Sender                *User          `json:"sender,omitempty"`
+	Installation          *Installation  `json:"installation,omitempty"`
+}
+
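A hedged sketch of a webhook handler that recognizes the new deployment_protection_rule payload. It assumes a webhook secret in WEBHOOK_SECRET and that go-github's ValidatePayload/ParseWebHook helpers map this event name to the struct above in v53; the logging stands in for whatever pass/fail callback a real gate would make.

package main

import (
	"log"
	"net/http"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	secret := []byte(os.Getenv("WEBHOOK_SECRET"))
	http.HandleFunc("/webhook", func(w http.ResponseWriter, r *http.Request) {
		payload, err := github.ValidatePayload(r, secret)
		if err != nil {
			http.Error(w, "bad signature", http.StatusForbidden)
			return
		}
		event, err := github.ParseWebHook(github.WebHookType(r), payload)
		if err != nil {
			http.Error(w, "unparseable payload", http.StatusBadRequest)
			return
		}
		if e, ok := event.(*github.DeploymentProtectionRuleEvent); ok {
			// The callback URL is where a third party reports the gate decision.
			log.Printf("gate for env %q, callback %s", e.GetEnvironment(), e.GetDeploymentCallbackURL())
		}
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}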
 // DeploymentStatusEvent represents a deployment status.
 // The Webhook event name is "deployment_status".
 //
@@ -332,6 +351,7 @@ type EditChange struct {
 	Body  *EditBody  `json:"body,omitempty"`
 	Base  *EditBase  `json:"base,omitempty"`
 	Repo  *EditRepo  `json:"repository,omitempty"`
+	Owner *EditOwner `json:"owner,omitempty"`
 }
 
 // EditTitle represents a pull-request title change.
@@ -360,6 +380,17 @@ type EditRepo struct {
 	Name *RepoName `json:"name,omitempty"`
 }
 
+// EditOwner represents a change of repository ownership.
+type EditOwner struct {
+	OwnerInfo *OwnerInfo `json:"from,omitempty"`
+}
+
+// OwnerInfo represents the account info of the owner of the repo (could be User or Organization but both are User structs).
+type OwnerInfo struct {
+	User *User `json:"user,omitempty"`
+	Org  *User `json:"organization,omitempty"`
+}
+
 // RepoName represents a change of repository name.
 type RepoName struct {
 	From *string `json:"from,omitempty"`
@@ -457,7 +488,7 @@ type InstallationEvent struct {
 	Repositories []*Repository `json:"repositories,omitempty"`
 	Sender       *User         `json:"sender,omitempty"`
 	Installation *Installation `json:"installation,omitempty"`
-	// TODO key "requester" is not covered
+	Requester    *User         `json:"requester,omitempty"`
 }
 
 // InstallationRepositoriesEvent is triggered when a repository is added or
@@ -1062,6 +1093,7 @@ type PushEventRepository struct {
 	SSHURL          *string    `json:"ssh_url,omitempty"`
 	CloneURL        *string    `json:"clone_url,omitempty"`
 	SVNURL          *string    `json:"svn_url,omitempty"`
+	Topics          []string   `json:"topics,omitempty"`
 }
 
 // PushEventRepoOwner is a basic representation of user/org in a PushEvent payload.
diff --git a/vendor/github.com/google/go-github/v50/github/gists.go b/vendor/github.com/google/go-github/v53/github/gists.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/gists.go
rename to vendor/github.com/google/go-github/v53/github/gists.go
diff --git a/vendor/github.com/google/go-github/v50/github/gists_comments.go b/vendor/github.com/google/go-github/v53/github/gists_comments.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/gists_comments.go
rename to vendor/github.com/google/go-github/v53/github/gists_comments.go
diff --git a/vendor/github.com/google/go-github/v50/github/git.go b/vendor/github.com/google/go-github/v53/github/git.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/git.go
rename to vendor/github.com/google/go-github/v53/github/git.go
diff --git a/vendor/github.com/google/go-github/v50/github/git_blobs.go b/vendor/github.com/google/go-github/v53/github/git_blobs.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/git_blobs.go
rename to vendor/github.com/google/go-github/v53/github/git_blobs.go
diff --git a/vendor/github.com/google/go-github/v50/github/git_commits.go b/vendor/github.com/google/go-github/v53/github/git_commits.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/git_commits.go
rename to vendor/github.com/google/go-github/v53/github/git_commits.go
diff --git a/vendor/github.com/google/go-github/v50/github/git_refs.go b/vendor/github.com/google/go-github/v53/github/git_refs.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/git_refs.go
rename to vendor/github.com/google/go-github/v53/github/git_refs.go
diff --git a/vendor/github.com/google/go-github/v50/github/git_tags.go b/vendor/github.com/google/go-github/v53/github/git_tags.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/git_tags.go
rename to vendor/github.com/google/go-github/v53/github/git_tags.go
diff --git a/vendor/github.com/google/go-github/v50/github/git_trees.go b/vendor/github.com/google/go-github/v53/github/git_trees.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/git_trees.go
rename to vendor/github.com/google/go-github/v53/github/git_trees.go
diff --git a/vendor/github.com/google/go-github/v50/github/github-accessors.go b/vendor/github.com/google/go-github/v53/github/github-accessors.go
similarity index 93%
rename from vendor/github.com/google/go-github/v50/github/github-accessors.go
rename to vendor/github.com/google/go-github/v53/github/github-accessors.go
index beeb2defce..8acb72b042 100644
--- a/vendor/github.com/google/go-github/v50/github/github-accessors.go
+++ b/vendor/github.com/google/go-github/v53/github/github-accessors.go
@@ -214,6 +214,14 @@ func (a *ActionsVariable) GetVisibility() string {
 	return *a.Visibility
 }
 
+// GetCountryCode returns the CountryCode field if it's non-nil, zero value otherwise.
+func (a *ActorLocation) GetCountryCode() string {
+	if a == nil || a.CountryCode == nil {
+		return ""
+	}
+	return *a.CountryCode
+}
+
 // GetFrom returns the From field if it's non-nil, zero value otherwise.
 func (a *AdminEnforcedChanges) GetFrom() bool {
 	if a == nil || a.From == nil {
@@ -678,6 +686,14 @@ func (a *App) GetID() int64 {
 	return *a.ID
 }
 
+// GetInstallationsCount returns the InstallationsCount field if it's non-nil, zero value otherwise.
+func (a *App) GetInstallationsCount() int {
+	if a == nil || a.InstallationsCount == nil {
+		return 0
+	}
+	return *a.InstallationsCount
+}
+
 // GetName returns the Name field if it's non-nil, zero value otherwise.
 func (a *App) GetName() string {
 	if a == nil || a.Name == nil {
@@ -1030,6 +1046,22 @@ func (a *AuditEntry) GetActor() string {
 	return *a.Actor
 }
 
+// GetActorIP returns the ActorIP field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetActorIP() string {
+	if a == nil || a.ActorIP == nil {
+		return ""
+	}
+	return *a.ActorIP
+}
+
+// GetActorLocation returns the ActorLocation field.
+func (a *AuditEntry) GetActorLocation() *ActorLocation {
+	if a == nil {
+		return nil
+	}
+	return a.ActorLocation
+}
+
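The generated accessors above are nil-safe on both the receiver and the field, so audit-log consumers can read the new optional fields without explicit nil checks. A minimal sketch, assuming the entries were fetched elsewhere (for example via the organization audit log API):

package auditdemo

import (
	"fmt"

	"github.com/google/go-github/v53/github"
)

// Summarize prints the new token- and location-related audit fields.
// Each Get* accessor returns a zero value when the entry or field is nil.
func Summarize(entries []*github.AuditEntry) {
	for _, e := range entries {
		fmt.Printf("%s by %s from %s (%s) token=%s\n",
			e.GetAction(),
			e.GetActor(),
			e.GetActorIP(),
			e.GetActorLocation().GetCountryCode(),
			e.GetHashedToken(),
		)
	}
}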
 // GetBlockedUser returns the BlockedUser field if it's non-nil, zero value otherwise.
 func (a *AuditEntry) GetBlockedUser() string {
 	if a == nil || a.BlockedUser == nil {
@@ -1158,6 +1190,14 @@ func (a *AuditEntry) GetFingerprint() string {
 	return *a.Fingerprint
 }
 
+// GetHashedToken returns the HashedToken field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetHashedToken() string {
+	if a == nil || a.HashedToken == nil {
+		return ""
+	}
+	return *a.HashedToken
+}
+
 // GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise.
 func (a *AuditEntry) GetHeadBranch() string {
 	if a == nil || a.HeadBranch == nil {
@@ -1198,6 +1238,14 @@ func (a *AuditEntry) GetJobName() string {
 	return *a.JobName
 }
 
+// GetJobWorkflowRef returns the JobWorkflowRef field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetJobWorkflowRef() string {
+	if a == nil || a.JobWorkflowRef == nil {
+		return ""
+	}
+	return *a.JobWorkflowRef
+}
+
 // GetLimitedAvailability returns the LimitedAvailability field if it's non-nil, zero value otherwise.
 func (a *AuditEntry) GetLimitedAvailability() bool {
 	if a == nil || a.LimitedAvailability == nil {
@@ -1222,6 +1270,14 @@ func (a *AuditEntry) GetName() string {
 	return *a.Name
 }
 
+// GetOAuthApplicationID returns the OAuthApplicationID field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetOAuthApplicationID() int64 {
+	if a == nil || a.OAuthApplicationID == nil {
+		return 0
+	}
+	return *a.OAuthApplicationID
+}
+
 // GetOldPermission returns the OldPermission field if it's non-nil, zero value otherwise.
 func (a *AuditEntry) GetOldPermission() string {
 	if a == nil || a.OldPermission == nil {
@@ -1246,6 +1302,14 @@ func (a *AuditEntry) GetOpenSSHPublicKey() string {
 	return *a.OpenSSHPublicKey
 }
 
+// GetOperationType returns the OperationType field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetOperationType() string {
+	if a == nil || a.OperationType == nil {
+		return ""
+	}
+	return *a.OperationType
+}
+
 // GetOrg returns the Org field if it's non-nil, zero value otherwise.
 func (a *AuditEntry) GetOrg() string {
 	if a == nil || a.Org == nil {
@@ -1254,6 +1318,14 @@ func (a *AuditEntry) GetOrg() string {
 	return *a.Org
 }
 
+// GetOrgID returns the OrgID field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetOrgID() int64 {
+	if a == nil || a.OrgID == nil {
+		return 0
+	}
+	return *a.OrgID
+}
+
 // GetPermission returns the Permission field if it's non-nil, zero value otherwise.
 func (a *AuditEntry) GetPermission() string {
 	if a == nil || a.Permission == nil {
@@ -1270,6 +1342,38 @@ func (a *AuditEntry) GetPreviousVisibility() string {
 	return *a.PreviousVisibility
 }
 
+// GetProgrammaticAccessType returns the ProgrammaticAccessType field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetProgrammaticAccessType() string {
+	if a == nil || a.ProgrammaticAccessType == nil {
+		return ""
+	}
+	return *a.ProgrammaticAccessType
+}
+
+// GetPullRequestID returns the PullRequestID field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetPullRequestID() int64 {
+	if a == nil || a.PullRequestID == nil {
+		return 0
+	}
+	return *a.PullRequestID
+}
+
+// GetPullRequestTitle returns the PullRequestTitle field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetPullRequestTitle() string {
+	if a == nil || a.PullRequestTitle == nil {
+		return ""
+	}
+	return *a.PullRequestTitle
+}
+
+// GetPullRequestURL returns the PullRequestURL field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetPullRequestURL() string {
+	if a == nil || a.PullRequestURL == nil {
+		return ""
+	}
+	return *a.PullRequestURL
+}
+
 // GetReadOnly returns the ReadOnly field if it's non-nil, zero value otherwise.
 func (a *AuditEntry) GetReadOnly() string {
 	if a == nil || a.ReadOnly == nil {
@@ -1342,6 +1446,14 @@ func (a *AuditEntry) GetRunnerName() string {
 	return *a.RunnerName
 }
 
+// GetRunNumber returns the RunNumber field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetRunNumber() int64 {
+	if a == nil || a.RunNumber == nil {
+		return 0
+	}
+	return *a.RunNumber
+}
+
 // GetSourceVersion returns the SourceVersion field if it's non-nil, zero value otherwise.
 func (a *AuditEntry) GetSourceVersion() string {
 	if a == nil || a.SourceVersion == nil {
@@ -1390,6 +1502,30 @@ func (a *AuditEntry) GetTimestamp() Timestamp {
 	return *a.Timestamp
 }
 
+// GetTokenID returns the TokenID field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetTokenID() int64 {
+	if a == nil || a.TokenID == nil {
+		return 0
+	}
+	return *a.TokenID
+}
+
+// GetTokenScopes returns the TokenScopes field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetTokenScopes() string {
+	if a == nil || a.TokenScopes == nil {
+		return ""
+	}
+	return *a.TokenScopes
+}
+
+// GetTopic returns the Topic field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetTopic() string {
+	if a == nil || a.Topic == nil {
+		return ""
+	}
+	return *a.Topic
+}
+
 // GetTransportProtocol returns the TransportProtocol field if it's non-nil, zero value otherwise.
 func (a *AuditEntry) GetTransportProtocol() int {
 	if a == nil || a.TransportProtocol == nil {
@@ -1422,6 +1558,14 @@ func (a *AuditEntry) GetUser() string {
 	return *a.User
 }
 
+// GetUserAgent returns the UserAgent field if it's non-nil, zero value otherwise.
+func (a *AuditEntry) GetUserAgent() string {
+	if a == nil || a.UserAgent == nil {
+		return ""
+	}
+	return *a.UserAgent
+}
+
 // GetVisibility returns the Visibility field if it's non-nil, zero value otherwise.
 func (a *AuditEntry) GetVisibility() string {
 	if a == nil || a.Visibility == nil {
@@ -2078,6 +2222,22 @@ func (b *BranchProtectionRuleEvent) GetSender() *User {
 	return b.Sender
 }
 
+// GetActorID returns the ActorID field if it's non-nil, zero value otherwise.
+func (b *BypassActor) GetActorID() int64 {
+	if b == nil || b.ActorID == nil {
+		return 0
+	}
+	return *b.ActorID
+}
+
+// GetActorType returns the ActorType field if it's non-nil, zero value otherwise.
+func (b *BypassActor) GetActorType() string {
+	if b == nil || b.ActorType == nil {
+		return ""
+	}
+	return *b.ActorType
+}
+
 // GetApp returns the App field.
 func (c *CheckRun) GetApp() *App {
 	if c == nil {
@@ -2742,6 +2902,342 @@ func (c *CodeSearchResult) GetTotal() int {
 	return *c.Total
 }
 
+// GetBillableOwner returns the BillableOwner field.
+func (c *Codespace) GetBillableOwner() *User {
+	if c == nil {
+		return nil
+	}
+	return c.BillableOwner
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetCreatedAt() Timestamp {
+	if c == nil || c.CreatedAt == nil {
+		return Timestamp{}
+	}
+	return *c.CreatedAt
+}
+
+// GetDevcontainerPath returns the DevcontainerPath field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetDevcontainerPath() string {
+	if c == nil || c.DevcontainerPath == nil {
+		return ""
+	}
+	return *c.DevcontainerPath
+}
+
+// GetDisplayName returns the DisplayName field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetDisplayName() string {
+	if c == nil || c.DisplayName == nil {
+		return ""
+	}
+	return *c.DisplayName
+}
+
+// GetEnvironmentID returns the EnvironmentID field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetEnvironmentID() string {
+	if c == nil || c.EnvironmentID == nil {
+		return ""
+	}
+	return *c.EnvironmentID
+}
+
+// GetGitStatus returns the GitStatus field.
+func (c *Codespace) GetGitStatus() *CodespacesGitStatus {
+	if c == nil {
+		return nil
+	}
+	return c.GitStatus
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetID() int64 {
+	if c == nil || c.ID == nil {
+		return 0
+	}
+	return *c.ID
+}
+
+// GetIdleTimeoutMinutes returns the IdleTimeoutMinutes field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetIdleTimeoutMinutes() int {
+	if c == nil || c.IdleTimeoutMinutes == nil {
+		return 0
+	}
+	return *c.IdleTimeoutMinutes
+}
+
+// GetIdleTimeoutNotice returns the IdleTimeoutNotice field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetIdleTimeoutNotice() string {
+	if c == nil || c.IdleTimeoutNotice == nil {
+		return ""
+	}
+	return *c.IdleTimeoutNotice
+}
+
+// GetLastKnownStopNotice returns the LastKnownStopNotice field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetLastKnownStopNotice() string {
+	if c == nil || c.LastKnownStopNotice == nil {
+		return ""
+	}
+	return *c.LastKnownStopNotice
+}
+
+// GetLastUsedAt returns the LastUsedAt field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetLastUsedAt() Timestamp {
+	if c == nil || c.LastUsedAt == nil {
+		return Timestamp{}
+	}
+	return *c.LastUsedAt
+}
+
+// GetLocation returns the Location field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetLocation() string {
+	if c == nil || c.Location == nil {
+		return ""
+	}
+	return *c.Location
+}
+
+// GetMachine returns the Machine field.
+func (c *Codespace) GetMachine() *CodespacesMachine {
+	if c == nil {
+		return nil
+	}
+	return c.Machine
+}
+
+// GetMachinesURL returns the MachinesURL field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetMachinesURL() string {
+	if c == nil || c.MachinesURL == nil {
+		return ""
+	}
+	return *c.MachinesURL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetName() string {
+	if c == nil || c.Name == nil {
+		return ""
+	}
+	return *c.Name
+}
+
+// GetOwner returns the Owner field.
+func (c *Codespace) GetOwner() *User {
+	if c == nil {
+		return nil
+	}
+	return c.Owner
+}
+
+// GetPendingOperation returns the PendingOperation field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetPendingOperation() bool {
+	if c == nil || c.PendingOperation == nil {
+		return false
+	}
+	return *c.PendingOperation
+}
+
+// GetPendingOperationDisabledReason returns the PendingOperationDisabledReason field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetPendingOperationDisabledReason() string {
+	if c == nil || c.PendingOperationDisabledReason == nil {
+		return ""
+	}
+	return *c.PendingOperationDisabledReason
+}
+
+// GetPrebuild returns the Prebuild field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetPrebuild() bool {
+	if c == nil || c.Prebuild == nil {
+		return false
+	}
+	return *c.Prebuild
+}
+
+// GetPullsURL returns the PullsURL field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetPullsURL() string {
+	if c == nil || c.PullsURL == nil {
+		return ""
+	}
+	return *c.PullsURL
+}
+
+// GetRepository returns the Repository field.
+func (c *Codespace) GetRepository() *Repository {
+	if c == nil {
+		return nil
+	}
+	return c.Repository
+}
+
+// GetRetentionExpiresAt returns the RetentionExpiresAt field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetRetentionExpiresAt() Timestamp {
+	if c == nil || c.RetentionExpiresAt == nil {
+		return Timestamp{}
+	}
+	return *c.RetentionExpiresAt
+}
+
+// GetRetentionPeriodMinutes returns the RetentionPeriodMinutes field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetRetentionPeriodMinutes() int {
+	if c == nil || c.RetentionPeriodMinutes == nil {
+		return 0
+	}
+	return *c.RetentionPeriodMinutes
+}
+
+// GetRuntimeConstraints returns the RuntimeConstraints field.
+func (c *Codespace) GetRuntimeConstraints() *CodespacesRuntimeConstraints {
+	if c == nil {
+		return nil
+	}
+	return c.RuntimeConstraints
+}
+
+// GetStartURL returns the StartURL field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetStartURL() string {
+	if c == nil || c.StartURL == nil {
+		return ""
+	}
+	return *c.StartURL
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetState() string {
+	if c == nil || c.State == nil {
+		return ""
+	}
+	return *c.State
+}
+
+// GetStopURL returns the StopURL field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetStopURL() string {
+	if c == nil || c.StopURL == nil {
+		return ""
+	}
+	return *c.StopURL
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetUpdatedAt() Timestamp {
+	if c == nil || c.UpdatedAt == nil {
+		return Timestamp{}
+	}
+	return *c.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetURL() string {
+	if c == nil || c.URL == nil {
+		return ""
+	}
+	return *c.URL
+}
+
+// GetWebURL returns the WebURL field if it's non-nil, zero value otherwise.
+func (c *Codespace) GetWebURL() string {
+	if c == nil || c.WebURL == nil {
+		return ""
+	}
+	return *c.WebURL
+}
+
+// GetAhead returns the Ahead field if it's non-nil, zero value otherwise.
+func (c *CodespacesGitStatus) GetAhead() int {
+	if c == nil || c.Ahead == nil {
+		return 0
+	}
+	return *c.Ahead
+}
+
+// GetBehind returns the Behind field if it's non-nil, zero value otherwise.
+func (c *CodespacesGitStatus) GetBehind() int {
+	if c == nil || c.Behind == nil {
+		return 0
+	}
+	return *c.Behind
+}
+
+// GetHasUncommittedChanges returns the HasUncommittedChanges field if it's non-nil, zero value otherwise.
+func (c *CodespacesGitStatus) GetHasUncommittedChanges() bool {
+	if c == nil || c.HasUncommittedChanges == nil {
+		return false
+	}
+	return *c.HasUncommittedChanges
+}
+
+// GetHasUnpushedChanges returns the HasUnpushedChanges field if it's non-nil, zero value otherwise.
+func (c *CodespacesGitStatus) GetHasUnpushedChanges() bool {
+	if c == nil || c.HasUnpushedChanges == nil {
+		return false
+	}
+	return *c.HasUnpushedChanges
+}
+
+// GetRef returns the Ref field if it's non-nil, zero value otherwise.
+func (c *CodespacesGitStatus) GetRef() string {
+	if c == nil || c.Ref == nil {
+		return ""
+	}
+	return *c.Ref
+}
+
+// GetCPUs returns the CPUs field if it's non-nil, zero value otherwise.
+func (c *CodespacesMachine) GetCPUs() int {
+	if c == nil || c.CPUs == nil {
+		return 0
+	}
+	return *c.CPUs
+}
+
+// GetDisplayName returns the DisplayName field if it's non-nil, zero value otherwise.
+func (c *CodespacesMachine) GetDisplayName() string {
+	if c == nil || c.DisplayName == nil {
+		return ""
+	}
+	return *c.DisplayName
+}
+
+// GetMemoryInBytes returns the MemoryInBytes field if it's non-nil, zero value otherwise.
+func (c *CodespacesMachine) GetMemoryInBytes() int64 {
+	if c == nil || c.MemoryInBytes == nil {
+		return 0
+	}
+	return *c.MemoryInBytes
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (c *CodespacesMachine) GetName() string {
+	if c == nil || c.Name == nil {
+		return ""
+	}
+	return *c.Name
+}
+
+// GetOperatingSystem returns the OperatingSystem field if it's non-nil, zero value otherwise.
+func (c *CodespacesMachine) GetOperatingSystem() string {
+	if c == nil || c.OperatingSystem == nil {
+		return ""
+	}
+	return *c.OperatingSystem
+}
+
+// GetPrebuildAvailability returns the PrebuildAvailability field if it's non-nil, zero value otherwise.
+func (c *CodespacesMachine) GetPrebuildAvailability() string {
+	if c == nil || c.PrebuildAvailability == nil {
+		return ""
+	}
+	return *c.PrebuildAvailability
+}
+
+// GetStorageInBytes returns the StorageInBytes field if it's non-nil, zero value otherwise.
+func (c *CodespacesMachine) GetStorageInBytes() int64 {
+	if c == nil || c.StorageInBytes == nil {
+		return 0
+	}
+	return *c.StorageInBytes
+}
+
 // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
 func (c *CollaboratorInvitation) GetCreatedAt() Timestamp {
 	if c == nil || c.CreatedAt == nil {
@@ -3862,6 +4358,86 @@ func (c *CreateCheckSuiteOptions) GetHeadBranch() string {
 	return *c.HeadBranch
 }
 
+// GetClientIP returns the ClientIP field if it's non-nil, zero value otherwise.
+func (c *CreateCodespaceOptions) GetClientIP() string {
+	if c == nil || c.ClientIP == nil {
+		return ""
+	}
+	return *c.ClientIP
+}
+
+// GetDevcontainerPath returns the DevcontainerPath field if it's non-nil, zero value otherwise.
+func (c *CreateCodespaceOptions) GetDevcontainerPath() string {
+	if c == nil || c.DevcontainerPath == nil {
+		return ""
+	}
+	return *c.DevcontainerPath
+}
+
+// GetDisplayName returns the DisplayName field if it's non-nil, zero value otherwise.
+func (c *CreateCodespaceOptions) GetDisplayName() string {
+	if c == nil || c.DisplayName == nil {
+		return ""
+	}
+	return *c.DisplayName
+}
+
+// GetGeo returns the Geo field if it's non-nil, zero value otherwise.
+func (c *CreateCodespaceOptions) GetGeo() string {
+	if c == nil || c.Geo == nil {
+		return ""
+	}
+	return *c.Geo
+}
+
+// GetIdleTimeoutMinutes returns the IdleTimeoutMinutes field if it's non-nil, zero value otherwise.
+func (c *CreateCodespaceOptions) GetIdleTimeoutMinutes() int {
+	if c == nil || c.IdleTimeoutMinutes == nil {
+		return 0
+	}
+	return *c.IdleTimeoutMinutes
+}
+
+// GetMachine returns the Machine field if it's non-nil, zero value otherwise.
+func (c *CreateCodespaceOptions) GetMachine() string {
+	if c == nil || c.Machine == nil {
+		return ""
+	}
+	return *c.Machine
+}
+
+// GetMultiRepoPermissionsOptOut returns the MultiRepoPermissionsOptOut field if it's non-nil, zero value otherwise.
+func (c *CreateCodespaceOptions) GetMultiRepoPermissionsOptOut() bool {
+	if c == nil || c.MultiRepoPermissionsOptOut == nil {
+		return false
+	}
+	return *c.MultiRepoPermissionsOptOut
+}
+
+// GetRef returns the Ref field if it's non-nil, zero value otherwise.
+func (c *CreateCodespaceOptions) GetRef() string {
+	if c == nil || c.Ref == nil {
+		return ""
+	}
+	return *c.Ref
+}
+
+// GetRetentionPeriodMinutes returns the RetentionPeriodMinutes field if it's non-nil, zero value otherwise.
+func (c *CreateCodespaceOptions) GetRetentionPeriodMinutes() int {
+	if c == nil || c.RetentionPeriodMinutes == nil {
+		return 0
+	}
+	return *c.RetentionPeriodMinutes
+}
+
+// GetWorkingDirectory returns the WorkingDirectory field if it's non-nil, zero value otherwise.
+func (c *CreateCodespaceOptions) GetWorkingDirectory() string {
+	if c == nil || c.WorkingDirectory == nil {
+		return ""
+	}
+	return *c.WorkingDirectory
+}
+
 // GetDescription returns the Description field if it's non-nil, zero value otherwise.
 func (c *CreateEvent) GetDescription() string {
 	if c == nil || c.Description == nil {
@@ -4022,6 +4598,14 @@ func (c *CreateRunnerGroupRequest) GetVisibility() string {
 	return *c.Visibility
 }
 
+// GetCanAdminsBypass returns the CanAdminsBypass field if it's non-nil, zero value otherwise.
+func (c *CreateUpdateEnvironment) GetCanAdminsBypass() bool {
+	if c == nil || c.CanAdminsBypass == nil {
+		return false
+	}
+	return *c.CanAdminsBypass
+}
+
 // GetDeploymentBranchPolicy returns the DeploymentBranchPolicy field.
 func (c *CreateUpdateEnvironment) GetDeploymentBranchPolicy() *BranchPolicy {
 	if c == nil {
@@ -4038,6 +4622,38 @@ func (c *CreateUpdateEnvironment) GetWaitTimer() int {
 	return *c.WaitTimer
 }
 
+// GetRepositoryID returns the RepositoryID field if it's non-nil, zero value otherwise.
+func (c *CreateUpdateRequiredWorkflowOptions) GetRepositoryID() int64 {
+	if c == nil || c.RepositoryID == nil {
+		return 0
+	}
+	return *c.RepositoryID
+}
+
+// GetScope returns the Scope field if it's non-nil, zero value otherwise.
+func (c *CreateUpdateRequiredWorkflowOptions) GetScope() string {
+	if c == nil || c.Scope == nil {
+		return ""
+	}
+	return *c.Scope
+}
+
+// GetSelectedRepositoryIDs returns the SelectedRepositoryIDs field.
+func (c *CreateUpdateRequiredWorkflowOptions) GetSelectedRepositoryIDs() *SelectedRepoIDs {
+	if c == nil {
+		return nil
+	}
+	return c.SelectedRepositoryIDs
+}
+
+// GetWorkflowFilePath returns the WorkflowFilePath field if it's non-nil, zero value otherwise.
+func (c *CreateUpdateRequiredWorkflowOptions) GetWorkflowFilePath() string {
+	if c == nil || c.WorkflowFilePath == nil {
+		return ""
+	}
+	return *c.WorkflowFilePath
+}
+
 // GetBody returns the Body field if it's non-nil, zero value otherwise.
 func (c *CreateUserProjectOptions) GetBody() string {
 	if c == nil || c.Body == nil {
@@ -4078,6 +4694,30 @@ func (c *CustomRepoRoles) GetName() string {
 	return *c.Name
 }
 
+// GetQuerySuite returns the QuerySuite field if it's non-nil, zero value otherwise.
+func (d *DefaultSetupConfiguration) GetQuerySuite() string {
+	if d == nil || d.QuerySuite == nil {
+		return ""
+	}
+	return *d.QuerySuite
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (d *DefaultSetupConfiguration) GetState() string {
+	if d == nil || d.State == nil {
+		return ""
+	}
+	return *d.State
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (d *DefaultSetupConfiguration) GetUpdatedAt() Timestamp {
+	if d == nil || d.UpdatedAt == nil {
+		return Timestamp{}
+	}
+	return *d.UpdatedAt
+}
+
 // GetInstallation returns the Installation field.
 func (d *DeleteEvent) GetInstallation() *Installation {
 	if d == nil {
@@ -4198,6 +4838,14 @@ func (d *DependabotAlert) GetNumber() int {
 	return *d.Number
 }
 
+// GetRepository returns the Repository field.
+func (d *DependabotAlert) GetRepository() *Repository {
+	if d == nil {
+		return nil
+	}
+	return d.Repository
+}
+
 // GetSecurityAdvisory returns the SecurityAdvisory field.
 func (d *DependabotAlert) GetSecurityAdvisory() *DependabotSecurityAdvisory {
 	if d == nil {
@@ -4523,27 +5171,99 @@ func (d *DeploymentBranchPolicyResponse) GetTotalCount() int {
 	if d == nil || d.TotalCount == nil {
 		return 0
 	}
-	return *d.TotalCount
+	return *d.TotalCount
+}
+
+// GetDeployment returns the Deployment field.
+func (d *DeploymentEvent) GetDeployment() *Deployment {
+	if d == nil {
+		return nil
+	}
+	return d.Deployment
+}
+
+// GetInstallation returns the Installation field.
+func (d *DeploymentEvent) GetInstallation() *Installation {
+	if d == nil {
+		return nil
+	}
+	return d.Installation
+}
+
+// GetRepo returns the Repo field.
+func (d *DeploymentEvent) GetRepo() *Repository {
+	if d == nil {
+		return nil
+	}
+	return d.Repo
+}
+
+// GetSender returns the Sender field.
+func (d *DeploymentEvent) GetSender() *User {
+	if d == nil {
+		return nil
+	}
+	return d.Sender
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (d *DeploymentProtectionRuleEvent) GetAction() string {
+	if d == nil || d.Action == nil {
+		return ""
+	}
+	return *d.Action
+}
+
+// GetDeployment returns the Deployment field.
+func (d *DeploymentProtectionRuleEvent) GetDeployment() *Deployment {
+	if d == nil {
+		return nil
+	}
+	return d.Deployment
+}
+
+// GetDeploymentCallbackURL returns the DeploymentCallbackURL field if it's non-nil, zero value otherwise.
+func (d *DeploymentProtectionRuleEvent) GetDeploymentCallbackURL() string {
+	if d == nil || d.DeploymentCallbackURL == nil {
+		return ""
+	}
+	return *d.DeploymentCallbackURL
+}
+
+// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise.
+func (d *DeploymentProtectionRuleEvent) GetEnvironment() string {
+	if d == nil || d.Environment == nil {
+		return ""
+	}
+	return *d.Environment
+}
+
+// GetEvent returns the Event field if it's non-nil, zero value otherwise.
+func (d *DeploymentProtectionRuleEvent) GetEvent() string {
+	if d == nil || d.Event == nil {
+		return ""
+	}
+	return *d.Event
 }
 
-// GetDeployment returns the Deployment field.
-func (d *DeploymentEvent) GetDeployment() *Deployment {
+// GetInstallation returns the Installation field.
+func (d *DeploymentProtectionRuleEvent) GetInstallation() *Installation {
 	if d == nil {
 		return nil
 	}
-	return d.Deployment
+	return d.Installation
 }
 
-// GetInstallation returns the Installation field.
-func (d *DeploymentEvent) GetInstallation() *Installation {
+// GetOrganization returns the Organization field.
+func (d *DeploymentProtectionRuleEvent) GetOrganization() *Organization {
 	if d == nil {
 		return nil
 	}
-	return d.Installation
+	return d.Organization
 }
 
 // GetRepo returns the Repo field.
-func (d *DeploymentEvent) GetRepo() *Repository {
+func (d *DeploymentProtectionRuleEvent) GetRepo() *Repository {
 	if d == nil {
 		return nil
 	}
@@ -4551,7 +5271,7 @@ func (d *DeploymentEvent) GetRepo() *Repository {
 }
 
 // GetSender returns the Sender field.
-func (d *DeploymentEvent) GetSender() *User {
+func (d *DeploymentProtectionRuleEvent) GetSender() *User {
 	if d == nil {
 		return nil
 	}
@@ -5430,6 +6150,14 @@ func (e *EditChange) GetBody() *EditBody {
 	return e.Body
 }
 
+// GetOwner returns the Owner field.
+func (e *EditChange) GetOwner() *EditOwner {
+	if e == nil {
+		return nil
+	}
+	return e.Owner
+}
+
 // GetRepo returns the Repo field.
 func (e *EditChange) GetRepo() *EditRepo {
 	if e == nil {
@@ -5446,6 +6174,14 @@ func (e *EditChange) GetTitle() *EditTitle {
 	return e.Title
 }
 
+// GetOwnerInfo returns the OwnerInfo field.
+func (e *EditOwner) GetOwnerInfo() *OwnerInfo {
+	if e == nil {
+		return nil
+	}
+	return e.OwnerInfo
+}
+
 // GetFrom returns the From field if it's non-nil, zero value otherwise.
 func (e *EditRef) GetFrom() string {
 	if e == nil || e.From == nil {
@@ -5590,6 +6326,14 @@ func (e *EnterpriseSecurityAnalysisSettings) GetSecretScanningPushProtectionEnab
 	return *e.SecretScanningPushProtectionEnabledForNewRepositories
 }
 
+// GetCanAdminsBypass returns the CanAdminsBypass field if it's non-nil, zero value otherwise.
+func (e *Environment) GetCanAdminsBypass() bool {
+	if e == nil || e.CanAdminsBypass == nil {
+		return false
+	}
+	return *e.CanAdminsBypass
+}
+
 // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
 func (e *Environment) GetCreatedAt() Timestamp {
 	if e == nil || e.CreatedAt == nil {
@@ -6022,6 +6766,14 @@ func (f *ForkEvent) GetSender() *User {
 	return f.Sender
 }
 
+// GetWorkFolder returns the WorkFolder field if it's non-nil, zero value otherwise.
+func (g *GenerateJITConfigRequest) GetWorkFolder() string {
+	if g == nil || g.WorkFolder == nil {
+		return ""
+	}
+	return *g.WorkFolder
+}
+
 // GetPreviousTagName returns the PreviousTagName field if it's non-nil, zero value otherwise.
 func (g *GenerateNotesOptions) GetPreviousTagName() string {
 	if g == nil || g.PreviousTagName == nil {
@@ -7294,6 +8046,14 @@ func (i *InstallationEvent) GetInstallation() *Installation {
 	return i.Installation
 }
 
+// GetRequester returns the Requester field.
+func (i *InstallationEvent) GetRequester() *User {
+	if i == nil {
+		return nil
+	}
+	return i.Requester
+}
+
 // GetSender returns the Sender field.
 func (i *InstallationEvent) GetSender() *User {
 	if i == nil {
@@ -8638,6 +9398,14 @@ func (i *IssueStats) GetTotalIssues() int {
 	return *i.TotalIssues
 }
 
+// GetEncodedJITConfig returns the EncodedJITConfig field if it's non-nil, zero value otherwise.
+func (j *JITRunnerConfig) GetEncodedJITConfig() string {
+	if j == nil || j.EncodedJITConfig == nil {
+		return ""
+	}
+	return *j.EncodedJITConfig
+}
+
 // GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise.
 func (j *Jobs) GetTotalCount() int {
 	if j == nil || j.TotalCount == nil {
@@ -8646,6 +9414,14 @@ func (j *Jobs) GetTotalCount() int {
 	return *j.TotalCount
 }
 
+// GetAddedBy returns the AddedBy field if it's non-nil, zero value otherwise.
+func (k *Key) GetAddedBy() string {
+	if k == nil || k.AddedBy == nil {
+		return ""
+	}
+	return *k.AddedBy
+}
+
 // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
 func (k *Key) GetCreatedAt() Timestamp {
 	if k == nil || k.CreatedAt == nil {
@@ -8670,6 +9446,14 @@ func (k *Key) GetKey() string {
 	return *k.Key
 }
 
+// GetLastUsed returns the LastUsed field if it's non-nil, zero value otherwise.
+func (k *Key) GetLastUsed() Timestamp {
+	if k == nil || k.LastUsed == nil {
+		return Timestamp{}
+	}
+	return *k.LastUsed
+}
+
 // GetReadOnly returns the ReadOnly field if it's non-nil, zero value otherwise.
 func (k *Key) GetReadOnly() bool {
 	if k == nil || k.ReadOnly == nil {
@@ -9142,6 +9926,14 @@ func (l *ListCheckSuiteResults) GetTotal() int {
 	return *l.Total
 }
 
+// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise.
+func (l *ListCodespaces) GetTotalCount() int {
+	if l == nil || l.TotalCount == nil {
+		return 0
+	}
+	return *l.TotalCount
+}
+
 // GetAffiliation returns the Affiliation field if it's non-nil, zero value otherwise.
 func (l *ListCollaboratorOptions) GetAffiliation() string {
 	if l == nil || l.Affiliation == nil {
@@ -10815,7 +11607,7 @@ func (o *Organization) GetNodeID() string {
 }
 
 // GetOwnedPrivateRepos returns the OwnedPrivateRepos field if it's non-nil, zero value otherwise.
-func (o *Organization) GetOwnedPrivateRepos() int {
+func (o *Organization) GetOwnedPrivateRepos() int64 {
 	if o == nil || o.OwnedPrivateRepos == nil {
 		return 0
 	}
@@ -10887,7 +11679,7 @@ func (o *Organization) GetSecretScanningPushProtectionEnabledForNewRepos() bool
 }
 
 // GetTotalPrivateRepos returns the TotalPrivateRepos field if it's non-nil, zero value otherwise.
-func (o *Organization) GetTotalPrivateRepos() int {
+func (o *Organization) GetTotalPrivateRepos() int64 {
 	if o == nil || o.TotalPrivateRepos == nil {
 		return 0
 	}
@@ -11046,6 +11838,94 @@ func (o *OrgBlockEvent) GetSender() *User {
 	return o.Sender
 }
 
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (o *OrgRequiredWorkflow) GetCreatedAt() Timestamp {
+	if o == nil || o.CreatedAt == nil {
+		return Timestamp{}
+	}
+	return *o.CreatedAt
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (o *OrgRequiredWorkflow) GetID() int64 {
+	if o == nil || o.ID == nil {
+		return 0
+	}
+	return *o.ID
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (o *OrgRequiredWorkflow) GetName() string {
+	if o == nil || o.Name == nil {
+		return ""
+	}
+	return *o.Name
+}
+
+// GetPath returns the Path field if it's non-nil, zero value otherwise.
+func (o *OrgRequiredWorkflow) GetPath() string {
+	if o == nil || o.Path == nil {
+		return ""
+	}
+	return *o.Path
+}
+
+// GetRef returns the Ref field if it's non-nil, zero value otherwise.
+func (o *OrgRequiredWorkflow) GetRef() string {
+	if o == nil || o.Ref == nil {
+		return ""
+	}
+	return *o.Ref
+}
+
+// GetRepository returns the Repository field.
+func (o *OrgRequiredWorkflow) GetRepository() *Repository {
+	if o == nil {
+		return nil
+	}
+	return o.Repository
+}
+
+// GetScope returns the Scope field if it's non-nil, zero value otherwise.
+func (o *OrgRequiredWorkflow) GetScope() string {
+	if o == nil || o.Scope == nil {
+		return ""
+	}
+	return *o.Scope
+}
+
+// GetSelectedRepositoriesURL returns the SelectedRepositoriesURL field if it's non-nil, zero value otherwise.
+func (o *OrgRequiredWorkflow) GetSelectedRepositoriesURL() string {
+	if o == nil || o.SelectedRepositoriesURL == nil {
+		return ""
+	}
+	return *o.SelectedRepositoriesURL
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (o *OrgRequiredWorkflow) GetState() string {
+	if o == nil || o.State == nil {
+		return ""
+	}
+	return *o.State
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (o *OrgRequiredWorkflow) GetUpdatedAt() Timestamp {
+	if o == nil || o.UpdatedAt == nil {
+		return Timestamp{}
+	}
+	return *o.UpdatedAt
+}
+
+// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise.
+func (o *OrgRequiredWorkflows) GetTotalCount() int {
+	if o == nil || o.TotalCount == nil {
+		return 0
+	}
+	return *o.TotalCount
+}
+
 // GetDisabledOrgs returns the DisabledOrgs field if it's non-nil, zero value otherwise.
 func (o *OrgStats) GetDisabledOrgs() int {
 	if o == nil || o.DisabledOrgs == nil {
@@ -11078,6 +11958,22 @@ func (o *OrgStats) GetTotalTeams() int {
 	return *o.TotalTeams
 }
 
+// GetOrg returns the Org field.
+func (o *OwnerInfo) GetOrg() *User {
+	if o == nil {
+		return nil
+	}
+	return o.Org
+}
+
+// GetUser returns the User field.
+func (o *OwnerInfo) GetUser() *User {
+	if o == nil {
+		return nil
+	}
+	return o.User
+}
+
 // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
 func (p *Package) GetCreatedAt() Timestamp {
 	if p == nil || p.CreatedAt == nil {
@@ -11750,6 +12646,14 @@ func (p *PageBuildEvent) GetSender() *User {
 	return p.Sender
 }
 
+// GetBuildType returns the BuildType field if it's non-nil, zero value otherwise.
+func (p *Pages) GetBuildType() string {
+	if p == nil || p.BuildType == nil {
+		return ""
+	}
+	return *p.BuildType
+}
+
 // GetCNAME returns the CNAME field if it's non-nil, zero value otherwise.
 func (p *Pages) GetCNAME() string {
 	if p == nil || p.CNAME == nil {
@@ -11875,15 +12779,239 @@ func (p *PagesBuild) GetUpdatedAt() Timestamp {
 	if p == nil || p.UpdatedAt == nil {
 		return Timestamp{}
 	}
-	return *p.UpdatedAt
+	return *p.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (p *PagesBuild) GetURL() string {
+	if p == nil || p.URL == nil {
+		return ""
+	}
+	return *p.URL
+}
+
+// GetCAAError returns the CAAError field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetCAAError() string {
+	if p == nil || p.CAAError == nil {
+		return ""
+	}
+	return *p.CAAError
+}
+
+// GetDNSResolves returns the DNSResolves field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetDNSResolves() bool {
+	if p == nil || p.DNSResolves == nil {
+		return false
+	}
+	return *p.DNSResolves
+}
+
+// GetEnforcesHTTPS returns the EnforcesHTTPS field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetEnforcesHTTPS() bool {
+	if p == nil || p.EnforcesHTTPS == nil {
+		return false
+	}
+	return *p.EnforcesHTTPS
+}
+
+// GetHasCNAMERecord returns the HasCNAMERecord field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetHasCNAMERecord() bool {
+	if p == nil || p.HasCNAMERecord == nil {
+		return false
+	}
+	return *p.HasCNAMERecord
+}
+
+// GetHasMXRecordsPresent returns the HasMXRecordsPresent field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetHasMXRecordsPresent() bool {
+	if p == nil || p.HasMXRecordsPresent == nil {
+		return false
+	}
+	return *p.HasMXRecordsPresent
+}
+
+// GetHost returns the Host field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetHost() string {
+	if p == nil || p.Host == nil {
+		return ""
+	}
+	return *p.Host
+}
+
+// GetHTTPSError returns the HTTPSError field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetHTTPSError() string {
+	if p == nil || p.HTTPSError == nil {
+		return ""
+	}
+	return *p.HTTPSError
+}
+
+// GetIsApexDomain returns the IsApexDomain field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsApexDomain() bool {
+	if p == nil || p.IsApexDomain == nil {
+		return false
+	}
+	return *p.IsApexDomain
+}
+
+// GetIsARecord returns the IsARecord field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsARecord() bool {
+	if p == nil || p.IsARecord == nil {
+		return false
+	}
+	return *p.IsARecord
+}
+
+// GetIsCloudflareIP returns the IsCloudflareIP field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsCloudflareIP() bool {
+	if p == nil || p.IsCloudflareIP == nil {
+		return false
+	}
+	return *p.IsCloudflareIP
+}
+
+// GetIsCNAMEToFastly returns the IsCNAMEToFastly field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsCNAMEToFastly() bool {
+	if p == nil || p.IsCNAMEToFastly == nil {
+		return false
+	}
+	return *p.IsCNAMEToFastly
+}
+
+// GetIsCNAMEToGithubUserDomain returns the IsCNAMEToGithubUserDomain field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsCNAMEToGithubUserDomain() bool {
+	if p == nil || p.IsCNAMEToGithubUserDomain == nil {
+		return false
+	}
+	return *p.IsCNAMEToGithubUserDomain
+}
+
+// GetIsCNAMEToPagesDotGithubDotCom returns the IsCNAMEToPagesDotGithubDotCom field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsCNAMEToPagesDotGithubDotCom() bool {
+	if p == nil || p.IsCNAMEToPagesDotGithubDotCom == nil {
+		return false
+	}
+	return *p.IsCNAMEToPagesDotGithubDotCom
+}
+
+// GetIsFastlyIP returns the IsFastlyIP field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsFastlyIP() bool {
+	if p == nil || p.IsFastlyIP == nil {
+		return false
+	}
+	return *p.IsFastlyIP
+}
+
+// GetIsHTTPSEligible returns the IsHTTPSEligible field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsHTTPSEligible() bool {
+	if p == nil || p.IsHTTPSEligible == nil {
+		return false
+	}
+	return *p.IsHTTPSEligible
+}
+
+// GetIsNonGithubPagesIPPresent returns the IsNonGithubPagesIPPresent field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsNonGithubPagesIPPresent() bool {
+	if p == nil || p.IsNonGithubPagesIPPresent == nil {
+		return false
+	}
+	return *p.IsNonGithubPagesIPPresent
+}
+
+// GetIsOldIPAddress returns the IsOldIPAddress field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsOldIPAddress() bool {
+	if p == nil || p.IsOldIPAddress == nil {
+		return false
+	}
+	return *p.IsOldIPAddress
+}
+
+// GetIsPagesDomain returns the IsPagesDomain field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsPagesDomain() bool {
+	if p == nil || p.IsPagesDomain == nil {
+		return false
+	}
+	return *p.IsPagesDomain
+}
+
+// GetIsPointedToGithubPagesIP returns the IsPointedToGithubPagesIP field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsPointedToGithubPagesIP() bool {
+	if p == nil || p.IsPointedToGithubPagesIP == nil {
+		return false
+	}
+	return *p.IsPointedToGithubPagesIP
+}
+
+// GetIsProxied returns the IsProxied field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsProxied() bool {
+	if p == nil || p.IsProxied == nil {
+		return false
+	}
+	return *p.IsProxied
+}
+
+// GetIsServedByPages returns the IsServedByPages field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsServedByPages() bool {
+	if p == nil || p.IsServedByPages == nil {
+		return false
+	}
+	return *p.IsServedByPages
+}
+
+// GetIsValid returns the IsValid field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsValid() bool {
+	if p == nil || p.IsValid == nil {
+		return false
+	}
+	return *p.IsValid
+}
+
+// GetIsValidDomain returns the IsValidDomain field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetIsValidDomain() bool {
+	if p == nil || p.IsValidDomain == nil {
+		return false
+	}
+	return *p.IsValidDomain
+}
+
+// GetNameservers returns the Nameservers field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetNameservers() string {
+	if p == nil || p.Nameservers == nil {
+		return ""
+	}
+	return *p.Nameservers
+}
+
+// GetReason returns the Reason field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetReason() string {
+	if p == nil || p.Reason == nil {
+		return ""
+	}
+	return *p.Reason
+}
+
+// GetRespondsToHTTPS returns the RespondsToHTTPS field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetRespondsToHTTPS() bool {
+	if p == nil || p.RespondsToHTTPS == nil {
+		return false
+	}
+	return *p.RespondsToHTTPS
+}
+
+// GetShouldBeARecord returns the ShouldBeARecord field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetShouldBeARecord() bool {
+	if p == nil || p.ShouldBeARecord == nil {
+		return false
+	}
+	return *p.ShouldBeARecord
 }
 
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (p *PagesBuild) GetURL() string {
-	if p == nil || p.URL == nil {
+// GetURI returns the URI field if it's non-nil, zero value otherwise.
+func (p *PagesDomain) GetURI() string {
+	if p == nil || p.URI == nil {
 		return ""
 	}
-	return *p.URL
+	return *p.URI
 }
 
 // GetMessage returns the Message field if it's non-nil, zero value otherwise.
@@ -11894,6 +13022,22 @@ func (p *PagesError) GetMessage() string {
 	return *p.Message
 }
 
+// GetAltDomain returns the AltDomain field.
+func (p *PagesHealthCheckResponse) GetAltDomain() *PagesDomain {
+	if p == nil {
+		return nil
+	}
+	return p.AltDomain
+}
+
+// GetDomain returns the Domain field.
+func (p *PagesHealthCheckResponse) GetDomain() *PagesDomain {
+	if p == nil {
+		return nil
+	}
+	return p.Domain
+}
+
 // GetDescription returns the Description field if it's non-nil, zero value otherwise.
 func (p *PagesHTTPSCertificate) GetDescription() string {
 	if p == nil || p.Description == nil {
@@ -11942,6 +13086,14 @@ func (p *PageStats) GetTotalPages() int {
 	return *p.TotalPages
 }
 
+// GetBuildType returns the BuildType field if it's non-nil, zero value otherwise.
+func (p *PagesUpdate) GetBuildType() string {
+	if p == nil || p.BuildType == nil {
+		return ""
+	}
+	return *p.BuildType
+}
+
 // GetCNAME returns the CNAME field if it's non-nil, zero value otherwise.
 func (p *PagesUpdate) GetCNAME() string {
 	if p == nil || p.CNAME == nil {
@@ -12055,7 +13207,7 @@ func (p *Plan) GetName() string {
 }
 
 // GetPrivateRepos returns the PrivateRepos field if it's non-nil, zero value otherwise.
-func (p *Plan) GetPrivateRepos() int {
+func (p *Plan) GetPrivateRepos() int64 {
 	if p == nil || p.PrivateRepos == nil {
 		return 0
 	}
@@ -12078,6 +13230,22 @@ func (p *Plan) GetSpace() int {
 	return *p.Space
 }
 
+// GetCode returns the Code field if it's non-nil, zero value otherwise.
+func (p *PolicyOverrideReason) GetCode() string {
+	if p == nil || p.Code == nil {
+		return ""
+	}
+	return *p.Code
+}
+
+// GetMessage returns the Message field if it's non-nil, zero value otherwise.
+func (p *PolicyOverrideReason) GetMessage() string {
+	if p == nil || p.Message == nil {
+		return ""
+	}
+	return *p.Message
+}
+
 // GetConfigURL returns the ConfigURL field if it's non-nil, zero value otherwise.
 func (p *PreReceiveHook) GetConfigURL() string {
 	if p == nil || p.ConfigURL == nil {
@@ -12878,6 +14046,14 @@ func (p *Protection) GetRequiredPullRequestReviews() *PullRequestReviewsEnforcem
 	return p.RequiredPullRequestReviews
 }
 
+// GetRequiredSignatures returns the RequiredSignatures field.
+func (p *Protection) GetRequiredSignatures() *SignaturesProtectedBranch {
+	if p == nil {
+		return nil
+	}
+	return p.RequiredSignatures
+}
+
 // GetRequiredStatusChecks returns the RequiredStatusChecks field.
 func (p *Protection) GetRequiredStatusChecks() *RequiredStatusChecks {
 	if p == nil {
@@ -12902,6 +14078,14 @@ func (p *Protection) GetRestrictions() *BranchRestrictions {
 	return p.Restrictions
 }
 
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (p *Protection) GetURL() string {
+	if p == nil || p.URL == nil {
+		return ""
+	}
+	return *p.URL
+}
+
 // GetAdminEnforced returns the AdminEnforced field.
 func (p *ProtectionChanges) GetAdminEnforced() *AdminEnforcedChanges {
 	if p == nil {
@@ -14574,6 +15758,14 @@ func (p *PushEvent) GetBefore() string {
 	return *p.Before
 }
 
+// GetCommits returns the Commits slice if it's non-nil, nil otherwise.
+func (p *PushEvent) GetCommits() []*HeadCommit {
+	if p == nil || p.Commits == nil {
+		return nil
+	}
+	return p.Commits
+}
+
 // GetCompare returns the Compare field if it's non-nil, zero value otherwise.
 func (p *PushEvent) GetCompare() string {
 	if p == nil || p.Compare == nil {
@@ -15454,6 +16646,102 @@ func (r *RepoName) GetFrom() string {
 	return *r.From
 }
 
+// GetBadgeURL returns the BadgeURL field if it's non-nil, zero value otherwise.
+func (r *RepoRequiredWorkflow) GetBadgeURL() string {
+	if r == nil || r.BadgeURL == nil {
+		return ""
+	}
+	return *r.BadgeURL
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (r *RepoRequiredWorkflow) GetCreatedAt() Timestamp {
+	if r == nil || r.CreatedAt == nil {
+		return Timestamp{}
+	}
+	return *r.CreatedAt
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (r *RepoRequiredWorkflow) GetHTMLURL() string {
+	if r == nil || r.HTMLURL == nil {
+		return ""
+	}
+	return *r.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (r *RepoRequiredWorkflow) GetID() int64 {
+	if r == nil || r.ID == nil {
+		return 0
+	}
+	return *r.ID
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (r *RepoRequiredWorkflow) GetName() string {
+	if r == nil || r.Name == nil {
+		return ""
+	}
+	return *r.Name
+}
+
+// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise.
+func (r *RepoRequiredWorkflow) GetNodeID() string {
+	if r == nil || r.NodeID == nil {
+		return ""
+	}
+	return *r.NodeID
+}
+
+// GetPath returns the Path field if it's non-nil, zero value otherwise.
+func (r *RepoRequiredWorkflow) GetPath() string {
+	if r == nil || r.Path == nil {
+		return ""
+	}
+	return *r.Path
+}
+
+// GetSourceRepository returns the SourceRepository field.
+func (r *RepoRequiredWorkflow) GetSourceRepository() *Repository {
+	if r == nil {
+		return nil
+	}
+	return r.SourceRepository
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (r *RepoRequiredWorkflow) GetState() string {
+	if r == nil || r.State == nil {
+		return ""
+	}
+	return *r.State
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (r *RepoRequiredWorkflow) GetUpdatedAt() Timestamp {
+	if r == nil || r.UpdatedAt == nil {
+		return Timestamp{}
+	}
+	return *r.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *RepoRequiredWorkflow) GetURL() string {
+	if r == nil || r.URL == nil {
+		return ""
+	}
+	return *r.URL
+}
+
+// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise.
+func (r *RepoRequiredWorkflows) GetTotalCount() int {
+	if r == nil || r.TotalCount == nil {
+		return 0
+	}
+	return *r.TotalCount
+}
+
 // GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise.
 func (r *RepositoriesSearchResult) GetIncompleteResults() bool {
 	if r == nil || r.IncompleteResults == nil {
@@ -17110,6 +18398,14 @@ func (r *RepositoryRelease) GetZipballURL() string {
 	return *r.ZipballURL
 }
 
+// GetParameters returns the Parameters field if it's non-nil, zero value otherwise.
+func (r *RepositoryRule) GetParameters() json.RawMessage {
+	if r == nil || r.Parameters == nil {
+		return json.RawMessage{}
+	}
+	return *r.Parameters
+}
+
 // GetCommit returns the Commit field.
 func (r *RepositoryTag) GetCommit() *Commit {
 	if r == nil {
@@ -17454,6 +18750,22 @@ func (r *RequiredStatusCheck) GetAppID() int64 {
 	return *r.AppID
 }
 
+// GetContextsURL returns the ContextsURL field if it's non-nil, zero value otherwise.
+func (r *RequiredStatusChecks) GetContextsURL() string {
+	if r == nil || r.ContextsURL == nil {
+		return ""
+	}
+	return *r.ContextsURL
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *RequiredStatusChecks) GetURL() string {
+	if r == nil || r.URL == nil {
+		return ""
+	}
+	return *r.URL
+}
+
 // GetFrom returns the From field if it's non-nil, zero value otherwise.
 func (r *RequiredStatusChecksEnforcementLevelChanges) GetFrom() string {
 	if r == nil || r.From == nil {
@@ -17470,6 +18782,14 @@ func (r *RequiredStatusChecksRequest) GetStrict() bool {
 	return *r.Strict
 }
 
+// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise.
+func (r *RequiredWorkflowSelectedRepos) GetTotalCount() int {
+	if r == nil || r.TotalCount == nil {
+		return 0
+	}
+	return *r.TotalCount
+}
+
 // GetNodeID returns the NodeID field if it's non-nil, zero value otherwise.
 func (r *ReviewersRequest) GetNodeID() string {
 	if r == nil || r.NodeID == nil {
@@ -17534,6 +18854,118 @@ func (r *Rule) GetSeverity() string {
 	return *r.Severity
 }
 
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (r *RulePatternParameters) GetName() string {
+	if r == nil || r.Name == nil {
+		return ""
+	}
+	return *r.Name
+}
+
+// GetNegate returns the Negate field if it's non-nil, zero value otherwise.
+func (r *RulePatternParameters) GetNegate() bool {
+	if r == nil || r.Negate == nil {
+		return false
+	}
+	return *r.Negate
+}
+
+// GetIntegrationID returns the IntegrationID field if it's non-nil, zero value otherwise.
+func (r *RuleRequiredStatusChecks) GetIntegrationID() int64 {
+	if r == nil || r.IntegrationID == nil {
+		return 0
+	}
+	return *r.IntegrationID
+}
+
+// GetBypassMode returns the BypassMode field if it's non-nil, zero value otherwise.
+func (r *Ruleset) GetBypassMode() string {
+	if r == nil || r.BypassMode == nil {
+		return ""
+	}
+	return *r.BypassMode
+}
+
+// GetConditions returns the Conditions field.
+func (r *Ruleset) GetConditions() *RulesetConditions {
+	if r == nil {
+		return nil
+	}
+	return r.Conditions
+}
+
+// GetLinks returns the Links field.
+func (r *Ruleset) GetLinks() *RulesetLinks {
+	if r == nil {
+		return nil
+	}
+	return r.Links
+}
+
+// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise.
+func (r *Ruleset) GetNodeID() string {
+	if r == nil || r.NodeID == nil {
+		return ""
+	}
+	return *r.NodeID
+}
+
+// GetSourceType returns the SourceType field if it's non-nil, zero value otherwise.
+func (r *Ruleset) GetSourceType() string {
+	if r == nil || r.SourceType == nil {
+		return ""
+	}
+	return *r.SourceType
+}
+
+// GetTarget returns the Target field if it's non-nil, zero value otherwise.
+func (r *Ruleset) GetTarget() string {
+	if r == nil || r.Target == nil {
+		return ""
+	}
+	return *r.Target
+}
+
+// GetRefName returns the RefName field.
+func (r *RulesetConditions) GetRefName() *RulesetRefConditionParameters {
+	if r == nil {
+		return nil
+	}
+	return r.RefName
+}
+
+// GetRepositoryName returns the RepositoryName field.
+func (r *RulesetConditions) GetRepositoryName() *RulesetRepositoryConditionParameters {
+	if r == nil {
+		return nil
+	}
+	return r.RepositoryName
+}
+
+// GetHRef returns the HRef field if it's non-nil, zero value otherwise.
+func (r *RulesetLink) GetHRef() string {
+	if r == nil || r.HRef == nil {
+		return ""
+	}
+	return *r.HRef
+}
+
+// GetSelf returns the Self field.
+func (r *RulesetLinks) GetSelf() *RulesetLink {
+	if r == nil {
+		return nil
+	}
+	return r.Self
+}
+
+// GetProtected returns the Protected field if it's non-nil, zero value otherwise.
+func (r *RulesetRepositoryConditionParameters) GetProtected() bool {
+	if r == nil || r.Protected == nil {
+		return false
+	}
+	return *r.Protected
+}
+
 // GetBusy returns the Busy field if it's non-nil, zero value otherwise.
 func (r *Runner) GetBusy() bool {
 	if r == nil || r.Busy == nil {
@@ -20030,6 +21462,14 @@ func (t *TrafficViews) GetUniques() int {
 	return *t.Uniques
 }
 
+// GetNewName returns the NewName field if it's non-nil, zero value otherwise.
+func (t *TransferRequest) GetNewName() string {
+	if t == nil || t.NewName == nil {
+		return ""
+	}
+	return *t.NewName
+}
+
 // GetSHA returns the SHA field if it's non-nil, zero value otherwise.
 func (t *Tree) GetSHA() string {
 	if t == nil || t.SHA == nil {
@@ -20158,6 +21598,30 @@ func (u *UpdateCheckRunOptions) GetStatus() string {
 	return *u.Status
 }
 
+// GetQuerySuite returns the QuerySuite field if it's non-nil, zero value otherwise.
+func (u *UpdateDefaultSetupConfigurationOptions) GetQuerySuite() string {
+	if u == nil || u.QuerySuite == nil {
+		return ""
+	}
+	return *u.QuerySuite
+}
+
+// GetRunID returns the RunID field if it's non-nil, zero value otherwise.
+func (u *UpdateDefaultSetupConfigurationResponse) GetRunID() int64 {
+	if u == nil || u.RunID == nil {
+		return 0
+	}
+	return *u.RunID
+}
+
+// GetRunURL returns the RunURL field if it's non-nil, zero value otherwise.
+func (u *UpdateDefaultSetupConfigurationResponse) GetRunURL() string {
+	if u == nil || u.RunURL == nil {
+		return ""
+	}
+	return *u.RunURL
+}
+
 // GetAllowsPublicRepositories returns the AllowsPublicRepositories field if it's non-nil, zero value otherwise.
 func (u *UpdateRunnerGroupRequest) GetAllowsPublicRepositories() bool {
 	if u == nil || u.AllowsPublicRepositories == nil {
@@ -20383,7 +21847,7 @@ func (u *User) GetOrganizationsURL() string {
 }
 
 // GetOwnedPrivateRepos returns the OwnedPrivateRepos field if it's non-nil, zero value otherwise.
-func (u *User) GetOwnedPrivateRepos() int {
+func (u *User) GetOwnedPrivateRepos() int64 {
 	if u == nil || u.OwnedPrivateRepos == nil {
 		return 0
 	}
@@ -20487,7 +21951,7 @@ func (u *User) GetSuspendedAt() Timestamp {
 }
 
 // GetTotalPrivateRepos returns the TotalPrivateRepos field if it's non-nil, zero value otherwise.
-func (u *User) GetTotalPrivateRepos() int {
+func (u *User) GetTotalPrivateRepos() int64 {
 	if u == nil || u.TotalPrivateRepos == nil {
 		return 0
 	}
@@ -21222,6 +22686,14 @@ func (w *WorkflowJob) GetCreatedAt() Timestamp {
 	return *w.CreatedAt
 }
 
+// GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise.
+func (w *WorkflowJob) GetHeadBranch() string {
+	if w == nil || w.HeadBranch == nil {
+		return ""
+	}
+	return *w.HeadBranch
+}
+
 // GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise.
 func (w *WorkflowJob) GetHeadSHA() string {
 	if w == nil || w.HeadSHA == nil {
@@ -21462,6 +22934,14 @@ func (w *WorkflowRun) GetCreatedAt() Timestamp {
 	return *w.CreatedAt
 }
 
+// GetDisplayTitle returns the DisplayTitle field if it's non-nil, zero value otherwise.
+func (w *WorkflowRun) GetDisplayTitle() string {
+	if w == nil || w.DisplayTitle == nil {
+		return ""
+	}
+	return *w.DisplayTitle
+}
+
 // GetEvent returns the Event field if it's non-nil, zero value otherwise.
 func (w *WorkflowRun) GetEvent() string {
 	if w == nil || w.Event == nil {
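
The getters added above follow the library's generated-accessor convention: every Get* method checks for a nil receiver and a nil field before dereferencing. The short Go sketch below is illustrative only (not part of the vendored patch; the nil codespace is deliberate) and shows why that matters to callers chaining through optional fields of the new Codespace types.

package main

import (
	"fmt"

	"github.com/google/go-github/v53/github"
)

// describe reads several optional fields; the generated accessors return the
// zero value when the receiver or the field is nil, so no explicit checks are
// needed and the chain through GetMachine() cannot panic.
func describe(cs *github.Codespace) string {
	return fmt.Sprintf("%s on %s (%d CPUs, state %q)",
		cs.GetDisplayName(),
		cs.GetMachine().GetDisplayName(),
		cs.GetMachine().GetCPUs(),
		cs.GetState(),
	)
}

func main() {
	var cs *github.Codespace // deliberately nil
	fmt.Println(describe(cs)) // prints zero values instead of panicking
}
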
diff --git a/vendor/github.com/google/go-github/v50/github/github.go b/vendor/github.com/google/go-github/v53/github/github.go
similarity index 97%
rename from vendor/github.com/google/go-github/v50/github/github.go
rename to vendor/github.com/google/go-github/v53/github/github.go
index 4c4827e53c..34a27282f0 100644
--- a/vendor/github.com/google/go-github/v50/github/github.go
+++ b/vendor/github.com/google/go-github/v53/github/github.go
@@ -28,7 +28,7 @@ import (
 )
 
 const (
-	Version = "v50.2.0"
+	Version = "v53.2.0"
 
 	defaultAPIVersion = "2022-11-28"
 	defaultBaseURL    = "https://api.github.com/"
@@ -40,6 +40,7 @@ const (
 	headerRateRemaining = "X-RateLimit-Remaining"
 	headerRateReset     = "X-RateLimit-Reset"
 	headerOTP           = "X-GitHub-OTP"
+	headerRetryAfter    = "Retry-After"
 
 	headerTokenExpiration = "GitHub-Authentication-Token-Expiration"
 
@@ -186,6 +187,7 @@ type Client struct {
 	Billing        *BillingService
 	Checks         *ChecksService
 	CodeScanning   *CodeScanningService
+	Codespaces     *CodespacesService
 	Dependabot     *DependabotService
 	Enterprise     *EnterpriseService
 	Gists          *GistsService
@@ -324,6 +326,7 @@ func NewClient(httpClient *http.Client) *Client {
 	c.Billing = (*BillingService)(&c.common)
 	c.Checks = (*ChecksService)(&c.common)
 	c.CodeScanning = (*CodeScanningService)(&c.common)
+	c.Codespaces = (*CodespacesService)(&c.common)
 	c.Dependabot = (*DependabotService)(&c.common)
 	c.Enterprise = (*EnterpriseService)(&c.common)
 	c.Gists = (*GistsService)(&c.common)
@@ -677,6 +680,30 @@ func parseRate(r *http.Response) Rate {
 	return rate
 }
 
+// parseSecondaryRate parses the secondary rate related headers,
+// and returns the time to retry after.
+func parseSecondaryRate(r *http.Response) *time.Duration {
+	// According to GitHub support, the "Retry-After" header value will be
+	// an integer which represents the number of seconds that one should
+	// wait before resuming making requests.
+	if v := r.Header.Get(headerRetryAfter); v != "" {
+		retryAfterSeconds, _ := strconv.ParseInt(v, 10, 64) // Error handling is noop.
+		retryAfter := time.Duration(retryAfterSeconds) * time.Second
+		return &retryAfter
+	}
+
+	// According to GitHub support, endpoints might return x-ratelimit-reset instead,
+	// as an integer which represents the number of seconds since epoch UTC,
+	// representing the time to resume making requests.
+	if v := r.Header.Get(headerRateReset); v != "" {
+		secondsSinceEpoch, _ := strconv.ParseInt(v, 10, 64) // Error handling is noop.
+		retryAfter := time.Until(time.Unix(secondsSinceEpoch, 0))
+		return &retryAfter
+	}
+
+	return nil
+}
+
 // parseTokenExpiration parses the TokenExpiration related headers.
 // Returns 0001-01-01 if the header is not defined or could not be parsed.
 func parseTokenExpiration(r *http.Response) Timestamp {
@@ -1156,13 +1183,8 @@ func CheckResponse(r *http.Response) error {
 			Response: errorResponse.Response,
 			Message:  errorResponse.Message,
 		}
-		if v := r.Header["Retry-After"]; len(v) > 0 {
-			// According to GitHub support, the "Retry-After" header value will be
-			// an integer which represents the number of seconds that one should
-			// wait before resuming making requests.
-			retryAfterSeconds, _ := strconv.ParseInt(v[0], 10, 64) // Error handling is noop.
-			retryAfter := time.Duration(retryAfterSeconds) * time.Second
-			abuseRateLimitError.RetryAfter = &retryAfter
+		if retryAfter := parseSecondaryRate(r); retryAfter != nil {
+			abuseRateLimitError.RetryAfter = retryAfter
 		}
 		return abuseRateLimitError
 	default:
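
The new parseSecondaryRate helper centralizes how the client derives a retry delay from either the Retry-After header or the X-RateLimit-Reset header, and CheckResponse now feeds that value into AbuseRateLimitError.RetryAfter. Below is a minimal sketch, not part of the patch, of how a caller of the upgraded client might honor that hint; the organization name is arbitrary and a real request would normally use an authenticated client.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // unauthenticated client, fine for a sketch

	_, _, err := client.Organizations.Get(ctx, "tektoncd")
	var abuse *github.AbuseRateLimitError
	if errors.As(err, &abuse) && abuse.RetryAfter != nil {
		// RetryAfter was populated from Retry-After or X-RateLimit-Reset by
		// parseSecondaryRate; waiting that long respects the server's hint.
		fmt.Printf("secondary rate limit hit, retrying after %s\n", *abuse.RetryAfter)
		time.Sleep(*abuse.RetryAfter)
		_, _, err = client.Organizations.Get(ctx, "tektoncd")
	}
	if err != nil {
		fmt.Println("request failed:", err)
	}
}
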
diff --git a/vendor/github.com/google/go-github/v50/github/gitignore.go b/vendor/github.com/google/go-github/v53/github/gitignore.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/gitignore.go
rename to vendor/github.com/google/go-github/v53/github/gitignore.go
diff --git a/vendor/github.com/google/go-github/v50/github/interactions.go b/vendor/github.com/google/go-github/v53/github/interactions.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/interactions.go
rename to vendor/github.com/google/go-github/v53/github/interactions.go
diff --git a/vendor/github.com/google/go-github/v50/github/interactions_orgs.go b/vendor/github.com/google/go-github/v53/github/interactions_orgs.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/interactions_orgs.go
rename to vendor/github.com/google/go-github/v53/github/interactions_orgs.go
diff --git a/vendor/github.com/google/go-github/v50/github/interactions_repos.go b/vendor/github.com/google/go-github/v53/github/interactions_repos.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/interactions_repos.go
rename to vendor/github.com/google/go-github/v53/github/interactions_repos.go
diff --git a/vendor/github.com/google/go-github/v50/github/issue_import.go b/vendor/github.com/google/go-github/v53/github/issue_import.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/issue_import.go
rename to vendor/github.com/google/go-github/v53/github/issue_import.go
diff --git a/vendor/github.com/google/go-github/v50/github/issues.go b/vendor/github.com/google/go-github/v53/github/issues.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/issues.go
rename to vendor/github.com/google/go-github/v53/github/issues.go
diff --git a/vendor/github.com/google/go-github/v50/github/issues_assignees.go b/vendor/github.com/google/go-github/v53/github/issues_assignees.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/issues_assignees.go
rename to vendor/github.com/google/go-github/v53/github/issues_assignees.go
diff --git a/vendor/github.com/google/go-github/v50/github/issues_comments.go b/vendor/github.com/google/go-github/v53/github/issues_comments.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/issues_comments.go
rename to vendor/github.com/google/go-github/v53/github/issues_comments.go
diff --git a/vendor/github.com/google/go-github/v50/github/issues_events.go b/vendor/github.com/google/go-github/v53/github/issues_events.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/issues_events.go
rename to vendor/github.com/google/go-github/v53/github/issues_events.go
diff --git a/vendor/github.com/google/go-github/v50/github/issues_labels.go b/vendor/github.com/google/go-github/v53/github/issues_labels.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/issues_labels.go
rename to vendor/github.com/google/go-github/v53/github/issues_labels.go
diff --git a/vendor/github.com/google/go-github/v50/github/issues_milestones.go b/vendor/github.com/google/go-github/v53/github/issues_milestones.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/issues_milestones.go
rename to vendor/github.com/google/go-github/v53/github/issues_milestones.go
diff --git a/vendor/github.com/google/go-github/v50/github/issues_timeline.go b/vendor/github.com/google/go-github/v53/github/issues_timeline.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/issues_timeline.go
rename to vendor/github.com/google/go-github/v53/github/issues_timeline.go
diff --git a/vendor/github.com/google/go-github/v50/github/licenses.go b/vendor/github.com/google/go-github/v53/github/licenses.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/licenses.go
rename to vendor/github.com/google/go-github/v53/github/licenses.go
diff --git a/vendor/github.com/google/go-github/v50/github/messages.go b/vendor/github.com/google/go-github/v53/github/messages.go
similarity index 96%
rename from vendor/github.com/google/go-github/v50/github/messages.go
rename to vendor/github.com/google/go-github/v53/github/messages.go
index 0943b9e987..bb5ae3f389 100644
--- a/vendor/github.com/google/go-github/v50/github/messages.go
+++ b/vendor/github.com/google/go-github/v53/github/messages.go
@@ -55,6 +55,7 @@ var (
 		"deploy_key":                     "DeployKeyEvent",
 		"deployment":                     "DeploymentEvent",
 		"deployment_status":              "DeploymentStatusEvent",
+		"deployment_protection_rule":     "DeploymentProtectionRuleEvent",
 		"discussion":                     "DiscussionEvent",
 		"discussion_comment":             "DiscussionCommentEvent",
 		"fork":                           "ForkEvent",
@@ -92,6 +93,7 @@ var (
 		"repository_vulnerability_alert": "RepositoryVulnerabilityAlertEvent",
 		"release":                        "ReleaseEvent",
 		"secret_scanning_alert":          "SecretScanningAlertEvent",
+		"security_advisory":              "SecurityAdvisoryEvent",
 		"star":                           "StarEvent",
 		"status":                         "StatusEvent",
 		"team":                           "TeamEvent",
@@ -148,13 +150,13 @@ func messageMAC(signature string) ([]byte, func() hash.Hash, error) {
 	return buf, hashFunc, nil
 }
 
-// ValidatePayload validates an incoming GitHub Webhook event request body
+// ValidatePayloadFromBody validates an incoming GitHub Webhook event request body
 // and returns the (JSON) payload.
 // The Content-Type header of the payload can be "application/json" or "application/x-www-form-urlencoded".
 // If the Content-Type is neither then an error is returned.
 // secretToken is the GitHub Webhook secret token.
-// If your webhook does not contain a secret token, you can pass nil or an empty slice.
-// This is intended for local development purposes only and all webhooks should ideally set up a secret token.
+// If your webhook does not contain a secret token, you can pass an empty secretToken.
+// Webhooks without a secret token are not secure and should be avoided.
 //
 // Example usage:
 //
@@ -201,9 +203,8 @@ func ValidatePayloadFromBody(contentType string, readable io.Reader, signature s
 		return nil, fmt.Errorf("webhook request has unsupported Content-Type %q", contentType)
 	}
 
-	// Only validate the signature if a secret token exists. This is intended for
-	// local development only and all webhooks should ideally set up a secret token.
-	if len(secretToken) > 0 {
+	// Validate the signature if present or if one is expected (secretToken is non-empty).
+	if len(secretToken) > 0 || len(signature) > 0 {
 		if err := ValidateSignature(signature, body, secretToken); err != nil {
 			return nil, err
 		}
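
With this change the signature is verified whenever a secret token is configured or a signature header is present, rather than only when a secret is set. The sketch below (illustrative only; the route, port, and secret are placeholders) shows a webhook handler built on the higher-level ValidatePayload helper, which delegates to the renamed ValidatePayloadFromBody, and on the newly mapped deployment_protection_rule event.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/google/go-github/v53/github"
)

func main() {
	secret := []byte("placeholder-webhook-secret")

	http.HandleFunc("/webhook", func(w http.ResponseWriter, r *http.Request) {
		// Requests whose signature cannot be verified are rejected here.
		payload, err := github.ValidatePayload(r, secret)
		if err != nil {
			http.Error(w, "invalid signature", http.StatusForbidden)
			return
		}
		event, err := github.ParseWebHook(github.WebHookType(r), payload)
		if err != nil {
			http.Error(w, "cannot parse payload", http.StatusBadRequest)
			return
		}
		switch e := event.(type) {
		case *github.DeploymentProtectionRuleEvent: // event type added in this version
			fmt.Printf("protection rule check for environment %q\n", e.GetEnvironment())
		default:
			fmt.Printf("received %T\n", event)
		}
		w.WriteHeader(http.StatusOK)
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}
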
diff --git a/vendor/github.com/google/go-github/v50/github/migrations.go b/vendor/github.com/google/go-github/v53/github/migrations.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/migrations.go
rename to vendor/github.com/google/go-github/v53/github/migrations.go
diff --git a/vendor/github.com/google/go-github/v50/github/migrations_source_import.go b/vendor/github.com/google/go-github/v53/github/migrations_source_import.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/migrations_source_import.go
rename to vendor/github.com/google/go-github/v53/github/migrations_source_import.go
diff --git a/vendor/github.com/google/go-github/v50/github/migrations_user.go b/vendor/github.com/google/go-github/v53/github/migrations_user.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/migrations_user.go
rename to vendor/github.com/google/go-github/v53/github/migrations_user.go
diff --git a/vendor/github.com/google/go-github/v50/github/misc.go b/vendor/github.com/google/go-github/v53/github/misc.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/misc.go
rename to vendor/github.com/google/go-github/v53/github/misc.go
diff --git a/vendor/github.com/google/go-github/v50/github/orgs.go b/vendor/github.com/google/go-github/v53/github/orgs.go
similarity index 95%
rename from vendor/github.com/google/go-github/v50/github/orgs.go
rename to vendor/github.com/google/go-github/v53/github/orgs.go
index 487405778f..0c7e361b3f 100644
--- a/vendor/github.com/google/go-github/v50/github/orgs.go
+++ b/vendor/github.com/google/go-github/v53/github/orgs.go
@@ -36,8 +36,8 @@ type Organization struct {
 	Following                   *int       `json:"following,omitempty"`
 	CreatedAt                   *Timestamp `json:"created_at,omitempty"`
 	UpdatedAt                   *Timestamp `json:"updated_at,omitempty"`
-	TotalPrivateRepos           *int       `json:"total_private_repos,omitempty"`
-	OwnedPrivateRepos           *int       `json:"owned_private_repos,omitempty"`
+	TotalPrivateRepos           *int64     `json:"total_private_repos,omitempty"`
+	OwnedPrivateRepos           *int64     `json:"owned_private_repos,omitempty"`
 	PrivateGists                *int       `json:"private_gists,omitempty"`
 	DiskUsage                   *int       `json:"disk_usage,omitempty"`
 	Collaborators               *int       `json:"collaborators,omitempty"`
@@ -121,7 +121,7 @@ type Plan struct {
 	Name          *string `json:"name,omitempty"`
 	Space         *int    `json:"space,omitempty"`
 	Collaborators *int    `json:"collaborators,omitempty"`
-	PrivateRepos  *int    `json:"private_repos,omitempty"`
+	PrivateRepos  *int64  `json:"private_repos,omitempty"`
 	FilledSeats   *int    `json:"filled_seats,omitempty"`
 	Seats         *int    `json:"seats,omitempty"`
 }
@@ -262,6 +262,19 @@ func (s *OrganizationsService) Edit(ctx context.Context, name string, org *Organ
 	return o, resp, nil
 }
 
+// Delete an organization by name.
+//
+// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#delete-an-organization
+func (s *OrganizationsService) Delete(ctx context.Context, org string) (*Response, error) {
+	u := fmt.Sprintf("orgs/%v", org)
+	req, err := s.client.NewRequest("DELETE", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(ctx, req, nil)
+}
+
 // ListInstallations lists installations for an organization.
 //
 // GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#list-app-installations-for-an-organization
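A minimal sketch of the new Organizations.Delete call added above; the organization name is a placeholder, and NewTokenClient is simply one way to build an authenticated client.

package main

import (
	"context"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	// Irreversible: deletes the organization named "example-org" (placeholder).
	if _, err := client.Organizations.Delete(ctx, "example-org"); err != nil {
		log.Fatalf("deleting organization: %v", err)
	}
}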
diff --git a/vendor/github.com/google/go-github/v50/github/orgs_actions_allowed.go b/vendor/github.com/google/go-github/v53/github/orgs_actions_allowed.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/orgs_actions_allowed.go
rename to vendor/github.com/google/go-github/v53/github/orgs_actions_allowed.go
diff --git a/vendor/github.com/google/go-github/v50/github/orgs_actions_permissions.go b/vendor/github.com/google/go-github/v53/github/orgs_actions_permissions.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/orgs_actions_permissions.go
rename to vendor/github.com/google/go-github/v53/github/orgs_actions_permissions.go
diff --git a/vendor/github.com/google/go-github/v53/github/orgs_audit_log.go b/vendor/github.com/google/go-github/v53/github/orgs_audit_log.go
new file mode 100644
index 0000000000..e2e4692e57
--- /dev/null
+++ b/vendor/github.com/google/go-github/v53/github/orgs_audit_log.go
@@ -0,0 +1,148 @@
+// Copyright 2021 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+	"context"
+	"fmt"
+)
+
+// GetAuditLogOptions sets up optional parameters to query the audit-log endpoint.
+type GetAuditLogOptions struct {
+	Phrase  *string `url:"phrase,omitempty"`  // A search phrase. (Optional.)
+	Include *string `url:"include,omitempty"` // Event type includes. Can be one of "web", "git", "all". Default: "web". (Optional.)
+	Order   *string `url:"order,omitempty"`   // The order of audit log events. Can be one of "asc" or "desc". Default: "desc". (Optional.)
+
+	ListCursorOptions
+}
+
+// HookConfig describes metadata about a webhook configuration.
+type HookConfig struct {
+	ContentType *string `json:"content_type,omitempty"`
+	InsecureSSL *string `json:"insecure_ssl,omitempty"`
+	URL         *string `json:"url,omitempty"`
+
+	// Secret is returned obfuscated by GitHub, but it can be set for outgoing requests.
+	Secret *string `json:"secret,omitempty"`
+}
+
+// ActorLocation contains information about reported location for an actor.
+type ActorLocation struct {
+	CountryCode *string `json:"country_code,omitempty"`
+}
+
+// PolicyOverrideReason contains user-supplied information about why a policy was overridden.
+type PolicyOverrideReason struct {
+	Code    *string `json:"code,omitempty"`
+	Message *string `json:"message,omitempty"`
+}
+
+// AuditEntry describes the fields that may be represented by various audit-log "action" entries.
+// For a list of actions see - https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/reviewing-the-audit-log-for-your-organization#audit-log-actions
+type AuditEntry struct {
+	ActorIP                *string                 `json:"actor_ip,omitempty"`
+	Action                 *string                 `json:"action,omitempty"` // The name of the action that was performed, for example `user.login` or `repo.create`.
+	Active                 *bool                   `json:"active,omitempty"`
+	ActiveWas              *bool                   `json:"active_was,omitempty"`
+	Actor                  *string                 `json:"actor,omitempty"` // The actor who performed the action.
+	ActorLocation          *ActorLocation          `json:"actor_location,omitempty"`
+	BlockedUser            *string                 `json:"blocked_user,omitempty"`
+	Business               *string                 `json:"business,omitempty"`
+	CancelledAt            *Timestamp              `json:"cancelled_at,omitempty"`
+	CompletedAt            *Timestamp              `json:"completed_at,omitempty"`
+	Conclusion             *string                 `json:"conclusion,omitempty"`
+	Config                 *HookConfig             `json:"config,omitempty"`
+	ConfigWas              *HookConfig             `json:"config_was,omitempty"`
+	ContentType            *string                 `json:"content_type,omitempty"`
+	CreatedAt              *Timestamp              `json:"created_at,omitempty"`
+	DeployKeyFingerprint   *string                 `json:"deploy_key_fingerprint,omitempty"`
+	DocumentID             *string                 `json:"_document_id,omitempty"`
+	Emoji                  *string                 `json:"emoji,omitempty"`
+	EnvironmentName        *string                 `json:"environment_name,omitempty"`
+	Event                  *string                 `json:"event,omitempty"`
+	Events                 []string                `json:"events,omitempty"`
+	EventsWere             []string                `json:"events_were,omitempty"`
+	Explanation            *string                 `json:"explanation,omitempty"`
+	Fingerprint            *string                 `json:"fingerprint,omitempty"`
+	HashedToken            *string                 `json:"hashed_token,omitempty"`
+	HeadBranch             *string                 `json:"head_branch,omitempty"`
+	HeadSHA                *string                 `json:"head_sha,omitempty"`
+	HookID                 *int64                  `json:"hook_id,omitempty"`
+	IsHostedRunner         *bool                   `json:"is_hosted_runner,omitempty"`
+	JobName                *string                 `json:"job_name,omitempty"`
+	JobWorkflowRef         *string                 `json:"job_workflow_ref,omitempty"`
+	LimitedAvailability    *bool                   `json:"limited_availability,omitempty"`
+	Message                *string                 `json:"message,omitempty"`
+	Name                   *string                 `json:"name,omitempty"`
+	OAuthApplicationID     *int64                  `json:"oauth_application_id,omitempty"`
+	OldUser                *string                 `json:"old_user,omitempty"`
+	OldPermission          *string                 `json:"old_permission,omitempty"` // The permission level for membership changes, for example `admin` or `read`.
+	OpenSSHPublicKey       *string                 `json:"openssh_public_key,omitempty"`
+	OperationType          *string                 `json:"operation_type,omitempty"`
+	Org                    *string                 `json:"org,omitempty"`
+	OrgID                  *int64                  `json:"org_id,omitempty"`
+	OverriddenCodes        []string                `json:"overridden_codes,omitempty"`
+	Permission             *string                 `json:"permission,omitempty"` // The permission level for membership changes, for example `admin` or `read`.
+	PreviousVisibility     *string                 `json:"previous_visibility,omitempty"`
+	ProgrammaticAccessType *string                 `json:"programmatic_access_type,omitempty"`
+	PullRequestID          *int64                  `json:"pull_request_id,omitempty"`
+	PullRequestTitle       *string                 `json:"pull_request_title,omitempty"`
+	PullRequestURL         *string                 `json:"pull_request_url,omitempty"`
+	ReadOnly               *string                 `json:"read_only,omitempty"`
+	Reasons                []*PolicyOverrideReason `json:"reasons,omitempty"`
+	Repo                   *string                 `json:"repo,omitempty"`
+	Repository             *string                 `json:"repository,omitempty"`
+	RepositoryPublic       *bool                   `json:"repository_public,omitempty"`
+	RunAttempt             *int64                  `json:"run_attempt,omitempty"`
+	RunnerGroupID          *int64                  `json:"runner_group_id,omitempty"`
+	RunnerGroupName        *string                 `json:"runner_group_name,omitempty"`
+	RunnerID               *int64                  `json:"runner_id,omitempty"`
+	RunnerLabels           []string                `json:"runner_labels,omitempty"`
+	RunnerName             *string                 `json:"runner_name,omitempty"`
+	RunNumber              *int64                  `json:"run_number,omitempty"`
+	SecretsPassed          []string                `json:"secrets_passed,omitempty"`
+	SourceVersion          *string                 `json:"source_version,omitempty"`
+	StartedAt              *Timestamp              `json:"started_at,omitempty"`
+	TargetLogin            *string                 `json:"target_login,omitempty"`
+	TargetVersion          *string                 `json:"target_version,omitempty"`
+	Team                   *string                 `json:"team,omitempty"`
+	Timestamp              *Timestamp              `json:"@timestamp,omitempty"` // The time the audit log event occurred, given as a [Unix timestamp](http://en.wikipedia.org/wiki/Unix_time).
+	TokenID                *int64                  `json:"token_id,omitempty"`
+	TokenScopes            *string                 `json:"token_scopes,omitempty"`
+	Topic                  *string                 `json:"topic,omitempty"`
+	TransportProtocolName  *string                 `json:"transport_protocol_name,omitempty"` // A human readable name for the protocol (for example, HTTP or SSH) used to transfer Git data.
+	TransportProtocol      *int                    `json:"transport_protocol,omitempty"`      // The type of protocol (for example, HTTP=1 or SSH=2) used to transfer Git data.
+	TriggerID              *int64                  `json:"trigger_id,omitempty"`
+	User                   *string                 `json:"user,omitempty"` // The user that was affected by the action performed (if available).
+	UserAgent              *string                 `json:"user_agent,omitempty"`
+	Visibility             *string                 `json:"visibility,omitempty"` // The repository visibility, for example `public` or `private`.
+	WorkflowID             *int64                  `json:"workflow_id,omitempty"`
+	WorkflowRunID          *int64                  `json:"workflow_run_id,omitempty"`
+}
+
+// GetAuditLog gets the audit-log entries for an organization.
+//
+// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#get-the-audit-log-for-an-organization
+func (s *OrganizationsService) GetAuditLog(ctx context.Context, org string, opts *GetAuditLogOptions) ([]*AuditEntry, *Response, error) {
+	u := fmt.Sprintf("orgs/%v/audit-log", org)
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var auditEntries []*AuditEntry
+	resp, err := s.client.Do(ctx, req, &auditEntries)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return auditEntries, resp, nil
+}
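A short usage sketch for the GetAuditLog method added above; the organization name and search phrase are placeholders, and the Get* accessors are go-github's generated getters for pointer fields.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	opts := &github.GetAuditLogOptions{
		Phrase:  github.String("action:org.update_member"), // placeholder search phrase
		Include: github.String("web"),
		Order:   github.String("desc"),
	}

	entries, _, err := client.Organizations.GetAuditLog(ctx, "example-org", opts)
	if err != nil {
		log.Fatalf("fetching audit log: %v", err)
	}
	for _, e := range entries {
		fmt.Printf("%v %s by %s\n", e.GetTimestamp(), e.GetAction(), e.GetActor())
	}
}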
diff --git a/vendor/github.com/google/go-github/v50/github/orgs_custom_roles.go b/vendor/github.com/google/go-github/v53/github/orgs_custom_roles.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/orgs_custom_roles.go
rename to vendor/github.com/google/go-github/v53/github/orgs_custom_roles.go
diff --git a/vendor/github.com/google/go-github/v50/github/orgs_hooks.go b/vendor/github.com/google/go-github/v53/github/orgs_hooks.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/orgs_hooks.go
rename to vendor/github.com/google/go-github/v53/github/orgs_hooks.go
diff --git a/vendor/github.com/google/go-github/v50/github/orgs_hooks_deliveries.go b/vendor/github.com/google/go-github/v53/github/orgs_hooks_deliveries.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/orgs_hooks_deliveries.go
rename to vendor/github.com/google/go-github/v53/github/orgs_hooks_deliveries.go
diff --git a/vendor/github.com/google/go-github/v50/github/orgs_members.go b/vendor/github.com/google/go-github/v53/github/orgs_members.go
similarity index 99%
rename from vendor/github.com/google/go-github/v50/github/orgs_members.go
rename to vendor/github.com/google/go-github/v53/github/orgs_members.go
index 38f43bad5a..79f8a65333 100644
--- a/vendor/github.com/google/go-github/v50/github/orgs_members.go
+++ b/vendor/github.com/google/go-github/v53/github/orgs_members.go
@@ -315,8 +315,8 @@ type CreateOrgInvitationOptions struct {
 	// * billing_manager - Non-owner organization members with ability to
 	//   manage the billing settings of your organization.
 	// Default is "direct_member".
-	Role   *string `json:"role"`
-	TeamID []int64 `json:"team_ids"`
+	Role   *string `json:"role,omitempty"`
+	TeamID []int64 `json:"team_ids,omitempty"`
 }
 
 // CreateOrgInvitation invites people to an organization by using their GitHub user ID or their email address.
diff --git a/vendor/github.com/google/go-github/v50/github/orgs_outside_collaborators.go b/vendor/github.com/google/go-github/v53/github/orgs_outside_collaborators.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/orgs_outside_collaborators.go
rename to vendor/github.com/google/go-github/v53/github/orgs_outside_collaborators.go
diff --git a/vendor/github.com/google/go-github/v50/github/orgs_packages.go b/vendor/github.com/google/go-github/v53/github/orgs_packages.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/orgs_packages.go
rename to vendor/github.com/google/go-github/v53/github/orgs_packages.go
diff --git a/vendor/github.com/google/go-github/v50/github/orgs_projects.go b/vendor/github.com/google/go-github/v53/github/orgs_projects.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/orgs_projects.go
rename to vendor/github.com/google/go-github/v53/github/orgs_projects.go
diff --git a/vendor/github.com/google/go-github/v53/github/orgs_rules.go b/vendor/github.com/google/go-github/v53/github/orgs_rules.go
new file mode 100644
index 0000000000..a3905af8fb
--- /dev/null
+++ b/vendor/github.com/google/go-github/v53/github/orgs_rules.go
@@ -0,0 +1,105 @@
+// Copyright 2023 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+	"context"
+	"fmt"
+)
+
+// GetAllOrganizationRulesets gets all the rulesets for the specified organization.
+//
+// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#get-all-organization-repository-rulesets
+func (s *OrganizationsService) GetAllOrganizationRulesets(ctx context.Context, org string) ([]*Ruleset, *Response, error) {
+	u := fmt.Sprintf("orgs/%v/rulesets", org)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var rulesets []*Ruleset
+	resp, err := s.client.Do(ctx, req, &rulesets)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rulesets, resp, nil
+}
+
+// CreateOrganizationRuleset creates a ruleset for the specified organization.
+//
+// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#create-an-organization-repository-ruleset
+func (s *OrganizationsService) CreateOrganizationRuleset(ctx context.Context, org string, rs *Ruleset) (*Ruleset, *Response, error) {
+	u := fmt.Sprintf("orgs/%v/rulesets", org)
+
+	req, err := s.client.NewRequest("POST", u, rs)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ruleset *Ruleset
+	resp, err := s.client.Do(ctx, req, &ruleset)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ruleset, resp, nil
+}
+
+// GetOrganizationRuleset gets a ruleset from the specified organization.
+//
+// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#get-an-organization-repository-ruleset
+func (s *OrganizationsService) GetOrganizationRuleset(ctx context.Context, org string, rulesetID int64) (*Ruleset, *Response, error) {
+	u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ruleset *Ruleset
+	resp, err := s.client.Do(ctx, req, &ruleset)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ruleset, resp, nil
+}
+
+// UpdateOrganizationRuleset updates a ruleset from the specified organization.
+//
+// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#update-an-organization-repository-ruleset
+func (s *OrganizationsService) UpdateOrganizationRuleset(ctx context.Context, org string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) {
+	u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID)
+
+	req, err := s.client.NewRequest("PUT", u, rs)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ruleset *Ruleset
+	resp, err := s.client.Do(ctx, req, &ruleset)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ruleset, resp, nil
+}
+
+// DeleteOrganizationRuleset deletes a ruleset from the specified organization.
+//
+// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#delete-an-organization-repository-ruleset
+func (s *OrganizationsService) DeleteOrganizationRuleset(ctx context.Context, org string, rulesetID int64) (*Response, error) {
+	u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID)
+
+	req, err := s.client.NewRequest("DELETE", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(ctx, req, nil)
+}
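A sketch of creating an organization ruleset with the service methods above; it relies on the Ruleset type and rule constructors defined later in this change (repos_rules.go). The organization name, ruleset name, and ref/repository patterns are placeholders.

package main

import (
	"context"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	rs := &github.Ruleset{
		Name:        "require-linear-history", // placeholder
		Target:      github.String("branch"),
		Enforcement: "active",
		Conditions: &github.RulesetConditions{
			RefName: &github.RulesetRefConditionParameters{
				Include: []string{"~DEFAULT_BRANCH"},
				Exclude: []string{},
			},
			RepositoryName: &github.RulesetRepositoryConditionParameters{
				Include: []string{"~ALL"},
			},
		},
		Rules: []*github.RepositoryRule{
			github.NewRequiredLinearHistoryRule(),
			github.NewNonFastForwardRule(),
		},
	}

	created, _, err := client.Organizations.CreateOrganizationRuleset(ctx, "example-org", rs)
	if err != nil {
		log.Fatalf("creating organization ruleset: %v", err)
	}
	log.Printf("created ruleset %d", created.ID)
}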
diff --git a/vendor/github.com/google/go-github/v50/github/orgs_security_managers.go b/vendor/github.com/google/go-github/v53/github/orgs_security_managers.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/orgs_security_managers.go
rename to vendor/github.com/google/go-github/v53/github/orgs_security_managers.go
diff --git a/vendor/github.com/google/go-github/v50/github/orgs_users_blocking.go b/vendor/github.com/google/go-github/v53/github/orgs_users_blocking.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/orgs_users_blocking.go
rename to vendor/github.com/google/go-github/v53/github/orgs_users_blocking.go
diff --git a/vendor/github.com/google/go-github/v50/github/packages.go b/vendor/github.com/google/go-github/v53/github/packages.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/packages.go
rename to vendor/github.com/google/go-github/v53/github/packages.go
diff --git a/vendor/github.com/google/go-github/v50/github/projects.go b/vendor/github.com/google/go-github/v53/github/projects.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/projects.go
rename to vendor/github.com/google/go-github/v53/github/projects.go
diff --git a/vendor/github.com/google/go-github/v50/github/pulls.go b/vendor/github.com/google/go-github/v53/github/pulls.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/pulls.go
rename to vendor/github.com/google/go-github/v53/github/pulls.go
diff --git a/vendor/github.com/google/go-github/v50/github/pulls_comments.go b/vendor/github.com/google/go-github/v53/github/pulls_comments.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/pulls_comments.go
rename to vendor/github.com/google/go-github/v53/github/pulls_comments.go
diff --git a/vendor/github.com/google/go-github/v50/github/pulls_reviewers.go b/vendor/github.com/google/go-github/v53/github/pulls_reviewers.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/pulls_reviewers.go
rename to vendor/github.com/google/go-github/v53/github/pulls_reviewers.go
diff --git a/vendor/github.com/google/go-github/v50/github/pulls_reviews.go b/vendor/github.com/google/go-github/v53/github/pulls_reviews.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/pulls_reviews.go
rename to vendor/github.com/google/go-github/v53/github/pulls_reviews.go
diff --git a/vendor/github.com/google/go-github/v50/github/pulls_threads.go b/vendor/github.com/google/go-github/v53/github/pulls_threads.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/pulls_threads.go
rename to vendor/github.com/google/go-github/v53/github/pulls_threads.go
diff --git a/vendor/github.com/google/go-github/v50/github/reactions.go b/vendor/github.com/google/go-github/v53/github/reactions.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/reactions.go
rename to vendor/github.com/google/go-github/v53/github/reactions.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos.go b/vendor/github.com/google/go-github/v53/github/repos.go
similarity index 99%
rename from vendor/github.com/google/go-github/v50/github/repos.go
rename to vendor/github.com/google/go-github/v53/github/repos.go
index fad152e22f..5ffad6dd3c 100644
--- a/vendor/github.com/google/go-github/v50/github/repos.go
+++ b/vendor/github.com/google/go-github/v53/github/repos.go
@@ -847,6 +847,8 @@ type Protection struct {
 	BlockCreations                 *BlockCreations                 `json:"block_creations,omitempty"`
 	LockBranch                     *LockBranch                     `json:"lock_branch,omitempty"`
 	AllowForkSyncing               *AllowForkSyncing               `json:"allow_fork_syncing,omitempty"`
+	RequiredSignatures             *SignaturesProtectedBranch      `json:"required_signatures,omitempty"`
+	URL                            *string                         `json:"url,omitempty"`
 }
 
 // BlockCreations represents whether users can push changes that create branches. If this is true, this
@@ -1023,7 +1025,9 @@ type RequiredStatusChecks struct {
 	Contexts []string `json:"contexts,omitempty"`
 	// The list of status checks to require in order to merge into this
 	// branch.
-	Checks []*RequiredStatusCheck `json:"checks"`
+	Checks      []*RequiredStatusCheck `json:"checks"`
+	ContextsURL *string                `json:"contexts_url,omitempty"`
+	URL         *string                `json:"url,omitempty"`
 }
 
 // RequiredStatusChecksRequest represents a request to edit a protected branch's status checks.
@@ -1965,6 +1969,7 @@ func (s *RepositoriesService) RemoveUserRestrictions(ctx context.Context, owner,
 // TransferRequest represents a request to transfer a repository.
 type TransferRequest struct {
 	NewOwner string  `json:"new_owner"`
+	NewName  *string `json:"new_name,omitempty"`
 	TeamID   []int64 `json:"team_ids,omitempty"`
 }
 
diff --git a/vendor/github.com/google/go-github/v50/github/repos_actions_access.go b/vendor/github.com/google/go-github/v53/github/repos_actions_access.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_actions_access.go
rename to vendor/github.com/google/go-github/v53/github/repos_actions_access.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_actions_allowed.go b/vendor/github.com/google/go-github/v53/github/repos_actions_allowed.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_actions_allowed.go
rename to vendor/github.com/google/go-github/v53/github/repos_actions_allowed.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_actions_permissions.go b/vendor/github.com/google/go-github/v53/github/repos_actions_permissions.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_actions_permissions.go
rename to vendor/github.com/google/go-github/v53/github/repos_actions_permissions.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_autolinks.go b/vendor/github.com/google/go-github/v53/github/repos_autolinks.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_autolinks.go
rename to vendor/github.com/google/go-github/v53/github/repos_autolinks.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_codeowners.go b/vendor/github.com/google/go-github/v53/github/repos_codeowners.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_codeowners.go
rename to vendor/github.com/google/go-github/v53/github/repos_codeowners.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_collaborators.go b/vendor/github.com/google/go-github/v53/github/repos_collaborators.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_collaborators.go
rename to vendor/github.com/google/go-github/v53/github/repos_collaborators.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_comments.go b/vendor/github.com/google/go-github/v53/github/repos_comments.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_comments.go
rename to vendor/github.com/google/go-github/v53/github/repos_comments.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_commits.go b/vendor/github.com/google/go-github/v53/github/repos_commits.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_commits.go
rename to vendor/github.com/google/go-github/v53/github/repos_commits.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_community_health.go b/vendor/github.com/google/go-github/v53/github/repos_community_health.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_community_health.go
rename to vendor/github.com/google/go-github/v53/github/repos_community_health.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_contents.go b/vendor/github.com/google/go-github/v53/github/repos_contents.go
similarity index 97%
rename from vendor/github.com/google/go-github/v50/github/repos_contents.go
rename to vendor/github.com/google/go-github/v53/github/repos_contents.go
index be58fd52f6..874a327728 100644
--- a/vendor/github.com/google/go-github/v50/github/repos_contents.go
+++ b/vendor/github.com/google/go-github/v53/github/repos_contents.go
@@ -192,8 +192,15 @@ func (s *RepositoriesService) DownloadContentsWithMeta(ctx context.Context, owne
 // as possible, both result types will be returned but only one will contain a
 // value and the other will be nil.
 //
+// Due to an auth vulnerability issue in the GitHub v3 API, ".." is not allowed
+// to appear anywhere in the "path" or this method will return an error.
+//
 // GitHub API docs: https://docs.github.com/en/rest/repos/contents#get-repository-content
 func (s *RepositoriesService) GetContents(ctx context.Context, owner, repo, path string, opts *RepositoryContentGetOptions) (fileContent *RepositoryContent, directoryContent []*RepositoryContent, resp *Response, err error) {
+	if strings.Contains(path, "..") {
+		return nil, nil, nil, errors.New("path must not contain '..' due to auth vulnerability issue")
+	}
+
 	escapedPath := (&url.URL{Path: strings.TrimSuffix(path, "/")}).String()
 	u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, escapedPath)
 	u, err = addOptions(u, opts)
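To make the new guard concrete, a small sketch showing that GetContents now fails fast on paths containing ".." before any HTTP request is made; octocat/Hello-World is just a convenient public repository.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // unauthenticated access to a public repository

	// Rejected locally by the check above; no request is sent.
	_, _, _, err := client.Repositories.GetContents(ctx, "octocat", "Hello-World", "../secrets", nil)
	fmt.Println("traversal attempt:", err)

	// Ordinary paths behave as before.
	file, _, _, err := client.Repositories.GetContents(ctx, "octocat", "Hello-World", "README", nil)
	if err != nil {
		log.Fatalf("fetching contents: %v", err)
	}
	content, err := file.GetContent()
	if err != nil {
		log.Fatalf("decoding contents: %v", err)
	}
	fmt.Printf("README is %d bytes\n", len(content))
}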
diff --git a/vendor/github.com/google/go-github/v50/github/repos_deployment_branch_policies.go b/vendor/github.com/google/go-github/v53/github/repos_deployment_branch_policies.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_deployment_branch_policies.go
rename to vendor/github.com/google/go-github/v53/github/repos_deployment_branch_policies.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_deployments.go b/vendor/github.com/google/go-github/v53/github/repos_deployments.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_deployments.go
rename to vendor/github.com/google/go-github/v53/github/repos_deployments.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_environments.go b/vendor/github.com/google/go-github/v53/github/repos_environments.go
similarity index 96%
rename from vendor/github.com/google/go-github/v50/github/repos_environments.go
rename to vendor/github.com/google/go-github/v53/github/repos_environments.go
index 2e85fdf99c..2399a42c74 100644
--- a/vendor/github.com/google/go-github/v50/github/repos_environments.go
+++ b/vendor/github.com/google/go-github/v53/github/repos_environments.go
@@ -28,6 +28,7 @@ type Environment struct {
 	HTMLURL         *string           `json:"html_url,omitempty"`
 	CreatedAt       *Timestamp        `json:"created_at,omitempty"`
 	UpdatedAt       *Timestamp        `json:"updated_at,omitempty"`
+	CanAdminsBypass *bool             `json:"can_admins_bypass,omitempty"`
 	ProtectionRules []*ProtectionRule `json:"protection_rules,omitempty"`
 }
 
@@ -147,11 +148,15 @@ func (s *RepositoriesService) GetEnvironment(ctx context.Context, owner, repo, n
 
 // MarshalJSON implements the json.Marshaler interface.
 // As the only way to clear a WaitTimer is to set it to 0, a missing WaitTimer object should default to 0, not null.
+// As the default value for CanAdminsBypass is true, a nil value here marshals to true.
 func (c *CreateUpdateEnvironment) MarshalJSON() ([]byte, error) {
 	type Alias CreateUpdateEnvironment
 	if c.WaitTimer == nil {
 		c.WaitTimer = Int(0)
 	}
+	if c.CanAdminsBypass == nil {
+		c.CanAdminsBypass = Bool(true)
+	}
 	return json.Marshal(&struct {
 		*Alias
 	}{
@@ -166,6 +171,7 @@ func (c *CreateUpdateEnvironment) MarshalJSON() ([]byte, error) {
 type CreateUpdateEnvironment struct {
 	WaitTimer              *int            `json:"wait_timer"`
 	Reviewers              []*EnvReviewers `json:"reviewers"`
+	CanAdminsBypass        *bool           `json:"can_admins_bypass"`
 	DeploymentBranchPolicy *BranchPolicy   `json:"deployment_branch_policy"`
 }
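A sketch of how the new CanAdminsBypass field might be used with the existing CreateUpdateEnvironment method (assumed unchanged elsewhere in this package); owner, repo, and environment name are placeholders. Note the MarshalJSON behavior above: leaving the field nil serializes it as true.

package main

import (
	"context"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	env := &github.CreateUpdateEnvironment{
		WaitTimer:       github.Int(30),
		CanAdminsBypass: github.Bool(false), // nil would marshal as true, per MarshalJSON above
	}
	if _, _, err := client.Repositories.CreateUpdateEnvironment(ctx, "example-org", "example-repo", "production", env); err != nil {
		log.Fatalf("configuring environment: %v", err)
	}
}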
 
diff --git a/vendor/github.com/google/go-github/v50/github/repos_forks.go b/vendor/github.com/google/go-github/v53/github/repos_forks.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_forks.go
rename to vendor/github.com/google/go-github/v53/github/repos_forks.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_hooks.go b/vendor/github.com/google/go-github/v53/github/repos_hooks.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_hooks.go
rename to vendor/github.com/google/go-github/v53/github/repos_hooks.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_hooks_deliveries.go b/vendor/github.com/google/go-github/v53/github/repos_hooks_deliveries.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_hooks_deliveries.go
rename to vendor/github.com/google/go-github/v53/github/repos_hooks_deliveries.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_invitations.go b/vendor/github.com/google/go-github/v53/github/repos_invitations.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_invitations.go
rename to vendor/github.com/google/go-github/v53/github/repos_invitations.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_keys.go b/vendor/github.com/google/go-github/v53/github/repos_keys.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_keys.go
rename to vendor/github.com/google/go-github/v53/github/repos_keys.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_lfs.go b/vendor/github.com/google/go-github/v53/github/repos_lfs.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_lfs.go
rename to vendor/github.com/google/go-github/v53/github/repos_lfs.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_merging.go b/vendor/github.com/google/go-github/v53/github/repos_merging.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_merging.go
rename to vendor/github.com/google/go-github/v53/github/repos_merging.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_pages.go b/vendor/github.com/google/go-github/v53/github/repos_pages.go
similarity index 68%
rename from vendor/github.com/google/go-github/v50/github/repos_pages.go
rename to vendor/github.com/google/go-github/v53/github/repos_pages.go
index 737cec0f22..83075dbdd2 100644
--- a/vendor/github.com/google/go-github/v50/github/repos_pages.go
+++ b/vendor/github.com/google/go-github/v53/github/repos_pages.go
@@ -17,6 +17,7 @@ type Pages struct {
 	CNAME            *string                `json:"cname,omitempty"`
 	Custom404        *bool                  `json:"custom_404,omitempty"`
 	HTMLURL          *string                `json:"html_url,omitempty"`
+	BuildType        *string                `json:"build_type,omitempty"`
 	Source           *PagesSource           `json:"source,omitempty"`
 	Public           *bool                  `json:"public,omitempty"`
 	HTTPSCertificate *PagesHTTPSCertificate `json:"https_certificate,omitempty"`
@@ -46,6 +47,44 @@ type PagesBuild struct {
 	UpdatedAt *Timestamp  `json:"updated_at,omitempty"`
 }
 
+// PagesDomain represents a domain associated with a GitHub Pages site.
+type PagesDomain struct {
+	Host                          *string `json:"host,omitempty"`
+	URI                           *string `json:"uri,omitempty"`
+	Nameservers                   *string `json:"nameservers,omitempty"`
+	DNSResolves                   *bool   `json:"dns_resolves,omitempty"`
+	IsProxied                     *bool   `json:"is_proxied,omitempty"`
+	IsCloudflareIP                *bool   `json:"is_cloudflare_ip,omitempty"`
+	IsFastlyIP                    *bool   `json:"is_fastly_ip,omitempty"`
+	IsOldIPAddress                *bool   `json:"is_old_ip_address,omitempty"`
+	IsARecord                     *bool   `json:"is_a_record,omitempty"`
+	HasCNAMERecord                *bool   `json:"has_cname_record,omitempty"`
+	HasMXRecordsPresent           *bool   `json:"has_mx_records_present,omitempty"`
+	IsValidDomain                 *bool   `json:"is_valid_domain,omitempty"`
+	IsApexDomain                  *bool   `json:"is_apex_domain,omitempty"`
+	ShouldBeARecord               *bool   `json:"should_be_a_record,omitempty"`
+	IsCNAMEToGithubUserDomain     *bool   `json:"is_cname_to_github_user_domain,omitempty"`
+	IsCNAMEToPagesDotGithubDotCom *bool   `json:"is_cname_to_pages_dot_github_dot_com,omitempty"`
+	IsCNAMEToFastly               *bool   `json:"is_cname_to_fastly,omitempty"`
+	IsPointedToGithubPagesIP      *bool   `json:"is_pointed_to_github_pages_ip,omitempty"`
+	IsNonGithubPagesIPPresent     *bool   `json:"is_non_github_pages_ip_present,omitempty"`
+	IsPagesDomain                 *bool   `json:"is_pages_domain,omitempty"`
+	IsServedByPages               *bool   `json:"is_served_by_pages,omitempty"`
+	IsValid                       *bool   `json:"is_valid,omitempty"`
+	Reason                        *string `json:"reason,omitempty"`
+	RespondsToHTTPS               *bool   `json:"responds_to_https,omitempty"`
+	EnforcesHTTPS                 *bool   `json:"enforces_https,omitempty"`
+	HTTPSError                    *string `json:"https_error,omitempty"`
+	IsHTTPSEligible               *bool   `json:"is_https_eligible,omitempty"`
+	CAAError                      *string `json:"caa_error,omitempty"`
+}
+
+// PagesHealthCheckResponse represents the response given for the health check of a GitHub Pages site.
+type PagesHealthCheckResponse struct {
+	Domain    *PagesDomain `json:"domain,omitempty"`
+	AltDomain *PagesDomain `json:"alt_domain,omitempty"`
+}
+
 // PagesHTTPSCertificate represents the HTTPS Certificate information for a GitHub Pages site.
 type PagesHTTPSCertificate struct {
 	State       *string  `json:"state,omitempty"`
@@ -58,7 +97,8 @@ type PagesHTTPSCertificate struct {
 // createPagesRequest is a subset of Pages and is used internally
 // by EnablePages to pass only the known fields for the endpoint.
 type createPagesRequest struct {
-	Source *PagesSource `json:"source,omitempty"`
+	BuildType *string      `json:"build_type,omitempty"`
+	Source    *PagesSource `json:"source,omitempty"`
 }
 
 // EnablePages enables GitHub Pages for the named repo.
@@ -68,7 +108,8 @@ func (s *RepositoriesService) EnablePages(ctx context.Context, owner, repo strin
 	u := fmt.Sprintf("repos/%v/%v/pages", owner, repo)
 
 	pagesReq := &createPagesRequest{
-		Source: pages.Source,
+		BuildType: pages.BuildType,
+		Source:    pages.Source,
 	}
 
 	req, err := s.client.NewRequest("POST", u, pagesReq)
@@ -92,6 +133,10 @@ type PagesUpdate struct {
 	// CNAME represents a custom domain for the repository.
 	// Leaving CNAME empty will remove the custom domain.
 	CNAME *string `json:"cname"`
+	// BuildType is optional and can either be "legacy" or "workflow".
+	// "workflow" - You are using a github workflow to build your pages.
+	// "legacy"   - You are deploying from a branch.
+	BuildType *string `json:"build_type,omitempty"`
 	// Source must include the branch name, and may optionally specify the subdirectory "/docs".
 	// Possible values for Source.Branch are usually "gh-pages", "main", and "master",
 	// or any other existing branch name.
@@ -240,3 +285,22 @@ func (s *RepositoriesService) RequestPageBuild(ctx context.Context, owner, repo
 
 	return build, resp, nil
 }
+
+// GetPageHealthCheck gets a DNS health check for the CNAME record configured for a repository's GitHub Pages.
+//
+// GitHub API docs: https://docs.github.com/en/rest/pages#get-a-dns-health-check-for-github-pages
+func (s *RepositoriesService) GetPageHealthCheck(ctx context.Context, owner, repo string) (*PagesHealthCheckResponse, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/pages/health", owner, repo)
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	healthCheckResponse := new(PagesHealthCheckResponse)
+	resp, err := s.client.Do(ctx, req, healthCheckResponse)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return healthCheckResponse, resp, nil
+}
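A sketch combining the new BuildType field and the health-check endpoint added above; owner and repo are placeholders, and the Get* accessors are go-github's generated getters for pointer fields.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	// Enable Pages using the new workflow build type; Source is still used for
	// legacy branch deployments.
	if _, _, err := client.Repositories.EnablePages(ctx, "example-org", "example-repo", &github.Pages{
		BuildType: github.String("workflow"),
	}); err != nil {
		log.Fatalf("enabling pages: %v", err)
	}

	health, _, err := client.Repositories.GetPageHealthCheck(ctx, "example-org", "example-repo")
	if err != nil {
		log.Fatalf("pages health check: %v", err)
	}
	if d := health.GetDomain(); d != nil {
		fmt.Printf("served by pages: %v, enforces https: %v\n", d.GetIsServedByPages(), d.GetEnforcesHTTPS())
	}
}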
diff --git a/vendor/github.com/google/go-github/v50/github/repos_prereceive_hooks.go b/vendor/github.com/google/go-github/v53/github/repos_prereceive_hooks.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_prereceive_hooks.go
rename to vendor/github.com/google/go-github/v53/github/repos_prereceive_hooks.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_projects.go b/vendor/github.com/google/go-github/v53/github/repos_projects.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_projects.go
rename to vendor/github.com/google/go-github/v53/github/repos_projects.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_releases.go b/vendor/github.com/google/go-github/v53/github/repos_releases.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_releases.go
rename to vendor/github.com/google/go-github/v53/github/repos_releases.go
diff --git a/vendor/github.com/google/go-github/v53/github/repos_rules.go b/vendor/github.com/google/go-github/v53/github/repos_rules.go
new file mode 100644
index 0000000000..9299d3e7f3
--- /dev/null
+++ b/vendor/github.com/google/go-github/v53/github/repos_rules.go
@@ -0,0 +1,447 @@
+// Copyright 2023 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+)
+
+// BypassActor represents the bypass actors from a ruleset.
+type BypassActor struct {
+	ActorID *int64 `json:"actor_id,omitempty"`
+	// Possible values for ActorType are: Team, Integration
+	ActorType *string `json:"actor_type,omitempty"`
+}
+
+// RulesetLink represents a single link object from GitHub ruleset request _links.
+type RulesetLink struct {
+	HRef *string `json:"href,omitempty"`
+}
+
+// RulesetLinks represents the "_links" object in a Ruleset.
+type RulesetLinks struct {
+	Self *RulesetLink `json:"self,omitempty"`
+}
+
+// RulesetRefConditionParameters represents the conditions object for ref_names.
+type RulesetRefConditionParameters struct {
+	Include []string `json:"include"`
+	Exclude []string `json:"exclude"`
+}
+
+// RulesetRepositoryConditionParameters represents the conditions object for repository_names.
+type RulesetRepositoryConditionParameters struct {
+	Include   []string `json:"include,omitempty"`
+	Exclude   []string `json:"exclude,omitempty"`
+	Protected *bool    `json:"protected,omitempty"`
+}
+
+// RulesetConditions represents the conditions object in a ruleset.
+type RulesetConditions struct {
+	RefName        *RulesetRefConditionParameters        `json:"ref_name,omitempty"`
+	RepositoryName *RulesetRepositoryConditionParameters `json:"repository_name,omitempty"`
+}
+
+// RulePatternParameters represents the rule pattern parameters.
+type RulePatternParameters struct {
+	Name *string `json:"name,omitempty"`
+	// If Negate is true, the rule will fail if the pattern matches.
+	Negate *bool `json:"negate,omitempty"`
+	// Possible values for Operator are: starts_with, ends_with, contains, regex
+	Operator string `json:"operator"`
+	Pattern  string `json:"pattern"`
+}
+
+// UpdateAllowsFetchAndMergeRuleParameters represents the update rule parameters.
+type UpdateAllowsFetchAndMergeRuleParameters struct {
+	UpdateAllowsFetchAndMerge bool `json:"update_allows_fetch_and_merge"`
+}
+
+// RequiredDeploymentEnvironmentsRuleParameters represents the required_deployments rule parameters.
+type RequiredDeploymentEnvironmentsRuleParameters struct {
+	RequiredDeploymentEnvironments []string `json:"required_deployment_environments"`
+}
+
+// PullRequestRuleParameters represents the pull_request rule parameters.
+type PullRequestRuleParameters struct {
+	DismissStaleReviewsOnPush      bool `json:"dismiss_stale_reviews_on_push"`
+	RequireCodeOwnerReview         bool `json:"require_code_owner_review"`
+	RequireLastPushApproval        bool `json:"require_last_push_approval"`
+	RequiredApprovingReviewCount   int  `json:"required_approving_review_count"`
+	RequiredReviewThreadResolution bool `json:"required_review_thread_resolution"`
+}
+
+// RuleRequiredStatusChecks represents the RequiredStatusChecks for the RequiredStatusChecksRuleParameters object.
+type RuleRequiredStatusChecks struct {
+	Context       string `json:"context"`
+	IntegrationID *int64 `json:"integration_id,omitempty"`
+}
+
+// RequiredStatusChecksRuleParameters represents the required_status_checks rule parameters.
+type RequiredStatusChecksRuleParameters struct {
+	RequiredStatusChecks             []RuleRequiredStatusChecks `json:"required_status_checks"`
+	StrictRequiredStatusChecksPolicy bool                       `json:"strict_required_status_checks_policy"`
+}
+
+// RepositoryRule represents a GitHub Rule.
+type RepositoryRule struct {
+	Type       string           `json:"type"`
+	Parameters *json.RawMessage `json:"parameters,omitempty"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+// This helps us handle the fact that RepositoryRule parameter field can be of numerous types.
+func (r *RepositoryRule) UnmarshalJSON(data []byte) error {
+	type rule RepositoryRule
+	var RepositoryRule rule
+	if err := json.Unmarshal(data, &RepositoryRule); err != nil {
+		return err
+	}
+
+	r.Type = RepositoryRule.Type
+
+	switch RepositoryRule.Type {
+	case "creation", "deletion", "required_linear_history", "required_signatures", "non_fast_forward":
+		r.Parameters = nil
+	case "update":
+		params := UpdateAllowsFetchAndMergeRuleParameters{}
+		if err := json.Unmarshal(*RepositoryRule.Parameters, &params); err != nil {
+			return err
+		}
+
+		bytes, _ := json.Marshal(params)
+		rawParams := json.RawMessage(bytes)
+
+		r.Parameters = &rawParams
+	case "required_deployments":
+		params := RequiredDeploymentEnvironmentsRuleParameters{}
+		if err := json.Unmarshal(*RepositoryRule.Parameters, &params); err != nil {
+			return err
+		}
+
+		bytes, _ := json.Marshal(params)
+		rawParams := json.RawMessage(bytes)
+
+		r.Parameters = &rawParams
+	case "commit_message_pattern", "commit_author_email_pattern", "committer_email_pattern", "branch_name_pattern", "tag_name_pattern":
+		params := RulePatternParameters{}
+		if err := json.Unmarshal(*RepositoryRule.Parameters, &params); err != nil {
+			return err
+		}
+
+		bytes, _ := json.Marshal(params)
+		rawParams := json.RawMessage(bytes)
+
+		r.Parameters = &rawParams
+	case "pull_request":
+		params := PullRequestRuleParameters{}
+		if err := json.Unmarshal(*RepositoryRule.Parameters, &params); err != nil {
+			return err
+		}
+
+		bytes, _ := json.Marshal(params)
+		rawParams := json.RawMessage(bytes)
+
+		r.Parameters = &rawParams
+	case "required_status_checks":
+		params := RequiredStatusChecksRuleParameters{}
+		if err := json.Unmarshal(*RepositoryRule.Parameters, &params); err != nil {
+			return err
+		}
+
+		bytes, _ := json.Marshal(params)
+		rawParams := json.RawMessage(bytes)
+
+		r.Parameters = &rawParams
+	default:
+		r.Type = ""
+		r.Parameters = nil
+		return fmt.Errorf("RepositoryRule.Type %T is not yet implemented, unable to unmarshal", RepositoryRule.Type)
+	}
+
+	return nil
+}
+
+// NewCreationRule creates a rule to only allow users with bypass permission to create matching refs.
+func NewCreationRule() (rule *RepositoryRule) {
+	return &RepositoryRule{
+		Type: "creation",
+	}
+}
+
+// NewUpdateRule creates a rule to only allow users with bypass permission to update matching refs.
+func NewUpdateRule(params *UpdateAllowsFetchAndMergeRuleParameters) (rule *RepositoryRule) {
+	bytes, _ := json.Marshal(params)
+
+	rawParams := json.RawMessage(bytes)
+
+	return &RepositoryRule{
+		Type:       "update",
+		Parameters: &rawParams,
+	}
+}
+
+// NewDeletionRule creates a rule to only allow users with bypass permissions to delete matching refs.
+func NewDeletionRule() (rule *RepositoryRule) {
+	return &RepositoryRule{
+		Type: "deletion",
+	}
+}
+
+// NewRequiredLinearHistoryRule creates a rule to prevent merge commits from being pushed to matching branches.
+func NewRequiredLinearHistoryRule() (rule *RepositoryRule) {
+	return &RepositoryRule{
+		Type: "required_linear_history",
+	}
+}
+
+// NewRequiredDeploymentsRule creates a rule to require environments to be successfully deployed before they can be merged into the matching branches.
+func NewRequiredDeploymentsRule(params *RequiredDeploymentEnvironmentsRuleParameters) (rule *RepositoryRule) {
+	bytes, _ := json.Marshal(params)
+
+	rawParams := json.RawMessage(bytes)
+
+	return &RepositoryRule{
+		Type:       "required_deployments",
+		Parameters: &rawParams,
+	}
+}
+
+// NewRequiredSignaturesRule creates a rule to require commits pushed to matching branches to have verified signatures.
+func NewRequiredSignaturesRule() (rule *RepositoryRule) {
+	return &RepositoryRule{
+		Type: "required_signatures",
+	}
+}
+
+// NewPullRequestRule creates a rule to require all commits be made to a non-target branch and submitted via a pull request before they can be merged.
+func NewPullRequestRule(params *PullRequestRuleParameters) (rule *RepositoryRule) {
+	bytes, _ := json.Marshal(params)
+
+	rawParams := json.RawMessage(bytes)
+
+	return &RepositoryRule{
+		Type:       "pull_request",
+		Parameters: &rawParams,
+	}
+}
+
+// NewRequiredStatusChecksRule creates a rule to require that the specified status checks pass before branches can be merged into matching branches.
+func NewRequiredStatusChecksRule(params *RequiredStatusChecksRuleParameters) (rule *RepositoryRule) {
+	bytes, _ := json.Marshal(params)
+
+	rawParams := json.RawMessage(bytes)
+
+	return &RepositoryRule{
+		Type:       "required_status_checks",
+		Parameters: &rawParams,
+	}
+}
+
+// NewNonFastForwardRule creates a rule to prevent users with push access from force pushing to matching branches.
+func NewNonFastForwardRule() (rule *RepositoryRule) {
+	return &RepositoryRule{
+		Type: "non_fast_forward",
+	}
+}
+
+// NewCommitMessagePatternRule creates a rule to restrict commit message patterns being pushed to matching branches.
+func NewCommitMessagePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
+	bytes, _ := json.Marshal(params)
+
+	rawParams := json.RawMessage(bytes)
+
+	return &RepositoryRule{
+		Type:       "commit_message_pattern",
+		Parameters: &rawParams,
+	}
+}
+
+// NewCommitAuthorEmailPatternRule creates a rule to restrict commits with author email patterns being merged into matching branches.
+func NewCommitAuthorEmailPatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
+	bytes, _ := json.Marshal(params)
+
+	rawParams := json.RawMessage(bytes)
+
+	return &RepositoryRule{
+		Type:       "commit_author_email_pattern",
+		Parameters: &rawParams,
+	}
+}
+
+// NewCommitterEmailPatternRule creates a rule to restrict commits with committer email patterns being merged into matching branches.
+func NewCommitterEmailPatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
+	bytes, _ := json.Marshal(params)
+
+	rawParams := json.RawMessage(bytes)
+
+	return &RepositoryRule{
+		Type:       "committer_email_pattern",
+		Parameters: &rawParams,
+	}
+}
+
+// NewBranchNamePatternRule creates a rule to restrict branch patterns from being merged into matching branches.
+func NewBranchNamePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
+	bytes, _ := json.Marshal(params)
+
+	rawParams := json.RawMessage(bytes)
+
+	return &RepositoryRule{
+		Type:       "branch_name_pattern",
+		Parameters: &rawParams,
+	}
+}
+
+// NewTagNamePatternRule creates a rule to restrict tag patterns contained in non-target branches from being merged into matching branches.
+func NewTagNamePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
+	bytes, _ := json.Marshal(params)
+
+	rawParams := json.RawMessage(bytes)
+
+	return &RepositoryRule{
+		Type:       "tag_name_pattern",
+		Parameters: &rawParams,
+	}
+}
+
+// Ruleset represents a GitHub ruleset object.
+type Ruleset struct {
+	ID   int64  `json:"id"`
+	Name string `json:"name"`
+	// Possible values for Target are branch, tag
+	Target *string `json:"target,omitempty"`
+	// Possible values for SourceType are: Repository, Organization
+	SourceType *string `json:"source_type,omitempty"`
+	Source     string  `json:"source"`
+	// Possible values for Enforcement are: disabled, active, evaluate
+	Enforcement string `json:"enforcement"`
+	// Possible values for BypassMode are: none, repository, organization
+	BypassMode   *string            `json:"bypass_mode,omitempty"`
+	BypassActors []*BypassActor     `json:"bypass_actors,omitempty"`
+	NodeID       *string            `json:"node_id,omitempty"`
+	Links        *RulesetLinks      `json:"_links,omitempty"`
+	Conditions   *RulesetConditions `json:"conditions,omitempty"`
+	Rules        []*RepositoryRule  `json:"rules,omitempty"`
+}
+
+// GetRulesForBranch gets all the rules that apply to the specified branch.
+//
+// GitHub API docs: https://docs.github.com/en/rest/repos/rules#get-rules-for-a-branch
+func (s *RepositoriesService) GetRulesForBranch(ctx context.Context, owner, repo, branch string) ([]*RepositoryRule, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/rules/branches/%v", owner, repo, branch)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var rules []*RepositoryRule
+	resp, err := s.client.Do(ctx, req, &rules)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rules, resp, nil
+}
+
+// GetAllRulesets gets all the rules that apply to the specified repository.
+// If includesParents is true, rulesets configured at the organization level that apply to the repository will be returned.
+//
+// GitHub API docs: https://docs.github.com/en/rest/repos/rules#get-all-repository-rulesets
+func (s *RepositoriesService) GetAllRulesets(ctx context.Context, owner, repo string, includesParents bool) ([]*Ruleset, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/rulesets?includes_parents=%v", owner, repo, includesParents)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ruleset []*Ruleset
+	resp, err := s.client.Do(ctx, req, &ruleset)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ruleset, resp, nil
+}
+
+// CreateRuleset creates a ruleset for the specified repository.
+//
+// GitHub API docs: https://docs.github.com/en/rest/repos/rules#create-a-repository-ruleset
+func (s *RepositoriesService) CreateRuleset(ctx context.Context, owner, repo string, rs *Ruleset) (*Ruleset, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/rulesets", owner, repo)
+
+	req, err := s.client.NewRequest("POST", u, rs)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ruleset *Ruleset
+	resp, err := s.client.Do(ctx, req, &ruleset)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ruleset, resp, nil
+}
+
+// GetRuleset gets a ruleset for the specified repository.
+// If includesParents is true, rulesets configured at the organization level that apply to the repository will be returned.
+//
+// GitHub API docs: https://docs.github.com/en/rest/repos/rules#get-a-repository-ruleset
+func (s *RepositoriesService) GetRuleset(ctx context.Context, owner, repo string, rulesetID int64, includesParents bool) (*Ruleset, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/rulesets/%v?includes_parents=%v", owner, repo, rulesetID, includesParents)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ruleset *Ruleset
+	resp, err := s.client.Do(ctx, req, &ruleset)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ruleset, resp, nil
+}
+
+// UpdateRuleset updates a ruleset for the specified repository.
+//
+// GitHub API docs: https://docs.github.com/en/rest/repos/rules#update-a-repository-ruleset
+func (s *RepositoriesService) UpdateRuleset(ctx context.Context, owner, repo string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID)
+
+	req, err := s.client.NewRequest("PUT", u, rs)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ruleset *Ruleset
+	resp, err := s.client.Do(ctx, req, &ruleset)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ruleset, resp, nil
+}
+
+// DeleteRuleset deletes a ruleset for the specified repository.
+//
+// GitHub API docs: https://docs.github.com/en/rest/repos/rules#delete-a-repository-ruleset
+func (s *RepositoriesService) DeleteRuleset(ctx context.Context, owner, repo string, rulesetID int64) (*Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID)
+
+	req, err := s.client.NewRequest("DELETE", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(ctx, req, nil)
+}
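
For reference, a minimal sketch of calling the new ruleset helpers above; the client setup and the owner/repo/branch names are placeholders, not part of this change:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	// Unauthenticated client for illustration; real callers would authenticate.
	client := github.NewClient(nil)

	// Rules currently in effect for a branch.
	rules, _, err := client.Repositories.GetRulesForBranch(ctx, "some-owner", "some-repo", "main")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rules on main:", len(rules))

	// Every ruleset on the repository, including any inherited from the organization.
	rulesets, _, err := client.Repositories.GetAllRulesets(ctx, "some-owner", "some-repo", true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rulesets:", len(rulesets))
}
```
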
diff --git a/vendor/github.com/google/go-github/v50/github/repos_stats.go b/vendor/github.com/google/go-github/v53/github/repos_stats.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_stats.go
rename to vendor/github.com/google/go-github/v53/github/repos_stats.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_statuses.go b/vendor/github.com/google/go-github/v53/github/repos_statuses.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_statuses.go
rename to vendor/github.com/google/go-github/v53/github/repos_statuses.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_tags.go b/vendor/github.com/google/go-github/v53/github/repos_tags.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_tags.go
rename to vendor/github.com/google/go-github/v53/github/repos_tags.go
diff --git a/vendor/github.com/google/go-github/v50/github/repos_traffic.go b/vendor/github.com/google/go-github/v53/github/repos_traffic.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/repos_traffic.go
rename to vendor/github.com/google/go-github/v53/github/repos_traffic.go
diff --git a/vendor/github.com/google/go-github/v50/github/scim.go b/vendor/github.com/google/go-github/v53/github/scim.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/scim.go
rename to vendor/github.com/google/go-github/v53/github/scim.go
diff --git a/vendor/github.com/google/go-github/v50/github/search.go b/vendor/github.com/google/go-github/v53/github/search.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/search.go
rename to vendor/github.com/google/go-github/v53/github/search.go
diff --git a/vendor/github.com/google/go-github/v50/github/secret_scanning.go b/vendor/github.com/google/go-github/v53/github/secret_scanning.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/secret_scanning.go
rename to vendor/github.com/google/go-github/v53/github/secret_scanning.go
diff --git a/vendor/github.com/google/go-github/v50/github/strings.go b/vendor/github.com/google/go-github/v53/github/strings.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/strings.go
rename to vendor/github.com/google/go-github/v53/github/strings.go
diff --git a/vendor/github.com/google/go-github/v50/github/teams.go b/vendor/github.com/google/go-github/v53/github/teams.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/teams.go
rename to vendor/github.com/google/go-github/v53/github/teams.go
diff --git a/vendor/github.com/google/go-github/v50/github/teams_discussion_comments.go b/vendor/github.com/google/go-github/v53/github/teams_discussion_comments.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/teams_discussion_comments.go
rename to vendor/github.com/google/go-github/v53/github/teams_discussion_comments.go
diff --git a/vendor/github.com/google/go-github/v50/github/teams_discussions.go b/vendor/github.com/google/go-github/v53/github/teams_discussions.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/teams_discussions.go
rename to vendor/github.com/google/go-github/v53/github/teams_discussions.go
diff --git a/vendor/github.com/google/go-github/v50/github/teams_members.go b/vendor/github.com/google/go-github/v53/github/teams_members.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/teams_members.go
rename to vendor/github.com/google/go-github/v53/github/teams_members.go
diff --git a/vendor/github.com/google/go-github/v50/github/timestamp.go b/vendor/github.com/google/go-github/v53/github/timestamp.go
similarity index 90%
rename from vendor/github.com/google/go-github/v50/github/timestamp.go
rename to vendor/github.com/google/go-github/v53/github/timestamp.go
index 1061a55e68..00c1235e9d 100644
--- a/vendor/github.com/google/go-github/v50/github/timestamp.go
+++ b/vendor/github.com/google/go-github/v53/github/timestamp.go
@@ -22,6 +22,14 @@ func (t Timestamp) String() string {
 	return t.Time.String()
 }
 
+// GetTime returns std time.Time.
+func (t *Timestamp) GetTime() *time.Time {
+	if t == nil {
+		return nil
+	}
+	return &t.Time
+}
+
 // UnmarshalJSON implements the json.Unmarshaler interface.
 // Time is expected in RFC3339 or Unix format.
 func (t *Timestamp) UnmarshalJSON(data []byte) (err error) {
diff --git a/vendor/github.com/google/go-github/v50/github/users.go b/vendor/github.com/google/go-github/v53/github/users.go
similarity index 98%
rename from vendor/github.com/google/go-github/v50/github/users.go
rename to vendor/github.com/google/go-github/v53/github/users.go
index d40d23e90f..1b0670103b 100644
--- a/vendor/github.com/google/go-github/v50/github/users.go
+++ b/vendor/github.com/google/go-github/v53/github/users.go
@@ -41,8 +41,8 @@ type User struct {
 	SuspendedAt             *Timestamp `json:"suspended_at,omitempty"`
 	Type                    *string    `json:"type,omitempty"`
 	SiteAdmin               *bool      `json:"site_admin,omitempty"`
-	TotalPrivateRepos       *int       `json:"total_private_repos,omitempty"`
-	OwnedPrivateRepos       *int       `json:"owned_private_repos,omitempty"`
+	TotalPrivateRepos       *int64     `json:"total_private_repos,omitempty"`
+	OwnedPrivateRepos       *int64     `json:"owned_private_repos,omitempty"`
 	PrivateGists            *int       `json:"private_gists,omitempty"`
 	DiskUsage               *int       `json:"disk_usage,omitempty"`
 	Collaborators           *int       `json:"collaborators,omitempty"`
diff --git a/vendor/github.com/google/go-github/v50/github/users_administration.go b/vendor/github.com/google/go-github/v53/github/users_administration.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/users_administration.go
rename to vendor/github.com/google/go-github/v53/github/users_administration.go
diff --git a/vendor/github.com/google/go-github/v50/github/users_blocking.go b/vendor/github.com/google/go-github/v53/github/users_blocking.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/users_blocking.go
rename to vendor/github.com/google/go-github/v53/github/users_blocking.go
diff --git a/vendor/github.com/google/go-github/v50/github/users_emails.go b/vendor/github.com/google/go-github/v53/github/users_emails.go
similarity index 73%
rename from vendor/github.com/google/go-github/v50/github/users_emails.go
rename to vendor/github.com/google/go-github/v53/github/users_emails.go
index be7e0f819e..67bd210e8d 100644
--- a/vendor/github.com/google/go-github/v50/github/users_emails.go
+++ b/vendor/github.com/google/go-github/v53/github/users_emails.go
@@ -70,3 +70,28 @@ func (s *UsersService) DeleteEmails(ctx context.Context, emails []string) (*Resp
 
 	return s.client.Do(ctx, req, nil)
 }
+
+// SetEmailVisibility sets the visibility for the primary email address of the authenticated user.
+// `visibility` can be "private" or "public".
+//
+// GitHub API docs: https://docs.github.com/en/rest/users/emails#set-primary-email-visibility-for-the-authenticated-user
+func (s *UsersService) SetEmailVisibility(ctx context.Context, visibility string) ([]*UserEmail, *Response, error) {
+	u := "user/email/visibility"
+
+	updateVisiblilityReq := &UserEmail{
+		Visibility: &visibility,
+	}
+
+	req, err := s.client.NewRequest("PATCH", u, updateVisiblilityReq)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var e []*UserEmail
+	resp, err := s.client.Do(ctx, req, &e)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return e, resp, nil
+}
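
A short sketch of the new `SetEmailVisibility` helper in use; the token-based client setup is an assumption for illustration only:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	// The user email endpoints require authentication; the token is a placeholder.
	client := github.NewTokenClient(ctx, "ghp_placeholder")

	emails, _, err := client.Users.SetEmailVisibility(ctx, "private")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range emails {
		fmt.Println(e.GetEmail(), e.GetVisibility())
	}
}
```
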
diff --git a/vendor/github.com/google/go-github/v50/github/users_followers.go b/vendor/github.com/google/go-github/v53/github/users_followers.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/users_followers.go
rename to vendor/github.com/google/go-github/v53/github/users_followers.go
diff --git a/vendor/github.com/google/go-github/v50/github/users_gpg_keys.go b/vendor/github.com/google/go-github/v53/github/users_gpg_keys.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/users_gpg_keys.go
rename to vendor/github.com/google/go-github/v53/github/users_gpg_keys.go
diff --git a/vendor/github.com/google/go-github/v50/github/users_keys.go b/vendor/github.com/google/go-github/v53/github/users_keys.go
similarity index 96%
rename from vendor/github.com/google/go-github/v50/github/users_keys.go
rename to vendor/github.com/google/go-github/v53/github/users_keys.go
index 59d26cdefa..b49b8e4b4e 100644
--- a/vendor/github.com/google/go-github/v50/github/users_keys.go
+++ b/vendor/github.com/google/go-github/v53/github/users_keys.go
@@ -19,6 +19,8 @@ type Key struct {
 	ReadOnly  *bool      `json:"read_only,omitempty"`
 	Verified  *bool      `json:"verified,omitempty"`
 	CreatedAt *Timestamp `json:"created_at,omitempty"`
+	AddedBy   *string    `json:"added_by,omitempty"`
+	LastUsed  *Timestamp `json:"last_used,omitempty"`
 }
 
 func (k Key) String() string {
diff --git a/vendor/github.com/google/go-github/v50/github/users_packages.go b/vendor/github.com/google/go-github/v53/github/users_packages.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/users_packages.go
rename to vendor/github.com/google/go-github/v53/github/users_packages.go
diff --git a/vendor/github.com/google/go-github/v50/github/users_projects.go b/vendor/github.com/google/go-github/v53/github/users_projects.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/users_projects.go
rename to vendor/github.com/google/go-github/v53/github/users_projects.go
diff --git a/vendor/github.com/google/go-github/v50/github/users_ssh_signing_keys.go b/vendor/github.com/google/go-github/v53/github/users_ssh_signing_keys.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/users_ssh_signing_keys.go
rename to vendor/github.com/google/go-github/v53/github/users_ssh_signing_keys.go
diff --git a/vendor/github.com/google/go-github/v50/github/with_appengine.go b/vendor/github.com/google/go-github/v53/github/with_appengine.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/with_appengine.go
rename to vendor/github.com/google/go-github/v53/github/with_appengine.go
diff --git a/vendor/github.com/google/go-github/v50/github/without_appengine.go b/vendor/github.com/google/go-github/v53/github/without_appengine.go
similarity index 100%
rename from vendor/github.com/google/go-github/v50/github/without_appengine.go
rename to vendor/github.com/google/go-github/v53/github/without_appengine.go
diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go
index ff172883f2..26fac02dcc 100644
--- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go
+++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go
@@ -33,6 +33,7 @@ import (
 	"github.com/google/s2a-go/internal/handshaker/service"
 	"github.com/google/s2a-go/internal/tokenmanager"
 	"github.com/google/s2a-go/internal/v2/tlsconfigstore"
+	"github.com/google/s2a-go/retry"
 	"github.com/google/s2a-go/stream"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
@@ -44,7 +45,7 @@ import (
 
 const (
 	s2aSecurityProtocol = "tls"
-	defaultS2ATimeout   = 3 * time.Second
+	defaultS2ATimeout   = 6 * time.Second
 )
 
 // An environment variable, which sets the timeout enforced on the connection to the S2A service for handshake.
@@ -131,7 +132,13 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori
 	serverName := removeServerNamePort(serverAuthority)
 	timeoutCtx, cancel := context.WithTimeout(ctx, GetS2ATimeout())
 	defer cancel()
-	s2AStream, err := createStream(timeoutCtx, c.s2av2Address, c.getS2AStream)
+	var s2AStream stream.S2AStream
+	var err error
+	retry.Run(timeoutCtx,
+		func() error {
+			s2AStream, err = createStream(timeoutCtx, c.s2av2Address, c.getS2AStream)
+			return err
+		})
 	if err != nil {
 		grpclog.Infof("Failed to connect to S2Av2: %v", err)
 		if c.fallbackClientHandshake != nil {
@@ -152,31 +159,34 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori
 		tokenManager = *c.tokenManager
 	}
 
-	if c.serverName == "" {
-		config, err = tlsconfigstore.GetTLSConfigurationForClient(serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy)
-		if err != nil {
-			grpclog.Info("Failed to get client TLS config from S2Av2: %v", err)
-			if c.fallbackClientHandshake != nil {
-				return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err)
-			}
-			return nil, nil, err
-		}
-	} else {
-		config, err = tlsconfigstore.GetTLSConfigurationForClient(c.serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy)
-		if err != nil {
-			grpclog.Info("Failed to get client TLS config from S2Av2: %v", err)
-			if c.fallbackClientHandshake != nil {
-				return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err)
-			}
-			return nil, nil, err
+	sn := serverName
+	if c.serverName != "" {
+		sn = c.serverName
+	}
+	retry.Run(timeoutCtx,
+		func() error {
+			config, err = tlsconfigstore.GetTLSConfigurationForClient(sn, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy)
+			return err
+		})
+	if err != nil {
+		grpclog.Info("Failed to get client TLS config from S2Av2: %v", err)
+		if c.fallbackClientHandshake != nil {
+			return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err)
 		}
+		return nil, nil, err
 	}
 	if grpclog.V(1) {
 		grpclog.Infof("Got client TLS config from S2Av2.")
 	}
-	creds := credentials.NewTLS(config)
 
-	conn, authInfo, err := creds.ClientHandshake(ctx, serverName, rawConn)
+	creds := credentials.NewTLS(config)
+	var conn net.Conn
+	var authInfo credentials.AuthInfo
+	retry.Run(timeoutCtx,
+		func() error {
+			conn, authInfo, err = creds.ClientHandshake(timeoutCtx, serverName, rawConn)
+			return err
+		})
 	if err != nil {
 		grpclog.Infof("Failed to do client handshake using S2Av2: %v", err)
 		if c.fallbackClientHandshake != nil {
@@ -196,7 +206,13 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede
 	}
 	ctx, cancel := context.WithTimeout(context.Background(), GetS2ATimeout())
 	defer cancel()
-	s2AStream, err := createStream(ctx, c.s2av2Address, c.getS2AStream)
+	var s2AStream stream.S2AStream
+	var err error
+	retry.Run(ctx,
+		func() error {
+			s2AStream, err = createStream(ctx, c.s2av2Address, c.getS2AStream)
+			return err
+		})
 	if err != nil {
 		grpclog.Infof("Failed to connect to S2Av2: %v", err)
 		return nil, nil, err
@@ -213,7 +229,12 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede
 		tokenManager = *c.tokenManager
 	}
 
-	config, err := tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode)
+	var config *tls.Config
+	retry.Run(ctx,
+		func() error {
+			config, err = tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode)
+			return err
+		})
 	if err != nil {
 		grpclog.Infof("Failed to get server TLS config from S2Av2: %v", err)
 		return nil, nil, err
@@ -221,8 +242,20 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede
 	if grpclog.V(1) {
 		grpclog.Infof("Got server TLS config from S2Av2.")
 	}
+
 	creds := credentials.NewTLS(config)
-	return creds.ServerHandshake(rawConn)
+	var conn net.Conn
+	var authInfo credentials.AuthInfo
+	retry.Run(ctx,
+		func() error {
+			conn, authInfo, err = creds.ServerHandshake(rawConn)
+			return err
+		})
+	if err != nil {
+		grpclog.Infof("Failed to do server handshake using S2Av2: %v", err)
+		return nil, nil, err
+	}
+	return conn, authInfo, err
 }
 
 // Info returns protocol info of s2av2TransportCreds.
diff --git a/vendor/github.com/google/s2a-go/retry/retry.go b/vendor/github.com/google/s2a-go/retry/retry.go
new file mode 100644
index 0000000000..224915f4dd
--- /dev/null
+++ b/vendor/github.com/google/s2a-go/retry/retry.go
@@ -0,0 +1,144 @@
+/*
+ *
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package retry provides a retry helper for talking to S2A gRPC server.
+// The implementation is modeled after
+// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/retry.go
+package retry
+
+import (
+	"context"
+	"math/rand"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	maxRetryAttempts = 5
+	maxRetryForLoops = 10
+)
+
+type defaultBackoff struct {
+	max time.Duration
+	mul float64
+	cur time.Duration
+}
+
+// Pause returns a duration, which is used as the backoff wait time
+// before the next retry.
+func (b *defaultBackoff) Pause() time.Duration {
+	d := time.Duration(1 + rand.Int63n(int64(b.cur)))
+	b.cur = time.Duration(float64(b.cur) * b.mul)
+	if b.cur > b.max {
+		b.cur = b.max
+	}
+	return d
+}
+
+// Sleep will wait for the specified duration or return on context
+// expiration.
+func Sleep(ctx context.Context, d time.Duration) error {
+	t := time.NewTimer(d)
+	select {
+	case <-ctx.Done():
+		t.Stop()
+		return ctx.Err()
+	case <-t.C:
+		return nil
+	}
+}
+
+// NewRetryer creates an instance of S2ARetryer using the defaultBackoff
+// implementation.
+var NewRetryer = func() *S2ARetryer {
+	return &S2ARetryer{bo: &defaultBackoff{
+		cur: 100 * time.Millisecond,
+		max: 30 * time.Second,
+		mul: 2,
+	}}
+}
+
+type backoff interface {
+	Pause() time.Duration
+}
+
+// S2ARetryer implements a retry helper for talking to S2A gRPC server.
+type S2ARetryer struct {
+	bo       backoff
+	attempts int
+}
+
+// Attempts returns the number of retries attempted.
+func (r *S2ARetryer) Attempts() int {
+	return r.attempts
+}
+
+// Retry returns a boolean indicating whether retry should be performed
+// and the backoff duration.
+func (r *S2ARetryer) Retry(err error) (time.Duration, bool) {
+	if err == nil {
+		return 0, false
+	}
+	if r.attempts >= maxRetryAttempts {
+		return 0, false
+	}
+	r.attempts++
+	return r.bo.Pause(), true
+}
+
+// Run uses S2ARetryer to execute the function passed in, until it succeeds
+// or the maximum number of retry attempts is reached.
+func Run(ctx context.Context, f func() error) {
+	retryer := NewRetryer()
+	forLoopCnt := 0
+	var err error
+	for {
+		err = f()
+		if bo, shouldRetry := retryer.Retry(err); shouldRetry {
+			if grpclog.V(1) {
+				grpclog.Infof("will attempt retry: %v", err)
+			}
+			if ctx.Err() != nil {
+				if grpclog.V(1) {
+					grpclog.Infof("exit retry loop due to context error: %v", ctx.Err())
+				}
+				break
+			}
+			if sleepErr := Sleep(ctx, bo); sleepErr != nil {
+				if grpclog.V(1) {
+					grpclog.Infof("exit retry loop due to sleep error: %v", sleepErr)
+				}
+				break
+			}
+			// This shouldn't happen, just make sure we are not stuck in the for loops.
+			forLoopCnt++
+			if forLoopCnt > maxRetryForLoops {
+				if grpclog.V(1) {
+					grpclog.Infof("exit the for loop after too many retries")
+				}
+				break
+			}
+			continue
+		}
+		if grpclog.V(1) {
+			grpclog.Infof("retry conditions not met, exit the loop")
+		}
+		break
+	}
+}
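
The `s2av2.go` and `s2a.go` changes above wrap stream creation, TLS-config fetches, and handshakes in `retry.Run`; here is a minimal sketch of that pattern in isolation, using a made-up flaky operation:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/google/s2a-go/retry"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 6*time.Second)
	defer cancel()

	attempts := 0
	var result string
	var err error
	// retry.Run re-invokes the closure with exponential backoff until it
	// returns nil, the context expires, or the attempt limit is reached;
	// the caller inspects err afterwards, exactly as s2av2.go now does.
	retry.Run(ctx, func() error {
		attempts++
		if attempts < 3 {
			err = errors.New("transient failure")
			return err
		}
		result, err = "ok", nil
		return err
	})
	fmt.Println(result, err, attempts)
}
```
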
diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go
index 1c1349de4a..d684c2c738 100644
--- a/vendor/github.com/google/s2a-go/s2a.go
+++ b/vendor/github.com/google/s2a-go/s2a.go
@@ -35,6 +35,7 @@ import (
 	"github.com/google/s2a-go/internal/handshaker/service"
 	"github.com/google/s2a-go/internal/tokenmanager"
 	"github.com/google/s2a-go/internal/v2"
+	"github.com/google/s2a-go/retry"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 
@@ -390,9 +391,15 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net
 		}
 		timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout())
 		defer cancel()
-		s2aTLSConfig, err := factory.Build(timeoutCtx, &TLSClientConfigOptions{
-			ServerName: serverName,
-		})
+
+		var s2aTLSConfig *tls.Config
+		retry.Run(timeoutCtx,
+			func() error {
+				s2aTLSConfig, err = factory.Build(timeoutCtx, &TLSClientConfigOptions{
+					ServerName: serverName,
+				})
+				return err
+			})
 		if err != nil {
 			grpclog.Infof("error building S2A TLS config: %v", err)
 			return fallback(err)
@@ -401,7 +408,12 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net
 		s2aDialer := &tls.Dialer{
 			Config: s2aTLSConfig,
 		}
-		c, err := s2aDialer.DialContext(ctx, network, addr)
+		var c net.Conn
+		retry.Run(timeoutCtx,
+			func() error {
+				c, err = s2aDialer.DialContext(timeoutCtx, network, addr)
+				return err
+			})
 		if err != nil {
 			grpclog.Infof("error dialing with S2A to %s: %v", addr, err)
 			return fallback(err)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
index b5140a3c9d..a8789f1702 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
@@ -27,7 +27,6 @@ go_library(
         "//internal/httprule",
         "//utilities",
         "@go_googleapis//google/api:httpbody_go_proto",
-        "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
         "@org_golang_google_grpc//codes",
         "@org_golang_google_grpc//grpclog",
         "@org_golang_google_grpc//health/grpc_health_v1",
@@ -38,6 +37,7 @@ go_library(
         "@org_golang_google_protobuf//reflect/protoreflect",
         "@org_golang_google_protobuf//reflect/protoregistry",
         "@org_golang_google_protobuf//types/known/durationpb",
+        "@org_golang_google_protobuf//types/known/fieldmaskpb",
         "@org_golang_google_protobuf//types/known/structpb",
         "@org_golang_google_protobuf//types/known/timestamppb",
         "@org_golang_google_protobuf//types/known/wrapperspb",
@@ -73,7 +73,6 @@ go_test(
         "@go_googleapis//google/api:httpbody_go_proto",
         "@go_googleapis//google/rpc:errdetails_go_proto",
         "@go_googleapis//google/rpc:status_go_proto",
-        "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_grpc//codes",
         "@org_golang_google_grpc//health/grpc_health_v1",
@@ -84,6 +83,7 @@ go_test(
         "@org_golang_google_protobuf//testing/protocmp",
         "@org_golang_google_protobuf//types/known/durationpb",
         "@org_golang_google_protobuf//types/known/emptypb",
+        "@org_golang_google_protobuf//types/known/fieldmaskpb",
         "@org_golang_google_protobuf//types/known/structpb",
         "@org_golang_google_protobuf//types/known/timestamppb",
         "@org_golang_google_protobuf//types/known/wrapperspb",
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
index cd49097fde..a03dd166bd 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -7,9 +7,9 @@ import (
 	"io"
 	"sort"
 
-	"google.golang.org/genproto/protobuf/field_mask"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/reflect/protoreflect"
+	field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
 )
 
 func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
index 139bbbad49..f451cb441f 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -389,8 +389,12 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	// lookup other methods to handle fallback from GET to POST and
-	// to determine if it is NotImplemented or NotFound.
+	// If no handler has been found for the request, look up other methods
+	// to handle the POST -> GET fallback when the request is subject to
+	// path-length fallback.
+	// Note that the request is not checked eagerly here because we want to
+	// return the right HTTP status code, and we need to process the fallback
+	// candidates in order to do that.
 	for m, handlers := range s.handlers {
 		if m == r.Method {
 			continue
@@ -423,8 +427,11 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 				}
 				continue
 			}
+
 			// X-HTTP-Method-Override is optional. Always allow fallback to POST.
-			if s.isPathLengthFallback(r) {
+			// Also, only consider POST -> GET fallbacks, and avoid falling back to
+			// potentially dangerous operations like DELETE.
+			if s.isPathLengthFallback(r) && m == http.MethodGet {
 				if err := r.ParseForm(); err != nil {
 					_, outboundMarshaler := MarshalerForRequest(s, r)
 					sterr := status.Error(codes.InvalidArgument, err.Error())
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
index 31ce33a762..d01933c4fd 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
@@ -10,13 +10,13 @@ import (
 	"time"
 
 	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
-	"google.golang.org/genproto/protobuf/field_mask"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/protobuf/encoding/protojson"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/reflect/protoregistry"
 	"google.golang.org/protobuf/types/known/durationpb"
+	field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
 	"google.golang.org/protobuf/types/known/structpb"
 	"google.golang.org/protobuf/types/known/timestamppb"
 	"google.golang.org/protobuf/types/known/wrapperspb"
diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md
index 063bb16056..03bcfb5b76 100644
--- a/vendor/github.com/hashicorp/golang-lru/README.md
+++ b/vendor/github.com/hashicorp/golang-lru/README.md
@@ -1,25 +1,7 @@
 golang-lru
 ==========
 
-This provides the `lru` package which implements a fixed-size
-thread safe LRU cache. It is based on the cache in Groupcache.
-
-Documentation
-=============
-
-Full docs are available on [Godoc](https://pkg.go.dev/github.com/hashicorp/golang-lru)
-
-Example
-=======
-
-Using the LRU is very simple:
-
-```go
-l, _ := New(128)
-for i := 0; i < 256; i++ {
-    l.Add(i, nil)
-}
-if l.Len() != 128 {
-    panic(fmt.Sprintf("bad len: %v", l.Len()))
-}
-```
+Please upgrade to github.com/hashicorp/golang-lru/v2 for all new code as v1 will
+not be updated anymore. The v2 version supports generics and is faster; old code
+can specify a specific tag, e.g. github.com/hashicorp/golang-lru/v1.0.2 for
+backwards compatibility.
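
The v2 module the README now points at is generic; a small sketch mirroring the example the v1 README used to carry (import path per the note above):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// v2 is generic: keys and values are typed, no interface{} assertions needed.
	cache, err := lru.New[int, string](128)
	if err != nil {
		panic(err)
	}
	for i := 0; i < 256; i++ {
		cache.Add(i, fmt.Sprintf("value-%d", i))
	}
	if cache.Len() != 128 {
		panic(fmt.Sprintf("bad len: %v", cache.Len()))
	}
	if v, ok := cache.Get(200); ok {
		fmt.Println(v) // value-200
	}
}
```
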
diff --git a/vendor/github.com/lestrrat-go/blackmagic/.gitignore b/vendor/github.com/lestrrat-go/blackmagic/.gitignore
new file mode 100644
index 0000000000..66fd13c903
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/blackmagic/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
diff --git a/vendor/github.com/lestrrat-go/blackmagic/LICENSE b/vendor/github.com/lestrrat-go/blackmagic/LICENSE
new file mode 100644
index 0000000000..188ea7685c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/blackmagic/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 lestrrat-go
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/lestrrat-go/blackmagic/README.md b/vendor/github.com/lestrrat-go/blackmagic/README.md
new file mode 100644
index 0000000000..0356f8a72b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/blackmagic/README.md
@@ -0,0 +1,3 @@
+# blackmagic
+
+Reflect-based black magic. YMMV, and use with caution
diff --git a/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go b/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go
new file mode 100644
index 0000000000..8d1d468543
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go
@@ -0,0 +1,54 @@
+package blackmagic
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// AssignIfCompatible is a convenience function to safely
+// assign arbitrary values. dst must be a pointer to an
+// empty interface, or it must be a pointer to a compatible
+// variable type that can hold src.
+func AssignIfCompatible(dst, src interface{}) error {
+	orv := reflect.ValueOf(src) // save this value for error reporting
+	result := orv
+
+	// t can be a pointer or a slice, and the code will slightly change
+	// depending on this
+	var isSlice bool
+	switch result.Kind() {
+	case reflect.Ptr:
+		// no op
+	case reflect.Slice:
+		isSlice = true
+	default:
+		return fmt.Errorf("argument t to AssignIfCompatible must be a pointer or a slice: %T", src)
+	}
+
+	rv := reflect.ValueOf(dst)
+	if rv.Kind() != reflect.Ptr {
+		return fmt.Errorf(`argument to AssignIfCompatible() must be a pointer: %T`, dst)
+	}
+
+	actualDst := rv.Elem()
+	switch actualDst.Kind() {
+	case reflect.Interface:
+		// If it's an interface, we can just assign the pointer to the interface{}
+	default:
+		// If it's a pointer to the struct we're looking for, we need to set
+		// the de-referenced struct
+		if !isSlice {
+			result = result.Elem()
+		}
+	}
+	if !result.Type().AssignableTo(actualDst.Type()) {
+		return fmt.Errorf(`argument to AssignIfCompatible() must be compatible with %T (was %T)`, orv.Interface(), dst)
+	}
+
+	if !actualDst.CanSet() {
+		return fmt.Errorf(`argument to AssignIfCompatible() must be settable`)
+	}
+	actualDst.Set(result)
+
+	return nil
+}
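
A brief sketch of `AssignIfCompatible` in use; the `token` struct is a hypothetical destination type chosen for illustration:

```go
package main

import (
	"fmt"

	"github.com/lestrrat-go/blackmagic"
)

// token is a hypothetical destination type, not part of the library.
type token struct{ Subject string }

func main() {
	src := &token{Subject: "alice"}

	// Destination is a typed pointer: the pointed-to struct is copied into it.
	var dst token
	if err := blackmagic.AssignIfCompatible(&dst, src); err != nil {
		panic(err)
	}
	fmt.Println(dst.Subject) // alice

	// Destination is a pointer to interface{}: the value is stored as-is.
	var anyDst interface{}
	if err := blackmagic.AssignIfCompatible(&anyDst, src); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", anyDst) // *main.token
}
```
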
diff --git a/vendor/github.com/lestrrat-go/httpcc/.gitignore b/vendor/github.com/lestrrat-go/httpcc/.gitignore
new file mode 100644
index 0000000000..66fd13c903
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httpcc/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
diff --git a/vendor/github.com/lestrrat-go/httpcc/LICENSE b/vendor/github.com/lestrrat-go/httpcc/LICENSE
new file mode 100644
index 0000000000..963209bfba
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httpcc/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 lestrrat-go
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/lestrrat-go/httpcc/README.md b/vendor/github.com/lestrrat-go/httpcc/README.md
new file mode 100644
index 0000000000..cf2dcb327c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httpcc/README.md
@@ -0,0 +1,35 @@
+httpcc
+======
+
+Parses HTTP/1.1 Cache-Control header, and returns a struct that is convenient
+for the end-user to do what they will with.
+
+# Parsing the HTTP Request
+
+```go
+dir, err := httpcc.ParseRequest(req.Header.Get(`Cache-Control`))
+// dir.MaxAge()       uint64, bool
+// dir.MaxStale()     uint64, bool
+// dir.MinFresh()     uint64, bool
+// dir.NoCache()      bool
+// dir.NoStore()      bool
+// dir.NoTransform()  bool
+// dir.OnlyIfCached() bool
+// dir.Extensions()   map[string]string
+```
+
+# Parsing the HTTP Response
+
+```go
+directives, err := httpcc.ParseResponse(res.Header.Get(`Cache-Control`))
+// dir.MaxAge()         uint64, bool
+// dir.MustRevalidate() bool
+// dir.NoCache()        []string
+// dir.NoStore()        bool
+// dir.NoTransform()    bool
+// dir.Public()         bool
+// dir.Private()        bool
+// dir.SMaxAge()        uint64, bool
+// dir.Extensions()     map[string]string
+```
+
diff --git a/vendor/github.com/lestrrat-go/httpcc/directives.go b/vendor/github.com/lestrrat-go/httpcc/directives.go
new file mode 100644
index 0000000000..86cbbf0b9a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httpcc/directives.go
@@ -0,0 +1,117 @@
+package httpcc
+
+type RequestDirective struct {
+	maxAge       *uint64
+	maxStale     *uint64
+	minFresh     *uint64
+	noCache      bool
+	noStore      bool
+	noTransform  bool
+	onlyIfCached bool
+	extensions   map[string]string
+}
+
+func (d *RequestDirective) MaxAge() (uint64, bool) {
+	if v := d.maxAge; v != nil {
+		return *v, true
+	}
+	return 0, false
+}
+
+func (d *RequestDirective) MaxStale() (uint64, bool) {
+	if v := d.maxStale; v != nil {
+		return *v, true
+	}
+	return 0, false
+}
+
+func (d *RequestDirective) MinFresh() (uint64, bool) {
+	if v := d.minFresh; v != nil {
+		return *v, true
+	}
+	return 0, false
+}
+
+func (d *RequestDirective) NoCache() bool {
+	return d.noCache
+}
+
+func (d *RequestDirective) NoStore() bool {
+	return d.noStore
+}
+
+func (d *RequestDirective) NoTransform() bool {
+	return d.noTransform
+}
+
+func (d *RequestDirective) OnlyIfCached() bool {
+	return d.onlyIfCached
+}
+
+func (d *RequestDirective) Extensions() map[string]string {
+	return d.extensions
+}
+
+func (d *RequestDirective) Extension(s string) string {
+	return d.extensions[s]
+}
+
+type ResponseDirective struct {
+	maxAge          *uint64
+	noCache         []string
+	noStore         bool
+	noTransform     bool
+	public          bool
+	private         []string
+	proxyRevalidate bool
+	sMaxAge         *uint64
+	extensions      map[string]string
+}
+
+func (d *ResponseDirective) MaxAge() (uint64, bool) {
+	if v := d.maxAge; v != nil {
+		return *v, true
+	}
+	return 0, false
+}
+
+func (d *ResponseDirective) NoCache() []string {
+	return d.noCache
+}
+
+func (d *ResponseDirective) NoStore() bool {
+	return d.noStore
+}
+
+func (d *ResponseDirective) NoTransform() bool {
+	return d.noTransform
+}
+
+func (d *ResponseDirective) Public() bool {
+	return d.public
+}
+
+func (d *ResponseDirective) Private() []string {
+	return d.private
+}
+
+func (d *ResponseDirective) ProxyRevalidate() bool {
+	return d.proxyRevalidate
+}
+
+func (d *ResponseDirective) SMaxAge() (uint64, bool) {
+	if v := d.sMaxAge; v != nil {
+		return *v, true
+	}
+	return 0, false
+}
+
+func (d *ResponseDirective) Extensions() map[string]string {
+	return d.extensions
+}
+
+func (d *ResponseDirective) Extension(s string) string {
+	return d.extensions[s]
+}
+
+
diff --git a/vendor/github.com/lestrrat-go/httpcc/httpcc.go b/vendor/github.com/lestrrat-go/httpcc/httpcc.go
new file mode 100644
index 0000000000..14679f9b1c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httpcc/httpcc.go
@@ -0,0 +1,310 @@
+package httpcc
+
+import (
+	"bufio"
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+const (
+	// Request Cache-Control directives
+	MaxAge       = "max-age" // used in response as well
+	MaxStale     = "max-stale"
+	MinFresh     = "min-fresh"
+	NoCache      = "no-cache"     // used in response as well
+	NoStore      = "no-store"     // used in response as well
+	NoTransform  = "no-transform" // used in response as well
+	OnlyIfCached = "only-if-cached"
+
+	// Response Cache-Control directive
+	MustRevalidate  = "must-revalidate"
+	Public          = "public"
+	Private         = "private"
+	ProxyRevalidate = "proxy-revalidate"
+	SMaxAge         = "s-maxage"
+)
+
+type TokenPair struct {
+	Name  string
+	Value string
+}
+
+type TokenValuePolicy int
+
+const (
+	NoArgument TokenValuePolicy = iota
+	TokenOnly
+	QuotedStringOnly
+	AnyTokenValue
+)
+
+type directiveValidator interface {
+	Validate(string) TokenValuePolicy
+}
+type directiveValidatorFn func(string) TokenValuePolicy
+
+func (fn directiveValidatorFn) Validate(ccd string) TokenValuePolicy {
+	return fn(ccd)
+}
+
+func responseDirectiveValidator(s string) TokenValuePolicy {
+	switch s {
+	case MustRevalidate, NoStore, NoTransform, Public, ProxyRevalidate:
+		return NoArgument
+	case NoCache, Private:
+		return QuotedStringOnly
+	case MaxAge, SMaxAge:
+		return TokenOnly
+	default:
+		return AnyTokenValue
+	}
+}
+
+func requestDirectiveValidator(s string) TokenValuePolicy {
+	switch s {
+	case MaxAge, MaxStale, MinFresh:
+		return TokenOnly
+	case NoCache, NoStore, NoTransform, OnlyIfCached:
+		return NoArgument
+	default:
+		return AnyTokenValue
+	}
+}
+
+// ParseRequestDirective parses a single token.
+func ParseRequestDirective(s string) (*TokenPair, error) {
+	return parseDirective(s, directiveValidatorFn(requestDirectiveValidator))
+}
+
+func ParseResponseDirective(s string) (*TokenPair, error) {
+	return parseDirective(s, directiveValidatorFn(responseDirectiveValidator))
+}
+
+func parseDirective(s string, ccd directiveValidator) (*TokenPair, error) {
+	s = strings.TrimSpace(s)
+
+	i := strings.IndexByte(s, '=')
+	if i == -1 {
+		return &TokenPair{Name: s}, nil
+	}
+
+	pair := &TokenPair{Name: strings.TrimSpace(s[:i])}
+
+	if strings.TrimSpace(s[i+1:]) == "" {
+		// `key=` feels like it's a parse error, but it's HTTP...
+		// for now, return as if nothing happened.
+		return pair, nil
+	}
+
+	v := strings.TrimSpace(s[i+1:])
+	switch ccd.Validate(pair.Name) {
+	case TokenOnly:
+		if v[0] == '"' {
+			return nil, fmt.Errorf(`invalid value for %s (quoted string not allowed)`, pair.Name)
+		}
+	case QuotedStringOnly: // quoted-string only
+		if v[0] != '"' {
+			return nil, fmt.Errorf(`invalid value for %s (bare token not allowed)`, pair.Name)
+		}
+		tmp, err := strconv.Unquote(v)
+		if err != nil {
+			return nil, fmt.Errorf(`malformed quoted string in token`)
+		}
+		v = tmp
+	case AnyTokenValue:
+		if v[0] == '"' {
+			tmp, err := strconv.Unquote(v)
+			if err != nil {
+				return nil, fmt.Errorf(`malformed quoted string in token`)
+			}
+			v = tmp
+		}
+	case NoArgument:
+		if len(v) > 0 {
+			return nil, fmt.Errorf(`received argument to directive %s`, pair.Name)
+		}
+	}
+
+	pair.Value = v
+	return pair, nil
+}
+
+func ParseResponseDirectives(s string) ([]*TokenPair, error) {
+	return parseDirectives(s, ParseResponseDirective)
+}
+
+func ParseRequestDirectives(s string) ([]*TokenPair, error) {
+	return parseDirectives(s, ParseRequestDirective)
+}
+
+func parseDirectives(s string, p func(string) (*TokenPair, error)) ([]*TokenPair, error) {
+	scanner := bufio.NewScanner(strings.NewReader(s))
+	scanner.Split(scanCommaSeparatedWords)
+
+	var tokens []*TokenPair
+	for scanner.Scan() {
+		tok, err := p(scanner.Text())
+		if err != nil {
+			return nil, fmt.Errorf(`failed to parse token #%d: %w`, len(tokens)+1, err)
+		}
+		tokens = append(tokens, tok)
+	}
+	return tokens, nil
+}
+
+// isSpace reports whether the character is a Unicode white space character.
+// We avoid dependency on the unicode package, but check validity of the implementation
+// in the tests.
+func isSpace(r rune) bool {
+	if r <= '\u00FF' {
+		// Obvious ASCII ones: \t through \r plus space. Plus two Latin-1 oddballs.
+		switch r {
+		case ' ', '\t', '\n', '\v', '\f', '\r':
+			return true
+		case '\u0085', '\u00A0':
+			return true
+		}
+		return false
+	}
+	// High-valued ones.
+	if '\u2000' <= r && r <= '\u200a' {
+		return true
+	}
+	switch r {
+	case '\u1680', '\u2028', '\u2029', '\u202f', '\u205f', '\u3000':
+		return true
+	}
+	return false
+}
+
+func scanCommaSeparatedWords(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	// Skip leading spaces.
+	start := 0
+	for width := 0; start < len(data); start += width {
+		var r rune
+		r, width = utf8.DecodeRune(data[start:])
+		if !isSpace(r) {
+			break
+		}
+	}
+	// Scan until we find a comma. Keep track of consecutive whitespaces
+	// so we remove them from the end result
+	var ws int
+	for width, i := 0, start; i < len(data); i += width {
+		var r rune
+		r, width = utf8.DecodeRune(data[i:])
+		switch {
+		case isSpace(r):
+			ws++
+		case r == ',':
+			return i + width, data[start : i-ws], nil
+		default:
+			ws = 0
+		}
+	}
+
+	// If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
+	if atEOF && len(data) > start {
+		return len(data), data[start : len(data)-ws], nil
+	}
+
+	// Request more data.
+	return start, nil, nil
+}
+
+// ParseRequest parses the content of `Cache-Control` header of an HTTP Request.
+func ParseRequest(v string) (*RequestDirective, error) {
+	var dir RequestDirective
+	// initialize extensions so unknown directives can be stored without
+	// panicking on a nil map (mirrors ParseResponse below)
+	dir.extensions = make(map[string]string)
+	tokens, err := ParseRequestDirectives(v)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to parse tokens: %w`, err)
+	}
+
+	for _, token := range tokens {
+		name := strings.ToLower(token.Name)
+		switch name {
+		case MaxAge:
+			iv, err := strconv.ParseUint(token.Value, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf(`failed to parse max-age: %w`, err)
+			}
+			dir.maxAge = &iv
+		case MaxStale:
+			iv, err := strconv.ParseUint(token.Value, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf(`failed to parse max-stale: %w`, err)
+			}
+			dir.maxStale = &iv
+		case MinFresh:
+			iv, err := strconv.ParseUint(token.Value, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf(`failed to parse min-fresh: %w`, err)
+			}
+			dir.minFresh = &iv
+		case NoCache:
+			dir.noCache = true
+		case NoStore:
+			dir.noStore = true
+		case NoTransform:
+			dir.noTransform = true
+		case OnlyIfCached:
+			dir.onlyIfCached = true
+		default:
+			dir.extensions[token.Name] = token.Value
+		}
+	}
+	return &dir, nil
+}
+
+// ParseResponse parses the content of `Cache-Control` header of an HTTP Response.
+func ParseResponse(v string) (*ResponseDirective, error) {
+	tokens, err := ParseResponseDirectives(v)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to parse tokens: %w`, err)
+	}
+
+	var dir ResponseDirective
+	dir.extensions = make(map[string]string)
+	for _, token := range tokens {
+		name := strings.ToLower(token.Name)
+		switch name {
+		case MaxAge:
+			iv, err := strconv.ParseUint(token.Value, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf(`failed to parse max-age: %w`, err)
+			}
+			dir.maxAge = &iv
+		case NoCache:
+			scanner := bufio.NewScanner(strings.NewReader(token.Value))
+			scanner.Split(scanCommaSeparatedWords)
+			for scanner.Scan() {
+				dir.noCache = append(dir.noCache, scanner.Text())
+			}
+		case NoStore:
+			dir.noStore = true
+		case NoTransform:
+			dir.noTransform = true
+		case Public:
+			dir.public = true
+		case Private:
+			scanner := bufio.NewScanner(strings.NewReader(token.Value))
+			scanner.Split(scanCommaSeparatedWords)
+			for scanner.Scan() {
+				dir.private = append(dir.private, scanner.Text())
+			}
+		case ProxyRevalidate:
+			dir.proxyRevalidate = true
+		case SMaxAge:
+			iv, err := strconv.ParseUint(token.Value, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf(`failed to parse s-maxage: %w`, err)
+			}
+			dir.sMaxAge = &iv
+		default:
+			dir.extensions[token.Name] = token.Value
+		}
+	}
+	return &dir, nil
+}
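
A short sketch of the response-side API defined above; httprc (vendored below) leans on this kind of parsing to derive refresh intervals from `max-age`:

```go
package main

import (
	"fmt"

	"github.com/lestrrat-go/httpcc"
)

func main() {
	dir, err := httpcc.ParseResponse(`public, max-age=300, no-cache="set-cookie"`)
	if err != nil {
		panic(err)
	}
	if v, ok := dir.MaxAge(); ok {
		fmt.Println("max-age:", v) // 300
	}
	fmt.Println("public:", dir.Public())           // true
	fmt.Println("no-cache fields:", dir.NoCache()) // [set-cookie]
}
```
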
diff --git a/vendor/github.com/lestrrat-go/httprc/.gitignore b/vendor/github.com/lestrrat-go/httprc/.gitignore
new file mode 100644
index 0000000000..66fd13c903
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
diff --git a/vendor/github.com/lestrrat-go/httprc/.golangci.yml b/vendor/github.com/lestrrat-go/httprc/.golangci.yml
new file mode 100644
index 0000000000..8642432169
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/.golangci.yml
@@ -0,0 +1,84 @@
+run:
+
+linters-settings:
+  govet:
+    enable-all: true
+    disable:
+      - shadow
+      - fieldalignment
+
+linters:
+  enable-all: true
+  disable:
+    - cyclop
+    - dupl
+    - exhaustive
+    - exhaustivestruct
+    - errorlint
+    - funlen
+    - gci
+    - gochecknoglobals
+    - gochecknoinits
+    - gocognit
+    - gocritic
+    - gocyclo
+    - godot
+    - godox
+    - goerr113
+    - gofumpt
+    - golint #deprecated
+    - gomnd
+    - gosec
+    - govet
+    - interfacer # deprecated
+    - ifshort
+    - ireturn # No, I _LIKE_ returning interfaces
+    - lll
+    - maligned # deprecated
+    - makezero
+    - nakedret
+    - nestif
+    - nlreturn
+    - paralleltest
+    - scopelint # deprecated
+    - tagliatelle
+    - testpackage
+    - thelper
+    - varnamelen # short names are ok
+    - wrapcheck
+    - wsl
+
+issues:
+  exclude-rules:
+    # not needed
+    - path: /*.go
+      text: "ST1003: should not use underscores in package names"
+      linters:
+        - stylecheck
+    - path: /*.go
+      text: "don't use an underscore in package name"
+      linters:
+        - revive
+    - path: /main.go
+      linters:
+        - errcheck
+    - path: internal/codegen/codegen.go
+      linters:
+        - errcheck
+    - path: /*_test.go
+      linters:
+        - errcheck
+        - forcetypeassert
+    - path: /*_example_test.go
+      linters:
+        - forbidigo
+    - path: cmd/jwx/jwx.go
+      linters:
+        - forbidigo
+
+  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
+  max-issues-per-linter: 0
+
+  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
+  max-same-issues: 0
+
diff --git a/vendor/github.com/lestrrat-go/httprc/Changes b/vendor/github.com/lestrrat-go/httprc/Changes
new file mode 100644
index 0000000000..e2629fdd78
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/Changes
@@ -0,0 +1,17 @@
+Changes
+=======
+
+v1.0.4 19 Jul 2022
+  * Fix sloppy API breakage
+
+v1.0.3 19 Jul 2022
+  * Fix queue insertion in the middle of the queue (#7)
+
+v1.0.2 13 Jun 2022
+  * Properly release a lock when the fetch fails (#5)
+
+v1.0.1 29 Mar 2022
+  * Bump dependency for github.com/lestrrat-go/httpcc to v1.0.1
+
+v1.0.0 29 Mar 2022
+  * Initial release, refactored out of github.com/lestrrat-go/jwx
diff --git a/vendor/github.com/lestrrat-go/httprc/LICENSE b/vendor/github.com/lestrrat-go/httprc/LICENSE
new file mode 100644
index 0000000000..3e196892ca
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 lestrrat
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/lestrrat-go/httprc/README.md b/vendor/github.com/lestrrat-go/httprc/README.md
new file mode 100644
index 0000000000..1583806520
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/README.md
@@ -0,0 +1,130 @@
+# httprc
+
+`httprc` is an HTTP "Refresh" Cache. Its aim is to cache a remote resource that
+can be fetched via HTTP, but keep the cached content up-to-date based on periodic
+refreshing.
+
+# SYNOPSIS
+
+<!-- INCLUDE(httprc_example_test.go) -->
+```go
+package httprc_test
+
+import (
+  "context"
+  "fmt"
+  "net/http"
+  "net/http/httptest"
+  "sync"
+  "time"
+
+  "github.com/lestrrat-go/httprc"
+)
+
+const (
+  helloWorld   = `Hello World!`
+  goodbyeWorld = `Goodbye World!`
+)
+
+func ExampleCache() {
+  var mu sync.RWMutex
+
+  msg := helloWorld
+
+  srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+    w.Header().Set(`Cache-Control`, fmt.Sprintf(`max-age=%d`, 2))
+    w.WriteHeader(http.StatusOK)
+    mu.RLock()
+    fmt.Fprint(w, msg)
+    mu.RUnlock()
+  }))
+  defer srv.Close()
+
+  ctx, cancel := context.WithCancel(context.Background())
+  defer cancel()
+
+  errSink := httprc.ErrSinkFunc(func(err error) {
+    fmt.Printf("%s\n", err)
+  })
+
+  c := httprc.NewCache(ctx,
+    httprc.WithErrSink(errSink),
+    httprc.WithRefreshWindow(time.Second), // force checks every second
+  )
+
+  c.Register(srv.URL,
+    httprc.WithHTTPClient(srv.Client()),        // we need client with TLS settings
+    httprc.WithMinRefreshInterval(time.Second), // allow max-age=1 (smallest)
+  )
+
+  payload, err := c.Get(ctx, srv.URL)
+  if err != nil {
+    fmt.Printf("%s\n", err)
+    return
+  }
+
+  if string(payload.([]byte)) != helloWorld {
+    fmt.Printf("payload mismatch: %s\n", payload)
+    return
+  }
+
+  mu.Lock()
+  msg = goodbyeWorld
+  mu.Unlock()
+
+  time.Sleep(4 * time.Second)
+
+  payload, err = c.Get(ctx, srv.URL)
+  if err != nil {
+    fmt.Printf("%s\n", err)
+    return
+  }
+
+  if string(payload.([]byte)) != goodbyeWorld {
+    fmt.Printf("payload mismatch: %s\n", payload)
+    return
+  }
+
+  cancel()
+
+  // OUTPUT:
+}
+```
+source: [httprc_example_test.go](https://github.com/lestrrat-go/jwx/blob/main/httprc_example_test.go)
+<!-- END INCLUDE -->
+
+# Sequence Diagram
+
+```mermaid
+sequenceDiagram
+  autonumber
+  actor User
+  participant httprc.Cache
+  participant httprc.Storage
+  User->>httprc.Cache: Fetch URL `u`
+  activate httprc.Storage
+  httprc.Cache->>httprc.Storage: Fetch local cache for `u`
+  alt Cache exists
+    httprc.Storage-->httprc.Cache: Return local cache
+    httprc.Cache-->>User: Return data
+    Note over httprc.Storage: If the cache exists, there's nothing more to do.<br />The cached content will be updated periodically in httprc.Refresher
+    deactivate httprc.Storage
+  else Cache does not exist
+    activate httprc.Fetcher
+    httprc.Cache->>httprc.Fetcher: Fetch remote resource `u`
+    httprc.Fetcher-->>httprc.Cache: Return fetched data
+    deactivate httprc.Fetcher
+    httprc.Cache-->>User: Return data
+    httprc.Cache-)httprc.Refresher: Enqueue into auto-refresh queue
+    activate httprc.Refresher
+    loop Refresh Loop
+      Note over httprc.Storage,httprc.Fetcher: Cached contents are updated synchronously
+      httprc.Refresher->>httprc.Refresher: Wait until next refresh
+      httprc.Refresher-->>httprc.Fetcher: Request fetch
+      httprc.Fetcher->>httprc.Refresher: Return fetched data
+      httprc.Refresher-->>httprc.Storage: Store new version in cache
+      httprc.Refresher->>httprc.Refresher: Enqueue into auto-refresh queue (again)
+    end
+    deactivate httprc.Refresher
+  end
+```
diff --git a/vendor/github.com/lestrrat-go/httprc/cache.go b/vendor/github.com/lestrrat-go/httprc/cache.go
new file mode 100644
index 0000000000..505e5ae446
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/cache.go
@@ -0,0 +1,172 @@
+package httprc
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"sync"
+	"time"
+)
+
+// HTTPClient is the minimal interface the cache requires of an HTTP
+// client in order to fetch remote resources.
+type HTTPClient interface {
+	Get(string) (*http.Response, error)
+}
+
+// Cache represents a cache that stores resources locally, while
+// periodically refreshing the contents based on HTTP header values
+// and/or user-supplied hints.
+//
+// Refresh is performed _periodically_, and therefore the contents
+// are not kept up-to-date in real time. The interval between checks
+// for refreshes is called the refresh window.
+//
+// The default refresh window is 15 minutes. This means that if a
+// resource is fetched at time T and is supposed to be refreshed in
+// 20 minutes, the next refresh for this resource will happen at
+// T+30 minutes (after two 15-minute windows).
+type Cache struct {
+	mu    sync.RWMutex
+	queue *queue
+	wl    Whitelist
+}
+
+const defaultRefreshWindow = 15 * time.Minute
+
+// New creates a new Cache object.
+//
+// The context object in the argument controls the life-cycle of the
+// auto-refresh worker. If you cancel the `ctx`, then the automatic
+// refresh will stop working.
+//
+// Refreshes are only performed periodically, and the interval between
+// refreshes is controlled by the `refresh window` value. For example,
+// if the refresh window is 5 minutes and a resource was queued to be
+// refreshed at the 7-minute mark, it will actually be refreshed after
+// 10 minutes (two refresh windows).
+//
+// The refresh window can be configured by using `httprc.WithRefreshWindow`
+// option. If you want refreshes to be performed more often, provide a smaller
+// refresh window. If you specify a refresh window that is smaller than 1
+// second, it will automatically be set to the default value, which is 15
+// minutes.
+//
+// Internally the HTTP fetching is done using a pool of HTTP fetch
+// workers. The default number of workers is 3. You may change this
+// number by specifying the `httprc.WithFetcherWorkerCount` option.
+func NewCache(ctx context.Context, options ...CacheOption) *Cache {
+	var refreshWindow time.Duration
+	var errSink ErrSink
+	var wl Whitelist
+	var fetcherOptions []FetcherOption
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identRefreshWindow{}:
+			refreshWindow = option.Value().(time.Duration)
+		case identFetcherWorkerCount{}, identWhitelist{}:
+			fetcherOptions = append(fetcherOptions, option)
+		case identErrSink{}:
+			errSink = option.Value().(ErrSink)
+		}
+	}
+
+	if refreshWindow < time.Second {
+		refreshWindow = defaultRefreshWindow
+	}
+
+	fetch := NewFetcher(ctx, fetcherOptions...)
+	queue := newQueue(ctx, refreshWindow, fetch, errSink)
+
+	return &Cache{
+		queue: queue,
+		wl:    wl,
+	}
+}
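+
+// A minimal construction sketch (illustrative, not part of the vendored API
+// surface; it assumes a long-lived ctx plus the standard "log" and "time"
+// imports, and the option values are arbitrary):
+//
+//	cache := httprc.NewCache(ctx,
+//		httprc.WithRefreshWindow(5*time.Minute),
+//		httprc.WithFetcherWorkerCount(5),
+//		httprc.WithErrSink(httprc.ErrSinkFunc(func(err error) {
+//			log.Printf("httprc refresh error: %s", err)
+//		})),
+//	)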
+
+// Register configures a URL to be stored in the cache.
+//
+// For any given URL, the URL must be registered _BEFORE_ it is
+// accessed using `Get()` method.
+func (c *Cache) Register(u string, options ...RegisterOption) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if wl := c.wl; wl != nil {
+		if !wl.IsAllowed(u) {
+			return fmt.Errorf(`httprc.Cache: url %q has been rejected by whitelist`, u)
+		}
+	}
+
+	return c.queue.Register(u, options...)
+}
+
+// Unregister removes the given URL `u` from the cache.
+//
+// Subsequent calls to `Get()` will fail until `u` is registered again.
+func (c *Cache) Unregister(u string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.queue.Unregister(u)
+}
+
+// IsRegistered returns true if the given URL `u` has already been
+// registered in the cache.
+func (c *Cache) IsRegistered(u string) bool {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.queue.IsRegistered(u)
+}
+
+// Refresh is identical to Get(), except it always fetches the
+// specified resource anew, and updates the cached content
+func (c *Cache) Refresh(ctx context.Context, u string) (interface{}, error) {
+	return c.getOrFetch(ctx, u, true)
+}
+
+// Get returns the cached object.
+//
+// The context.Context argument is used to control the timeout for
+// synchronous fetches, when they need to happen. Synchronous fetches
+// will be performed when the cache does not contain the specified
+// resource.
+func (c *Cache) Get(ctx context.Context, u string) (interface{}, error) {
+	return c.getOrFetch(ctx, u, false)
+}
+
+func (c *Cache) getOrFetch(ctx context.Context, u string, forceRefresh bool) (interface{}, error) {
+	c.mu.RLock()
+	e, ok := c.queue.getRegistered(u)
+	if !ok {
+		c.mu.RUnlock()
+		return nil, fmt.Errorf(`url %q is not registered (did you make sure to call Register() first?)`, u)
+	}
+	c.mu.RUnlock()
+
+	// Only one goroutine may enter this section.
+	e.acquireSem()
+
+	// has this entry been fetched? (but ignore and do a fetch
+	// if forceRefresh is true)
+	if forceRefresh || !e.hasBeenFetched() {
+		if err := c.queue.fetchAndStore(ctx, e); err != nil {
+			e.releaseSem()
+			return nil, fmt.Errorf(`failed to fetch %q: %w`, u, err)
+		}
+	}
+
+	e.releaseSem()
+
+	e.mu.RLock()
+	data := e.data
+	e.mu.RUnlock()
+
+	return data, nil
+}
+
+func (c *Cache) Snapshot() *Snapshot {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.queue.snapshot()
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/fetcher.go b/vendor/github.com/lestrrat-go/httprc/fetcher.go
new file mode 100644
index 0000000000..0bce87a01b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/fetcher.go
@@ -0,0 +1,182 @@
+package httprc
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"sync"
+)
+
+type fetchRequest struct {
+	mu sync.RWMutex
+
+	// client contains the HTTP Client that can be used to make a
+	// request. By setting a custom *http.Client, you can for example
+	// provide a custom http.Transport
+	//
+	// If not specified, http.DefaultClient will be used.
+	client HTTPClient
+
+	wl Whitelist
+
+	// u contains the URL to be fetched
+	url string
+
+	// reply is a field that is only used by the internals of the fetcher.
+	// It is used to return the result of fetching.
+	reply chan *fetchResult
+}
+
+type fetchResult struct {
+	mu  sync.RWMutex
+	res *http.Response
+	err error
+}
+
+func (fr *fetchResult) reply(ctx context.Context, reply chan *fetchResult) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case reply <- fr:
+	}
+
+	close(reply)
+	return nil
+}
+
+type fetcher struct {
+	requests chan *fetchRequest
+}
+
+type Fetcher interface {
+	Fetch(context.Context, string, ...FetchOption) (*http.Response, error)
+	fetch(context.Context, *fetchRequest) (*http.Response, error)
+}
+
+func NewFetcher(ctx context.Context, options ...FetcherOption) Fetcher {
+	var nworkers int
+	var wl Whitelist
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identFetcherWorkerCount{}:
+			nworkers = option.Value().(int)
+		case identWhitelist{}:
+			wl = option.Value().(Whitelist)
+		}
+	}
+
+	if nworkers < 1 {
+		nworkers = 3
+	}
+
+	incoming := make(chan *fetchRequest)
+	for i := 0; i < nworkers; i++ {
+		go runFetchWorker(ctx, incoming, wl)
+	}
+	return &fetcher{
+		requests: incoming,
+	}
+}
+
+func (f *fetcher) Fetch(ctx context.Context, u string, options ...FetchOption) (*http.Response, error) {
+	var client HTTPClient
+	var wl Whitelist
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identHTTPClient{}:
+			client = option.Value().(HTTPClient)
+		case identWhitelist{}:
+			wl = option.Value().(Whitelist)
+		}
+	}
+
+	req := fetchRequest{
+		client: client,
+		url:    u,
+		wl:     wl,
+	}
+
+	return f.fetch(ctx, &req)
+}
+
+// fetch (unexported) is the main fetching implementation.
+// It allows the caller to reuse the same *fetchRequest object.
+func (f *fetcher) fetch(ctx context.Context, req *fetchRequest) (*http.Response, error) {
+	reply := make(chan *fetchResult, 1)
+	req.mu.Lock()
+	req.reply = reply
+	req.mu.Unlock()
+
+	// Send a request to the backend
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case f.requests <- req:
+	}
+
+	// wait until we get a reply
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case fr := <-reply:
+		fr.mu.RLock()
+		res := fr.res
+		err := fr.err
+		fr.mu.RUnlock()
+		return res, err
+	}
+}
+
+func runFetchWorker(ctx context.Context, incoming chan *fetchRequest, wl Whitelist) {
+LOOP:
+	for {
+		select {
+		case <-ctx.Done():
+			break LOOP
+		case req := <-incoming:
+			req.mu.RLock()
+			reply := req.reply
+			client := req.client
+			if client == nil {
+				client = http.DefaultClient
+			}
+			url := req.url
+			reqwl := req.wl
+			req.mu.RUnlock()
+
+			var wls []Whitelist
+			for _, v := range []Whitelist{wl, reqwl} {
+				if v != nil {
+					wls = append(wls, v)
+				}
+			}
+
+			if len(wls) > 0 {
+				for _, wl := range wls {
+					if !wl.IsAllowed(url) {
+						r := &fetchResult{
+							err: fmt.Errorf(`fetching url %q rejected by whitelist`, url),
+						}
+						if err := r.reply(ctx, reply); err != nil {
+							break LOOP
+						}
+						continue LOOP
+					}
+				}
+			}
+
+			// The body is handled by the consumer of the fetcher
+			//nolint:bodyclose
+			res, err := client.Get(url)
+			r := &fetchResult{
+				res: res,
+				err: err,
+			}
+			if err := r.reply(ctx, reply); err != nil {
+				break LOOP
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/httprc.go b/vendor/github.com/lestrrat-go/httprc/httprc.go
new file mode 100644
index 0000000000..8ae056a7e1
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/httprc.go
@@ -0,0 +1,22 @@
+//go:generate tools/genoptions.sh
+
+// Package httprc implements a cache for resources available
+// over http(s). Its aim is not only to cache these resources so
+// that it saves on HTTP roundtrips, but it also periodically
+// attempts to auto-refresh these resources once they are cached
+// based on the user-specified intervals and HTTP `Expires` and
+// `Cache-Control` headers, thus keeping the entries _relatively_ fresh.
+package httprc
+
+import "fmt"
+
+// RefreshError is the underlying error type that is sent to
+// the `httprc.ErrSink` objects
+type RefreshError struct {
+	URL string
+	Err error
+}
+
+func (re *RefreshError) Error() string {
+	return fmt.Sprintf(`refresh error (%q): %s`, re.URL, re.Err)
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/options.yaml b/vendor/github.com/lestrrat-go/httprc/options.yaml
new file mode 100644
index 0000000000..5a5139cb8a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/options.yaml
@@ -0,0 +1,119 @@
+package_name: httprc
+output: options_gen.go
+interfaces:
+  - name: RegisterOption
+    comment: |
+      RegisterOption describes options that can be passed to `(httprc.Cache).Register()`
+  - name: CacheOption
+    comment: |
+      CacheOption describes options that can be passed to `NewCache()`
+  - name: FetcherOption
+    methods:
+      - cacheOption
+    comment: |
+      FetcherOption describes options that can be passed to `httprc.NewFetcher()`
+  - name: FetchOption
+    comment: |
+      FetchOption describes options that can be passed to `(httprc.Fetcher).Fetch()`
+  - name: FetchRegisterOption
+    methods:
+      - fetchOption
+      - registerOption
+  - name: FetchFetcherRegisterOption
+    methods:
+      - fetchOption
+      - fetcherOption
+      - registerOption
+options:
+  - ident: FetcherWorkerCount
+    interface: FetcherOption
+    argument_type: int
+    comment: |
+      WithFetcherWorkerCount specifies the number of HTTP fetch workers that are spawned
+      in the backend. By default 3 workers are spawned.
+  - ident: Whitelist
+    interface: FetchFetcherRegisterOption
+    argument_type: Whitelist
+    comment: |
+      WithWhitelist specifies the Whitelist object that can control which URLs are
+      allowed to be processed.
+
+      It can be passed to `httprc.NewCache` as a whitelist applied to all
+      URLs that are fetched by the cache, or it can be passed on a per-URL
+      basis using `(httprc.Cache).Register()`. If both are specified,
+      the url must fulfill _both_ the cache-wide whitelist and the per-URL
+      whitelist.
+  - ident: Transformer
+    interface: RegisterOption
+    argument_type: Transformer
+    comment: |
+      WithTransformer specifies the `httprc.Transformer` object that should be applied
+      to the fetched resource. The `Transform()` method is only called if the HTTP request
+      returns a `200 OK` status.
+  - ident: HTTPClient
+    interface: FetchRegisterOption
+    argument_type: HTTPClient
+    comment: |
+      WithHTTPClient specifies the HTTP Client object that should be used to fetch
+      the resource. For example, if you need an `*http.Client` instance that requires
+      special TLS or Authorization setup, you might want to pass it using this option.
+  - ident: MinRefreshInterval
+    interface: RegisterOption
+    argument_type: time.Duration
+    comment: |
+      WithMinRefreshInterval specifies the minimum refresh interval to be used.
+
+      When we fetch the key from a remote URL, we first look at the `max-age`
+      directive from `Cache-Control` response header. If this value is present,
+      we compare the `max-age` value and the value specified by this option
+      and take the larger one (e.g. if `max-age` = 5 minutes and `min refresh` = 10
+      minutes, then next fetch will happen in 10 minutes)
+
+      Next we check for the `Expires` header, and similarly if the header is
+      present, we compare it against the value specified by this option,
+      and take the larger one.
+
+      Finally, if neither of the above headers are present, we use the
+      value specified by this option as the interval until the next refresh.
+
+      If unspecified, the minimum refresh interval is 1 hour.
+
+      This value and the header values are ignored if `WithRefreshInterval` is specified.
+  - ident: RefreshInterval
+    interface: RegisterOption
+    argument_type: time.Duration
+    comment: |
+      WithRefreshInterval specifies the static interval between refreshes
+      of resources controlled by `httprc.Cache`.
+
+      Providing this option overrides the adaptive token refreshing based
+      on Cache-Control/Expires header (and `httprc.WithMinRefreshInterval`),
+      and refreshes will *always* happen in this interval.
+
+      You generally do not want to make this value too small, as it can easily
+      be considered a DoS attack, and there is no backoff mechanism for failed
+      attempts.
+  - ident: RefreshWindow
+    interface: CacheOption
+    argument_type: time.Duration
+    comment: |
+      WithRefreshWindow specifies the interval between checks for refreshes.
+      `httprc.Cache` does not check for refreshes in exact intervals. Instead,
+      it wakes up at every tick that occurs in the interval specified by
+      `WithRefreshWindow` option, and refreshes all entries that need to be
+      refreshed within this window.
+
+      The default value is 15 minutes.
+
+      You generally do not want to make this value too small, as it can easily
+      be considered a DoS attack, and there is no backoff mechanism for failed
+      attempts.
+  - ident: ErrSink
+    interface: CacheOption
+    argument_type: ErrSink
+    comment: |
+      WithErrSink specifies the `httprc.ErrSink` object that handles errors
+      that occurred during the cache's execution. For example, you will be
+      able to intercept errors that occurred during the execution of Transformers.
+
+
diff --git a/vendor/github.com/lestrrat-go/httprc/options_gen.go b/vendor/github.com/lestrrat-go/httprc/options_gen.go
new file mode 100644
index 0000000000..daaf65f951
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/options_gen.go
@@ -0,0 +1,221 @@
+// This file is auto-generated by github.com/lestrrat-go/option/cmd/genoptions. DO NOT EDIT
+
+package httprc
+
+import (
+	"time"
+
+	"github.com/lestrrat-go/option"
+)
+
+type Option = option.Interface
+
+// CacheOption describes options that can be passed to `NewCache()`
+type CacheOption interface {
+	Option
+	cacheOption()
+}
+
+type cacheOption struct {
+	Option
+}
+
+func (*cacheOption) cacheOption() {}
+
+type FetchFetcherRegisterOption interface {
+	Option
+	fetchOption()
+	fetcherOption()
+	registerOption()
+}
+
+type fetchFetcherRegisterOption struct {
+	Option
+}
+
+func (*fetchFetcherRegisterOption) fetchOption() {}
+
+func (*fetchFetcherRegisterOption) fetcherOption() {}
+
+func (*fetchFetcherRegisterOption) registerOption() {}
+
+// FetchOption describes options that can be passed to `(httprc.Fetcher).Fetch()`
+type FetchOption interface {
+	Option
+	fetchOption()
+}
+
+type fetchOption struct {
+	Option
+}
+
+func (*fetchOption) fetchOption() {}
+
+type FetchRegisterOption interface {
+	Option
+	fetchOption()
+	registerOption()
+}
+
+type fetchRegisterOption struct {
+	Option
+}
+
+func (*fetchRegisterOption) fetchOption() {}
+
+func (*fetchRegisterOption) registerOption() {}
+
+// FetcherOption describes options that can be passed to `httprc.NewFetcher()`
+type FetcherOption interface {
+	Option
+	cacheOption()
+}
+
+type fetcherOption struct {
+	Option
+}
+
+func (*fetcherOption) cacheOption() {}
+
+// RegisterOption describes options that can be passed to `(httprc.Cache).Register()`
+type RegisterOption interface {
+	Option
+	registerOption()
+}
+
+type registerOption struct {
+	Option
+}
+
+func (*registerOption) registerOption() {}
+
+type identErrSink struct{}
+type identFetcherWorkerCount struct{}
+type identHTTPClient struct{}
+type identMinRefreshInterval struct{}
+type identRefreshInterval struct{}
+type identRefreshWindow struct{}
+type identTransformer struct{}
+type identWhitelist struct{}
+
+func (identErrSink) String() string {
+	return "WithErrSink"
+}
+
+func (identFetcherWorkerCount) String() string {
+	return "WithFetcherWorkerCount"
+}
+
+func (identHTTPClient) String() string {
+	return "WithHTTPClient"
+}
+
+func (identMinRefreshInterval) String() string {
+	return "WithMinRefreshInterval"
+}
+
+func (identRefreshInterval) String() string {
+	return "WithRefreshInterval"
+}
+
+func (identRefreshWindow) String() string {
+	return "WithRefreshWindow"
+}
+
+func (identTransformer) String() string {
+	return "WithTransformer"
+}
+
+func (identWhitelist) String() string {
+	return "WithWhitelist"
+}
+
+// WithErrSink specifies the `httprc.ErrSink` object that handles errors
+// that occurred during the cache's execution. For example, you will be
+// able to intercept errors that occurred during the execution of Transformers.
+func WithErrSink(v ErrSink) CacheOption {
+	return &cacheOption{option.New(identErrSink{}, v)}
+}
+
+// WithFetcherWorkerCount specifies the number of HTTP fetch workers that are spawned
+// in the backend. By default 3 workers are spawned.
+func WithFetcherWorkerCount(v int) FetcherOption {
+	return &fetcherOption{option.New(identFetcherWorkerCount{}, v)}
+}
+
+// WithHTTPClient specifies the HTTP Client object that should be used to fetch
+// the resource. For example, if you need an `*http.Client` instance that requires
+// special TLS or Authorization setup, you might want to pass it using this option.
+func WithHTTPClient(v HTTPClient) FetchRegisterOption {
+	return &fetchRegisterOption{option.New(identHTTPClient{}, v)}
+}
+
+// WithMinRefreshInterval specifies the minimum refresh interval to be used.
+//
+// When we fetch the key from a remote URL, we first look at the `max-age`
+// directive from `Cache-Control` response header. If this value is present,
+// we compare the `max-age` value and the value specified by this option
+// and take the larger one (e.g. if `max-age` = 5 minutes and `min refresh` = 10
+// minutes, then next fetch will happen in 10 minutes)
+//
+// Next we check for the `Expires` header, and similarly if the header is
+// present, we compare it against the value specified by this option,
+// and take the larger one.
+//
+// Finally, if neither of the above headers are present, we use the
+// value specified by this option as the interval until the next refresh.
+//
+// If unspecified, the minimum refresh interval is 1 hour.
+//
+// This value and the header values are ignored if `WithRefreshInterval` is specified.
+func WithMinRefreshInterval(v time.Duration) RegisterOption {
+	return &registerOption{option.New(identMinRefreshInterval{}, v)}
+}
+
+// WithRefreshInterval specifies the static interval between refreshes
+// of resources controlled by `httprc.Cache`.
+//
+// Providing this option overrides the adaptive token refreshing based
+// on Cache-Control/Expires header (and `httprc.WithMinRefreshInterval`),
+// and refreshes will *always* happen in this interval.
+//
+// You generally do not want to make this value too small, as it can easily
+// be considered a DoS attack, and there is no backoff mechanism for failed
+// attempts.
+func WithRefreshInterval(v time.Duration) RegisterOption {
+	return &registerOption{option.New(identRefreshInterval{}, v)}
+}
+
+// WithRefreshWindow specifies the interval between checks for refreshes.
+// `httprc.Cache` does not check for refreshes in exact intervals. Instead,
+// it wakes up at every tick that occurs in the interval specified by
+// `WithRefreshWindow` option, and refreshes all entries that need to be
+// refreshed within this window.
+//
+// The default value is 15 minutes.
+//
+// You generally do not want to make this value too small, as it can easily
+// be considered a DoS attack, and there is no backoff mechanism for failed
+// attempts.
+func WithRefreshWindow(v time.Duration) CacheOption {
+	return &cacheOption{option.New(identRefreshWindow{}, v)}
+}
+
+// WithTransformer specifies the `httprc.Transformer` object that should be applied
+// to the fetched resource. The `Transform()` method is only called if the HTTP request
+// returns a `200 OK` status.
+func WithTransformer(v Transformer) RegisterOption {
+	return &registerOption{option.New(identTransformer{}, v)}
+}
+
+// WithWhitelist specifies the Whitelist object that can control which URLs are
+// allowed to be processed.
+//
+// It can be passed to `httprc.NewCache` as a whitelist applied to all
+// URLs that are fetched by the cache, or it can be passed on a per-URL
+// basis using `(httprc.Cache).Register()`. If both are specified,
+// the url must fulfill _both_ the cache-wide whitelist and the per-URL
+// whitelist.
+func WithWhitelist(v Whitelist) FetchFetcherRegisterOption {
+	return &fetchFetcherRegisterOption{option.New(identWhitelist{}, v)}
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/queue.go b/vendor/github.com/lestrrat-go/httprc/queue.go
new file mode 100644
index 0000000000..897207b7d2
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/queue.go
@@ -0,0 +1,459 @@
+package httprc
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/lestrrat-go/httpcc"
+)
+
+// ErrSink is an abstraction that allows users to consume errors
+// produced while the cache queue is running.
+type ErrSink interface {
+	// Error accepts errors produced during the cache queue's execution.
+	// The method should never block, otherwise the fetch loop may be
+	// paused for a prolonged amount of time.
+	Error(error)
+}
+
+type ErrSinkFunc func(err error)
+
+func (f ErrSinkFunc) Error(err error) {
+	f(err)
+}
+
+// Transformer is responsible for converting an HTTP response
+// into an appropriate form of your choosing.
+type Transformer interface {
+	// Transform receives an HTTP response object, and should
+	// return an appropriate object that suits your needs.
+	//
+	// If you happen to use the response body, you are responsible
+	// for closing the body
+	Transform(string, *http.Response) (interface{}, error)
+}
+
+type TransformFunc func(string, *http.Response) (interface{}, error)
+
+func (f TransformFunc) Transform(u string, res *http.Response) (interface{}, error) {
+	return f(u, res)
+}
+
+// BodyBytes is the default Transformer applied to all resources.
+// It takes an *http.Response object and extracts the body
+// of the response as `[]byte`
+type BodyBytes struct{}
+
+func (BodyBytes) Transform(_ string, res *http.Response) (interface{}, error) {
+	buf, err := ioutil.ReadAll(res.Body)
+	defer res.Body.Close()
+	if err != nil {
+		return nil, fmt.Errorf(`failed to read response body: %w`, err)
+	}
+
+	return buf, nil
+}
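+
+// A sketch of a custom Transformer that decodes the response body as JSON
+// (illustrative only; `cache` and `u` are assumed to exist, and the
+// encoding/json import is implied):
+//
+//	jsonXform := httprc.TransformFunc(func(_ string, res *http.Response) (interface{}, error) {
+//		defer res.Body.Close()
+//		var v map[string]interface{}
+//		if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
+//			return nil, err
+//		}
+//		return v, nil
+//	})
+//	_ = cache.Register(u, httprc.WithTransformer(jsonXform))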
+
+type rqentry struct {
+	fireAt time.Time
+	url    string
+}
+
+// entry represents a resource to be fetched over HTTP,
+// along with optional specifications such as the *http.Client
+// object to use.
+type entry struct {
+	mu  sync.RWMutex
+	sem chan struct{}
+
+	lastFetch time.Time
+
+	// The interval between refreshes is calculated in one of two ways.
+	// 1) You can set an explicit refresh interval by using WithRefreshInterval().
+	//    In this mode, it doesn't matter what the HTTP response says in its
+	//    Cache-Control or Expires headers
+	// 2) You can let us calculate the time-to-refresh based on the resource's
+	//    Cache-Control or Expires headers.
+	//    First, the user provides us the absolute minimum interval before
+	//    refreshes. We will never check for refreshes before this specified
+	//    amount of time.
+	//
+	//    Next, max-age directive in the Cache-Control header is consulted.
+	//    If `max-age` is not present, we skip the following section, and
+	//    proceed to the next option.
+	//    If `max-age > user-supplied minimum interval`, then we use the max-age,
+	//    otherwise the user-supplied minimum interval is used.
+	//
+	//    Next, the value specified in Expires header is consulted.
+	//    If the header is not present, we skip the following section and
+	//    proceed to the next option.
+	//    We take the time until expiration `expires - time.Now()`, and
+	//    if `time-until-expiration > user-supplied minimum interval`, then
+	//    we use the expires value, otherwise the user-supplied minimum interval is used.
+	//
+	//    If all of the above fails, we use the user-supplied minimum interval.
+	refreshInterval    time.Duration
+	minRefreshInterval time.Duration
+
+	request *fetchRequest
+
+	transform Transformer
+	data      interface{}
+}
+
+func (e *entry) acquireSem() {
+	e.sem <- struct{}{}
+}
+
+func (e *entry) releaseSem() {
+	<-e.sem
+}
+
+func (e *entry) hasBeenFetched() bool {
+	e.mu.RLock()
+	defer e.mu.RUnlock()
+	return !e.lastFetch.IsZero()
+}
+
+// queue is responsible for updating the contents of the storage
+type queue struct {
+	mu         sync.RWMutex
+	registry   map[string]*entry
+	windowSize time.Duration
+	fetch      Fetcher
+	fetchCond  *sync.Cond
+	fetchQueue []*rqentry
+
+	// list is a list of urls sorted by their expected fire time.
+	// When we get a new tick in the RQ (refresh) loop, we process everything
+	// that can be fired up to the time of that tick.
+	list []*rqentry
+
+	// clock is really only used by testing
+	clock interface {
+		Now() time.Time
+	}
+}
+
+type clockFunc func() time.Time
+
+func (cf clockFunc) Now() time.Time {
+	return cf()
+}
+
+func newQueue(ctx context.Context, window time.Duration, fetch Fetcher, errSink ErrSink) *queue {
+	fetchLocker := &sync.Mutex{}
+	rq := &queue{
+		windowSize: window,
+		fetch:      fetch,
+		fetchCond:  sync.NewCond(fetchLocker),
+		registry:   make(map[string]*entry),
+		clock:      clockFunc(time.Now),
+	}
+
+	go rq.refreshLoop(ctx, errSink)
+
+	return rq
+}
+
+func (q *queue) Register(u string, options ...RegisterOption) error {
+	var refreshInterval time.Duration
+	var client HTTPClient
+	var wl Whitelist
+	var transform Transformer = BodyBytes{}
+
+	minRefreshInterval := 15 * time.Minute
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identHTTPClient{}:
+			client = option.Value().(HTTPClient)
+		case identRefreshInterval{}:
+			refreshInterval = option.Value().(time.Duration)
+		case identMinRefreshInterval{}:
+			minRefreshInterval = option.Value().(time.Duration)
+		case identTransformer{}:
+			transform = option.Value().(Transformer)
+		case identWhitelist{}:
+			wl = option.Value().(Whitelist)
+		}
+	}
+
+	q.mu.RLock()
+	rWindow := q.windowSize
+	q.mu.RUnlock()
+
+	if refreshInterval > 0 && refreshInterval < rWindow {
+		return fmt.Errorf(`refresh interval (%s) is smaller than refresh window (%s): this will not work as expected`, refreshInterval, rWindow)
+	}
+
+	e := entry{
+		sem:                make(chan struct{}, 1),
+		minRefreshInterval: minRefreshInterval,
+		transform:          transform,
+		refreshInterval:    refreshInterval,
+		request: &fetchRequest{
+			client: client,
+			url:    u,
+			wl:     wl,
+		},
+	}
+	q.mu.Lock()
+	q.registry[u] = &e
+	q.mu.Unlock()
+	return nil
+}
+
+func (q *queue) Unregister(u string) error {
+	q.mu.Lock()
+	defer q.mu.Unlock()
+	_, ok := q.registry[u]
+	if !ok {
+		return fmt.Errorf(`url %q has not been registered`, u)
+	}
+	delete(q.registry, u)
+	return nil
+}
+
+func (q *queue) getRegistered(u string) (*entry, bool) {
+	q.mu.RLock()
+	e, ok := q.registry[u]
+	q.mu.RUnlock()
+
+	return e, ok
+}
+
+func (q *queue) IsRegistered(u string) bool {
+	_, ok := q.getRegistered(u)
+	return ok
+}
+
+func (q *queue) fetchLoop(ctx context.Context, errSink ErrSink) {
+	for {
+		q.fetchCond.L.Lock()
+		for len(q.fetchQueue) <= 0 {
+			select {
+			case <-ctx.Done():
+				return
+			default:
+				q.fetchCond.Wait()
+			}
+		}
+		list := make([]*rqentry, len(q.fetchQueue))
+		copy(list, q.fetchQueue)
+		q.fetchQueue = q.fetchQueue[:0]
+		q.fetchCond.L.Unlock()
+
+		for _, rq := range list {
+			select {
+			case <-ctx.Done():
+				return
+			default:
+			}
+
+			e, ok := q.getRegistered(rq.url)
+			if !ok {
+				continue
+			}
+			if err := q.fetchAndStore(ctx, e); err != nil {
+				if errSink != nil {
+					errSink.Error(&RefreshError{
+						URL: rq.url,
+						Err: err,
+					})
+				}
+			}
+		}
+	}
+}
+
+// This loop is responsible for periodically updating the cached content
+func (q *queue) refreshLoop(ctx context.Context, errSink ErrSink) {
+	// Tick every q.windowSize duration.
+	ticker := time.NewTicker(q.windowSize)
+
+	go q.fetchLoop(ctx, errSink)
+	defer q.fetchCond.Signal()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case t := <-ticker.C:
+			t = t.Round(time.Second)
+			// To avoid getting stuck here, we just copy the relevant
+			// items, and release the lock within this critical section
+			var list []*rqentry
+			q.mu.Lock()
+			var max int
+			for i, r := range q.list {
+				if r.fireAt.Before(t) || r.fireAt.Equal(t) {
+					max = i
+					list = append(list, r)
+					continue
+				}
+				break
+			}
+
+			if len(list) > 0 {
+				q.list = q.list[max+1:]
+			}
+			q.mu.Unlock() // release lock
+
+			if len(list) > 0 {
+				// Now we need to fetch these, but do this elsewhere so
+				// that we don't block this main loop
+				q.fetchCond.L.Lock()
+				q.fetchQueue = append(q.fetchQueue, list...)
+				q.fetchCond.L.Unlock()
+				q.fetchCond.Signal()
+			}
+		}
+	}
+}
+
+func (q *queue) fetchAndStore(ctx context.Context, e *entry) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	// synchronously go fetch
+	e.lastFetch = time.Now()
+	res, err := q.fetch.fetch(ctx, e.request)
+	if err != nil {
+		// Even if the request failed, we need to queue the next fetch
+		q.enqueueNextFetch(nil, e)
+		return fmt.Errorf(`failed to fetch %q: %w`, e.request.url, err)
+	}
+
+	q.enqueueNextFetch(res, e)
+
+	data, err := e.transform.Transform(e.request.url, res)
+	if err != nil {
+		return fmt.Errorf(`failed to transform HTTP response for %q: %w`, e.request.url, err)
+	}
+	e.data = data
+
+	return nil
+}
+
+func (q *queue) Enqueue(u string, interval time.Duration) error {
+	fireAt := q.clock.Now().Add(interval).Round(time.Second)
+
+	q.mu.Lock()
+	defer q.mu.Unlock()
+
+	list := q.list
+
+	ll := len(list)
+	if ll == 0 || list[ll-1].fireAt.Before(fireAt) {
+		list = append(list, &rqentry{
+			fireAt: fireAt,
+			url:    u,
+		})
+	} else {
+		for i := 0; i < ll; i++ {
+			if i == ll-1 || list[i].fireAt.After(fireAt) {
+				// insert here
+				list = append(list[:i+1], list[i:]...)
+				list[i] = &rqentry{fireAt: fireAt, url: u}
+				break
+			}
+		}
+	}
+
+	q.list = list
+	return nil
+}
+
+func (q *queue) MarshalJSON() ([]byte, error) {
+	var buf bytes.Buffer
+	buf.WriteString(`{"list":[`)
+	q.mu.RLock()
+	for i, e := range q.list {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		fmt.Fprintf(&buf, `{"fire_at":%q,"url":%q}`, e.fireAt.Format(time.RFC3339), e.url)
+	}
+	q.mu.RUnlock()
+	buf.WriteString(`]}`)
+	return buf.Bytes(), nil
+}
+
+func (q *queue) enqueueNextFetch(res *http.Response, e *entry) {
+	dur := calculateRefreshDuration(res, e)
+	// TODO send to error sink
+	_ = q.Enqueue(e.request.url, dur)
+}
+
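+// calculateRefreshDuration decides how long to wait before the next refresh
+// of an entry. As a worked example of the precedence below: with a minimum
+// refresh interval of 10 minutes, a response carrying `Cache-Control:
+// max-age=300` (5 minutes) still yields a 10 minute interval, while
+// `max-age=3600` yields 1 hour; an explicit refresh interval set via
+// WithRefreshInterval, when present, always wins.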
+func calculateRefreshDuration(res *http.Response, e *entry) time.Duration {
+	if e.refreshInterval > 0 {
+		return e.refreshInterval
+	}
+
+	if res != nil {
+		if v := res.Header.Get(`Cache-Control`); v != "" {
+			dir, err := httpcc.ParseResponse(v)
+			if err == nil {
+				maxAge, ok := dir.MaxAge()
+				if ok {
+					resDuration := time.Duration(maxAge) * time.Second
+					if resDuration > e.minRefreshInterval {
+						return resDuration
+					}
+					return e.minRefreshInterval
+				}
+				// fallthrough
+			}
+			// fallthrough
+		}
+
+		if v := res.Header.Get(`Expires`); v != "" {
+			expires, err := http.ParseTime(v)
+			if err == nil {
+				resDuration := time.Until(expires)
+				if resDuration > e.minRefreshInterval {
+					return resDuration
+				}
+				return e.minRefreshInterval
+			}
+			// fallthrough
+		}
+	}
+
+	// Previous fallthroughs are a little redundant, but hey, it's all good.
+	return e.minRefreshInterval
+}
+
+type SnapshotEntry struct {
+	URL         string      `json:"url"`
+	Data        interface{} `json:"data"`
+	LastFetched time.Time   `json:"last_fetched"`
+}
+type Snapshot struct {
+	Entries []SnapshotEntry `json:"entries"`
+}
+
+// Snapshot returns the contents of the cache at the given moment.
+func (q *queue) snapshot() *Snapshot {
+	q.mu.RLock()
+	list := make([]SnapshotEntry, 0, len(q.registry))
+
+	for url, e := range q.registry {
+		list = append(list, SnapshotEntry{
+			URL:         url,
+			LastFetched: e.lastFetch,
+			Data:        e.data,
+		})
+	}
+	q.mu.RUnlock()
+
+	return &Snapshot{
+		Entries: list,
+	}
+}
diff --git a/vendor/github.com/lestrrat-go/httprc/whitelist.go b/vendor/github.com/lestrrat-go/httprc/whitelist.go
new file mode 100644
index 0000000000..b80332a6cd
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/httprc/whitelist.go
@@ -0,0 +1,73 @@
+package httprc
+
+import "regexp"
+
+// Whitelist is an interface for a set of URL whitelists. When provided
+// to fetching operations, urls are checked against this object, and
+// the object must return true for urls to be fetched.
+type Whitelist interface {
+	IsAllowed(string) bool
+}
+
+// InsecureWhitelist allows any URLs to be fetched.
+type InsecureWhitelist struct{}
+
+func (InsecureWhitelist) IsAllowed(string) bool {
+	return true
+}
+
+// RegexpWhitelist is a httprc.Whitelist object comprised of a list of *regexp.Regexp
+// objects. All entries in the list are tried until one matches. If none of the
+// *regexp.Regexp objects match, then the URL is deemed unallowed.
+type RegexpWhitelist struct {
+	patterns []*regexp.Regexp
+}
+
+func NewRegexpWhitelist() *RegexpWhitelist {
+	return &RegexpWhitelist{}
+}
+
+func (w *RegexpWhitelist) Add(pat *regexp.Regexp) *RegexpWhitelist {
+	w.patterns = append(w.patterns, pat)
+	return w
+}
+
+// IsAllowed returns true if any of the patterns in the whitelist
+// matches the given URL.
+func (w *RegexpWhitelist) IsAllowed(u string) bool {
+	for _, pat := range w.patterns {
+		if pat.MatchString(u) {
+			return true
+		}
+	}
+	return false
+}
+
+// MapWhitelist is a httprc.Whitelist object comprised of a map of strings.
+// If the URL exists in the map, then the URL is allowed to be fetched.
+type MapWhitelist struct {
+	store map[string]struct{}
+}
+
+func NewMapWhitelist() *MapWhitelist {
+	return &MapWhitelist{store: make(map[string]struct{})}
+}
+
+func (w *MapWhitelist) Add(pat string) *MapWhitelist {
+	w.store[pat] = struct{}{}
+	return w
+}
+
+func (w *MapWhitelist) IsAllowed(u string) bool {
+	_, b := w.store[u]
+	return b
+}
+
+// WhitelistFunc is a httprc.Whitelist object based on a function.
+// You can perform any sort of check against the given URL to determine
+// if it can be fetched or not.
+type WhitelistFunc func(string) bool
+
+func (w WhitelistFunc) IsAllowed(u string) bool {
+	return w(u)
+}
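+
+// A minimal wiring sketch (illustrative; `cache` and the pattern are
+// assumptions, and the regexp package import is implied):
+//
+//	wl := httprc.NewRegexpWhitelist().
+//		Add(regexp.MustCompile(`^https://example\.com/`))
+//	_ = cache.Register(`https://example.com/resource.json`, httprc.WithWhitelist(wl))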
diff --git a/vendor/github.com/lestrrat-go/iter/LICENSE b/vendor/github.com/lestrrat-go/iter/LICENSE
new file mode 100644
index 0000000000..963209bfba
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/iter/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 lestrrat-go
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/lestrrat-go/iter/arrayiter/arrayiter.go b/vendor/github.com/lestrrat-go/iter/arrayiter/arrayiter.go
new file mode 100644
index 0000000000..b531e769e0
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/iter/arrayiter/arrayiter.go
@@ -0,0 +1,192 @@
+package arrayiter
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"sync"
+)
+
+func Iterate(ctx context.Context, a interface{}) (Iterator, error) {
+	arv := reflect.ValueOf(a)
+
+	switch arv.Kind() {
+	case reflect.Array, reflect.Slice:
+	default:
+		return nil, fmt.Errorf(`argument must be an array/slice (%s)`, arv.Type())
+	}
+
+	ch := make(chan *Pair)
+	go func(ctx context.Context, ch chan *Pair, arv reflect.Value) {
+		defer close(ch)
+
+		for i := 0; i < arv.Len(); i++ {
+			value := arv.Index(i)
+			pair := &Pair{
+				Index: i,
+				Value: value.Interface(),
+			}
+			select {
+			case <-ctx.Done():
+				return
+			case ch <- pair:
+			}
+		}
+	}(ctx, ch, arv)
+
+	return New(ch), nil
+}
+
+// Source represents an array that knows how to create an iterator
+type Source interface {
+	Iterate(context.Context) Iterator
+}
+
+// Pair represents a single index/value pair from an array
+type Pair struct {
+	Index int
+	Value interface{}
+}
+
+// Iterator iterates through the indices and values of an array
+type Iterator interface {
+	Next(context.Context) bool
+	Pair() *Pair
+}
+
+type iter struct {
+	ch   chan *Pair
+	mu   sync.RWMutex
+	next *Pair
+}
+
+// Visitor represents an object that handles each pair in an array
+type Visitor interface {
+	Visit(int, interface{}) error
+}
+
+// VisitorFunc is a type of Visitor based on a function
+type VisitorFunc func(int, interface{}) error
+
+func (fn VisitorFunc) Visit(s int, v interface{}) error {
+	return fn(s, v)
+}
+
+func New(ch chan *Pair) Iterator {
+	return &iter{
+		ch: ch,
+	}
+}
+
+// Next returns true if there are more items to read from the iterator
+func (i *iter) Next(ctx context.Context) bool {
+	i.mu.RLock()
+	if i.ch == nil {
+		i.mu.RUnlock()
+		return false
+	}
+	i.mu.RUnlock()
+
+	i.mu.Lock()
+	defer i.mu.Unlock()
+	select {
+	case <-ctx.Done():
+		i.ch = nil
+		return false
+	case v, ok := <-i.ch:
+		if !ok {
+			i.ch = nil
+			return false
+		}
+		i.next = v
+		return true
+	}
+
+	//nolint:govet
+	return false // never reached
+}
+
+// Pair returns the currently buffered Pair. Calling Next() will reset its value
+func (i *iter) Pair() *Pair {
+	i.mu.RLock()
+	defer i.mu.RUnlock()
+	return i.next
+}
+
+// Walk walks through each element in the array
+func Walk(ctx context.Context, s Source, v Visitor) error {
+	for i := s.Iterate(ctx); i.Next(ctx); {
+		pair := i.Pair()
+		if err := v.Visit(pair.Index, pair.Value); err != nil {
+			return fmt.Errorf(`failed to visit index %d: %w`, pair.Index, err)
+		}
+	}
+	return nil
+}
+
+// AsArray assigns the values obtained from the iterator source `s` to the
+// array or slice pointed to by `v`.
+func AsArray(ctx context.Context, s interface{}, v interface{}) error {
+	var iter Iterator
+	switch reflect.ValueOf(s).Kind() {
+	case reflect.Array, reflect.Slice:
+		x, err := Iterate(ctx, s)
+		if err != nil {
+			return fmt.Errorf(`failed to iterate over array/slice type: %w`, err)
+		}
+		iter = x
+	default:
+		ssrc, ok := s.(Source)
+		if !ok {
+			return fmt.Errorf(`cannot iterate over %T: not a arrayiter.Source type`, s)
+		}
+		iter = ssrc.Iterate(ctx)
+	}
+
+	dst := reflect.ValueOf(v)
+
+	// dst MUST be a pointer to an array type
+	if kind := dst.Kind(); kind != reflect.Ptr {
+		return fmt.Errorf(`dst must be a pointer to a array (%s)`, dst.Type())
+	}
+
+	dst = dst.Elem()
+	switch dst.Kind() {
+	case reflect.Array, reflect.Slice:
+	default:
+		return fmt.Errorf(`dst must be a pointer to an array or slice (%s)`, dst.Type())
+	}
+
+	var pairs []*Pair
+	for iter.Next(ctx) {
+		pair := iter.Pair()
+		pairs = append(pairs, pair)
+	}
+
+	switch dst.Kind() {
+	case reflect.Array:
+		if len(pairs) < dst.Len() {
+			return fmt.Errorf(`dst array does not have enough space for elements (%d, want %d)`, dst.Len(), len(pairs))
+		}
+	case reflect.Slice:
+		if dst.IsNil() {
+			dst.Set(reflect.MakeSlice(dst.Type(), len(pairs), len(pairs)))
+		}
+	}
+
+	// dst must be assignable
+	if !dst.CanSet() {
+		return fmt.Errorf(`dst is not writeable`)
+	}
+
+	elemtyp := dst.Type().Elem()
+	for _, pair := range pairs {
+		rvvalue := reflect.ValueOf(pair.Value)
+
+		if !rvvalue.Type().AssignableTo(elemtyp) {
+			return fmt.Errorf(`cannot assign value of type %s to array element of type %s`, rvvalue.Type(), elemtyp)
+		}
+
+		dst.Index(pair.Index).Set(rvvalue)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/lestrrat-go/iter/mapiter/mapiter.go b/vendor/github.com/lestrrat-go/iter/mapiter/mapiter.go
new file mode 100644
index 0000000000..ec332855eb
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/iter/mapiter/mapiter.go
@@ -0,0 +1,195 @@
+package mapiter
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"sync"
+)
+
+// Iterate creates an iterator from arbitrary map types. This is not
+// the most efficient tool, but it's the quickest way to create an
+// iterator for maps.
+// Also, note that you cannot make any assumptions on the order of
+// pairs being returned.
+func Iterate(ctx context.Context, m interface{}) (Iterator, error) {
+	mrv := reflect.ValueOf(m)
+
+	if mrv.Kind() != reflect.Map {
+		return nil, fmt.Errorf(`argument must be a map (%s)`, mrv.Type())
+	}
+
+	ch := make(chan *Pair)
+	go func(ctx context.Context, ch chan *Pair, mrv reflect.Value) {
+		defer close(ch)
+		for _, key := range mrv.MapKeys() {
+			value := mrv.MapIndex(key)
+			pair := &Pair{
+				Key:   key.Interface(),
+				Value: value.Interface(),
+			}
+			select {
+			case <-ctx.Done():
+				return
+			case ch <- pair:
+			}
+		}
+	}(ctx, ch, mrv)
+
+	return New(ch), nil
+}
+
+// Source represents a map that knows how to create an iterator
+type Source interface {
+	Iterate(context.Context) Iterator
+}
+
+// Pair represents a single pair of key and value from a map
+type Pair struct {
+	Key   interface{}
+	Value interface{}
+}
+
+// Iterator iterates through keys and values of a map
+type Iterator interface {
+	Next(context.Context) bool
+	Pair() *Pair
+}
+
+type iter struct {
+	ch   chan *Pair
+	mu   sync.RWMutex
+	next *Pair
+}
+
+// Visitor represents an object that handles each pair in a map
+type Visitor interface {
+	Visit(interface{}, interface{}) error
+}
+
+// VisitorFunc is a type of Visitor based on a function
+type VisitorFunc func(interface{}, interface{}) error
+
+func (fn VisitorFunc) Visit(s interface{}, v interface{}) error {
+	return fn(s, v)
+}
+
+func New(ch chan *Pair) Iterator {
+	return &iter{
+		ch: ch,
+	}
+}
+
+// Next returns true if there are more items to read from the iterator
+func (i *iter) Next(ctx context.Context) bool {
+	i.mu.RLock()
+	if i.ch == nil {
+		i.mu.RUnlock()
+		return false
+	}
+	i.mu.RUnlock()
+
+	i.mu.Lock()
+	defer i.mu.Unlock()
+	select {
+	case <-ctx.Done():
+		i.ch = nil
+		return false
+	case v, ok := <-i.ch:
+		if !ok {
+			i.ch = nil
+			return false
+		}
+		i.next = v
+		return true
+	}
+
+	//nolint:govet
+	return false // never reached
+}
+
+// Pair returns the currently buffered Pair. Calling Next() will reset its value
+func (i *iter) Pair() *Pair {
+	i.mu.RLock()
+	defer i.mu.RUnlock()
+	return i.next
+}
+
+// Walk walks through each element in the map
+func Walk(ctx context.Context, s Source, v Visitor) error {
+	for i := s.Iterate(ctx); i.Next(ctx); {
+		pair := i.Pair()
+		if err := v.Visit(pair.Key, pair.Value); err != nil {
+			return fmt.Errorf(`failed to visit key %s: %w`, pair.Key, err)
+		}
+	}
+	return nil
+}
+
+// AsMap returns the values obtained from the source as a map
+func AsMap(ctx context.Context, s interface{}, v interface{}) error {
+	var iter Iterator
+	switch reflect.ValueOf(s).Kind() {
+	case reflect.Map:
+		x, err := Iterate(ctx, s)
+		if err != nil {
+			return fmt.Errorf(`failed to iterate over map type: %w`, err)
+		}
+		iter = x
+	default:
+		ssrc, ok := s.(Source)
+		if !ok {
+			return fmt.Errorf(`cannot iterate over %T: not a mapiter.Source type`, s)
+		}
+		iter = ssrc.Iterate(ctx)
+	}
+
+	dst := reflect.ValueOf(v)
+
+	// dst MUST be a pointer to a map type
+	if kind := dst.Kind(); kind != reflect.Ptr {
+		return fmt.Errorf(`dst must be a pointer to a map (%s)`, dst.Type())
+	}
+
+	dst = dst.Elem()
+	if dst.Kind() != reflect.Map {
+		return fmt.Errorf(`dst must be a pointer to a map (%s)`, dst.Type())
+	}
+
+	if dst.IsNil() {
+		dst.Set(reflect.MakeMap(dst.Type()))
+	}
+
+	// dst must be assignable
+	if !dst.CanSet() {
+		return fmt.Errorf(`dst is not writeable`)
+	}
+
+	keytyp := dst.Type().Key()
+	valtyp := dst.Type().Elem()
+
+	for iter.Next(ctx) {
+		pair := iter.Pair()
+
+		rvkey := reflect.ValueOf(pair.Key)
+		rvvalue := reflect.ValueOf(pair.Value)
+
+		if !rvkey.Type().AssignableTo(keytyp) {
+			return fmt.Errorf(`cannot assign key of type %s to map key of type %s`, rvkey.Type(), keytyp)
+		}
+
+		switch rvvalue.Kind() {
+		// we can only check whether rvvalue is assignable to valtyp if it is non-nil
+		case reflect.Invalid:
+			rvvalue = reflect.New(valtyp).Elem()
+		default:
+			if !rvvalue.Type().AssignableTo(valtyp) {
+				return fmt.Errorf(`cannot assign value of type %s to map value of type %s`, rvvalue.Type(), valtyp)
+			}
+		}
+
+		dst.SetMapIndex(rvkey, rvvalue)
+	}
+
+	return nil
+}
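+
+// A small iteration sketch (illustrative; ctx is an assumed context.Context
+// and the fmt import is implied):
+//
+//	m := map[string]int{"one": 1, "two": 2}
+//	it, err := mapiter.Iterate(ctx, m)
+//	if err != nil {
+//		return err
+//	}
+//	for it.Next(ctx) {
+//		pair := it.Pair()
+//		fmt.Printf("%v => %v\n", pair.Key, pair.Value)
+//	}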
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/LICENSE b/vendor/github.com/lestrrat-go/jwx/v2/LICENSE
new file mode 100644
index 0000000000..205e33a7f1
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 lestrrat
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/cert/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v2/cert/BUILD.bazel
new file mode 100644
index 0000000000..7b2617e55c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/cert/BUILD.bazel
@@ -0,0 +1,32 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "cert",
+    srcs = [
+        "cert.go",
+        "chain.go",
+    ],
+    importpath = "github.com/lestrrat-go/jwx/v2/cert",
+    visibility = ["//visibility:public"],
+    deps = ["//internal/base64"],
+)
+
+go_test(
+    name = "cert_test",
+    srcs = [
+        "cert_test.go",
+        "chain_test.go",
+    ],
+    deps = [
+        ":cert",
+        "//internal/jwxtest",
+        "@com_github_stretchr_testify//assert",
+        "@com_github_stretchr_testify//require",
+    ],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":cert",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/cert/cert.go b/vendor/github.com/lestrrat-go/jwx/v2/cert/cert.go
new file mode 100644
index 0000000000..1dfdec65aa
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/cert/cert.go
@@ -0,0 +1,48 @@
+package cert
+
+import (
+	"crypto/x509"
+	stdlibb64 "encoding/base64"
+	"fmt"
+	"io"
+
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+)
+
+// Create is a wrapper around x509.CreateCertificate, but it additionally
+// encodes the resulting DER in base64 so that it can easily be added to `x5c` fields.
+func Create(rand io.Reader, template, parent *x509.Certificate, pub, priv interface{}) ([]byte, error) {
+	der, err := x509.CreateCertificate(rand, template, parent, pub, priv)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to create x509 certificate: %w`, err)
+	}
+	return EncodeBase64(der)
+}
+
+// EncodeBase64 is a utility function to encode ASN.1 DER certificates
+// using base64 encoding. This operation is normally done by `pem.Encode`
+// but since PEM would include the markers (`-----BEGIN`, and the like)
+// while `x5c` fields do not need this, this function can be used to
+// shave off a few lines
+func EncodeBase64(der []byte) ([]byte, error) {
+	enc := stdlibb64.StdEncoding
+	dst := make([]byte, enc.EncodedLen(len(der)))
+	enc.Encode(dst, der)
+	return dst, nil
+}
+
+// Parse is a utility function to decode a base64 encoded
+// ASN.1 DER format certificate, and to parse the byte sequence.
+// The certificate must be in PKIX format, and it must not contain PEM markers
+func Parse(src []byte) (*x509.Certificate, error) {
+	dst, err := base64.Decode(src)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to base64 decode the certificate: %w`, err)
+	}
+
+	cert, err := x509.ParseCertificate(dst)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to parse x509 certificate: %w`, err)
+	}
+	return cert, nil
+}
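+
+// A decoding sketch (illustrative; `der` is assumed to hold a single base64
+// encoded ASN.1 DER certificate, e.g. one entry of an `x5c` field):
+//
+//	c, err := cert.Parse(der)
+//	if err != nil {
+//		return fmt.Errorf(`failed to parse certificate: %w`, err)
+//	}
+//	fmt.Println(c.Subject)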
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/cert/chain.go b/vendor/github.com/lestrrat-go/jwx/v2/cert/chain.go
new file mode 100644
index 0000000000..0c4746fb20
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/cert/chain.go
@@ -0,0 +1,78 @@
+package cert
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+// Chain represents a certificate chain as used in the `x5c` field of
+// various objects within JOSE.
+//
+// It stores the certificates as a list of base64 encoded []byte
+// sequences. By definition these values must be PKIX encoded.
+type Chain struct {
+	certificates [][]byte
+}
+
+func (cc Chain) MarshalJSON() ([]byte, error) {
+	var buf bytes.Buffer
+	buf.WriteByte('[')
+	for i, cert := range cc.certificates {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		buf.WriteByte('"')
+		buf.Write(cert)
+		buf.WriteByte('"')
+	}
+	buf.WriteByte(']')
+	return buf.Bytes(), nil
+}
+
+func (cc *Chain) UnmarshalJSON(data []byte) error {
+	var tmp []string
+	if err := json.Unmarshal(data, &tmp); err != nil {
+		return fmt.Errorf(`failed to unmarshal certificate chain: %w`, err)
+	}
+
+	certs := make([][]byte, len(tmp))
+	for i, cert := range tmp {
+		certs[i] = []byte(cert)
+	}
+	cc.certificates = certs
+	return nil
+}
+
+// Get returns the n-th ASN.1 DER + base64 encoded certificate
+// stored. `false` will be returned in the second argument if
+// the corresponding index is out of range.
+func (cc *Chain) Get(index int) ([]byte, bool) {
+	if index < 0 || index >= len(cc.certificates) {
+		return nil, false
+	}
+
+	return cc.certificates[index], true
+}
+
+// Len returns the number of certificates stored in this Chain
+func (cc *Chain) Len() int {
+	return len(cc.certificates)
+}
+
+var pemStart = []byte("----- BEGIN CERTIFICATE -----")
+var pemEnd = []byte("----- END CERTIFICATE -----")
+
+func (cc *Chain) AddString(der string) error {
+	return cc.Add([]byte(der))
+}
+
+func (cc *Chain) Add(der []byte) error {
+	// We're going to be nice and remove marker lines if they
+	// give it to us
+	der = bytes.TrimPrefix(der, pemStart)
+	der = bytes.TrimSuffix(der, pemEnd)
+	der = bytes.TrimSpace(der)
+	cc.certificates = append(cc.certificates, der)
+	return nil
+}
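+
+// A construction sketch (illustrative; `b64der` is assumed to hold a base64
+// encoded DER certificate as a []byte):
+//
+//	var chain cert.Chain
+//	_ = chain.Add(b64der)
+//	first, ok := chain.Get(0)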
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/base64/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v2/internal/base64/BUILD.bazel
new file mode 100644
index 0000000000..688265f6b1
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/base64/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "base64",
+    srcs = ["base64.go"],
+    importpath = "github.com/lestrrat-go/jwx/v2/internal/base64",
+    visibility = ["//:__subpackages__"],
+)
+
+go_test(
+    name = "base64_test",
+    srcs = ["base64_test.go"],
+    embed = [":base64"],
+    deps = ["@com_github_stretchr_testify//assert"],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":base64",
+    visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/base64/asmbase64.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/base64/asmbase64.go
new file mode 100644
index 0000000000..b151b229ff
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/base64/asmbase64.go
@@ -0,0 +1,39 @@
+//go:build jwx_asmbase64
+
+package base64
+
+import (
+	"fmt"
+
+	asmbase64 "github.com/segmentio/asm/base64"
+)
+
+func init() {
+	SetEncoder(asmbase64.RawURLEncoding)
+	SetDecoder(asmDecoder{})
+}
+
+type asmDecoder struct{}
+
+func (d asmDecoder) Decode(src []byte) ([]byte, error) {
+	var enc *asmbase64.Encoding
+	switch Guess(src) {
+	case Std:
+		enc = asmbase64.StdEncoding
+	case RawStd:
+		enc = asmbase64.RawStdEncoding
+	case URL:
+		enc = asmbase64.URLEncoding
+	case RawURL:
+		enc = asmbase64.RawURLEncoding
+	default:
+		return nil, fmt.Errorf(`invalid encoding`)
+	}
+
+	dst := make([]byte, enc.DecodedLen(len(src)))
+	n, err := enc.Decode(dst, src)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to decode source: %w`, err)
+	}
+	return dst[:n], nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/base64/base64.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/base64/base64.go
new file mode 100644
index 0000000000..b227bc91de
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/base64/base64.go
@@ -0,0 +1,134 @@
+package base64
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/binary"
+	"fmt"
+	"sync"
+)
+
+type Decoder interface {
+	Decode([]byte) ([]byte, error)
+}
+
+type Encoder interface {
+	Encode([]byte, []byte)
+	EncodedLen(int) int
+	EncodeToString([]byte) string
+}
+
+var muEncoder sync.RWMutex
+var encoder Encoder = base64.RawURLEncoding
+var muDecoder sync.RWMutex
+var decoder Decoder = defaultDecoder{}
+
+func SetEncoder(enc Encoder) {
+	muEncoder.Lock()
+	defer muEncoder.Unlock()
+	encoder = enc
+}
+
+func getEncoder() Encoder {
+	muEncoder.RLock()
+	defer muEncoder.RUnlock()
+	return encoder
+}
+
+func SetDecoder(dec Decoder) {
+	muDecoder.Lock()
+	defer muDecoder.Unlock()
+	decoder = dec
+}
+
+func getDecoder() Decoder {
+	muDecoder.RLock()
+	defer muDecoder.RUnlock()
+	return decoder
+}
+
+func Encode(src []byte) []byte {
+	encoder := getEncoder()
+	dst := make([]byte, encoder.EncodedLen(len(src)))
+	encoder.Encode(dst, src)
+	return dst
+}
+
+func EncodeToString(src []byte) string {
+	return getEncoder().EncodeToString(src)
+}
+
+func EncodeUint64ToString(v uint64) string {
+	data := make([]byte, 8)
+	binary.BigEndian.PutUint64(data, v)
+
+	i := 0
+	for ; i < len(data); i++ {
+		if data[i] != 0x0 {
+			break
+		}
+	}
+
+	return EncodeToString(data[i:])
+}
+
+const (
+	InvalidEncoding = iota
+	Std
+	URL
+	RawStd
+	RawURL
+)
+
+func Guess(src []byte) int {
+	var isRaw = !bytes.HasSuffix(src, []byte{'='})
+	var isURL = !bytes.ContainsAny(src, "+/")
+	switch {
+	case isRaw && isURL:
+		return RawURL
+	case isURL:
+		return URL
+	case isRaw:
+		return RawStd
+	default:
+		return Std
+	}
+}
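+
+// As an illustration of the heuristic above: an input with neither padding
+// nor "+"/"/" characters (e.g. "Zm9v") is reported as RawURL, while
+// "Zm9vYg==" (padded, but still free of "+" and "/") is reported as URL.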
+
+// defaultDecoder is a Decoder that detects the encoding of the source and
+// decodes it accordingly. This shouldn't really be required per the spec, but
+// it exists because we have seen, in the wild, JWTs that are encoded using
+// various versions of the base64 encoding.
+type defaultDecoder struct{}
+
+func (defaultDecoder) Decode(src []byte) ([]byte, error) {
+	var enc *base64.Encoding
+
+	switch Guess(src) {
+	case RawURL:
+		enc = base64.RawURLEncoding
+	case URL:
+		enc = base64.URLEncoding
+	case RawStd:
+		enc = base64.RawStdEncoding
+	case Std:
+		enc = base64.StdEncoding
+	default:
+		return nil, fmt.Errorf(`invalid encoding`)
+	}
+
+	dst := make([]byte, enc.DecodedLen(len(src)))
+	n, err := enc.Decode(dst, src)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to decode source: %w`, err)
+	}
+	return dst[:n], nil
+}
+
+func Decode(src []byte) ([]byte, error) {
+	return getDecoder().Decode(src)
+}
+
+func DecodeString(src string) ([]byte, error) {
+	return getDecoder().Decode([]byte(src))
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/ecutil/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v2/internal/ecutil/BUILD.bazel
new file mode 100644
index 0000000000..3d5987ded7
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/ecutil/BUILD.bazel
@@ -0,0 +1,15 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "ecutil",
+    srcs = ["ecutil.go"],
+    importpath = "github.com/lestrrat-go/jwx/v2/internal/ecutil",
+    visibility = ["//:__subpackages__"],
+    deps = ["//jwa"],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":ecutil",
+    visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/ecutil/ecutil.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/ecutil/ecutil.go
new file mode 100644
index 0000000000..e70f81659d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/ecutil/ecutil.go
@@ -0,0 +1,110 @@
+// Package ecutil defines tools that help with elliptic curve related
+// computation
+package ecutil
+
+import (
+	"crypto/elliptic"
+	"math/big"
+	"sync"
+
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+// data for available curves. Some algorithms may be compiled in/out
+var curveToAlg = map[elliptic.Curve]jwa.EllipticCurveAlgorithm{}
+var algToCurve = map[jwa.EllipticCurveAlgorithm]elliptic.Curve{}
+var availableAlgs []jwa.EllipticCurveAlgorithm
+var availableCrvs []elliptic.Curve
+
+func RegisterCurve(crv elliptic.Curve, alg jwa.EllipticCurveAlgorithm) {
+	curveToAlg[crv] = alg
+	algToCurve[alg] = crv
+	availableAlgs = append(availableAlgs, alg)
+	availableCrvs = append(availableCrvs, crv)
+}
+
+func IsAvailable(alg jwa.EllipticCurveAlgorithm) bool {
+	_, ok := algToCurve[alg]
+	return ok
+}
+
+func AvailableAlgorithms() []jwa.EllipticCurveAlgorithm {
+	return availableAlgs
+}
+
+func AvailableCurves() []elliptic.Curve {
+	return availableCrvs
+}
+
+func AlgorithmForCurve(crv elliptic.Curve) (jwa.EllipticCurveAlgorithm, bool) {
+	v, ok := curveToAlg[crv]
+	return v, ok
+}
+
+func CurveForAlgorithm(alg jwa.EllipticCurveAlgorithm) (elliptic.Curve, bool) {
+	v, ok := algToCurve[alg]
+	return v, ok
+}
+
+const (
+	// size of buffer that needs to be allocated for EC521 curve
+	ec521BufferSize = 66 // (521 / 8) + 1
+)
+
+var ecpointBufferPool = sync.Pool{
+	New: func() interface{} {
+		// In most cases the curve bit size will be less than this length
+		// so allocate the maximum, and keep reusing
+		buf := make([]byte, 0, ec521BufferSize)
+		return &buf
+	},
+}
+
+func getCrvFixedBuffer(size int) []byte {
+	//nolint:forcetypeassert
+	buf := *(ecpointBufferPool.Get().(*[]byte))
+	if size > ec521BufferSize && cap(buf) < size {
+		buf = append(buf, make([]byte, size-cap(buf))...)
+	}
+	return buf[:size]
+}
+
+// ReleaseECPointBuffer releases the []byte buffer allocated.
+func ReleaseECPointBuffer(buf []byte) {
+	buf = buf[:cap(buf)]
+	buf[0] = 0x0
+	for i := 1; i < len(buf); i *= 2 {
+		copy(buf[i:], buf[:i])
+	}
+	buf = buf[:0]
+	ecpointBufferPool.Put(&buf)
+}
+
+// AllocECPointBuffer allocates a buffer for the given point in the given
+// curve. This buffer should be released using the ReleaseECPointBuffer
+// function.
+func AllocECPointBuffer(v *big.Int, crv elliptic.Curve) []byte {
+	// We need to create a buffer that fits the entire curve.
+	// If the curve size is 66, that fits in 9 bytes. If the curve
+	// size is 64, it fits in 8 bytes.
+	bits := crv.Params().BitSize
+
+	// For the most common cases we know beforehand what the byte length
+	// is going to be, so optimize for those.
+	var inBytes int
+	switch bits {
+	case 224, 256, 384: // TODO: use constant?
+		inBytes = bits / 8
+	case 521:
+		inBytes = ec521BufferSize
+	default:
+		inBytes = bits / 8
+		if (bits % 8) != 0 {
+			inBytes++
+		}
+	}
+
+	buf := getCrvFixedBuffer(inBytes)
+	v.FillBytes(buf)
+	return buf
+}
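AllocECPointBuffer exists because JWK encodes EC coordinates as fixed-width, left-padded byte strings, while big.Int.Bytes() drops leading zero bytes. A minimal standalone sketch of that padding concern, assuming a throwaway P-256 key rather than the pooled buffers used above:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	// Throwaway P-256 key; its coordinates occasionally start with zero bytes,
	// which a naive big.Int.Bytes() call would silently drop.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Same size calculation as AllocECPointBuffer above.
	bits := key.Curve.Params().BitSize
	size := bits / 8
	if bits%8 != 0 {
		size++ // e.g. P-521 needs 66 bytes, not 65
	}

	// FillBytes left-pads into a fixed-width buffer, so the encoding is
	// always exactly `size` bytes regardless of leading zeros.
	buf := make([]byte, size)
	key.X.FillBytes(buf)
	fmt.Printf("X coordinate is always %d bytes: %x\n", len(buf), buf)
}
```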
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/iter/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v2/internal/iter/BUILD.bazel
new file mode 100644
index 0000000000..5d7b3d95d8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/iter/BUILD.bazel
@@ -0,0 +1,15 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "iter",
+    srcs = ["mapiter.go"],
+    importpath = "github.com/lestrrat-go/jwx/v2/internal/iter",
+    visibility = ["//:__subpackages__"],
+    deps = ["@com_github_lestrrat_go_iter//mapiter:go_default_library"],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":iter",
+    visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/iter/mapiter.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/iter/mapiter.go
new file mode 100644
index 0000000000..c98fd46c3e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/iter/mapiter.go
@@ -0,0 +1,36 @@
+package iter
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/lestrrat-go/iter/mapiter"
+)
+
+// MapVisitor is a specialized visitor for our purposes.
+// Whereas mapiter.Visitor supports any type of key, this
+// visitor assumes the key is a string
+type MapVisitor interface {
+	Visit(string, interface{}) error
+}
+
+type MapVisitorFunc func(string, interface{}) error
+
+func (fn MapVisitorFunc) Visit(s string, v interface{}) error {
+	return fn(s, v)
+}
+
+func WalkMap(ctx context.Context, src mapiter.Source, visitor MapVisitor) error {
+	return mapiter.Walk(ctx, src, mapiter.VisitorFunc(func(k, v interface{}) error {
+		//nolint:forcetypeassert
+		return visitor.Visit(k.(string), v)
+	}))
+}
+
+func AsMap(ctx context.Context, src mapiter.Source) (map[string]interface{}, error) {
+	var m map[string]interface{}
+	if err := mapiter.AsMap(ctx, src, &m); err != nil {
+		return nil, fmt.Errorf(`mapiter.AsMap failed: %w`, err)
+	}
+	return m, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/json/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/BUILD.bazel
new file mode 100644
index 0000000000..f3dba97108
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/BUILD.bazel
@@ -0,0 +1,19 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "json",
+    srcs = [
+        "json.go",
+        "registry.go",
+        "stdlib.go",
+    ],
+    importpath = "github.com/lestrrat-go/jwx/v2/internal/json",
+    visibility = ["//:__subpackages__"],
+    deps = ["//internal/base64"],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":json",
+    visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/json/goccy.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/goccy.go
new file mode 100644
index 0000000000..59682104b5
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/goccy.go
@@ -0,0 +1,51 @@
+//go:build jwx_goccy
+// +build jwx_goccy
+
+package json
+
+import (
+	"io"
+
+	"github.com/goccy/go-json"
+)
+
+type Decoder = json.Decoder
+type Delim = json.Delim
+type Encoder = json.Encoder
+type Marshaler = json.Marshaler
+type Number = json.Number
+type RawMessage = json.RawMessage
+type Unmarshaler = json.Unmarshaler
+
+func Engine() string {
+	return "github.com/goccy/go-json"
+}
+
+// NewDecoder respects the values specified in DecoderSettings,
+// and creates a Decoder that has certain features turned on/off
+func NewDecoder(r io.Reader) *json.Decoder {
+	dec := json.NewDecoder(r)
+
+	muGlobalConfig.RLock()
+	if useNumber {
+		dec.UseNumber()
+	}
+	muGlobalConfig.RUnlock()
+
+	return dec
+}
+
+// NewEncoder is just a proxy for "encoding/json".NewEncoder
+func NewEncoder(w io.Writer) *json.Encoder {
+	return json.NewEncoder(w)
+}
+
+// Marshal is just a proxy for "encoding/json".Marshal
+func Marshal(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// MarshalIndent is just a proxy for "encoding/json".MarshalIndent
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	return json.MarshalIndent(v, prefix, indent)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/json/json.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/json.go
new file mode 100644
index 0000000000..a4f1026a5a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/json.go
@@ -0,0 +1,112 @@
+package json
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"sync"
+
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+)
+
+var muGlobalConfig sync.RWMutex
+var useNumber bool
+
+// DecoderSettings sets the global configuration for JSON decoding.
+func DecoderSettings(inUseNumber bool) {
+	muGlobalConfig.Lock()
+	useNumber = inUseNumber
+	muGlobalConfig.Unlock()
+}
+
+// Unmarshal respects the values specified in DecoderSettings,
+// and uses a Decoder that has certain features turned on/off
+func Unmarshal(b []byte, v interface{}) error {
+	dec := NewDecoder(bytes.NewReader(b))
+	return dec.Decode(v)
+}
+
+func AssignNextBytesToken(dst *[]byte, dec *Decoder) error {
+	var val string
+	if err := dec.Decode(&val); err != nil {
+		return fmt.Errorf(`error reading next value: %w`, err)
+	}
+
+	buf, err := base64.DecodeString(val)
+	if err != nil {
+		return fmt.Errorf(`expected base64 encoded []byte (%T)`, val)
+	}
+	*dst = buf
+	return nil
+}
+
+func ReadNextStringToken(dec *Decoder) (string, error) {
+	var val string
+	if err := dec.Decode(&val); err != nil {
+		return "", fmt.Errorf(`error reading next value: %w`, err)
+	}
+	return val, nil
+}
+
+func AssignNextStringToken(dst **string, dec *Decoder) error {
+	val, err := ReadNextStringToken(dec)
+	if err != nil {
+		return err
+	}
+	*dst = &val
+	return nil
+}
+
+// FlattenAudience is a flag to specify if we should flatten the "aud"
+// entry to a string when there's only one entry.
+// In jwx < 1.1.8 we just dumped everything as an array of strings,
+// but apparently AWS Cognito doesn't handle this well.
+//
+// So now we have the ability to dump "aud" as a string if there's
+// only one entry, but we need to retain the old behavior so that
+// we don't accidentally break somebody else's code. (e.g. messing
+// up how signatures are calculated)
+var FlattenAudience uint32
+
+func EncodeAudience(enc *Encoder, aud []string, flatten bool) error {
+	var val interface{}
+	if len(aud) == 1 && flatten {
+		val = aud[0]
+	} else {
+		val = aud
+	}
+	return enc.Encode(val)
+}
+
+// DecodeCtx is an interface for objects that needs that extra something
+// when decoding JSON into an object.
+type DecodeCtx interface {
+	Registry() *Registry
+}
+
+// DecodeCtxContainer is used to differentiate objects that can carry extra
+// decoding hints and those who can't.
+type DecodeCtxContainer interface {
+	DecodeCtx() DecodeCtx
+	SetDecodeCtx(DecodeCtx)
+}
+
+// decodeCtx is the stock DecodeCtx implementation; it should cover most cases.
+type decodeCtx struct {
+	registry *Registry
+}
+
+func NewDecodeCtx(r *Registry) DecodeCtx {
+	return &decodeCtx{registry: r}
+}
+
+func (dc *decodeCtx) Registry() *Registry {
+	return dc.registry
+}
+
+func Dump(v interface{}) {
+	enc := NewEncoder(os.Stdout)
+	enc.SetIndent("", "  ")
+	//nolint:errchkjson
+	_ = enc.Encode(v)
+}
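DecoderSettings toggles json.Decoder.UseNumber for every decoder this package creates. A small standalone illustration of what that switch changes, using encoding/json directly (the claim name and value are made up):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical claim: a large integer timestamp.
	payload := []byte(`{"exp": 1700000000}`)

	// Default behavior: untyped numbers decode to float64.
	var asFloat map[string]interface{}
	if err := json.Unmarshal(payload, &asFloat); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", asFloat["exp"]) // float64

	// With UseNumber (what DecoderSettings(true) turns on for every decoder
	// created by this package), numbers stay as json.Number.
	dec := json.NewDecoder(bytes.NewReader(payload))
	dec.UseNumber()
	var asNumber map[string]interface{}
	if err := dec.Decode(&asNumber); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", asNumber["exp"]) // json.Number
}
```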
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/json/registry.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/registry.go
new file mode 100644
index 0000000000..4830e86de3
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/registry.go
@@ -0,0 +1,52 @@
+package json
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+)
+
+type Registry struct {
+	mu   *sync.RWMutex
+	data map[string]reflect.Type
+}
+
+func NewRegistry() *Registry {
+	return &Registry{
+		mu:   &sync.RWMutex{},
+		data: make(map[string]reflect.Type),
+	}
+}
+
+func (r *Registry) Register(name string, object interface{}) {
+	if object == nil {
+		r.mu.Lock()
+		defer r.mu.Unlock()
+		delete(r.data, name)
+		return
+	}
+
+	typ := reflect.TypeOf(object)
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.data[name] = typ
+}
+
+func (r *Registry) Decode(dec *Decoder, name string) (interface{}, error) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	if typ, ok := r.data[name]; ok {
+		ptr := reflect.New(typ).Interface()
+		if err := dec.Decode(ptr); err != nil {
+			return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err)
+		}
+		return reflect.ValueOf(ptr).Elem().Interface(), nil
+	}
+
+	var decoded interface{}
+	if err := dec.Decode(&decoded); err != nil {
+		return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err)
+	}
+	return decoded, nil
+}
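Registry maps a field name to a Go type so that extension fields can be decoded into a concrete type rather than a bare interface{}. A rough standalone sketch of the same idea; the "x-ids" field name and IDList type are hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

// IDList is a hypothetical custom type we want the "x-ids" field to decode into.
type IDList []string

func main() {
	registry := map[string]reflect.Type{
		"x-ids": reflect.TypeOf(IDList{}),
	}

	dec := json.NewDecoder(strings.NewReader(`["a","b"]`))

	name := "x-ids"
	var decoded interface{}
	if typ, ok := registry[name]; ok {
		// Allocate a fresh value of the registered type and decode into it,
		// mirroring what Registry.Decode above does.
		ptr := reflect.New(typ).Interface()
		if err := dec.Decode(ptr); err != nil {
			panic(err)
		}
		decoded = reflect.ValueOf(ptr).Elem().Interface()
	}
	fmt.Printf("%T %v\n", decoded, decoded) // main.IDList [a b]
}
```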
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/json/stdlib.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/stdlib.go
new file mode 100644
index 0000000000..62b1a5ff51
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/stdlib.go
@@ -0,0 +1,49 @@
+//go:build !jwx_goccy
+// +build !jwx_goccy
+
+package json
+
+import (
+	"encoding/json"
+	"io"
+)
+
+type Decoder = json.Decoder
+type Delim = json.Delim
+type Encoder = json.Encoder
+type Marshaler = json.Marshaler
+type Number = json.Number
+type RawMessage = json.RawMessage
+type Unmarshaler = json.Unmarshaler
+
+func Engine() string {
+	return "encoding/json"
+}
+
+// NewDecoder respects the values specified in DecoderSettings,
+// and creates a Decoder that has certain features turned on/off
+func NewDecoder(r io.Reader) *json.Decoder {
+	dec := json.NewDecoder(r)
+
+	muGlobalConfig.RLock()
+	if useNumber {
+		dec.UseNumber()
+	}
+	muGlobalConfig.RUnlock()
+
+	return dec
+}
+
+func NewEncoder(w io.Writer) *json.Encoder {
+	return json.NewEncoder(w)
+}
+
+// Marshal is just a proxy for "encoding/json".Marshal
+func Marshal(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// MarshalIndent is just a proxy for "encoding/json".MarshalIndent
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	return json.MarshalIndent(v, prefix, indent)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/keyconv/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v2/internal/keyconv/BUILD.bazel
new file mode 100644
index 0000000000..246dfb8646
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/keyconv/BUILD.bazel
@@ -0,0 +1,31 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "keyconv",
+    srcs = ["keyconv.go"],
+    importpath = "github.com/lestrrat-go/jwx/v2/internal/keyconv",
+    visibility = ["//:__subpackages__"],
+    deps = [
+        "//jwk",
+        "@com_github_lestrrat_go_blackmagic//:go_default_library",
+        "@org_golang_x_crypto//ed25519",
+    ],
+)
+
+go_test(
+    name = "keyconv_test",
+    srcs = ["keyconv_test.go"],
+    deps = [
+        ":keyconv",
+        "//internal/jwxtest",
+        "//jwa",
+        "//jwk",
+        "@com_github_stretchr_testify//assert",
+    ],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":keyconv",
+    visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/keyconv/keyconv.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/keyconv/keyconv.go
new file mode 100644
index 0000000000..807da1dee6
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/keyconv/keyconv.go
@@ -0,0 +1,177 @@
+package keyconv
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"fmt"
+
+	"github.com/lestrrat-go/blackmagic"
+	"github.com/lestrrat-go/jwx/v2/jwk"
+	"golang.org/x/crypto/ed25519"
+)
+
+// RSAPrivateKey assigns src to dst.
+// `dst` should be a pointer to a rsa.PrivateKey.
+// `src` may be rsa.PrivateKey, *rsa.PrivateKey, or a jwk.Key
+func RSAPrivateKey(dst, src interface{}) error {
+	if jwkKey, ok := src.(jwk.Key); ok {
+		var raw rsa.PrivateKey
+		if err := jwkKey.Raw(&raw); err != nil {
+			return fmt.Errorf(`failed to produce rsa.PrivateKey from %T: %w`, src, err)
+		}
+		src = &raw
+	}
+
+	var ptr *rsa.PrivateKey
+	switch src := src.(type) {
+	case rsa.PrivateKey:
+		ptr = &src
+	case *rsa.PrivateKey:
+		ptr = src
+	default:
+		return fmt.Errorf(`expected rsa.PrivateKey or *rsa.PrivateKey, got %T`, src)
+	}
+
+	return blackmagic.AssignIfCompatible(dst, ptr)
+}
+
+// RSAPublicKey assigns src to dst
+// `dst` should be a pointer to a non-zero rsa.PublicKey.
+// `src` may be rsa.PublicKey, *rsa.PublicKey, or a jwk.Key
+func RSAPublicKey(dst, src interface{}) error {
+	if jwkKey, ok := src.(jwk.Key); ok {
+		var raw rsa.PublicKey
+		if err := jwkKey.Raw(&raw); err != nil {
+			return fmt.Errorf(`failed to produce rsa.PublicKey from %T: %w`, src, err)
+		}
+		src = &raw
+	}
+
+	var ptr *rsa.PublicKey
+	switch src := src.(type) {
+	case rsa.PublicKey:
+		ptr = &src
+	case *rsa.PublicKey:
+		ptr = src
+	default:
+		return fmt.Errorf(`expected rsa.PublicKey or *rsa.PublicKey, got %T`, src)
+	}
+
+	return blackmagic.AssignIfCompatible(dst, ptr)
+}
+
+// ECDSAPrivateKey assigns src to dst, converting its type from a
+// non-pointer to a pointer
+func ECDSAPrivateKey(dst, src interface{}) error {
+	if jwkKey, ok := src.(jwk.Key); ok {
+		var raw ecdsa.PrivateKey
+		if err := jwkKey.Raw(&raw); err != nil {
+			return fmt.Errorf(`failed to produce ecdsa.PrivateKey from %T: %w`, src, err)
+		}
+		src = &raw
+	}
+
+	var ptr *ecdsa.PrivateKey
+	switch src := src.(type) {
+	case ecdsa.PrivateKey:
+		ptr = &src
+	case *ecdsa.PrivateKey:
+		ptr = src
+	default:
+		return fmt.Errorf(`expected ecdsa.PrivateKey or *ecdsa.PrivateKey, got %T`, src)
+	}
+	return blackmagic.AssignIfCompatible(dst, ptr)
+}
+
+// ECDSAPublicKey assigns src to dst, converting its type from a
+// non-pointer to a pointer
+func ECDSAPublicKey(dst, src interface{}) error {
+	if jwkKey, ok := src.(jwk.Key); ok {
+		var raw ecdsa.PublicKey
+		if err := jwkKey.Raw(&raw); err != nil {
+			return fmt.Errorf(`failed to produce ecdsa.PublicKey from %T: %w`, src, err)
+		}
+		src = &raw
+	}
+
+	var ptr *ecdsa.PublicKey
+	switch src := src.(type) {
+	case ecdsa.PublicKey:
+		ptr = &src
+	case *ecdsa.PublicKey:
+		ptr = src
+	default:
+		return fmt.Errorf(`expected ecdsa.PublicKey or *ecdsa.PublicKey, got %T`, src)
+	}
+	return blackmagic.AssignIfCompatible(dst, ptr)
+}
+
+func ByteSliceKey(dst, src interface{}) error {
+	if jwkKey, ok := src.(jwk.Key); ok {
+		var raw []byte
+		if err := jwkKey.Raw(&raw); err != nil {
+			return fmt.Errorf(`failed to produce []byte from %T: %w`, src, err)
+		}
+		src = raw
+	}
+
+	if _, ok := src.([]byte); !ok {
+		return fmt.Errorf(`expected []byte, got %T`, src)
+	}
+	return blackmagic.AssignIfCompatible(dst, src)
+}
+
+func Ed25519PrivateKey(dst, src interface{}) error {
+	if jwkKey, ok := src.(jwk.Key); ok {
+		var raw ed25519.PrivateKey
+		if err := jwkKey.Raw(&raw); err != nil {
+			return fmt.Errorf(`failed to produce ed25519.PrivateKey from %T: %w`, src, err)
+		}
+		src = &raw
+	}
+
+	var ptr *ed25519.PrivateKey
+	switch src := src.(type) {
+	case ed25519.PrivateKey:
+		ptr = &src
+	case *ed25519.PrivateKey:
+		ptr = src
+	default:
+		return fmt.Errorf(`expected ed25519.PrivateKey or *ed25519.PrivateKey, got %T`, src)
+	}
+	return blackmagic.AssignIfCompatible(dst, ptr)
+}
+
+func Ed25519PublicKey(dst, src interface{}) error {
+	if jwkKey, ok := src.(jwk.Key); ok {
+		var raw ed25519.PublicKey
+		if err := jwkKey.Raw(&raw); err != nil {
+			return fmt.Errorf(`failed to produce ed25519.PublicKey from %T: %w`, src, err)
+		}
+		src = &raw
+	}
+
+	var ptr *ed25519.PublicKey
+	switch src := src.(type) {
+	case ed25519.PublicKey:
+		ptr = &src
+	case *ed25519.PublicKey:
+		ptr = src
+	case *crypto.PublicKey:
+		tmp, ok := (*src).(ed25519.PublicKey)
+		if !ok {
+			return fmt.Errorf(`failed to retrieve ed25519.PublicKey out of *crypto.PublicKey`)
+		}
+		ptr = &tmp
+	case crypto.PublicKey:
+		tmp, ok := src.(ed25519.PublicKey)
+		if !ok {
+			return fmt.Errorf(`failed to retrieve ed25519.PublicKey out of crypto.PublicKey`)
+		}
+		ptr = &tmp
+	default:
+		return fmt.Errorf(`expected ed25519.PublicKey or *ed25519.PublicKey, got %T`, src)
+	}
+	return blackmagic.AssignIfCompatible(dst, ptr)
+}
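All of these converters follow the same shape: normalize value vs. pointer, then hand the result to blackmagic.AssignIfCompatible, which copies it into whatever typed destination the caller supplied. A simplified standalone sketch of that assignment step; the assign helper here is an illustration, not the real blackmagic API:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"reflect"
)

// assign copies src into the value that dst points to when the types are
// assignable. It is a rough stand-in for blackmagic.AssignIfCompatible.
func assign(dst, src interface{}) error {
	rv := reflect.ValueOf(dst)
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return fmt.Errorf("dst must be a non-nil pointer")
	}
	sv := reflect.ValueOf(src)
	if !sv.Type().AssignableTo(rv.Elem().Type()) {
		return fmt.Errorf("cannot assign %T to %s", src, rv.Elem().Type())
	}
	rv.Elem().Set(sv)
	return nil
}

func main() {
	raw, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Step 1: normalize value vs. pointer, as ECDSAPrivateKey above does.
	var ptr *ecdsa.PrivateKey
	switch k := interface{}(raw).(type) {
	case ecdsa.PrivateKey:
		ptr = &k
	case *ecdsa.PrivateKey:
		ptr = k
	}

	// Step 2: copy into whatever typed destination the caller supplied.
	var dst *ecdsa.PrivateKey
	fmt.Println(assign(&dst, ptr), dst != nil) // <nil> true
}
```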
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/pool/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v2/internal/pool/BUILD.bazel
new file mode 100644
index 0000000000..bab9745cbf
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/pool/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "pool",
+    srcs = ["pool.go"],
+    importpath = "github.com/lestrrat-go/jwx/v2/internal/pool",
+    visibility = ["//:__subpackages__"],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":pool",
+    visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/pool/pool.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/pool/pool.go
new file mode 100644
index 0000000000..fae560b7c5
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/pool/pool.go
@@ -0,0 +1,61 @@
+package pool
+
+import (
+	"bytes"
+	"math/big"
+	"sync"
+)
+
+var bytesBufferPool = sync.Pool{
+	New: allocBytesBuffer,
+}
+
+func allocBytesBuffer() interface{} {
+	return &bytes.Buffer{}
+}
+
+func GetBytesBuffer() *bytes.Buffer {
+	//nolint:forcetypeassert
+	return bytesBufferPool.Get().(*bytes.Buffer)
+}
+
+func ReleaseBytesBuffer(b *bytes.Buffer) {
+	b.Reset()
+	bytesBufferPool.Put(b)
+}
+
+var bigIntPool = sync.Pool{
+	New: allocBigInt,
+}
+
+func allocBigInt() interface{} {
+	return &big.Int{}
+}
+
+func GetBigInt() *big.Int {
+	//nolint:forcetypeassert
+	return bigIntPool.Get().(*big.Int)
+}
+
+func ReleaseBigInt(i *big.Int) {
+	bigIntPool.Put(i.SetInt64(0))
+}
+
+var keyToErrorMapPool = sync.Pool{
+	New: allocKeyToErrorMap,
+}
+
+func allocKeyToErrorMap() interface{} {
+	return make(map[string]error)
+}
+
+func GetKeyToErrorMap() map[string]error {
+	//nolint:forcetypeassert
+	return keyToErrorMapPool.Get().(map[string]error)
+}
+
+func ReleaseKeyToErrorMap(m map[string]error) {
+	for key := range m {
+		delete(m, key)
+	}
+}
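These helpers are thin wrappers around sync.Pool that enforce a reset-before-reuse discipline. A minimal sketch of the lifecycle they encapsulate:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return &bytes.Buffer{} },
}

func main() {
	// Same lifecycle as GetBytesBuffer / ReleaseBytesBuffer above:
	// take a buffer, use it, reset it, and only then put it back.
	buf := bufPool.Get().(*bytes.Buffer)
	buf.WriteString("header.payload.signature")
	fmt.Println(buf.Len())

	buf.Reset() // resetting before Put prevents stale data from leaking to the next user
	bufPool.Put(buf)
}
```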
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v2/jwa/BUILD.bazel
new file mode 100644
index 0000000000..63fffcf90c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/BUILD.bazel
@@ -0,0 +1,39 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "jwa",
+    srcs = [
+        "compression_gen.go",
+        "content_encryption_gen.go",
+        "elliptic_gen.go",
+        "jwa.go",
+        "key_encryption_gen.go",
+        "key_type_gen.go",
+        "signature_gen.go",
+    ],
+    importpath = "github.com/lestrrat-go/jwx/v2/jwa",
+    visibility = ["//visibility:public"],
+)
+
+go_test(
+    name = "jwa_test",
+    srcs = [
+        "compression_gen_test.go",
+        "content_encryption_gen_test.go",
+        "elliptic_gen_test.go",
+        "jwa_test.go",
+        "key_encryption_gen_test.go",
+        "key_type_gen_test.go",
+        "signature_gen_test.go",
+    ],
+    deps = [
+        ":jwa",
+        "@com_github_stretchr_testify//assert",
+    ],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":jwa",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/README.md b/vendor/github.com/lestrrat-go/jwx/v2/jwa/README.md
new file mode 100644
index 0000000000..d62f29276a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/README.md
@@ -0,0 +1,3 @@
+# JWA [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v2/jwa.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v2/jwa)
+
+Package [github.com/lestrrat-go/jwx/v2/jwa](./jwa) defines the various algorithms described in [RFC7518](https://tools.ietf.org/html/rfc7518).
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/compression_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/compression_gen.go
new file mode 100644
index 0000000000..9fb65220dd
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/compression_gen.go
@@ -0,0 +1,101 @@
+// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+)
+
+// CompressionAlgorithm represents the compression algorithms as described in https://tools.ietf.org/html/rfc7518#section-7.3
+type CompressionAlgorithm string
+
+// Supported values for CompressionAlgorithm
+const (
+	Deflate    CompressionAlgorithm = "DEF" // DEFLATE (RFC 1951)
+	NoCompress CompressionAlgorithm = ""    // No compression
+)
+
+var muCompressionAlgorithms sync.RWMutex
+var allCompressionAlgorithms map[CompressionAlgorithm]struct{}
+var listCompressionAlgorithm []CompressionAlgorithm
+
+func init() {
+	muCompressionAlgorithms.Lock()
+	defer muCompressionAlgorithms.Unlock()
+	allCompressionAlgorithms = make(map[CompressionAlgorithm]struct{})
+	allCompressionAlgorithms[Deflate] = struct{}{}
+	allCompressionAlgorithms[NoCompress] = struct{}{}
+	rebuildCompressionAlgorithm()
+}
+
+// RegisterCompressionAlgorithm registers a new CompressionAlgorithm so that the jwx can properly handle the new value.
+// Duplicates will silently be ignored
+func RegisterCompressionAlgorithm(v CompressionAlgorithm) {
+	muCompressionAlgorithms.Lock()
+	defer muCompressionAlgorithms.Unlock()
+	if _, ok := allCompressionAlgorithms[v]; !ok {
+		allCompressionAlgorithms[v] = struct{}{}
+		rebuildCompressionAlgorithm()
+	}
+}
+
+// UnregisterCompressionAlgorithm unregisters a CompressionAlgorithm from its known database.
+// Non-existent entries will silently be ignored
+func UnregisterCompressionAlgorithm(v CompressionAlgorithm) {
+	muCompressionAlgorithms.Lock()
+	defer muCompressionAlgorithms.Unlock()
+	if _, ok := allCompressionAlgorithms[v]; ok {
+		delete(allCompressionAlgorithms, v)
+		rebuildCompressionAlgorithm()
+	}
+}
+
+func rebuildCompressionAlgorithm() {
+	listCompressionAlgorithm = make([]CompressionAlgorithm, 0, len(allCompressionAlgorithms))
+	for v := range allCompressionAlgorithms {
+		listCompressionAlgorithm = append(listCompressionAlgorithm, v)
+	}
+	sort.Slice(listCompressionAlgorithm, func(i, j int) bool {
+		return string(listCompressionAlgorithm[i]) < string(listCompressionAlgorithm[j])
+	})
+}
+
+// CompressionAlgorithms returns a list of all available values for CompressionAlgorithm
+func CompressionAlgorithms() []CompressionAlgorithm {
+	muCompressionAlgorithms.RLock()
+	defer muCompressionAlgorithms.RUnlock()
+	return listCompressionAlgorithm
+}
+
+// Accept is used when conversion from values given by
+// outside sources (such as JSON payloads) is required
+func (v *CompressionAlgorithm) Accept(value interface{}) error {
+	var tmp CompressionAlgorithm
+	if x, ok := value.(CompressionAlgorithm); ok {
+		tmp = x
+	} else {
+		var s string
+		switch x := value.(type) {
+		case fmt.Stringer:
+			s = x.String()
+		case string:
+			s = x
+		default:
+			return fmt.Errorf(`invalid type for jwa.CompressionAlgorithm: %T`, value)
+		}
+		tmp = CompressionAlgorithm(s)
+	}
+	if _, ok := allCompressionAlgorithms[tmp]; !ok {
+		return fmt.Errorf(`invalid jwa.CompressionAlgorithm value`)
+	}
+
+	*v = tmp
+	return nil
+}
+
+// String returns the string representation of a CompressionAlgorithm
+func (v CompressionAlgorithm) String() string {
+	return string(v)
+}
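The generated Accept method is how untyped header values get validated into this enum-like type. A short usage sketch against the jwa package as vendored here (assuming the module is available to the build):

```go
package main

import (
	"fmt"

	"github.com/lestrrat-go/jwx/v2/jwa"
)

func main() {
	// A "zip" value coming out of a parsed JOSE header is just a string;
	// Accept validates it against the registered set before it is used.
	var zip jwa.CompressionAlgorithm
	if err := zip.Accept("DEF"); err != nil {
		fmt.Println("unexpected:", err)
		return
	}
	fmt.Println(zip == jwa.Deflate) // true

	// Unknown values are rejected rather than silently stored.
	fmt.Println(zip.Accept("LZMA") != nil) // true
}
```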
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/content_encryption_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/content_encryption_gen.go
new file mode 100644
index 0000000000..115fa18e0e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/content_encryption_gen.go
@@ -0,0 +1,109 @@
+// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+)
+
+// ContentEncryptionAlgorithm represents the various encryption algorithms as described in https://tools.ietf.org/html/rfc7518#section-5
+type ContentEncryptionAlgorithm string
+
+// Supported values for ContentEncryptionAlgorithm
+const (
+	A128CBC_HS256 ContentEncryptionAlgorithm = "A128CBC-HS256" // AES-CBC + HMAC-SHA256 (128)
+	A128GCM       ContentEncryptionAlgorithm = "A128GCM"       // AES-GCM (128)
+	A192CBC_HS384 ContentEncryptionAlgorithm = "A192CBC-HS384" // AES-CBC + HMAC-SHA384 (192)
+	A192GCM       ContentEncryptionAlgorithm = "A192GCM"       // AES-GCM (192)
+	A256CBC_HS512 ContentEncryptionAlgorithm = "A256CBC-HS512" // AES-CBC + HMAC-SHA512 (256)
+	A256GCM       ContentEncryptionAlgorithm = "A256GCM"       // AES-GCM (256)
+)
+
+var muContentEncryptionAlgorithms sync.RWMutex
+var allContentEncryptionAlgorithms map[ContentEncryptionAlgorithm]struct{}
+var listContentEncryptionAlgorithm []ContentEncryptionAlgorithm
+
+func init() {
+	muContentEncryptionAlgorithms.Lock()
+	defer muContentEncryptionAlgorithms.Unlock()
+	allContentEncryptionAlgorithms = make(map[ContentEncryptionAlgorithm]struct{})
+	allContentEncryptionAlgorithms[A128CBC_HS256] = struct{}{}
+	allContentEncryptionAlgorithms[A128GCM] = struct{}{}
+	allContentEncryptionAlgorithms[A192CBC_HS384] = struct{}{}
+	allContentEncryptionAlgorithms[A192GCM] = struct{}{}
+	allContentEncryptionAlgorithms[A256CBC_HS512] = struct{}{}
+	allContentEncryptionAlgorithms[A256GCM] = struct{}{}
+	rebuildContentEncryptionAlgorithm()
+}
+
+// RegisterContentEncryptionAlgorithm registers a new ContentEncryptionAlgorithm so that the jwx can properly handle the new value.
+// Duplicates will silently be ignored
+func RegisterContentEncryptionAlgorithm(v ContentEncryptionAlgorithm) {
+	muContentEncryptionAlgorithms.Lock()
+	defer muContentEncryptionAlgorithms.Unlock()
+	if _, ok := allContentEncryptionAlgorithms[v]; !ok {
+		allContentEncryptionAlgorithms[v] = struct{}{}
+		rebuildContentEncryptionAlgorithm()
+	}
+}
+
+// UnregisterContentEncryptionAlgorithm unregisters a ContentEncryptionAlgorithm from its known database.
+// Non-existent entries will silently be ignored
+func UnregisterContentEncryptionAlgorithm(v ContentEncryptionAlgorithm) {
+	muContentEncryptionAlgorithms.Lock()
+	defer muContentEncryptionAlgorithms.Unlock()
+	if _, ok := allContentEncryptionAlgorithms[v]; ok {
+		delete(allContentEncryptionAlgorithms, v)
+		rebuildContentEncryptionAlgorithm()
+	}
+}
+
+func rebuildContentEncryptionAlgorithm() {
+	listContentEncryptionAlgorithm = make([]ContentEncryptionAlgorithm, 0, len(allContentEncryptionAlgorithms))
+	for v := range allContentEncryptionAlgorithms {
+		listContentEncryptionAlgorithm = append(listContentEncryptionAlgorithm, v)
+	}
+	sort.Slice(listContentEncryptionAlgorithm, func(i, j int) bool {
+		return string(listContentEncryptionAlgorithm[i]) < string(listContentEncryptionAlgorithm[j])
+	})
+}
+
+// ContentEncryptionAlgorithms returns a list of all available values for ContentEncryptionAlgorithm
+func ContentEncryptionAlgorithms() []ContentEncryptionAlgorithm {
+	muContentEncryptionAlgorithms.RLock()
+	defer muContentEncryptionAlgorithms.RUnlock()
+	return listContentEncryptionAlgorithm
+}
+
+// Accept is used when conversion from values given by
+// outside sources (such as JSON payloads) is required
+func (v *ContentEncryptionAlgorithm) Accept(value interface{}) error {
+	var tmp ContentEncryptionAlgorithm
+	if x, ok := value.(ContentEncryptionAlgorithm); ok {
+		tmp = x
+	} else {
+		var s string
+		switch x := value.(type) {
+		case fmt.Stringer:
+			s = x.String()
+		case string:
+			s = x
+		default:
+			return fmt.Errorf(`invalid type for jwa.ContentEncryptionAlgorithm: %T`, value)
+		}
+		tmp = ContentEncryptionAlgorithm(s)
+	}
+	if _, ok := allContentEncryptionAlgorithms[tmp]; !ok {
+		return fmt.Errorf(`invalid jwa.ContentEncryptionAlgorithm value`)
+	}
+
+	*v = tmp
+	return nil
+}
+
+// String returns the string representation of a ContentEncryptionAlgorithm
+func (v ContentEncryptionAlgorithm) String() string {
+	return string(v)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/elliptic_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/elliptic_gen.go
new file mode 100644
index 0000000000..fbfe466aae
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/elliptic_gen.go
@@ -0,0 +1,112 @@
+// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+)
+
+// EllipticCurveAlgorithm represents the algorithms used for EC keys
+type EllipticCurveAlgorithm string
+
+// Supported values for EllipticCurveAlgorithm
+const (
+	Ed25519              EllipticCurveAlgorithm = "Ed25519"
+	Ed448                EllipticCurveAlgorithm = "Ed448"
+	InvalidEllipticCurve EllipticCurveAlgorithm = "P-invalid"
+	P256                 EllipticCurveAlgorithm = "P-256"
+	P384                 EllipticCurveAlgorithm = "P-384"
+	P521                 EllipticCurveAlgorithm = "P-521"
+	X25519               EllipticCurveAlgorithm = "X25519"
+	X448                 EllipticCurveAlgorithm = "X448"
+)
+
+var muEllipticCurveAlgorithms sync.RWMutex
+var allEllipticCurveAlgorithms map[EllipticCurveAlgorithm]struct{}
+var listEllipticCurveAlgorithm []EllipticCurveAlgorithm
+
+func init() {
+	muEllipticCurveAlgorithms.Lock()
+	defer muEllipticCurveAlgorithms.Unlock()
+	allEllipticCurveAlgorithms = make(map[EllipticCurveAlgorithm]struct{})
+	allEllipticCurveAlgorithms[Ed25519] = struct{}{}
+	allEllipticCurveAlgorithms[Ed448] = struct{}{}
+	allEllipticCurveAlgorithms[P256] = struct{}{}
+	allEllipticCurveAlgorithms[P384] = struct{}{}
+	allEllipticCurveAlgorithms[P521] = struct{}{}
+	allEllipticCurveAlgorithms[X25519] = struct{}{}
+	allEllipticCurveAlgorithms[X448] = struct{}{}
+	rebuildEllipticCurveAlgorithm()
+}
+
+// RegisterEllipticCurveAlgorithm registers a new EllipticCurveAlgorithm so that the jwx can properly handle the new value.
+// Duplicates will silently be ignored
+func RegisterEllipticCurveAlgorithm(v EllipticCurveAlgorithm) {
+	muEllipticCurveAlgorithms.Lock()
+	defer muEllipticCurveAlgorithms.Unlock()
+	if _, ok := allEllipticCurveAlgorithms[v]; !ok {
+		allEllipticCurveAlgorithms[v] = struct{}{}
+		rebuildEllipticCurveAlgorithm()
+	}
+}
+
+// UnregisterEllipticCurveAlgorithm unregisters a EllipticCurveAlgorithm from its known database.
+// Non-existent entries will silently be ignored
+func UnregisterEllipticCurveAlgorithm(v EllipticCurveAlgorithm) {
+	muEllipticCurveAlgorithms.Lock()
+	defer muEllipticCurveAlgorithms.Unlock()
+	if _, ok := allEllipticCurveAlgorithms[v]; ok {
+		delete(allEllipticCurveAlgorithms, v)
+		rebuildEllipticCurveAlgorithm()
+	}
+}
+
+func rebuildEllipticCurveAlgorithm() {
+	listEllipticCurveAlgorithm = make([]EllipticCurveAlgorithm, 0, len(allEllipticCurveAlgorithms))
+	for v := range allEllipticCurveAlgorithms {
+		listEllipticCurveAlgorithm = append(listEllipticCurveAlgorithm, v)
+	}
+	sort.Slice(listEllipticCurveAlgorithm, func(i, j int) bool {
+		return string(listEllipticCurveAlgorithm[i]) < string(listEllipticCurveAlgorithm[j])
+	})
+}
+
+// EllipticCurveAlgorithms returns a list of all available values for EllipticCurveAlgorithm
+func EllipticCurveAlgorithms() []EllipticCurveAlgorithm {
+	muEllipticCurveAlgorithms.RLock()
+	defer muEllipticCurveAlgorithms.RUnlock()
+	return listEllipticCurveAlgorithm
+}
+
+// Accept is used when conversion from values given by
+// outside sources (such as JSON payloads) is required
+func (v *EllipticCurveAlgorithm) Accept(value interface{}) error {
+	var tmp EllipticCurveAlgorithm
+	if x, ok := value.(EllipticCurveAlgorithm); ok {
+		tmp = x
+	} else {
+		var s string
+		switch x := value.(type) {
+		case fmt.Stringer:
+			s = x.String()
+		case string:
+			s = x
+		default:
+			return fmt.Errorf(`invalid type for jwa.EllipticCurveAlgorithm: %T`, value)
+		}
+		tmp = EllipticCurveAlgorithm(s)
+	}
+	if _, ok := allEllipticCurveAlgorithms[tmp]; !ok {
+		return fmt.Errorf(`invalid jwa.EllipticCurveAlgorithm value`)
+	}
+
+	*v = tmp
+	return nil
+}
+
+// String returns the string representation of a EllipticCurveAlgorithm
+func (v EllipticCurveAlgorithm) String() string {
+	return string(v)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/jwa.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/jwa.go
new file mode 100644
index 0000000000..f9ce38e04c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/jwa.go
@@ -0,0 +1,61 @@
+//go:generate ../tools/cmd/genjwa.sh
+
+// Package jwa defines the various algorithms described in https://tools.ietf.org/html/rfc7518
+package jwa
+
+import "fmt"
+
+// KeyAlgorithm is a workaround for jwk.Key being able to contain different
+// types of algorithms in its `alg` field.
+//
+// Previously the storage for the `alg` field was represented as a string,
+// but this caused some users to wonder why the field was not typed appropriately
+// like other fields.
+//
+// Ideally we would like to keep track of Signature Algorithms and
+// Content Encryption Algorithms separately, and force the APIs to
+// type-check at compile time, but this allows users to pass a value from a
+// jwk.Key directly
+type KeyAlgorithm interface {
+	String() string
+}
+
+// InvalidKeyAlgorithm represents an algorithm that the library is not aware of.
+type InvalidKeyAlgorithm string
+
+func (s InvalidKeyAlgorithm) String() string {
+	return string(s)
+}
+
+func (InvalidKeyAlgorithm) Accept(_ interface{}) error {
+	return fmt.Errorf(`jwa.InvalidKeyAlgorithm does not support Accept() method calls`)
+}
+
+// KeyAlgorithmFrom takes either a string, `jwa.SignatureAlgorithm` or `jwa.KeyEncryptionAlgorithm`
+// and returns a `jwa.KeyAlgorithm`.
+//
+// If the value cannot be handled, it returns a `jwa.InvalidKeyAlgorithm`
+// object instead of returning an error. This design choice was made to allow
+// users to directly pass the return value to functions such as `jws.Sign()`
+func KeyAlgorithmFrom(v interface{}) KeyAlgorithm {
+	switch v := v.(type) {
+	case SignatureAlgorithm:
+		return v
+	case KeyEncryptionAlgorithm:
+		return v
+	case string:
+		var salg SignatureAlgorithm
+		if err := salg.Accept(v); err == nil {
+			return salg
+		}
+
+		var kealg KeyEncryptionAlgorithm
+		if err := kealg.Accept(v); err == nil {
+			return kealg
+		}
+
+		return InvalidKeyAlgorithm(v)
+	default:
+		return InvalidKeyAlgorithm(fmt.Sprintf("%s", v))
+	}
+}
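A short sketch of how KeyAlgorithmFrom resolves plain strings, based on the registered algorithm sets in the generated files that follow (again assuming the vendored module is importable):

```go
package main

import (
	"fmt"

	"github.com/lestrrat-go/jwx/v2/jwa"
)

func main() {
	// Known signature algorithm names come back strongly typed.
	fmt.Printf("%T\n", jwa.KeyAlgorithmFrom("RS256")) // jwa.SignatureAlgorithm

	// Key-encryption algorithm names likewise.
	fmt.Printf("%T\n", jwa.KeyAlgorithmFrom("RSA-OAEP")) // jwa.KeyEncryptionAlgorithm

	// Anything else becomes an InvalidKeyAlgorithm instead of an error,
	// so the value can still be passed around and reported later.
	fmt.Printf("%T\n", jwa.KeyAlgorithmFrom("bogus")) // jwa.InvalidKeyAlgorithm
}
```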
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/key_encryption_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/key_encryption_gen.go
new file mode 100644
index 0000000000..49ed1f6788
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/key_encryption_gen.go
@@ -0,0 +1,140 @@
+// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+)
+
+// KeyEncryptionAlgorithm represents the various encryption algorithms as described in https://tools.ietf.org/html/rfc7518#section-4.1
+type KeyEncryptionAlgorithm string
+
+// Supported values for KeyEncryptionAlgorithm
+const (
+	A128GCMKW          KeyEncryptionAlgorithm = "A128GCMKW"          // AES-GCM key wrap (128)
+	A128KW             KeyEncryptionAlgorithm = "A128KW"             // AES key wrap (128)
+	A192GCMKW          KeyEncryptionAlgorithm = "A192GCMKW"          // AES-GCM key wrap (192)
+	A192KW             KeyEncryptionAlgorithm = "A192KW"             // AES key wrap (192)
+	A256GCMKW          KeyEncryptionAlgorithm = "A256GCMKW"          // AES-GCM key wrap (256)
+	A256KW             KeyEncryptionAlgorithm = "A256KW"             // AES key wrap (256)
+	DIRECT             KeyEncryptionAlgorithm = "dir"                // Direct encryption
+	ECDH_ES            KeyEncryptionAlgorithm = "ECDH-ES"            // ECDH-ES
+	ECDH_ES_A128KW     KeyEncryptionAlgorithm = "ECDH-ES+A128KW"     // ECDH-ES + AES key wrap (128)
+	ECDH_ES_A192KW     KeyEncryptionAlgorithm = "ECDH-ES+A192KW"     // ECDH-ES + AES key wrap (192)
+	ECDH_ES_A256KW     KeyEncryptionAlgorithm = "ECDH-ES+A256KW"     // ECDH-ES + AES key wrap (256)
+	PBES2_HS256_A128KW KeyEncryptionAlgorithm = "PBES2-HS256+A128KW" // PBES2 + HMAC-SHA256 + AES key wrap (128)
+	PBES2_HS384_A192KW KeyEncryptionAlgorithm = "PBES2-HS384+A192KW" // PBES2 + HMAC-SHA384 + AES key wrap (192)
+	PBES2_HS512_A256KW KeyEncryptionAlgorithm = "PBES2-HS512+A256KW" // PBES2 + HMAC-SHA512 + AES key wrap (256)
+	RSA1_5             KeyEncryptionAlgorithm = "RSA1_5"             // RSA-PKCS1v1.5
+	RSA_OAEP           KeyEncryptionAlgorithm = "RSA-OAEP"           // RSA-OAEP-SHA1
+	RSA_OAEP_256       KeyEncryptionAlgorithm = "RSA-OAEP-256"       // RSA-OAEP-SHA256
+)
+
+var muKeyEncryptionAlgorithms sync.RWMutex
+var allKeyEncryptionAlgorithms map[KeyEncryptionAlgorithm]struct{}
+var listKeyEncryptionAlgorithm []KeyEncryptionAlgorithm
+
+func init() {
+	muKeyEncryptionAlgorithms.Lock()
+	defer muKeyEncryptionAlgorithms.Unlock()
+	allKeyEncryptionAlgorithms = make(map[KeyEncryptionAlgorithm]struct{})
+	allKeyEncryptionAlgorithms[A128GCMKW] = struct{}{}
+	allKeyEncryptionAlgorithms[A128KW] = struct{}{}
+	allKeyEncryptionAlgorithms[A192GCMKW] = struct{}{}
+	allKeyEncryptionAlgorithms[A192KW] = struct{}{}
+	allKeyEncryptionAlgorithms[A256GCMKW] = struct{}{}
+	allKeyEncryptionAlgorithms[A256KW] = struct{}{}
+	allKeyEncryptionAlgorithms[DIRECT] = struct{}{}
+	allKeyEncryptionAlgorithms[ECDH_ES] = struct{}{}
+	allKeyEncryptionAlgorithms[ECDH_ES_A128KW] = struct{}{}
+	allKeyEncryptionAlgorithms[ECDH_ES_A192KW] = struct{}{}
+	allKeyEncryptionAlgorithms[ECDH_ES_A256KW] = struct{}{}
+	allKeyEncryptionAlgorithms[PBES2_HS256_A128KW] = struct{}{}
+	allKeyEncryptionAlgorithms[PBES2_HS384_A192KW] = struct{}{}
+	allKeyEncryptionAlgorithms[PBES2_HS512_A256KW] = struct{}{}
+	allKeyEncryptionAlgorithms[RSA1_5] = struct{}{}
+	allKeyEncryptionAlgorithms[RSA_OAEP] = struct{}{}
+	allKeyEncryptionAlgorithms[RSA_OAEP_256] = struct{}{}
+	rebuildKeyEncryptionAlgorithm()
+}
+
+// RegisterKeyEncryptionAlgorithm registers a new KeyEncryptionAlgorithm so that the jwx can properly handle the new value.
+// Duplicates will silently be ignored
+func RegisterKeyEncryptionAlgorithm(v KeyEncryptionAlgorithm) {
+	muKeyEncryptionAlgorithms.Lock()
+	defer muKeyEncryptionAlgorithms.Unlock()
+	if _, ok := allKeyEncryptionAlgorithms[v]; !ok {
+		allKeyEncryptionAlgorithms[v] = struct{}{}
+		rebuildKeyEncryptionAlgorithm()
+	}
+}
+
+// UnregisterKeyEncryptionAlgorithm unregisters a KeyEncryptionAlgorithm from its known database.
+// Non-existent entries will silently be ignored
+func UnregisterKeyEncryptionAlgorithm(v KeyEncryptionAlgorithm) {
+	muKeyEncryptionAlgorithms.Lock()
+	defer muKeyEncryptionAlgorithms.Unlock()
+	if _, ok := allKeyEncryptionAlgorithms[v]; ok {
+		delete(allKeyEncryptionAlgorithms, v)
+		rebuildKeyEncryptionAlgorithm()
+	}
+}
+
+func rebuildKeyEncryptionAlgorithm() {
+	listKeyEncryptionAlgorithm = make([]KeyEncryptionAlgorithm, 0, len(allKeyEncryptionAlgorithms))
+	for v := range allKeyEncryptionAlgorithms {
+		listKeyEncryptionAlgorithm = append(listKeyEncryptionAlgorithm, v)
+	}
+	sort.Slice(listKeyEncryptionAlgorithm, func(i, j int) bool {
+		return string(listKeyEncryptionAlgorithm[i]) < string(listKeyEncryptionAlgorithm[j])
+	})
+}
+
+// KeyEncryptionAlgorithms returns a list of all available values for KeyEncryptionAlgorithm
+func KeyEncryptionAlgorithms() []KeyEncryptionAlgorithm {
+	muKeyEncryptionAlgorithms.RLock()
+	defer muKeyEncryptionAlgorithms.RUnlock()
+	return listKeyEncryptionAlgorithm
+}
+
+// Accept is used when conversion from values given by
+// outside sources (such as JSON payloads) is required
+func (v *KeyEncryptionAlgorithm) Accept(value interface{}) error {
+	var tmp KeyEncryptionAlgorithm
+	if x, ok := value.(KeyEncryptionAlgorithm); ok {
+		tmp = x
+	} else {
+		var s string
+		switch x := value.(type) {
+		case fmt.Stringer:
+			s = x.String()
+		case string:
+			s = x
+		default:
+			return fmt.Errorf(`invalid type for jwa.KeyEncryptionAlgorithm: %T`, value)
+		}
+		tmp = KeyEncryptionAlgorithm(s)
+	}
+	if _, ok := allKeyEncryptionAlgorithms[tmp]; !ok {
+		return fmt.Errorf(`invalid jwa.KeyEncryptionAlgorithm value`)
+	}
+
+	*v = tmp
+	return nil
+}
+
+// String returns the string representation of a KeyEncryptionAlgorithm
+func (v KeyEncryptionAlgorithm) String() string {
+	return string(v)
+}
+
+// IsSymmetric returns true if the algorithm is a symmetric type
+func (v KeyEncryptionAlgorithm) IsSymmetric() bool {
+	switch v {
+	case A128GCMKW, A128KW, A192GCMKW, A192KW, A256GCMKW, A256KW, DIRECT, PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/key_type_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/key_type_gen.go
new file mode 100644
index 0000000000..e1f9e38965
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/key_type_gen.go
@@ -0,0 +1,106 @@
+// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+)
+
+// KeyType represents the key type ("kty") that are supported
+type KeyType string
+
+// Supported values for KeyType
+const (
+	EC             KeyType = "EC"  // Elliptic Curve
+	InvalidKeyType KeyType = ""    // Invalid KeyType
+	OKP            KeyType = "OKP" // Octet string key pairs
+	OctetSeq       KeyType = "oct" // Octet sequence (used to represent symmetric keys)
+	RSA            KeyType = "RSA" // RSA
+)
+
+var muKeyTypes sync.RWMutex
+var allKeyTypes map[KeyType]struct{}
+var listKeyType []KeyType
+
+func init() {
+	muKeyTypes.Lock()
+	defer muKeyTypes.Unlock()
+	allKeyTypes = make(map[KeyType]struct{})
+	allKeyTypes[EC] = struct{}{}
+	allKeyTypes[OKP] = struct{}{}
+	allKeyTypes[OctetSeq] = struct{}{}
+	allKeyTypes[RSA] = struct{}{}
+	rebuildKeyType()
+}
+
+// RegisterKeyType registers a new KeyType so that the jwx can properly handle the new value.
+// Duplicates will silently be ignored
+func RegisterKeyType(v KeyType) {
+	muKeyTypes.Lock()
+	defer muKeyTypes.Unlock()
+	if _, ok := allKeyTypes[v]; !ok {
+		allKeyTypes[v] = struct{}{}
+		rebuildKeyType()
+	}
+}
+
+// UnregisterKeyType unregisters a KeyType from its known database.
+// Non-existent entries will silently be ignored
+func UnregisterKeyType(v KeyType) {
+	muKeyTypes.Lock()
+	defer muKeyTypes.Unlock()
+	if _, ok := allKeyTypes[v]; ok {
+		delete(allKeyTypes, v)
+		rebuildKeyType()
+	}
+}
+
+func rebuildKeyType() {
+	listKeyType = make([]KeyType, 0, len(allKeyTypes))
+	for v := range allKeyTypes {
+		listKeyType = append(listKeyType, v)
+	}
+	sort.Slice(listKeyType, func(i, j int) bool {
+		return string(listKeyType[i]) < string(listKeyType[j])
+	})
+}
+
+// KeyTypes returns a list of all available values for KeyType
+func KeyTypes() []KeyType {
+	muKeyTypes.RLock()
+	defer muKeyTypes.RUnlock()
+	return listKeyType
+}
+
+// Accept is used when conversion from values given by
+// outside sources (such as JSON payloads) is required
+func (v *KeyType) Accept(value interface{}) error {
+	var tmp KeyType
+	if x, ok := value.(KeyType); ok {
+		tmp = x
+	} else {
+		var s string
+		switch x := value.(type) {
+		case fmt.Stringer:
+			s = x.String()
+		case string:
+			s = x
+		default:
+			return fmt.Errorf(`invalid type for jwa.KeyType: %T`, value)
+		}
+		tmp = KeyType(s)
+	}
+	if _, ok := allKeyTypes[tmp]; !ok {
+		return fmt.Errorf(`invalid jwa.KeyType value`)
+	}
+
+	*v = tmp
+	return nil
+}
+
+// String returns the string representation of a KeyType
+func (v KeyType) String() string {
+	return string(v)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/secp2561k.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/secp2561k.go
new file mode 100644
index 0000000000..a6da0dde91
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/secp2561k.go
@@ -0,0 +1,11 @@
+//go:build jwx_es256k
+// +build jwx_es256k
+
+package jwa
+
+// This constant is only available if compiled with the jwx_es256k build tag.
+const Secp256k1 EllipticCurveAlgorithm = "secp256k1"
+
+func init() {
+	allEllipticCurveAlgorithms[Secp256k1] = struct{}{}
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/signature_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/signature_gen.go
new file mode 100644
index 0000000000..eaa2f8662b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/signature_gen.go
@@ -0,0 +1,127 @@
+// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT.
+
+package jwa
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+)
+
+// SignatureAlgorithm represents the various signature algorithms as described in https://tools.ietf.org/html/rfc7518#section-3.1
+type SignatureAlgorithm string
+
+// Supported values for SignatureAlgorithm
+const (
+	ES256       SignatureAlgorithm = "ES256"  // ECDSA using P-256 and SHA-256
+	ES256K      SignatureAlgorithm = "ES256K" // ECDSA using secp256k1 and SHA-256
+	ES384       SignatureAlgorithm = "ES384"  // ECDSA using P-384 and SHA-384
+	ES512       SignatureAlgorithm = "ES512"  // ECDSA using P-521 and SHA-512
+	EdDSA       SignatureAlgorithm = "EdDSA"  // EdDSA signature algorithms
+	HS256       SignatureAlgorithm = "HS256"  // HMAC using SHA-256
+	HS384       SignatureAlgorithm = "HS384"  // HMAC using SHA-384
+	HS512       SignatureAlgorithm = "HS512"  // HMAC using SHA-512
+	NoSignature SignatureAlgorithm = "none"
+	PS256       SignatureAlgorithm = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
+	PS384       SignatureAlgorithm = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
+	PS512       SignatureAlgorithm = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
+	RS256       SignatureAlgorithm = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
+	RS384       SignatureAlgorithm = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
+	RS512       SignatureAlgorithm = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
+)
+
+var muSignatureAlgorithms sync.RWMutex
+var allSignatureAlgorithms map[SignatureAlgorithm]struct{}
+var listSignatureAlgorithm []SignatureAlgorithm
+
+func init() {
+	muSignatureAlgorithms.Lock()
+	defer muSignatureAlgorithms.Unlock()
+	allSignatureAlgorithms = make(map[SignatureAlgorithm]struct{})
+	allSignatureAlgorithms[ES256] = struct{}{}
+	allSignatureAlgorithms[ES256K] = struct{}{}
+	allSignatureAlgorithms[ES384] = struct{}{}
+	allSignatureAlgorithms[ES512] = struct{}{}
+	allSignatureAlgorithms[EdDSA] = struct{}{}
+	allSignatureAlgorithms[HS256] = struct{}{}
+	allSignatureAlgorithms[HS384] = struct{}{}
+	allSignatureAlgorithms[HS512] = struct{}{}
+	allSignatureAlgorithms[NoSignature] = struct{}{}
+	allSignatureAlgorithms[PS256] = struct{}{}
+	allSignatureAlgorithms[PS384] = struct{}{}
+	allSignatureAlgorithms[PS512] = struct{}{}
+	allSignatureAlgorithms[RS256] = struct{}{}
+	allSignatureAlgorithms[RS384] = struct{}{}
+	allSignatureAlgorithms[RS512] = struct{}{}
+	rebuildSignatureAlgorithm()
+}
+
+// RegisterSignatureAlgorithm registers a new SignatureAlgorithm so that the jwx can properly handle the new value.
+// Duplicates will silently be ignored
+func RegisterSignatureAlgorithm(v SignatureAlgorithm) {
+	muSignatureAlgorithms.Lock()
+	defer muSignatureAlgorithms.Unlock()
+	if _, ok := allSignatureAlgorithms[v]; !ok {
+		allSignatureAlgorithms[v] = struct{}{}
+		rebuildSignatureAlgorithm()
+	}
+}
+
+// UnregisterSignatureAlgorithm unregisters a SignatureAlgorithm from its known database.
+// Non-existent entries will silently be ignored
+func UnregisterSignatureAlgorithm(v SignatureAlgorithm) {
+	muSignatureAlgorithms.Lock()
+	defer muSignatureAlgorithms.Unlock()
+	if _, ok := allSignatureAlgorithms[v]; ok {
+		delete(allSignatureAlgorithms, v)
+		rebuildSignatureAlgorithm()
+	}
+}
+
+func rebuildSignatureAlgorithm() {
+	listSignatureAlgorithm = make([]SignatureAlgorithm, 0, len(allSignatureAlgorithms))
+	for v := range allSignatureAlgorithms {
+		listSignatureAlgorithm = append(listSignatureAlgorithm, v)
+	}
+	sort.Slice(listSignatureAlgorithm, func(i, j int) bool {
+		return string(listSignatureAlgorithm[i]) < string(listSignatureAlgorithm[j])
+	})
+}
+
+// SignatureAlgorithms returns a list of all available values for SignatureAlgorithm
+func SignatureAlgorithms() []SignatureAlgorithm {
+	muSignatureAlgorithms.RLock()
+	defer muSignatureAlgorithms.RUnlock()
+	return listSignatureAlgorithm
+}
+
+// Accept is used when conversion from values given by
+// outside sources (such as JSON payloads) is required
+func (v *SignatureAlgorithm) Accept(value interface{}) error {
+	var tmp SignatureAlgorithm
+	if x, ok := value.(SignatureAlgorithm); ok {
+		tmp = x
+	} else {
+		var s string
+		switch x := value.(type) {
+		case fmt.Stringer:
+			s = x.String()
+		case string:
+			s = x
+		default:
+			return fmt.Errorf(`invalid type for jwa.SignatureAlgorithm: %T`, value)
+		}
+		tmp = SignatureAlgorithm(s)
+	}
+	if _, ok := allSignatureAlgorithms[tmp]; !ok {
+		return fmt.Errorf(`invalid jwa.SignatureAlgorithm value`)
+	}
+
+	*v = tmp
+	return nil
+}
+
+// String returns the string representation of a SignatureAlgorithm
+func (v SignatureAlgorithm) String() string {
+	return string(v)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v2/jwk/BUILD.bazel
new file mode 100644
index 0000000000..a61a919f5a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/BUILD.bazel
@@ -0,0 +1,78 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "jwk",
+    srcs = [
+        "cache.go",
+        "ecdsa.go",
+        "ecdsa_gen.go",
+        "fetch.go",
+        "interface.go",
+        "interface_gen.go",
+        "io.go",
+        "jwk.go",
+        "key_ops.go",
+        "okp.go",
+        "okp_gen.go",
+        "options.go",
+        "options_gen.go",
+        "rsa.go",
+        "rsa_gen.go",
+        "set.go",
+        "symmetric.go",
+        "symmetric_gen.go",
+        "usage.go",
+        "whitelist.go",
+    ],
+    importpath = "github.com/lestrrat-go/jwx/v2/jwk",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//cert",
+        "//internal/base64",
+        "//internal/ecutil",
+        "//internal/iter",
+        "//internal/json",
+        "//internal/pool",
+        "//jwa",
+        "//x25519",
+        "@com_github_lestrrat_go_blackmagic//:go_default_library",
+        "@com_github_lestrrat_go_httprc//:go_default_library",
+        "@com_github_lestrrat_go_iter//arrayiter:go_default_library",
+        "@com_github_lestrrat_go_iter//mapiter:go_default_library",
+        "@com_github_lestrrat_go_option//:option",
+    ],
+)
+
+go_test(
+    name = "jwk_test",
+    srcs = [
+        "headers_test.go",
+        "jwk_internal_test.go",
+        "jwk_test.go",
+        "options_gen_test.go",
+        "refresh_test.go",
+        "set_test.go",
+        "x5c_test.go",
+    ],
+    data = glob(["testdata/**"]),
+    embed = [":jwk"],
+    deps = [
+        "//cert",
+        "//internal/base64",
+        "//internal/ecutil",
+        "//internal/jose",
+        "//internal/json",
+        "//internal/jwxtest",
+        "//jwa",
+        "//jws",
+        "//x25519",
+        "@com_github_stretchr_testify//assert",
+        "@com_github_stretchr_testify//require",
+    ],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":jwk",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/README.md b/vendor/github.com/lestrrat-go/jwx/v2/jwk/README.md
new file mode 100644
index 0000000000..85fb0a4b4a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/README.md
@@ -0,0 +1,217 @@
+# JWK [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v2/jwk.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v2/jwk)
+
+Package jwk implements JWK as described in [RFC7517](https://tools.ietf.org/html/rfc7517).
+If you are looking to use JWTs with JWKs, look no further than [github.com/lestrrat-go/jwx](../jwt).
+
+* Parse and work with RSA/EC/Symmetric/OKP JWK types
+  * Convert to and from JSON
+  * Convert to and from raw key types (e.g. *rsa.PrivateKey)
+* Ability to keep a JWKS fresh using *jwk.Cache
+
+## Supported key types:
+
+| kty | Curve                   | Go Key Type                                   |
+|:----|:------------------------|:----------------------------------------------|
+| RSA | N/A                     | rsa.PrivateKey / rsa.PublicKey (2)            |
+| EC  | P-256<br>P-384<br>P-521<br>secp256k1 (1) | ecdsa.PrivateKey / ecdsa.PublicKey (2)        |
+| oct | N/A                     | []byte                                        |
+| OKP | Ed25519 (1)             | ed25519.PrivateKey / ed25519.PublicKey (2)    |
+|     | X25519 (1)              | (jwx/)x25519.PrivateKey / x25519.PublicKey (2)|
+
+* Note 1: Experimental
+* Note 2: Either value or pointers accepted (e.g. rsa.PrivateKey or *rsa.PrivateKey)
+
+# Documentation
+
+Please read the [API reference](https://pkg.go.dev/github.com/lestrrat-go/jwx/v2/jwk), or
+the how-to style documentation on how to use JWK in the [docs directory](../docs/04-jwk.md).
+
+# Auto-Refresh a key during a long running process
+
+<!-- INCLUDE(examples/jwk_cache_example_test.go) -->
+```go
+package examples_test
+
+import (
+  "context"
+  "fmt"
+  "time"
+
+  "github.com/lestrrat-go/jwx/v2/jwk"
+)
+
+func ExampleJWK_Cache() {
+  ctx, cancel := context.WithCancel(context.Background())
+
+  const googleCerts = `https://www.googleapis.com/oauth2/v3/certs`
+
+  // First, set up the `jwk.Cache` object. You need to pass it a
+  // `context.Context` object to control the lifecycle of the background fetching goroutine.
+  //
+  // Note that by default refreshes only happen every 15 minutes at the
+  // earliest. If you need to control this, use `jwk.WithRefreshWindow()`
+  c := jwk.NewCache(ctx)
+
+  // Tell *jwk.Cache that we only want to refresh this JWKS
+  // when it needs to (based on Cache-Control or Expires header from
+  // the HTTP response). If the calculated minimum refresh interval is less
+  // than 15 minutes, don't go refreshing any earlier than 15 minutes.
+  c.Register(googleCerts, jwk.WithMinRefreshInterval(15*time.Minute))
+
+  // Refresh the JWKS once before getting into the main loop.
+  // This allows you to check that the JWKS is available before starting
+  // a long-running program.
+  _, err := c.Refresh(ctx, googleCerts)
+  if err != nil {
+    fmt.Printf("failed to refresh google JWKS: %s\n", err)
+    return
+  }
+
+  // Pretend that this is your program's main loop
+MAIN:
+  for {
+    select {
+    case <-ctx.Done():
+      break MAIN
+    default:
+    }
+    keyset, err := c.Get(ctx, googleCerts)
+    if err != nil {
+      fmt.Printf("failed to fetch google JWKS: %s\n", err)
+      return
+    }
+    _ = keyset
+    // The returned `keyset` will always be "reasonably" new.
+    //
+    // By "reasonably" we mean that we cannot guarantee that the keys will be refreshed
+    // immediately after they have been rotated in the remote source. But it should be close
+    // enough, and should you need to, you can forcefully refresh the keys using the `(jwk.Cache).Refresh()` method.
+    //
+    // If re-fetching the keyset fails, a cached version from the previous successful
+    // fetch will be returned upon calling `(jwk.Cache).Get()`.
+
+    // Do interesting stuff with the keyset... but here, we just
+    // sleep for a bit
+    time.Sleep(time.Second)
+
+    // Because we're a dummy program, we just cancel the loop now.
+    // If this were a real program, you would presumably loop forever
+    cancel()
+  }
+  // OUTPUT:
+}
+```
+source: [examples/jwk_cache_example_test.go](https://github.com/lestrrat-go/jwx/blob/v2/examples/jwk_cache_example_test.go)
+<!-- END INCLUDE -->
+
+Parse and use a JWK key:
+
+<!-- INCLUDE(examples/jwk_example_test.go) -->
+```go
+package examples_test
+
+import (
+  "context"
+  "fmt"
+  "log"
+
+  "github.com/lestrrat-go/jwx/v2/internal/json"
+  "github.com/lestrrat-go/jwx/v2/jwk"
+)
+
+func ExampleJWK_Usage() {
+  // Use jwk.Cache if you intend to reuse the JWKS over and over
+  set, err := jwk.Fetch(context.Background(), "https://www.googleapis.com/oauth2/v3/certs")
+  if err != nil {
+    log.Printf("failed to parse JWK: %s", err)
+    return
+  }
+
+  // Key sets can be serialized back to JSON
+  {
+    jsonbuf, err := json.Marshal(set)
+    if err != nil {
+      log.Printf("failed to marshal key set into JSON: %s", err)
+      return
+    }
+    log.Printf("%s", jsonbuf)
+  }
+
+  for it := set.Iterate(context.Background()); it.Next(context.Background()); {
+    pair := it.Pair()
+    key := pair.Value.(jwk.Key)
+
+    var rawkey interface{} // This is the raw key, like *rsa.PrivateKey or *ecdsa.PrivateKey
+    if err := key.Raw(&rawkey); err != nil {
+      log.Printf("failed to create public key: %s", err)
+      return
+    }
+    // Use rawkey for jws.Verify() or whatever.
+    _ = rawkey
+
+    // You can create jwk.Key from a raw key, too
+    fromRawKey, err := jwk.FromRaw(rawkey)
+    if err != nil {
+      log.Printf("failed to create jwk.Key from raw key: %s", err)
+      return
+    }
+
+    // Keys can be serialized back to JSON
+    jsonbuf, err := json.Marshal(key)
+    if err != nil {
+      log.Printf("failed to marshal key into JSON: %s", err)
+      return
+    }
+
+    fromJSONKey, err := jwk.Parse(jsonbuf)
+    if err != nil {
+      log.Printf("failed to parse json: %s", err)
+      return
+    }
+    _ = fromJSONKey
+    _ = fromRawKey
+  }
+  // OUTPUT:
+}
+
+//nolint:govet
+func ExampleJWK_MarshalJSON() {
+  // JWKs that inherently involve randomness such as RSA and EC keys are
+  // not used in this example, because they may produce different results
+  // depending on the environment.
+  //
+  // (In fact, even if you use a static source of randomness, tests may fail
+  // because of internal changes in the Go runtime).
+
+  raw := []byte("01234567890123456789012345678901234567890123456789ABCDEF")
+
+  // This would create a symmetric key
+  key, err := jwk.FromRaw(raw)
+  if err != nil {
+    fmt.Printf("failed to create symmetric key: %s\n", err)
+    return
+  }
+  if _, ok := key.(jwk.SymmetricKey); !ok {
+    fmt.Printf("expected jwk.SymmetricKey, got %T\n", key)
+    return
+  }
+
+  key.Set(jwk.KeyIDKey, "mykey")
+
+  buf, err := json.MarshalIndent(key, "", "  ")
+  if err != nil {
+    fmt.Printf("failed to marshal key into JSON: %s\n", err)
+    return
+  }
+  fmt.Printf("%s\n", buf)
+
+  // OUTPUT:
+  // {
+  //   "k": "MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODlBQkNERUY",
+  //   "kid": "mykey",
+  //   "kty": "oct"
+  // }
+}
+```
+source: [examples/jwk_example_test.go](https://github.com/lestrrat-go/jwx/blob/v2/examples/jwk_example_test.go)
+<!-- END INCLUDE -->
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/cache.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/cache.go
new file mode 100644
index 0000000000..5d5b6b90bc
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/cache.go
@@ -0,0 +1,410 @@
+package jwk
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/lestrrat-go/httprc"
+	"github.com/lestrrat-go/iter/arrayiter"
+	"github.com/lestrrat-go/iter/mapiter"
+)
+
+type Transformer = httprc.Transformer
+type HTTPClient = httprc.HTTPClient
+type ErrSink = httprc.ErrSink
+
+// Whitelist describes a set of rules that allows users to access
+// a particular URL. By default all URLs are blocked for security
+// reasons. You will HAVE to provide some sort of whitelist. See
+// the documentation for github.com/lestrrat-go/httprc for more details.
+type Whitelist = httprc.Whitelist
+
+// Cache is a container that keeps track of Set objects by their source URLs.
+// The Set objects are stored in memory, and are refreshed automatically
+// behind the scenes.
+//
+// Before retrieving the Set objects, the user must pre-register the
+// URLs they intend to use by calling `Register()`
+//
+//	c := jwk.NewCache(ctx)
+//	c.Register(url, options...)
+//
+// Once registered, you can call `Get()` to retrieve the Set object.
+//
+// All JWKS objects that are retrieved via this mechanism should be
+// treated as read-only, as they are shared among all consumers, as well
+// as the `jwk.Cache` object.
+//
+// There are cases where `jwk.Cache` and `jwk.CachedSet` should and
+// should not be used.
+//
+// First and foremost, do NOT use a cache for those JWKS objects that
+// need constant checking. For example, unreliable or user-provided JWKS (i.e. those
+// JWKS that are not from a well-known provider) should not be fetched
+// through a `jwk.Cache` or `jwk.CachedSet`.
+//
+// For example, if you have a flaky JWKS server for development
+// that can go down often, you should consider alternatives such as
+// providing `http.Client` with a caching `http.RoundTripper` configured
+// (see `jwk.WithHTTPClient`), setting up a reverse proxy, etc.
+// These techniques allow you to set up a more robust way to both cache
+// and report precise causes of the problems than using `jwk.Cache` or
+// `jwk.CachedSet`. If you handle the caching at the HTTP level like this,
+// you will be able to use a simple `jwk.Fetch` call and not worry about the cache.
+//
+// User-provided JWKS objects may also be problematic, as they may go down
+// unexpectedly (and frequently!), and it will be hard to detect when
+// the URLs or their contents are swapped.
+//
+// A good use-case for `jwk.Cache` and `jwk.CachedSet` is "stable"
+// JWKS objects.
+//
+// When we say "stable", we are thinking of JWKS that should mostly be
+// ALWAYS available. Good examples are the JWKS objects provided by
+// major cloud providers such as Google Cloud, AWS, or Azure.
+// Stable JWKS may still experience intermittent network connectivity problems,
+// but you can expect that they will eventually recover in a relatively
+// short period of time. They rarely change URLs, and the contents are
+// expected to be valid, or otherwise it would cause havoc to those providers.
+//
+// We also know that these stable JWKS objects are rotated periodically,
+// which is a perfect use for `jwk.Cache` and `jwk.CachedSet`. The caches
+// can be configured to periodically refresh the JWKS, thereby keeping them
+// fresh without extra intervention from the developer.
+//
+// Notice that for these recommended use-cases the requirement to check
+// the validity or the availability of the JWKS objects is non-existent,
+// as it is expected that they will be available and will be valid. The
+// caching mechanism can hide intermittent connectivity problems as well
+// as keep the objects mostly fresh.
+type Cache struct {
+	cache *httprc.Cache
+}
+
+// PostFetcher is an interface for objects that want to perform
+// operations on the `Set` that was fetched.
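+//
+// A hedged sketch of a PostFetcher implemented as a function; it assumes you
+// want every fetched key to be stamped with RS256 (the policy is purely
+// illustrative):
+//
+//	pf := jwk.PostFetchFunc(func(u string, set jwk.Set) (jwk.Set, error) {
+//		for i := 0; i < set.Len(); i++ {
+//			if key, ok := set.Key(i); ok {
+//				_ = key.Set(jwk.AlgorithmKey, jwa.RS256)
+//			}
+//		}
+//		return set, nil
+//	})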
+type PostFetcher interface {
+	// PostFetch receives the URL and the JWKS, after a successful
+	// fetch and parse.
+	//
+	// It should return a `Set`, optionally modified, to be stored
+	// in the cache for subsequent use
+	PostFetch(string, Set) (Set, error)
+}
+
+// PostFetchFunc is a PostFetcher based on a function.
+type PostFetchFunc func(string, Set) (Set, error)
+
+func (f PostFetchFunc) PostFetch(u string, set Set) (Set, error) {
+	return f(u, set)
+}
+
+// jwksTransform is an httprc.Transformer that transforms the response into a JWKS.
+type jwksTransform struct {
+	postFetch    PostFetcher
+	parseOptions []ParseOption
+}
+
+// The default transform has no postFetch. It can be shared
+// by multiple fetchers.
+var defaultTransform = &jwksTransform{}
+
+func (t *jwksTransform) Transform(u string, res *http.Response) (interface{}, error) {
+	if res.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf(`failed to process response: non-200 response code %q`, res.Status)
+	}
+	buf, err := io.ReadAll(res.Body)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to read response body: %w`, err)
+	}
+
+	set, err := Parse(buf, t.parseOptions...)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to parse JWK set at %q: %w`, u, err)
+	}
+
+	if pf := t.postFetch; pf != nil {
+		v, err := pf.PostFetch(u, set)
+		if err != nil {
+			return nil, fmt.Errorf(`failed to execute PostFetch: %w`, err)
+		}
+		set = v
+	}
+
+	return set, nil
+}
+
+// NewCache creates a new `jwk.Cache` object.
+//
+// Please refer to the documentation for `httprc.New` for more
+// details.
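+//
+// A minimal usage sketch (the JWKS URL below is hypothetical):
+//
+//	c := jwk.NewCache(ctx)
+//	_ = c.Register(`https://example.com/jwks.json`, jwk.WithMinRefreshInterval(15*time.Minute))
+//	if _, err := c.Refresh(ctx, `https://example.com/jwks.json`); err != nil {
+//		// the JWKS could not be fetched
+//	}
+//	set, _ := c.Get(ctx, `https://example.com/jwks.json`)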
+func NewCache(ctx context.Context, options ...CacheOption) *Cache {
+	var hrcopts []httprc.CacheOption
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identRefreshWindow{}:
+			hrcopts = append(hrcopts, httprc.WithRefreshWindow(option.Value().(time.Duration)))
+		case identErrSink{}:
+			hrcopts = append(hrcopts, httprc.WithErrSink(option.Value().(ErrSink)))
+		}
+	}
+
+	return &Cache{
+		cache: httprc.NewCache(ctx, hrcopts...),
+	}
+}
+
+// Register registers a URL to be managed by the cache. URLs must
+// be registered before issuing `Get`
+//
+// This method is almost identical to `(httprc.Cache).Register`, except
+// it accepts some extra options.
+//
+// Use `jwk.WithParser` to configure how the JWKS should be parsed,
+// such as passing it extra options.
+//
+// Please refer to the documentation for `(httprc.Cache).Register` for more
+// details.
+//
+// Register does not check for the validity of the url being registered.
+// If you need to make sure that a url is valid before entering your main
+// loop, call `Refresh` once to make sure the JWKS is available.
+//
+//	_ = cache.Register(url)
+//	if _, err := cache.Refresh(ctx, url); err != nil {
+//	  // url is not a valid JWKS
+//	  panic(err)
+//	}
+func (c *Cache) Register(u string, options ...RegisterOption) error {
+	var hrropts []httprc.RegisterOption
+	var pf PostFetcher
+	var parseOptions []ParseOption
+
+	// Note: we do NOT accept Transform option
+	for _, option := range options {
+		if parseOpt, ok := option.(ParseOption); ok {
+			parseOptions = append(parseOptions, parseOpt)
+			continue
+		}
+
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identHTTPClient{}:
+			hrropts = append(hrropts, httprc.WithHTTPClient(option.Value().(HTTPClient)))
+		case identRefreshInterval{}:
+			hrropts = append(hrropts, httprc.WithRefreshInterval(option.Value().(time.Duration)))
+		case identMinRefreshInterval{}:
+			hrropts = append(hrropts, httprc.WithMinRefreshInterval(option.Value().(time.Duration)))
+		case identFetchWhitelist{}:
+			hrropts = append(hrropts, httprc.WithWhitelist(option.Value().(httprc.Whitelist)))
+		case identPostFetcher{}:
+			pf = option.Value().(PostFetcher)
+		}
+	}
+
+	var t *jwksTransform
+	if pf == nil && len(parseOptions) == 0 {
+		t = defaultTransform
+	} else {
+		// User-supplied PostFetcher is attached to the transformer
+		t = &jwksTransform{
+			postFetch:    pf,
+			parseOptions: parseOptions,
+		}
+	}
+
+	// Set the transformer at the end so that nobody can override it
+	hrropts = append(hrropts, httprc.WithTransformer(t))
+	return c.cache.Register(u, hrropts...)
+}
+
+// Get returns the stored JWK set (`Set`) from the cache.
+//
+// Please refer to the documentation for `(httprc.Cache).Get` for more
+// details.
+func (c *Cache) Get(ctx context.Context, u string) (Set, error) {
+	v, err := c.cache.Get(ctx, u)
+	if err != nil {
+		return nil, err
+	}
+
+	set, ok := v.(Set)
+	if !ok {
+		return nil, fmt.Errorf(`cached object is not a Set (was %T)`, v)
+	}
+	return set, nil
+}
+
+// Refresh is identical to Get(), except it always fetches the
+// specified resource anew, and updates the cached content
+//
+// Please refer to the documentation for `(httprc.Cache).Refresh` for
+// more details
+func (c *Cache) Refresh(ctx context.Context, u string) (Set, error) {
+	v, err := c.cache.Refresh(ctx, u)
+	if err != nil {
+		return nil, err
+	}
+
+	set, ok := v.(Set)
+	if !ok {
+		return nil, fmt.Errorf(`cached object is not a Set (was %T)`, v)
+	}
+	return set, nil
+}
+
+// IsRegistered returns true if the given URL `u` has already been registered
+// in the cache.
+//
+// Please refer to the documentation for `(httprc.Cache).IsRegistered` for more
+// details.
+func (c *Cache) IsRegistered(u string) bool {
+	return c.cache.IsRegistered(u)
+}
+
+// Unregister removes the given URL `u` from the cache.
+//
+// Please refer to the documentation for `(httprc.Cache).Unregister` for more
+// details.
+func (c *Cache) Unregister(u string) error {
+	return c.cache.Unregister(u)
+}
+
+func (c *Cache) Snapshot() *httprc.Snapshot {
+	return c.cache.Snapshot()
+}
+
+// CachedSet is a thin shim over jwk.Cache that allows the user to cloak
+// jwk.Cache as if it were a `jwk.Set`. Behind the scenes, the `jwk.Set` is
+// retrieved from the `jwk.Cache` for every operation.
+//
+// Since `jwk.CachedSet` always deals with a cached version of the `jwk.Set`,
+// all operations that mutate the object (such as AddKey(), RemoveKey(), et al.)
+// are no-ops and return an error.
+//
+// Note that since this is a utility shim over `jwk.Cache`, you _will_ lose
+// the ability to control the finer details (such as controlling how long to
+// wait for in case of a fetch failure using `context.Context`)
+//
+// Make sure that you read the documentation for `jwk.Cache` as well.
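+//
+// A minimal sketch (the URL and key ID are hypothetical):
+//
+//	c := jwk.NewCache(ctx)
+//	_ = c.Register(`https://example.com/jwks.json`)
+//	cached := jwk.NewCachedSet(c, `https://example.com/jwks.json`)
+//	key, ok := cached.LookupKeyID(`my-key-id`)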
+type CachedSet struct {
+	cache *Cache
+	url   string
+}
+
+var _ Set = &CachedSet{}
+
+func NewCachedSet(cache *Cache, url string) Set {
+	return &CachedSet{
+		cache: cache,
+		url:   url,
+	}
+}
+
+func (cs *CachedSet) cached() (Set, error) {
+	return cs.cache.Get(context.Background(), cs.url)
+}
+
+// AddKey is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only
+func (*CachedSet) AddKey(_ Key) error {
+	return fmt.Errorf(`(jwk.CachedSet).AddKey: jwk.CachedSet is immutable`)
+}
+
+// Clear is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only
+func (*CachedSet) Clear() error {
+	return fmt.Errorf(`(jwk.CachedSet).Clear: jwk.CachedSet is immutable`)
+}
+
+// Set is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only
+func (*CachedSet) Set(_ string, _ interface{}) error {
+	return fmt.Errorf(`(jwk.CachedSet).Set: jwk.CachedSet is immutable`)
+}
+
+// Remove is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only
+func (*CachedSet) Remove(_ string) error {
+	// TODO: Remove() should be renamed to Remove(string) error
+	return fmt.Errorf(`(jwk.CachedSet).Remove: jwk.CachedSet is immutable`)
+}
+
+// RemoveKey is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only
+func (*CachedSet) RemoveKey(_ Key) error {
+	return fmt.Errorf(`(jwk.CachedSet).RemoveKey: jwk.CachedSet is immutable`)
+}
+
+func (cs *CachedSet) Clone() (Set, error) {
+	set, err := cs.cached()
+	if err != nil {
+		return nil, fmt.Errorf(`failed to get cached jwk.Set: %w`, err)
+	}
+
+	return set.Clone()
+}
+
+// Get returns the value of a non-Key field stored in the jwk.Set
+func (cs *CachedSet) Get(name string) (interface{}, bool) {
+	set, err := cs.cached()
+	if err != nil {
+		return nil, false
+	}
+
+	return set.Get(name)
+}
+
+// Key returns the Key at the specified index
+func (cs *CachedSet) Key(idx int) (Key, bool) {
+	set, err := cs.cached()
+	if err != nil {
+		return nil, false
+	}
+
+	return set.Key(idx)
+}
+
+func (cs *CachedSet) Index(key Key) int {
+	set, err := cs.cached()
+	if err != nil {
+		return -1
+	}
+
+	return set.Index(key)
+}
+
+func (cs *CachedSet) Keys(ctx context.Context) KeyIterator {
+	set, err := cs.cached()
+	if err != nil {
+		return arrayiter.New(nil)
+	}
+
+	return set.Keys(ctx)
+}
+
+func (cs *CachedSet) Iterate(ctx context.Context) HeaderIterator {
+	set, err := cs.cached()
+	if err != nil {
+		return mapiter.New(nil)
+	}
+
+	return set.Iterate(ctx)
+}
+
+func (cs *CachedSet) Len() int {
+	set, err := cs.cached()
+	if err != nil {
+		return -1
+	}
+
+	return set.Len()
+}
+
+func (cs *CachedSet) LookupKeyID(kid string) (Key, bool) {
+	set, err := cs.cached()
+	if err != nil {
+		return nil, false
+	}
+
+	return set.LookupKeyID(kid)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/ecdsa.go
new file mode 100644
index 0000000000..67a14ba63e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/ecdsa.go
@@ -0,0 +1,228 @@
+package jwk
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"fmt"
+	"math/big"
+
+	"github.com/lestrrat-go/blackmagic"
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+	"github.com/lestrrat-go/jwx/v2/internal/ecutil"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+func init() {
+	ecutil.RegisterCurve(elliptic.P256(), jwa.P256)
+	ecutil.RegisterCurve(elliptic.P384(), jwa.P384)
+	ecutil.RegisterCurve(elliptic.P521(), jwa.P521)
+}
+
+func (k *ecdsaPublicKey) FromRaw(rawKey *ecdsa.PublicKey) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	if rawKey.X == nil {
+		return fmt.Errorf(`invalid ecdsa.PublicKey`)
+	}
+
+	if rawKey.Y == nil {
+		return fmt.Errorf(`invalid ecdsa.PublicKey`)
+	}
+
+	xbuf := ecutil.AllocECPointBuffer(rawKey.X, rawKey.Curve)
+	ybuf := ecutil.AllocECPointBuffer(rawKey.Y, rawKey.Curve)
+	defer ecutil.ReleaseECPointBuffer(xbuf)
+	defer ecutil.ReleaseECPointBuffer(ybuf)
+
+	k.x = make([]byte, len(xbuf))
+	copy(k.x, xbuf)
+	k.y = make([]byte, len(ybuf))
+	copy(k.y, ybuf)
+
+	var crv jwa.EllipticCurveAlgorithm
+	if tmp, ok := ecutil.AlgorithmForCurve(rawKey.Curve); ok {
+		crv = tmp
+	} else {
+		return fmt.Errorf(`invalid elliptic curve %s`, rawKey.Curve)
+	}
+	k.crv = &crv
+
+	return nil
+}
+
+func (k *ecdsaPrivateKey) FromRaw(rawKey *ecdsa.PrivateKey) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	if rawKey.PublicKey.X == nil {
+		return fmt.Errorf(`invalid ecdsa.PrivateKey`)
+	}
+	if rawKey.PublicKey.Y == nil {
+		return fmt.Errorf(`invalid ecdsa.PrivateKey`)
+	}
+	if rawKey.D == nil {
+		return fmt.Errorf(`invalid ecdsa.PrivateKey`)
+	}
+
+	xbuf := ecutil.AllocECPointBuffer(rawKey.PublicKey.X, rawKey.Curve)
+	ybuf := ecutil.AllocECPointBuffer(rawKey.PublicKey.Y, rawKey.Curve)
+	dbuf := ecutil.AllocECPointBuffer(rawKey.D, rawKey.Curve)
+	defer ecutil.ReleaseECPointBuffer(xbuf)
+	defer ecutil.ReleaseECPointBuffer(ybuf)
+	defer ecutil.ReleaseECPointBuffer(dbuf)
+
+	k.x = make([]byte, len(xbuf))
+	copy(k.x, xbuf)
+	k.y = make([]byte, len(ybuf))
+	copy(k.y, ybuf)
+	k.d = make([]byte, len(dbuf))
+	copy(k.d, dbuf)
+
+	var crv jwa.EllipticCurveAlgorithm
+	if tmp, ok := ecutil.AlgorithmForCurve(rawKey.Curve); ok {
+		crv = tmp
+	} else {
+		return fmt.Errorf(`invalid elliptic curve %s`, rawKey.Curve)
+	}
+	k.crv = &crv
+
+	return nil
+}
+
+func buildECDSAPublicKey(alg jwa.EllipticCurveAlgorithm, xbuf, ybuf []byte) (*ecdsa.PublicKey, error) {
+	var crv elliptic.Curve
+	if tmp, ok := ecutil.CurveForAlgorithm(alg); ok {
+		crv = tmp
+	} else {
+		return nil, fmt.Errorf(`invalid curve algorithm %s`, alg)
+	}
+
+	var x, y big.Int
+	x.SetBytes(xbuf)
+	y.SetBytes(ybuf)
+
+	return &ecdsa.PublicKey{Curve: crv, X: &x, Y: &y}, nil
+}
+
+// Raw returns the ECDSA public key represented by this JWK
+func (k *ecdsaPublicKey) Raw(v interface{}) error {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	pubk, err := buildECDSAPublicKey(k.Crv(), k.x, k.y)
+	if err != nil {
+		return fmt.Errorf(`failed to build public key: %w`, err)
+	}
+
+	return blackmagic.AssignIfCompatible(v, pubk)
+}
+
+func (k *ecdsaPrivateKey) Raw(v interface{}) error {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	pubk, err := buildECDSAPublicKey(k.Crv(), k.x, k.y)
+	if err != nil {
+		return fmt.Errorf(`failed to build public key: %w`, err)
+	}
+
+	var key ecdsa.PrivateKey
+	var d big.Int
+	d.SetBytes(k.d)
+	key.D = &d
+	key.PublicKey = *pubk
+
+	return blackmagic.AssignIfCompatible(v, &key)
+}
+
+func makeECDSAPublicKey(v interface {
+	makePairs() []*HeaderPair
+}) (Key, error) {
+	newKey := newECDSAPublicKey()
+
+	// Iterate and copy everything except for the bits that should not be in the public key
+	for _, pair := range v.makePairs() {
+		switch pair.Key {
+		case ECDSADKey:
+			continue
+		default:
+			//nolint:forcetypeassert
+			key := pair.Key.(string)
+			if err := newKey.Set(key, pair.Value); err != nil {
+				return nil, fmt.Errorf(`failed to set field %q: %w`, key, err)
+			}
+		}
+	}
+
+	return newKey, nil
+}
+
+func (k *ecdsaPrivateKey) PublicKey() (Key, error) {
+	return makeECDSAPublicKey(k)
+}
+
+func (k *ecdsaPublicKey) PublicKey() (Key, error) {
+	return makeECDSAPublicKey(k)
+}
+
+func ecdsaThumbprint(hash crypto.Hash, crv, x, y string) []byte {
+	h := hash.New()
+	fmt.Fprint(h, `{"crv":"`)
+	fmt.Fprint(h, crv)
+	fmt.Fprint(h, `","kty":"EC","x":"`)
+	fmt.Fprint(h, x)
+	fmt.Fprint(h, `","y":"`)
+	fmt.Fprint(h, y)
+	fmt.Fprint(h, `"}`)
+	return h.Sum(nil)
+}
+
+// Thumbprint returns the JWK thumbprint using the indicated
+// hashing algorithm, according to RFC 7638
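+//
+// A minimal sketch, where `key` is any jwk.Key:
+//
+//	tp, err := key.Thumbprint(crypto.SHA256)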
+func (k ecdsaPublicKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	var key ecdsa.PublicKey
+	if err := k.Raw(&key); err != nil {
+		return nil, fmt.Errorf(`failed to materialize ecdsa.PublicKey for thumbprint generation: %w`, err)
+	}
+
+	xbuf := ecutil.AllocECPointBuffer(key.X, key.Curve)
+	ybuf := ecutil.AllocECPointBuffer(key.Y, key.Curve)
+	defer ecutil.ReleaseECPointBuffer(xbuf)
+	defer ecutil.ReleaseECPointBuffer(ybuf)
+
+	return ecdsaThumbprint(
+		hash,
+		key.Curve.Params().Name,
+		base64.EncodeToString(xbuf),
+		base64.EncodeToString(ybuf),
+	), nil
+}
+
+// Thumbprint returns the JWK thumbprint using the indicated
+// hashing algorithm, according to RFC 7638
+func (k ecdsaPrivateKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	var key ecdsa.PrivateKey
+	if err := k.Raw(&key); err != nil {
+		return nil, fmt.Errorf(`failed to materialize ecdsa.PrivateKey for thumbprint generation: %w`, err)
+	}
+
+	xbuf := ecutil.AllocECPointBuffer(key.X, key.Curve)
+	ybuf := ecutil.AllocECPointBuffer(key.Y, key.Curve)
+	defer ecutil.ReleaseECPointBuffer(xbuf)
+	defer ecutil.ReleaseECPointBuffer(ybuf)
+
+	return ecdsaThumbprint(
+		hash,
+		key.Curve.Params().Name,
+		base64.EncodeToString(xbuf),
+		base64.EncodeToString(ybuf),
+	), nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/ecdsa_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/ecdsa_gen.go
new file mode 100644
index 0000000000..95a2995175
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/ecdsa_gen.go
@@ -0,0 +1,1181 @@
+// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+	"bytes"
+	"context"
+	"crypto/ecdsa"
+	"fmt"
+	"sort"
+	"sync"
+
+	"github.com/lestrrat-go/iter/mapiter"
+	"github.com/lestrrat-go/jwx/v2/cert"
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+	"github.com/lestrrat-go/jwx/v2/internal/iter"
+	"github.com/lestrrat-go/jwx/v2/internal/json"
+	"github.com/lestrrat-go/jwx/v2/internal/pool"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+const (
+	ECDSACrvKey = "crv"
+	ECDSADKey   = "d"
+	ECDSAXKey   = "x"
+	ECDSAYKey   = "y"
+)
+
+type ECDSAPublicKey interface {
+	Key
+	FromRaw(*ecdsa.PublicKey) error
+	Crv() jwa.EllipticCurveAlgorithm
+	X() []byte
+	Y() []byte
+}
+
+type ecdsaPublicKey struct {
+	algorithm              *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+	crv                    *jwa.EllipticCurveAlgorithm
+	keyID                  *string           // https://tools.ietf.org/html/rfc7515#section-4.1.4
+	keyOps                 *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+	keyUsage               *string           // https://tools.ietf.org/html/rfc7517#section-4.2
+	x                      []byte
+	x509CertChain          *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+	x509CertThumbprint     *string     // https://tools.ietf.org/html/rfc7515#section-4.1.7
+	x509CertThumbprintS256 *string     // https://tools.ietf.org/html/rfc7515#section-4.1.8
+	x509URL                *string     // https://tools.ietf.org/html/rfc7515#section-4.1.5
+	y                      []byte
+	privateParams          map[string]interface{}
+	mu                     *sync.RWMutex
+	dc                     json.DecodeCtx
+}
+
+var _ ECDSAPublicKey = &ecdsaPublicKey{}
+var _ Key = &ecdsaPublicKey{}
+
+func newECDSAPublicKey() *ecdsaPublicKey {
+	return &ecdsaPublicKey{
+		mu:            &sync.RWMutex{},
+		privateParams: make(map[string]interface{}),
+	}
+}
+
+func (h ecdsaPublicKey) KeyType() jwa.KeyType {
+	return jwa.EC
+}
+
+func (h *ecdsaPublicKey) Algorithm() jwa.KeyAlgorithm {
+	if h.algorithm != nil {
+		return *(h.algorithm)
+	}
+	return jwa.InvalidKeyAlgorithm("")
+}
+
+func (h *ecdsaPublicKey) Crv() jwa.EllipticCurveAlgorithm {
+	if h.crv != nil {
+		return *(h.crv)
+	}
+	return jwa.InvalidEllipticCurve
+}
+
+func (h *ecdsaPublicKey) KeyID() string {
+	if h.keyID != nil {
+		return *(h.keyID)
+	}
+	return ""
+}
+
+func (h *ecdsaPublicKey) KeyOps() KeyOperationList {
+	if h.keyOps != nil {
+		return *(h.keyOps)
+	}
+	return nil
+}
+
+func (h *ecdsaPublicKey) KeyUsage() string {
+	if h.keyUsage != nil {
+		return *(h.keyUsage)
+	}
+	return ""
+}
+
+func (h *ecdsaPublicKey) X() []byte {
+	return h.x
+}
+
+func (h *ecdsaPublicKey) X509CertChain() *cert.Chain {
+	return h.x509CertChain
+}
+
+func (h *ecdsaPublicKey) X509CertThumbprint() string {
+	if h.x509CertThumbprint != nil {
+		return *(h.x509CertThumbprint)
+	}
+	return ""
+}
+
+func (h *ecdsaPublicKey) X509CertThumbprintS256() string {
+	if h.x509CertThumbprintS256 != nil {
+		return *(h.x509CertThumbprintS256)
+	}
+	return ""
+}
+
+func (h *ecdsaPublicKey) X509URL() string {
+	if h.x509URL != nil {
+		return *(h.x509URL)
+	}
+	return ""
+}
+
+func (h *ecdsaPublicKey) Y() []byte {
+	return h.y
+}
+
+func (h *ecdsaPublicKey) makePairs() []*HeaderPair {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+
+	var pairs []*HeaderPair
+	pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.EC})
+	if h.algorithm != nil {
+		pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)})
+	}
+	if h.crv != nil {
+		pairs = append(pairs, &HeaderPair{Key: ECDSACrvKey, Value: *(h.crv)})
+	}
+	if h.keyID != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)})
+	}
+	if h.keyOps != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)})
+	}
+	if h.keyUsage != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)})
+	}
+	if h.x != nil {
+		pairs = append(pairs, &HeaderPair{Key: ECDSAXKey, Value: h.x})
+	}
+	if h.x509CertChain != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain})
+	}
+	if h.x509CertThumbprint != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)})
+	}
+	if h.x509CertThumbprintS256 != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)})
+	}
+	if h.x509URL != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)})
+	}
+	if h.y != nil {
+		pairs = append(pairs, &HeaderPair{Key: ECDSAYKey, Value: h.y})
+	}
+	for k, v := range h.privateParams {
+		pairs = append(pairs, &HeaderPair{Key: k, Value: v})
+	}
+	return pairs
+}
+
+func (h *ecdsaPublicKey) PrivateParams() map[string]interface{} {
+	return h.privateParams
+}
+
+func (h *ecdsaPublicKey) Get(name string) (interface{}, bool) {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	switch name {
+	case KeyTypeKey:
+		return h.KeyType(), true
+	case AlgorithmKey:
+		if h.algorithm == nil {
+			return nil, false
+		}
+		return *(h.algorithm), true
+	case ECDSACrvKey:
+		if h.crv == nil {
+			return nil, false
+		}
+		return *(h.crv), true
+	case KeyIDKey:
+		if h.keyID == nil {
+			return nil, false
+		}
+		return *(h.keyID), true
+	case KeyOpsKey:
+		if h.keyOps == nil {
+			return nil, false
+		}
+		return *(h.keyOps), true
+	case KeyUsageKey:
+		if h.keyUsage == nil {
+			return nil, false
+		}
+		return *(h.keyUsage), true
+	case ECDSAXKey:
+		if h.x == nil {
+			return nil, false
+		}
+		return h.x, true
+	case X509CertChainKey:
+		if h.x509CertChain == nil {
+			return nil, false
+		}
+		return h.x509CertChain, true
+	case X509CertThumbprintKey:
+		if h.x509CertThumbprint == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprint), true
+	case X509CertThumbprintS256Key:
+		if h.x509CertThumbprintS256 == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprintS256), true
+	case X509URLKey:
+		if h.x509URL == nil {
+			return nil, false
+		}
+		return *(h.x509URL), true
+	case ECDSAYKey:
+		if h.y == nil {
+			return nil, false
+		}
+		return h.y, true
+	default:
+		v, ok := h.privateParams[name]
+		return v, ok
+	}
+}
+
+func (h *ecdsaPublicKey) Set(name string, value interface{}) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	return h.setNoLock(name, value)
+}
+
+func (h *ecdsaPublicKey) setNoLock(name string, value interface{}) error {
+	switch name {
+	case "kty":
+		return nil
+	case AlgorithmKey:
+		switch v := value.(type) {
+		case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm:
+			var tmp = jwa.KeyAlgorithmFrom(v)
+			h.algorithm = &tmp
+		case fmt.Stringer:
+			s := v.String()
+			var tmp = jwa.KeyAlgorithmFrom(s)
+			h.algorithm = &tmp
+		default:
+			return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value)
+		}
+		return nil
+	case ECDSACrvKey:
+		if v, ok := value.(jwa.EllipticCurveAlgorithm); ok {
+			h.crv = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, ECDSACrvKey, value)
+	case KeyIDKey:
+		if v, ok := value.(string); ok {
+			h.keyID = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+	case KeyOpsKey:
+		var acceptor KeyOperationList
+		if err := acceptor.Accept(value); err != nil {
+			return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+		}
+		h.keyOps = &acceptor
+		return nil
+	case KeyUsageKey:
+		switch v := value.(type) {
+		case KeyUsageType:
+			switch v {
+			case ForSignature, ForEncryption:
+				tmp := v.String()
+				h.keyUsage = &tmp
+			default:
+				return fmt.Errorf(`invalid key usage type %s`, v)
+			}
+		case string:
+			h.keyUsage = &v
+		default:
+			return fmt.Errorf(`invalid key usage type %s`, v)
+		}
+	case ECDSAXKey:
+		if v, ok := value.([]byte); ok {
+			h.x = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, ECDSAXKey, value)
+	case X509CertChainKey:
+		if v, ok := value.(*cert.Chain); ok {
+			h.x509CertChain = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+	case X509CertThumbprintKey:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprint = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+	case X509CertThumbprintS256Key:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprintS256 = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+	case X509URLKey:
+		if v, ok := value.(string); ok {
+			h.x509URL = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+	case ECDSAYKey:
+		if v, ok := value.([]byte); ok {
+			h.y = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, ECDSAYKey, value)
+	default:
+		if h.privateParams == nil {
+			h.privateParams = map[string]interface{}{}
+		}
+		h.privateParams[name] = value
+	}
+	return nil
+}
+
+func (k *ecdsaPublicKey) Remove(key string) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	switch key {
+	case AlgorithmKey:
+		k.algorithm = nil
+	case ECDSACrvKey:
+		k.crv = nil
+	case KeyIDKey:
+		k.keyID = nil
+	case KeyOpsKey:
+		k.keyOps = nil
+	case KeyUsageKey:
+		k.keyUsage = nil
+	case ECDSAXKey:
+		k.x = nil
+	case X509CertChainKey:
+		k.x509CertChain = nil
+	case X509CertThumbprintKey:
+		k.x509CertThumbprint = nil
+	case X509CertThumbprintS256Key:
+		k.x509CertThumbprintS256 = nil
+	case X509URLKey:
+		k.x509URL = nil
+	case ECDSAYKey:
+		k.y = nil
+	default:
+		delete(k.privateParams, key)
+	}
+	return nil
+}
+
+func (k *ecdsaPublicKey) Clone() (Key, error) {
+	return cloneKey(k)
+}
+
+func (k *ecdsaPublicKey) DecodeCtx() json.DecodeCtx {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+	return k.dc
+}
+
+func (k *ecdsaPublicKey) SetDecodeCtx(dc json.DecodeCtx) {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	k.dc = dc
+}
+
+func (h *ecdsaPublicKey) UnmarshalJSON(buf []byte) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.algorithm = nil
+	h.crv = nil
+	h.keyID = nil
+	h.keyOps = nil
+	h.keyUsage = nil
+	h.x = nil
+	h.x509CertChain = nil
+	h.x509CertThumbprint = nil
+	h.x509CertThumbprintS256 = nil
+	h.x509URL = nil
+	h.y = nil
+	dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+	for {
+		tok, err := dec.Token()
+		if err != nil {
+			return fmt.Errorf(`error reading token: %w`, err)
+		}
+		switch tok := tok.(type) {
+		case json.Delim:
+			// Assuming we're doing everything correctly, we should ONLY
+			// get either '{' or '}' here.
+			if tok == '}' { // End of object
+				break LOOP
+			} else if tok != '{' {
+				return fmt.Errorf(`expected '{', but got '%c'`, tok)
+			}
+		case string: // Objects can only have string keys
+			switch tok {
+			case KeyTypeKey:
+				val, err := json.ReadNextStringToken(dec)
+				if err != nil {
+					return fmt.Errorf(`error reading token: %w`, err)
+				}
+				if val != jwa.EC.String() {
+					return fmt.Errorf(`invalid kty value for ECDSAPublicKey (%s)`, val)
+				}
+			case AlgorithmKey:
+				var s string
+				if err := dec.Decode(&s); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+				}
+				alg := jwa.KeyAlgorithmFrom(s)
+				h.algorithm = &alg
+			case ECDSACrvKey:
+				var decoded jwa.EllipticCurveAlgorithm
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSACrvKey, err)
+				}
+				h.crv = &decoded
+			case KeyIDKey:
+				if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+				}
+			case KeyOpsKey:
+				var decoded KeyOperationList
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+				}
+				h.keyOps = &decoded
+			case KeyUsageKey:
+				if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+				}
+			case ECDSAXKey:
+				if err := json.AssignNextBytesToken(&h.x, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAXKey, err)
+				}
+			case X509CertChainKey:
+				var decoded cert.Chain
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+				}
+				h.x509CertChain = &decoded
+			case X509CertThumbprintKey:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+				}
+			case X509CertThumbprintS256Key:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+				}
+			case X509URLKey:
+				if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+				}
+			case ECDSAYKey:
+				if err := json.AssignNextBytesToken(&h.y, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAYKey, err)
+				}
+			default:
+				if dc := h.dc; dc != nil {
+					if localReg := dc.Registry(); localReg != nil {
+						decoded, err := localReg.Decode(dec, tok)
+						if err == nil {
+							h.setNoLock(tok, decoded)
+							continue
+						}
+					}
+				}
+				decoded, err := registry.Decode(dec, tok)
+				if err == nil {
+					h.setNoLock(tok, decoded)
+					continue
+				}
+				return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+			}
+		default:
+			return fmt.Errorf(`invalid token %T`, tok)
+		}
+	}
+	if h.crv == nil {
+		return fmt.Errorf(`required field crv is missing`)
+	}
+	if h.x == nil {
+		return fmt.Errorf(`required field x is missing`)
+	}
+	if h.y == nil {
+		return fmt.Errorf(`required field y is missing`)
+	}
+	return nil
+}
+
+func (h ecdsaPublicKey) MarshalJSON() ([]byte, error) {
+	data := make(map[string]interface{})
+	fields := make([]string, 0, 11)
+	for _, pair := range h.makePairs() {
+		fields = append(fields, pair.Key.(string))
+		data[pair.Key.(string)] = pair.Value
+	}
+
+	sort.Strings(fields)
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+	buf.WriteByte('{')
+	enc := json.NewEncoder(buf)
+	for i, f := range fields {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		buf.WriteRune('"')
+		buf.WriteString(f)
+		buf.WriteString(`":`)
+		v := data[f]
+		switch v := v.(type) {
+		case []byte:
+			buf.WriteRune('"')
+			buf.WriteString(base64.EncodeToString(v))
+			buf.WriteRune('"')
+		default:
+			if err := enc.Encode(v); err != nil {
+				return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+			}
+			buf.Truncate(buf.Len() - 1)
+		}
+	}
+	buf.WriteByte('}')
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+	return ret, nil
+}
+
+func (h *ecdsaPublicKey) Iterate(ctx context.Context) HeaderIterator {
+	pairs := h.makePairs()
+	ch := make(chan *HeaderPair, len(pairs))
+	go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) {
+		defer close(ch)
+		for _, pair := range pairs {
+			select {
+			case <-ctx.Done():
+				return
+			case ch <- pair:
+			}
+		}
+	}(ctx, ch, pairs)
+	return mapiter.New(ch)
+}
+
+func (h *ecdsaPublicKey) Walk(ctx context.Context, visitor HeaderVisitor) error {
+	return iter.WalkMap(ctx, h, visitor)
+}
+
+func (h *ecdsaPublicKey) AsMap(ctx context.Context) (map[string]interface{}, error) {
+	return iter.AsMap(ctx, h)
+}
+
+type ECDSAPrivateKey interface {
+	Key
+	FromRaw(*ecdsa.PrivateKey) error
+	Crv() jwa.EllipticCurveAlgorithm
+	D() []byte
+	X() []byte
+	Y() []byte
+}
+
+type ecdsaPrivateKey struct {
+	algorithm              *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+	crv                    *jwa.EllipticCurveAlgorithm
+	d                      []byte
+	keyID                  *string           // https://tools.ietf.org/html/rfc7515#section-4.1.4
+	keyOps                 *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+	keyUsage               *string           // https://tools.ietf.org/html/rfc7517#section-4.2
+	x                      []byte
+	x509CertChain          *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+	x509CertThumbprint     *string     // https://tools.ietf.org/html/rfc7515#section-4.1.7
+	x509CertThumbprintS256 *string     // https://tools.ietf.org/html/rfc7515#section-4.1.8
+	x509URL                *string     // https://tools.ietf.org/html/rfc7515#section-4.1.5
+	y                      []byte
+	privateParams          map[string]interface{}
+	mu                     *sync.RWMutex
+	dc                     json.DecodeCtx
+}
+
+var _ ECDSAPrivateKey = &ecdsaPrivateKey{}
+var _ Key = &ecdsaPrivateKey{}
+
+func newECDSAPrivateKey() *ecdsaPrivateKey {
+	return &ecdsaPrivateKey{
+		mu:            &sync.RWMutex{},
+		privateParams: make(map[string]interface{}),
+	}
+}
+
+func (h ecdsaPrivateKey) KeyType() jwa.KeyType {
+	return jwa.EC
+}
+
+func (h *ecdsaPrivateKey) Algorithm() jwa.KeyAlgorithm {
+	if h.algorithm != nil {
+		return *(h.algorithm)
+	}
+	return jwa.InvalidKeyAlgorithm("")
+}
+
+func (h *ecdsaPrivateKey) Crv() jwa.EllipticCurveAlgorithm {
+	if h.crv != nil {
+		return *(h.crv)
+	}
+	return jwa.InvalidEllipticCurve
+}
+
+func (h *ecdsaPrivateKey) D() []byte {
+	return h.d
+}
+
+func (h *ecdsaPrivateKey) KeyID() string {
+	if h.keyID != nil {
+		return *(h.keyID)
+	}
+	return ""
+}
+
+func (h *ecdsaPrivateKey) KeyOps() KeyOperationList {
+	if h.keyOps != nil {
+		return *(h.keyOps)
+	}
+	return nil
+}
+
+func (h *ecdsaPrivateKey) KeyUsage() string {
+	if h.keyUsage != nil {
+		return *(h.keyUsage)
+	}
+	return ""
+}
+
+func (h *ecdsaPrivateKey) X() []byte {
+	return h.x
+}
+
+func (h *ecdsaPrivateKey) X509CertChain() *cert.Chain {
+	return h.x509CertChain
+}
+
+func (h *ecdsaPrivateKey) X509CertThumbprint() string {
+	if h.x509CertThumbprint != nil {
+		return *(h.x509CertThumbprint)
+	}
+	return ""
+}
+
+func (h *ecdsaPrivateKey) X509CertThumbprintS256() string {
+	if h.x509CertThumbprintS256 != nil {
+		return *(h.x509CertThumbprintS256)
+	}
+	return ""
+}
+
+func (h *ecdsaPrivateKey) X509URL() string {
+	if h.x509URL != nil {
+		return *(h.x509URL)
+	}
+	return ""
+}
+
+func (h *ecdsaPrivateKey) Y() []byte {
+	return h.y
+}
+
+func (h *ecdsaPrivateKey) makePairs() []*HeaderPair {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+
+	var pairs []*HeaderPair
+	pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.EC})
+	if h.algorithm != nil {
+		pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)})
+	}
+	if h.crv != nil {
+		pairs = append(pairs, &HeaderPair{Key: ECDSACrvKey, Value: *(h.crv)})
+	}
+	if h.d != nil {
+		pairs = append(pairs, &HeaderPair{Key: ECDSADKey, Value: h.d})
+	}
+	if h.keyID != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)})
+	}
+	if h.keyOps != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)})
+	}
+	if h.keyUsage != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)})
+	}
+	if h.x != nil {
+		pairs = append(pairs, &HeaderPair{Key: ECDSAXKey, Value: h.x})
+	}
+	if h.x509CertChain != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain})
+	}
+	if h.x509CertThumbprint != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)})
+	}
+	if h.x509CertThumbprintS256 != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)})
+	}
+	if h.x509URL != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)})
+	}
+	if h.y != nil {
+		pairs = append(pairs, &HeaderPair{Key: ECDSAYKey, Value: h.y})
+	}
+	for k, v := range h.privateParams {
+		pairs = append(pairs, &HeaderPair{Key: k, Value: v})
+	}
+	return pairs
+}
+
+func (h *ecdsaPrivateKey) PrivateParams() map[string]interface{} {
+	return h.privateParams
+}
+
+func (h *ecdsaPrivateKey) Get(name string) (interface{}, bool) {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	switch name {
+	case KeyTypeKey:
+		return h.KeyType(), true
+	case AlgorithmKey:
+		if h.algorithm == nil {
+			return nil, false
+		}
+		return *(h.algorithm), true
+	case ECDSACrvKey:
+		if h.crv == nil {
+			return nil, false
+		}
+		return *(h.crv), true
+	case ECDSADKey:
+		if h.d == nil {
+			return nil, false
+		}
+		return h.d, true
+	case KeyIDKey:
+		if h.keyID == nil {
+			return nil, false
+		}
+		return *(h.keyID), true
+	case KeyOpsKey:
+		if h.keyOps == nil {
+			return nil, false
+		}
+		return *(h.keyOps), true
+	case KeyUsageKey:
+		if h.keyUsage == nil {
+			return nil, false
+		}
+		return *(h.keyUsage), true
+	case ECDSAXKey:
+		if h.x == nil {
+			return nil, false
+		}
+		return h.x, true
+	case X509CertChainKey:
+		if h.x509CertChain == nil {
+			return nil, false
+		}
+		return h.x509CertChain, true
+	case X509CertThumbprintKey:
+		if h.x509CertThumbprint == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprint), true
+	case X509CertThumbprintS256Key:
+		if h.x509CertThumbprintS256 == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprintS256), true
+	case X509URLKey:
+		if h.x509URL == nil {
+			return nil, false
+		}
+		return *(h.x509URL), true
+	case ECDSAYKey:
+		if h.y == nil {
+			return nil, false
+		}
+		return h.y, true
+	default:
+		v, ok := h.privateParams[name]
+		return v, ok
+	}
+}
+
+func (h *ecdsaPrivateKey) Set(name string, value interface{}) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	return h.setNoLock(name, value)
+}
+
+func (h *ecdsaPrivateKey) setNoLock(name string, value interface{}) error {
+	switch name {
+	case "kty":
+		return nil
+	case AlgorithmKey:
+		switch v := value.(type) {
+		case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm:
+			var tmp = jwa.KeyAlgorithmFrom(v)
+			h.algorithm = &tmp
+		case fmt.Stringer:
+			s := v.String()
+			var tmp = jwa.KeyAlgorithmFrom(s)
+			h.algorithm = &tmp
+		default:
+			return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value)
+		}
+		return nil
+	case ECDSACrvKey:
+		if v, ok := value.(jwa.EllipticCurveAlgorithm); ok {
+			h.crv = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, ECDSACrvKey, value)
+	case ECDSADKey:
+		if v, ok := value.([]byte); ok {
+			h.d = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, ECDSADKey, value)
+	case KeyIDKey:
+		if v, ok := value.(string); ok {
+			h.keyID = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+	case KeyOpsKey:
+		var acceptor KeyOperationList
+		if err := acceptor.Accept(value); err != nil {
+			return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+		}
+		h.keyOps = &acceptor
+		return nil
+	case KeyUsageKey:
+		switch v := value.(type) {
+		case KeyUsageType:
+			switch v {
+			case ForSignature, ForEncryption:
+				tmp := v.String()
+				h.keyUsage = &tmp
+			default:
+				return fmt.Errorf(`invalid key usage type %s`, v)
+			}
+		case string:
+			h.keyUsage = &v
+		default:
+			return fmt.Errorf(`invalid key usage type %s`, v)
+		}
+	case ECDSAXKey:
+		if v, ok := value.([]byte); ok {
+			h.x = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, ECDSAXKey, value)
+	case X509CertChainKey:
+		if v, ok := value.(*cert.Chain); ok {
+			h.x509CertChain = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+	case X509CertThumbprintKey:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprint = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+	case X509CertThumbprintS256Key:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprintS256 = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+	case X509URLKey:
+		if v, ok := value.(string); ok {
+			h.x509URL = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+	case ECDSAYKey:
+		if v, ok := value.([]byte); ok {
+			h.y = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, ECDSAYKey, value)
+	default:
+		if h.privateParams == nil {
+			h.privateParams = map[string]interface{}{}
+		}
+		h.privateParams[name] = value
+	}
+	return nil
+}
+
+func (k *ecdsaPrivateKey) Remove(key string) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	switch key {
+	case AlgorithmKey:
+		k.algorithm = nil
+	case ECDSACrvKey:
+		k.crv = nil
+	case ECDSADKey:
+		k.d = nil
+	case KeyIDKey:
+		k.keyID = nil
+	case KeyOpsKey:
+		k.keyOps = nil
+	case KeyUsageKey:
+		k.keyUsage = nil
+	case ECDSAXKey:
+		k.x = nil
+	case X509CertChainKey:
+		k.x509CertChain = nil
+	case X509CertThumbprintKey:
+		k.x509CertThumbprint = nil
+	case X509CertThumbprintS256Key:
+		k.x509CertThumbprintS256 = nil
+	case X509URLKey:
+		k.x509URL = nil
+	case ECDSAYKey:
+		k.y = nil
+	default:
+		delete(k.privateParams, key)
+	}
+	return nil
+}
+
+func (k *ecdsaPrivateKey) Clone() (Key, error) {
+	return cloneKey(k)
+}
+
+func (k *ecdsaPrivateKey) DecodeCtx() json.DecodeCtx {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+	return k.dc
+}
+
+func (k *ecdsaPrivateKey) SetDecodeCtx(dc json.DecodeCtx) {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	k.dc = dc
+}
+
+func (h *ecdsaPrivateKey) UnmarshalJSON(buf []byte) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.algorithm = nil
+	h.crv = nil
+	h.d = nil
+	h.keyID = nil
+	h.keyOps = nil
+	h.keyUsage = nil
+	h.x = nil
+	h.x509CertChain = nil
+	h.x509CertThumbprint = nil
+	h.x509CertThumbprintS256 = nil
+	h.x509URL = nil
+	h.y = nil
+	dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+	for {
+		tok, err := dec.Token()
+		if err != nil {
+			return fmt.Errorf(`error reading token: %w`, err)
+		}
+		switch tok := tok.(type) {
+		case json.Delim:
+			// Assuming we're doing everything correctly, we should ONLY
+			// get either '{' or '}' here.
+			if tok == '}' { // End of object
+				break LOOP
+			} else if tok != '{' {
+				return fmt.Errorf(`expected '{', but got '%c'`, tok)
+			}
+		case string: // Objects can only have string keys
+			switch tok {
+			case KeyTypeKey:
+				val, err := json.ReadNextStringToken(dec)
+				if err != nil {
+					return fmt.Errorf(`error reading token: %w`, err)
+				}
+				if val != jwa.EC.String() {
+					return fmt.Errorf(`invalid kty value for ECDSAPrivateKey (%s)`, val)
+				}
+			case AlgorithmKey:
+				var s string
+				if err := dec.Decode(&s); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+				}
+				alg := jwa.KeyAlgorithmFrom(s)
+				h.algorithm = &alg
+			case ECDSACrvKey:
+				var decoded jwa.EllipticCurveAlgorithm
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSACrvKey, err)
+				}
+				h.crv = &decoded
+			case ECDSADKey:
+				if err := json.AssignNextBytesToken(&h.d, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSADKey, err)
+				}
+			case KeyIDKey:
+				if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+				}
+			case KeyOpsKey:
+				var decoded KeyOperationList
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+				}
+				h.keyOps = &decoded
+			case KeyUsageKey:
+				if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+				}
+			case ECDSAXKey:
+				if err := json.AssignNextBytesToken(&h.x, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAXKey, err)
+				}
+			case X509CertChainKey:
+				var decoded cert.Chain
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+				}
+				h.x509CertChain = &decoded
+			case X509CertThumbprintKey:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+				}
+			case X509CertThumbprintS256Key:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+				}
+			case X509URLKey:
+				if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+				}
+			case ECDSAYKey:
+				if err := json.AssignNextBytesToken(&h.y, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAYKey, err)
+				}
+			default:
+				if dc := h.dc; dc != nil {
+					if localReg := dc.Registry(); localReg != nil {
+						decoded, err := localReg.Decode(dec, tok)
+						if err == nil {
+							h.setNoLock(tok, decoded)
+							continue
+						}
+					}
+				}
+				decoded, err := registry.Decode(dec, tok)
+				if err == nil {
+					h.setNoLock(tok, decoded)
+					continue
+				}
+				return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+			}
+		default:
+			return fmt.Errorf(`invalid token %T`, tok)
+		}
+	}
+	if h.crv == nil {
+		return fmt.Errorf(`required field crv is missing`)
+	}
+	if h.d == nil {
+		return fmt.Errorf(`required field d is missing`)
+	}
+	if h.x == nil {
+		return fmt.Errorf(`required field x is missing`)
+	}
+	if h.y == nil {
+		return fmt.Errorf(`required field y is missing`)
+	}
+	return nil
+}
+
+func (h ecdsaPrivateKey) MarshalJSON() ([]byte, error) {
+	data := make(map[string]interface{})
+	fields := make([]string, 0, 12)
+	for _, pair := range h.makePairs() {
+		fields = append(fields, pair.Key.(string))
+		data[pair.Key.(string)] = pair.Value
+	}
+
+	sort.Strings(fields)
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+	buf.WriteByte('{')
+	enc := json.NewEncoder(buf)
+	for i, f := range fields {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		buf.WriteRune('"')
+		buf.WriteString(f)
+		buf.WriteString(`":`)
+		v := data[f]
+		switch v := v.(type) {
+		case []byte:
+			buf.WriteRune('"')
+			buf.WriteString(base64.EncodeToString(v))
+			buf.WriteRune('"')
+		default:
+			if err := enc.Encode(v); err != nil {
+				return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+			}
+			buf.Truncate(buf.Len() - 1)
+		}
+	}
+	buf.WriteByte('}')
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+	return ret, nil
+}
+
+func (h *ecdsaPrivateKey) Iterate(ctx context.Context) HeaderIterator {
+	pairs := h.makePairs()
+	ch := make(chan *HeaderPair, len(pairs))
+	go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) {
+		defer close(ch)
+		for _, pair := range pairs {
+			select {
+			case <-ctx.Done():
+				return
+			case ch <- pair:
+			}
+		}
+	}(ctx, ch, pairs)
+	return mapiter.New(ch)
+}
+
+func (h *ecdsaPrivateKey) Walk(ctx context.Context, visitor HeaderVisitor) error {
+	return iter.WalkMap(ctx, h, visitor)
+}
+
+func (h *ecdsaPrivateKey) AsMap(ctx context.Context) (map[string]interface{}, error) {
+	return iter.AsMap(ctx, h)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/es256k.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/es256k.go
new file mode 100644
index 0000000000..1a9d2346a4
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/es256k.go
@@ -0,0 +1,14 @@
+//go:build jwx_es256k
+// +build jwx_es256k
+
+package jwk
+
+import (
+	"github.com/decred/dcrd/dcrec/secp256k1/v4"
+	"github.com/lestrrat-go/jwx/v2/internal/ecutil"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+func init() {
+	ecutil.RegisterCurve(secp256k1.S256(), jwa.Secp256k1)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/fetch.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/fetch.go
new file mode 100644
index 0000000000..ddc75cd838
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/fetch.go
@@ -0,0 +1,134 @@
+package jwk
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"strconv"
+	"sync"
+	"sync/atomic"
+
+	"github.com/lestrrat-go/httprc"
+)
+
+type Fetcher interface {
+	Fetch(context.Context, string, ...FetchOption) (Set, error)
+}
+
+type FetchFunc func(context.Context, string, ...FetchOption) (Set, error)
+
+func (f FetchFunc) Fetch(ctx context.Context, u string, options ...FetchOption) (Set, error) {
+	return f(ctx, u, options...)
+}
+
+var globalFetcher httprc.Fetcher
+var muGlobalFetcher sync.Mutex
+var fetcherChanged uint32
+
+func init() {
+	atomic.StoreUint32(&fetcherChanged, 1)
+}
+
+func getGlobalFetcher() httprc.Fetcher {
+	if v := atomic.LoadUint32(&fetcherChanged); v == 0 {
+		return globalFetcher
+	}
+
+	muGlobalFetcher.Lock()
+	defer muGlobalFetcher.Unlock()
+	if globalFetcher == nil {
+		var nworkers int
+		v := os.Getenv(`JWK_FETCHER_WORKER_COUNT`)
+		if c, err := strconv.ParseInt(v, 10, 64); err == nil {
+			if c > math.MaxInt {
+				nworkers = math.MaxInt
+			} else {
+				nworkers = int(c)
+			}
+		}
+		if nworkers < 1 {
+			nworkers = 3
+		}
+
+		globalFetcher = httprc.NewFetcher(context.Background(), httprc.WithFetcherWorkerCount(nworkers))
+	}
+
+	atomic.StoreUint32(&fetcherChanged, 0)
+	return globalFetcher
+}
+
+// SetGlobalFetcher allows users to specify a custom global fetcher,
+// which is used by the `Fetch` function. Assigning `nil` forces the
+// default fetcher to be (re)created when the next call to
+// `jwk.Fetch` occurs.
+//
+// You only need to call this function when you want to
+// either change the fetching behavior (for example, you want to change
+// how the default whitelist is handled), or when you want to control
+// the lifetime of the global fetcher, for example for tests
+// that require a clean shutdown.
+//
+// If you do use this function to set a custom fetcher and you
+// control its termination, make sure that you call `jwk.SetGlobalFetcher()`
+// one more time (possibly with `nil`) to assign a valid fetcher.
+// Otherwise, once the fetcher is invalidated, subsequent calls to `jwk.Fetch`
+// may hang, causing very hard to debug problems.
+//
+// If you are sure you no longer need `jwk.Fetch` after terminating the
+// fetcher, then the above caution does not apply.
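+//
+// A minimal sketch of tying the fetcher's lifetime to a context
+// (illustrative only; it reuses the `httprc.NewFetcher` constructor
+// that this package itself calls, with no extra options):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	jwk.SetGlobalFetcher(httprc.NewFetcher(ctx))
+//	// ... calls to jwk.Fetch ...
+//	cancel()                  // terminates the fetcher's goroutines
+//	jwk.SetGlobalFetcher(nil) // ensure a valid fetcher is recreated later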
+func SetGlobalFetcher(f httprc.Fetcher) {
+	muGlobalFetcher.Lock()
+	globalFetcher = f
+	muGlobalFetcher.Unlock()
+	atomic.StoreUint32(&fetcherChanged, 1)
+}
+
+// Fetch fetches a JWK resource specified by a URL. The URL must
+// point to a resource that is supported by `net/http`.
+//
+// If you are using the same `jwk.Set` for long periods of time during
+// the lifecycle of your program, and would like to periodically refresh the
+// contents of the object with the data at the remote resource,
+// consider using `jwk.Cache`, which automatically refreshes
+// jwk.Set objects asynchronously.
+//
+// Please note that `jwk.Fetch` uses a global object underneath, which
+// spawns goroutines that remain until the Go runtime exits.
+// Initially this global variable is uninitialized, but upon
+// calling `jwk.Fetch` once, it is initialized and the goroutines are spawned.
+// If you want to control the lifetime of these goroutines, you can
+// call `jwk.SetGlobalFetcher` with a custom fetcher which is tied to
+// a `context.Context` object that you can control.
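+//
+// A minimal usage sketch (illustrative only; the URL and key ID are
+// placeholders, not values defined by this package):
+//
+//	set, err := jwk.Fetch(ctx, "https://example.com/.well-known/jwks.json")
+//	if err != nil {
+//		// handle error
+//	}
+//	if key, ok := set.LookupKeyID("my-key-id"); ok {
+//		// use key
+//	}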
+func Fetch(ctx context.Context, u string, options ...FetchOption) (Set, error) {
+	var hrfopts []httprc.FetchOption
+	var parseOptions []ParseOption
+	for _, option := range options {
+		if parseOpt, ok := option.(ParseOption); ok {
+			parseOptions = append(parseOptions, parseOpt)
+			continue
+		}
+
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identHTTPClient{}:
+			hrfopts = append(hrfopts, httprc.WithHTTPClient(option.Value().(HTTPClient)))
+		case identFetchWhitelist{}:
+			hrfopts = append(hrfopts, httprc.WithWhitelist(option.Value().(httprc.Whitelist)))
+		}
+	}
+
+	res, err := getGlobalFetcher().Fetch(ctx, u, hrfopts...)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to fetch %q: %w`, u, err)
+	}
+
+	buf, err := io.ReadAll(res.Body)
+	defer res.Body.Close()
+	if err != nil {
+		return nil, fmt.Errorf(`failed to read response body for %q: %w`, u, err)
+	}
+
+	return Parse(buf, parseOptions...)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/interface.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/interface.go
new file mode 100644
index 0000000000..729a0ec6c5
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/interface.go
@@ -0,0 +1,137 @@
+package jwk
+
+import (
+	"context"
+	"sync"
+
+	"github.com/lestrrat-go/iter/arrayiter"
+	"github.com/lestrrat-go/iter/mapiter"
+	"github.com/lestrrat-go/jwx/v2/internal/iter"
+	"github.com/lestrrat-go/jwx/v2/internal/json"
+)
+
+// KeyUsageType is used to denote what this key should be used for
+type KeyUsageType string
+
+const (
+	// ForSignature is the value used in the headers to indicate that
+	// this key should be used for signatures
+	ForSignature KeyUsageType = "sig"
+	// ForEncryption is the value used in the headers to indicate that
+	// this key should be used for encrypting
+	ForEncryption KeyUsageType = "enc"
+)
+
+type KeyOperation string
+type KeyOperationList []KeyOperation
+
+const (
+	KeyOpSign       KeyOperation = "sign"       // (compute digital signature or MAC)
+	KeyOpVerify     KeyOperation = "verify"     // (verify digital signature or MAC)
+	KeyOpEncrypt    KeyOperation = "encrypt"    // (encrypt content)
+	KeyOpDecrypt    KeyOperation = "decrypt"    // (decrypt content and validate decryption, if applicable)
+	KeyOpWrapKey    KeyOperation = "wrapKey"    // (encrypt key)
+	KeyOpUnwrapKey  KeyOperation = "unwrapKey"  // (decrypt key and validate decryption, if applicable)
+	KeyOpDeriveKey  KeyOperation = "deriveKey"  // (derive key)
+	KeyOpDeriveBits KeyOperation = "deriveBits" // (derive bits not to be used as a key)
+)
+
+// Set represents JWKS object, a collection of jwk.Key objects.
+//
+// Sets can be safely converted to and from JSON using the standard
+// `"encoding/json".Marshal` and `"encoding/json".Unmarshal`. However,
+// if you do not know if the payload contains a single JWK or a JWK set,
+// consider using `jwk.Parse()` to always get a `jwk.Set` out of it.
+//
+// Since v1.2.12, JWK sets with private parameters can be parsed as well.
+// Such private parameters can be accessed via the `Field()` method.
+// If a resource contains a single JWK instead of a JWK set, private parameters
+// are stored in _both_ the resulting `jwk.Set` object and the `jwk.Key` object.
+//
+//nolint:interfacebloat
+type Set interface {
+	// AddKey adds the specified key. If the key already exists in the set,
+	// an error is returned.
+	AddKey(Key) error
+
+	// Clear resets the list of keys associated with this set, emptying the
+	// internal list of `jwk.Key`s, as well as clearing any other non-key
+	// fields
+	Clear() error
+
+	// Key returns the key at index `idx`. If the index is out of range,
+	// then the second return value is false.
+	Key(int) (Key, bool)
+
+	// Get returns the value of a private field in the key set.
+	//
+	// For the purposes of a key set, any field other than the "keys" field is
+	// considered to be a private field. In other words, you cannot use this
+	// method to directly access the list of keys in the set
+	Get(string) (interface{}, bool)
+
+	// Set sets the value of a single field.
+	//
+	// This method, which takes an `interface{}`, exists because
+	// these objects can contain extra _arbitrary_ fields that users can
+	// specify, and there is no way of knowing what type they could be.
+	Set(string, interface{}) error
+
+	// Remove removes the specified non-key field from the set.
+	// Keys may not be removed using this method.
+	Remove(string) error
+
+	// Index returns the index where the given key exists, -1 otherwise
+	Index(Key) int
+
+	// Len returns the number of keys in the set
+	Len() int
+
+	// LookupKeyID returns the first key matching the given key id.
+	// The second return value is false if there are no keys matching the key id.
+	// The set *may* contain multiple keys with the same key id. If you
+	// need all of them, use `Iterate()`
+	LookupKeyID(string) (Key, bool)
+
+	// RemoveKey removes the key from the set.
+	RemoveKey(Key) error
+
+	// Keys creates an iterator to iterate through all keys in the set.
+	Keys(context.Context) KeyIterator
+
+	// Iterate creates an iterator to iterate through all fields other than the keys
+	Iterate(context.Context) HeaderIterator
+
+	// Clone creates a new set with identical keys. Keys themselves are not cloned.
+	Clone() (Set, error)
+}
+
+type set struct {
+	keys          []Key
+	mu            sync.RWMutex
+	dc            DecodeCtx
+	privateParams map[string]interface{}
+}
+
+type HeaderVisitor = iter.MapVisitor
+type HeaderVisitorFunc = iter.MapVisitorFunc
+type HeaderPair = mapiter.Pair
+type HeaderIterator = mapiter.Iterator
+type KeyPair = arrayiter.Pair
+type KeyIterator = arrayiter.Iterator
+
+type PublicKeyer interface {
+	// PublicKey creates the corresponding PublicKey type for this object.
+	// All fields are copied onto the new public key, except for those that are not allowed.
+	// Returned value must not be the receiver itself.
+	PublicKey() (Key, error)
+}
+
+type DecodeCtx interface {
+	json.DecodeCtx
+	IgnoreParseError() bool
+}
+type KeyWithDecodeCtx interface {
+	SetDecodeCtx(DecodeCtx)
+	DecodeCtx() DecodeCtx
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/interface_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/interface_gen.go
new file mode 100644
index 0000000000..6e4e79a047
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/interface_gen.go
@@ -0,0 +1,116 @@
+// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+	"context"
+	"crypto"
+
+	"github.com/lestrrat-go/jwx/v2/cert"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+const (
+	KeyTypeKey                = "kty"
+	KeyUsageKey               = "use"
+	KeyOpsKey                 = "key_ops"
+	AlgorithmKey              = "alg"
+	KeyIDKey                  = "kid"
+	X509URLKey                = "x5u"
+	X509CertChainKey          = "x5c"
+	X509CertThumbprintKey     = "x5t"
+	X509CertThumbprintS256Key = "x5t#S256"
+)
+
+// Key defines the minimal interface for each of the
+// key types. Their use and implementation differ significantly
+// between key types, so you should use type assertions
+// to perform more specific tasks with each key.
+type Key interface {
+	// Get returns the value of a single field. The second boolean return value
+	// will be false if the field is not stored in the source
+	//
+	// This method, which returns an `interface{}`, exists because
+	// these objects can contain extra _arbitrary_ fields that users can
+	// specify, and there is no way of knowing what type they could be
+	Get(string) (interface{}, bool)
+
+	// Set sets the value of a single field. Note that certain fields,
+	// notably "kty", cannot be altered, but will not return an error
+	//
+	// This method, which takes an `interface{}`, exists because
+	// these objects can contain extra _arbitrary_ fields that users can
+	// specify, and there is no way of knowing what type they could be
+	Set(string, interface{}) error
+
+	// Remove removes the field associated with the specified key.
+	// There is no way to remove the `kty` (key type). You will ALWAYS be left with one field in a jwk.Key.
+	Remove(string) error
+
+	// Raw creates the corresponding raw key. For example,
+	// EC types would create *ecdsa.PublicKey or *ecdsa.PrivateKey,
+	// and OctetSeq types create a []byte key.
+	//
+	// If you do not know the exact type of a jwk.Key before attempting
+	// to obtain the raw key, you can simply pass a pointer to an
+	// empty interface as the first argument.
+	//
+	// If you already know the exact type, it is recommended that you
+	// pass a pointer to the zero value of the actual key type (e.g. &rsa.PrivateKey)
+	// for efficiency.
+	Raw(interface{}) error
+
+	// Thumbprint returns the JWK thumbprint using the indicated
+	// hashing algorithm, according to RFC 7638
+	Thumbprint(crypto.Hash) ([]byte, error)
+
+	// Iterate returns an iterator that returns all keys and values.
+	// See github.com/lestrrat-go/iter for a description of the iterator.
+	Iterate(ctx context.Context) HeaderIterator
+
+	// Walk is a utility tool that allows a visitor to iterate all keys and values
+	Walk(context.Context, HeaderVisitor) error
+
+	// AsMap is a utility tool that returns a new map that contains the same fields as the source
+	AsMap(context.Context) (map[string]interface{}, error)
+
+	// PrivateParams returns the non-standard elements in the source structure
+	// WARNING: DO NOT USE PrivateParams() IF YOU HAVE CONCURRENT CODE ACCESSING THEM.
+	// Use `AsMap()` to get a copy of the entire header, or use `Iterate()` instead
+	PrivateParams() map[string]interface{}
+
+	// Clone creates a new instance of the same type
+	Clone() (Key, error)
+
+	// PublicKey creates the corresponding PublicKey type for this object.
+	// All fields are copied onto the new public key, except for those that are not allowed.
+	//
+	// If the key is already a public key, it returns a new copy minus the disallowed fields as above.
+	PublicKey() (Key, error)
+
+	// KeyType returns the `kty` of a JWK
+	KeyType() jwa.KeyType
+	// KeyUsage returns `use` of a JWK
+	KeyUsage() string
+	// KeyOps returns `key_ops` of a JWK
+	KeyOps() KeyOperationList
+	// Algorithm returns the value of the `alg` field
+	//
+	// This field may contain either `jwk.SignatureAlgorithm` or `jwk.KeyEncryptionAlgorithm`.
+	// This is why there exists a `jwa.KeyAlgorithm` type that encompasses both types.
+	Algorithm() jwa.KeyAlgorithm
+	// KeyID returns `kid` of a JWK
+	KeyID() string
+	// X509URL returns `x5u` of a JWK
+	X509URL() string
+	// X509CertChain returns `x5c` of a JWK
+	X509CertChain() *cert.Chain
+	// X509CertThumbprint returns `x5t` of a JWK
+	X509CertThumbprint() string
+	// X509CertThumbprintS256 returns `x5t#S256` of a JWK
+	X509CertThumbprintS256() string
+
+	makePairs() []*HeaderPair
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/io.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/io.go
new file mode 100644
index 0000000000..2dc097e2fe
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/io.go
@@ -0,0 +1,39 @@
+// Code generated by tools/cmd/genreadfile/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+	"io/fs"
+	"os"
+)
+
+type sysFS struct{}
+
+func (sysFS) Open(path string) (fs.File, error) {
+	return os.Open(path)
+}
+
+func ReadFile(path string, options ...ReadFileOption) (Set, error) {
+	var parseOptions []ParseOption
+	for _, option := range options {
+		if po, ok := option.(ParseOption); ok {
+			parseOptions = append(parseOptions, po)
+		}
+	}
+
+	var srcFS fs.FS = sysFS{}
+	for _, option := range options {
+		switch option.Ident() {
+		case identFS{}:
+			srcFS = option.Value().(fs.FS)
+		}
+	}
+
+	f, err := srcFS.Open(path)
+	if err != nil {
+		return nil, err
+	}
+
+	defer f.Close()
+	return ParseReader(f, parseOptions...)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/jwk.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/jwk.go
new file mode 100644
index 0000000000..8521ba6e9f
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/jwk.go
@@ -0,0 +1,729 @@
+//go:generate ../tools/cmd/genjwk.sh
+
+// Package jwk implements JWK as described in https://tools.ietf.org/html/rfc7517
+package jwk
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"io"
+	"math/big"
+
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+	"github.com/lestrrat-go/jwx/v2/internal/ecutil"
+	"github.com/lestrrat-go/jwx/v2/internal/json"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+	"github.com/lestrrat-go/jwx/v2/x25519"
+)
+
+var registry = json.NewRegistry()
+
+func bigIntToBytes(n *big.Int) ([]byte, error) {
+	if n == nil {
+		return nil, fmt.Errorf(`invalid *big.Int value`)
+	}
+	return n.Bytes(), nil
+}
+
+// FromRaw creates a jwk.Key from the given key (RSA/ECDSA/OKP/symmetric keys).
+//
+// The constructor auto-detects the type of key to be instantiated
+// based on the input type:
+//
+//   - "crypto/rsa".PrivateKey and "crypto/rsa".PublicKey creates an RSA based key
+//   - "crypto/ecdsa".PrivateKey and "crypto/ecdsa".PublicKey creates an EC based key
+//   - "crypto/ed25519".PrivateKey and "crypto/ed25519".PublicKey creates an OKP based key
+//   - []byte creates a symmetric key
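+//
+// A minimal sketch (illustrative only; error handling elided):
+//
+//	raw, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	key, err := jwk.FromRaw(raw)
+//	if err != nil {
+//		// handle error
+//	}
+//	// key is now an EC based jwk.Key wrapping the *ecdsa.PrivateKey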
+func FromRaw(key interface{}) (Key, error) {
+	if key == nil {
+		return nil, fmt.Errorf(`jwk.FromRaw requires a non-nil key`)
+	}
+
+	var ptr interface{}
+	switch v := key.(type) {
+	case rsa.PrivateKey:
+		ptr = &v
+	case rsa.PublicKey:
+		ptr = &v
+	case ecdsa.PrivateKey:
+		ptr = &v
+	case ecdsa.PublicKey:
+		ptr = &v
+	default:
+		ptr = v
+	}
+
+	switch rawKey := ptr.(type) {
+	case *rsa.PrivateKey:
+		k := newRSAPrivateKey()
+		if err := k.FromRaw(rawKey); err != nil {
+			return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err)
+		}
+		return k, nil
+	case *rsa.PublicKey:
+		k := newRSAPublicKey()
+		if err := k.FromRaw(rawKey); err != nil {
+			return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err)
+		}
+		return k, nil
+	case *ecdsa.PrivateKey:
+		k := newECDSAPrivateKey()
+		if err := k.FromRaw(rawKey); err != nil {
+			return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err)
+		}
+		return k, nil
+	case *ecdsa.PublicKey:
+		k := newECDSAPublicKey()
+		if err := k.FromRaw(rawKey); err != nil {
+			return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err)
+		}
+		return k, nil
+	case ed25519.PrivateKey:
+		k := newOKPPrivateKey()
+		if err := k.FromRaw(rawKey); err != nil {
+			return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err)
+		}
+		return k, nil
+	case ed25519.PublicKey:
+		k := newOKPPublicKey()
+		if err := k.FromRaw(rawKey); err != nil {
+			return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err)
+		}
+		return k, nil
+	case x25519.PrivateKey:
+		k := newOKPPrivateKey()
+		if err := k.FromRaw(rawKey); err != nil {
+			return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err)
+		}
+		return k, nil
+	case x25519.PublicKey:
+		k := newOKPPublicKey()
+		if err := k.FromRaw(rawKey); err != nil {
+			return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err)
+		}
+		return k, nil
+	case []byte:
+		k := newSymmetricKey()
+		if err := k.FromRaw(rawKey); err != nil {
+			return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err)
+		}
+		return k, nil
+	default:
+		return nil, fmt.Errorf(`invalid key type '%T' for jwk.New`, key)
+	}
+}
+
+// PublicSetOf returns a new jwk.Set consisting of
+// public keys of the keys contained in the set.
+//
+// This is useful when you are generating a set of private keys, and
+// you want to generate the corresponding public versions for the
+// users to verify with.
+//
+// Be aware that all fields will be copied onto the new public key. It is the caller's
+// responsibility to remove any fields, if necessary.
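+//
+// A minimal sketch (illustrative only; `privSet` is assumed to be a
+// jwk.Set of private keys built elsewhere):
+//
+//	pubSet, err := jwk.PublicSetOf(privSet)
+//	if err != nil {
+//		// handle error
+//	}
+//	buf, _ := json.Marshal(pubSet) // e.g. publish this as your JWKS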
+func PublicSetOf(v Set) (Set, error) {
+	newSet := NewSet()
+
+	n := v.Len()
+	for i := 0; i < n; i++ {
+		k, ok := v.Key(i)
+		if !ok {
+			return nil, fmt.Errorf(`key not found`)
+		}
+		pubKey, err := PublicKeyOf(k)
+		if err != nil {
+			return nil, fmt.Errorf(`failed to get public key of %T: %w`, k, err)
+		}
+		if err := newSet.AddKey(pubKey); err != nil {
+			return nil, fmt.Errorf(`failed to add key to public key set: %w`, err)
+		}
+	}
+
+	return newSet, nil
+}
+
+// PublicKeyOf returns the corresponding public version of the jwk.Key.
+// If `v` is a SymmetricKey, then the same value is returned.
+// If `v` is already a public key, the key itself is returned.
+//
+// If `v` is a private key type that has a `PublicKey()` method, be aware
+// that all fields will be copied onto the new public key. It is the caller's
+// responsibility to remove any fields, if necessary
+//
+// If `v` is a raw key, the key is first converted to a `jwk.Key`
+func PublicKeyOf(v interface{}) (Key, error) {
+	// This should catch all jwk.Key instances
+	if pk, ok := v.(PublicKeyer); ok {
+		return pk.PublicKey()
+	}
+
+	jk, err := FromRaw(v)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to convert key into JWK: %w`, err)
+	}
+
+	return jk.PublicKey()
+}
+
+// PublicRawKeyOf returns the corresponding public key of the given
+// value `v` (e.g. given *rsa.PrivateKey, *rsa.PublicKey is returned)
+// If `v` is already a public key, the key itself is returned.
+//
+// The returned value will always be a pointer to the public key,
+// except when a []byte (e.g. symmetric key, ed25519 key) is passed to `v`.
+// In this case, the same []byte value is returned.
+func PublicRawKeyOf(v interface{}) (interface{}, error) {
+	if pk, ok := v.(PublicKeyer); ok {
+		pubk, err := pk.PublicKey()
+		if err != nil {
+			return nil, fmt.Errorf(`failed to obtain public key from %T: %w`, v, err)
+		}
+
+		var raw interface{}
+		if err := pubk.Raw(&raw); err != nil {
+			return nil, fmt.Errorf(`failed to obtain raw key from %T: %w`, pubk, err)
+		}
+		return raw, nil
+	}
+
+	// This may be a silly idea, but if the user gave us a non-pointer value...
+	var ptr interface{}
+	switch v := v.(type) {
+	case rsa.PrivateKey:
+		ptr = &v
+	case rsa.PublicKey:
+		ptr = &v
+	case ecdsa.PrivateKey:
+		ptr = &v
+	case ecdsa.PublicKey:
+		ptr = &v
+	default:
+		ptr = v
+	}
+
+	switch x := ptr.(type) {
+	case *rsa.PrivateKey:
+		return &x.PublicKey, nil
+	case *rsa.PublicKey:
+		return x, nil
+	case *ecdsa.PrivateKey:
+		return &x.PublicKey, nil
+	case *ecdsa.PublicKey:
+		return x, nil
+	case ed25519.PrivateKey:
+		return x.Public(), nil
+	case ed25519.PublicKey:
+		return x, nil
+	case x25519.PrivateKey:
+		return x.Public(), nil
+	case x25519.PublicKey:
+		return x, nil
+	case []byte:
+		return x, nil
+	default:
+		return nil, fmt.Errorf(`invalid key type passed to PublicKeyOf (%T)`, v)
+	}
+}
+
+const (
+	pmPrivateKey    = `PRIVATE KEY`
+	pmPublicKey     = `PUBLIC KEY`
+	pmECPrivateKey  = `EC PRIVATE KEY`
+	pmRSAPublicKey  = `RSA PUBLIC KEY`
+	pmRSAPrivateKey = `RSA PRIVATE KEY`
+)
+
+// EncodeX509 encodes the key into a byte sequence in ASN.1 DER format
+// suitable to be PEM encoded. The key can be a jwk.Key or a raw key
+// instance, but it must be one of the types supported by `x509` package.
+//
+// This function will try to do the right thing depending on the key type
+// (i.e. switch between `x509.MarshalPKCS1PrivateKey` and `x509.MarshalECPrivateKey`),
+// but for public keys, it will always use `x509.MarshalPKIXPublicKey`.
+// Please perform the encoding manually if you need more fine-grained control.
+//
+// The first return value is the name that can be used for `(pem.Block).Type`.
+// The second return value is the encoded byte sequence.
+func EncodeX509(v interface{}) (string, []byte, error) {
+	// we can't import jwk, so just use the interface
+	if key, ok := v.(interface{ Raw(interface{}) error }); ok {
+		var raw interface{}
+		if err := key.Raw(&raw); err != nil {
+			return "", nil, fmt.Errorf(`failed to get raw key out of %T: %w`, key, err)
+		}
+
+		v = raw
+	}
+
+	// Try to convert it into a certificate
+	switch v := v.(type) {
+	case *rsa.PrivateKey:
+		return pmRSAPrivateKey, x509.MarshalPKCS1PrivateKey(v), nil
+	case *ecdsa.PrivateKey:
+		marshaled, err := x509.MarshalECPrivateKey(v)
+		if err != nil {
+			return "", nil, err
+		}
+		return pmECPrivateKey, marshaled, nil
+	case ed25519.PrivateKey:
+		marshaled, err := x509.MarshalPKCS8PrivateKey(v)
+		if err != nil {
+			return "", nil, err
+		}
+		return pmPrivateKey, marshaled, nil
+	case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey:
+		marshaled, err := x509.MarshalPKIXPublicKey(v)
+		if err != nil {
+			return "", nil, err
+		}
+		return pmPublicKey, marshaled, nil
+	default:
+		return "", nil, fmt.Errorf(`unsupported type %T for ASN.1 DER encoding`, v)
+	}
+}
+
+// EncodePEM encodes the key into a PEM encoded ASN.1 DER format.
+// The key can be a jwk.Key or a raw key instance, but it must be one of
+// the types supported by `x509` package.
+//
+// Internally, it uses the same routine as `jwk.EncodeX509()`, and therefore
+// the same caveats apply
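+//
+// A minimal sketch (illustrative only; the file name is a placeholder):
+//
+//	pemBytes, err := jwk.EncodePEM(key)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = os.WriteFile("key.pem", pemBytes, 0o600)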
+func EncodePEM(v interface{}) ([]byte, error) {
+	typ, marshaled, err := EncodeX509(v)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to encode key in x509: %w`, err)
+	}
+
+	block := &pem.Block{
+		Type:  typ,
+		Bytes: marshaled,
+	}
+	return pem.EncodeToMemory(block), nil
+}
+
+// DecodePEM decodes a key in PEM encoded ASN.1 DER format
+// and returns a raw key.
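+//
+// A minimal sketch (illustrative only; `pemBytes` is an assumed input):
+//
+//	raw, _, err := jwk.DecodePEM(pemBytes)
+//	if err != nil {
+//		// handle error
+//	}
+//	key, err := jwk.FromRaw(raw) // wrap as a jwk.Key, if needed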
+func DecodePEM(src []byte) (interface{}, []byte, error) {
+	block, rest := pem.Decode(src)
+	if block == nil {
+		return nil, nil, fmt.Errorf(`failed to decode PEM data`)
+	}
+
+	switch block.Type {
+	// Handle the semi-obvious cases
+	case pmRSAPrivateKey:
+		key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+		if err != nil {
+			return nil, nil, fmt.Errorf(`failed to parse PKCS1 private key: %w`, err)
+		}
+		return key, rest, nil
+	case pmRSAPublicKey:
+		key, err := x509.ParsePKCS1PublicKey(block.Bytes)
+		if err != nil {
+			return nil, nil, fmt.Errorf(`failed to parse PKCS1 public key: %w`, err)
+		}
+		return key, rest, nil
+	case pmECPrivateKey:
+		key, err := x509.ParseECPrivateKey(block.Bytes)
+		if err != nil {
+			return nil, nil, fmt.Errorf(`failed to parse EC private key: %w`, err)
+		}
+		return key, rest, nil
+	case pmPublicKey:
+		// XXX *could* return dsa.PublicKey
+		key, err := x509.ParsePKIXPublicKey(block.Bytes)
+		if err != nil {
+			return nil, nil, fmt.Errorf(`failed to parse PKIX public key: %w`, err)
+		}
+		return key, rest, nil
+	case pmPrivateKey:
+		key, err := x509.ParsePKCS8PrivateKey(block.Bytes)
+		if err != nil {
+			return nil, nil, fmt.Errorf(`failed to parse PKCS8 private key: %w`, err)
+		}
+		return key, rest, nil
+	case "CERTIFICATE":
+		cert, err := x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return nil, nil, fmt.Errorf(`failed to parse certificate: %w`, err)
+		}
+		return cert.PublicKey, rest, nil
+	default:
+		return nil, nil, fmt.Errorf(`invalid PEM block type %s`, block.Type)
+	}
+}
+
+// ParseRawKey is a combination of ParseKey and Raw. It parses a single JWK key,
+// and assigns the "raw" key to the given parameter. The key must either be
+// a pointer to an empty interface, or a pointer to the actual raw key type
+// such as *rsa.PrivateKey, *ecdsa.PublicKey, *[]byte, etc.
+func ParseRawKey(data []byte, rawkey interface{}) error {
+	key, err := ParseKey(data)
+	if err != nil {
+		return fmt.Errorf(`failed to parse key: %w`, err)
+	}
+
+	if err := key.Raw(rawkey); err != nil {
+		return fmt.Errorf(`failed to assign to raw key variable: %w`, err)
+	}
+
+	return nil
+}
+
+type setDecodeCtx struct {
+	json.DecodeCtx
+	ignoreParseError bool
+}
+
+func (ctx *setDecodeCtx) IgnoreParseError() bool {
+	return ctx.ignoreParseError
+}
+
+// ParseKey parses a single key JWK. Unlike `jwk.Parse` this method will
+// report failure if you attempt to pass a JWK set. Only use this function
+// when you know that the data is a single JWK.
+//
+// Given a WithPEM(true) option, this function assumes that the given input
+// is a PEM encoded ASN.1 DER format key.
+//
+// Note that a successful parsing of any type of key does NOT necessarily
+// guarantee a valid key. For example, no checks are performed against
+// certificate expiration dates, missing parameters, etc.
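+//
+// A minimal sketch (illustrative only; `jsonBytes` and `pemBytes` are
+// assumed inputs):
+//
+//	key, err := jwk.ParseKey(jsonBytes)
+//	if err != nil {
+//		// handle error
+//	}
+//
+//	// or, for a PEM encoded ASN.1 DER key:
+//	key, err = jwk.ParseKey(pemBytes, jwk.WithPEM(true))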
+func ParseKey(data []byte, options ...ParseOption) (Key, error) {
+	var parsePEM bool
+	var localReg *json.Registry
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identPEM{}:
+			parsePEM = option.Value().(bool)
+		case identLocalRegistry{}:
+			// in reality you can only pass either withLocalRegistry or
+			// WithTypedField, but since withLocalRegistry is used only by us,
+			// we skip checking
+			localReg = option.Value().(*json.Registry)
+		case identTypedField{}:
+			pair := option.Value().(typedFieldPair)
+			if localReg == nil {
+				localReg = json.NewRegistry()
+			}
+			localReg.Register(pair.Name, pair.Value)
+		case identIgnoreParseError{}:
+			return nil, fmt.Errorf(`jwk.WithIgnoreParseError() cannot be used for ParseKey()`)
+		}
+	}
+
+	if parsePEM {
+		raw, _, err := DecodePEM(data)
+		if err != nil {
+			return nil, fmt.Errorf(`failed to parse PEM encoded key: %w`, err)
+		}
+		return FromRaw(raw)
+	}
+
+	var hint struct {
+		Kty string          `json:"kty"`
+		D   json.RawMessage `json:"d"`
+	}
+
+	if err := json.Unmarshal(data, &hint); err != nil {
+		return nil, fmt.Errorf(`failed to unmarshal JSON into key hint: %w`, err)
+	}
+
+	var key Key
+	switch jwa.KeyType(hint.Kty) {
+	case jwa.RSA:
+		if len(hint.D) > 0 {
+			key = newRSAPrivateKey()
+		} else {
+			key = newRSAPublicKey()
+		}
+	case jwa.EC:
+		if len(hint.D) > 0 {
+			key = newECDSAPrivateKey()
+		} else {
+			key = newECDSAPublicKey()
+		}
+	case jwa.OctetSeq:
+		key = newSymmetricKey()
+	case jwa.OKP:
+		if len(hint.D) > 0 {
+			key = newOKPPrivateKey()
+		} else {
+			key = newOKPPublicKey()
+		}
+	default:
+		return nil, fmt.Errorf(`invalid key type from JSON (%s)`, hint.Kty)
+	}
+
+	if localReg != nil {
+		dcKey, ok := key.(json.DecodeCtxContainer)
+		if !ok {
+			return nil, fmt.Errorf(`typed field was requested, but the key (%T) does not support DecodeCtx`, key)
+		}
+		dc := json.NewDecodeCtx(localReg)
+		dcKey.SetDecodeCtx(dc)
+		defer func() { dcKey.SetDecodeCtx(nil) }()
+	}
+
+	if err := json.Unmarshal(data, key); err != nil {
+		return nil, fmt.Errorf(`failed to unmarshal JSON into key (%T): %w`, key, err)
+	}
+
+	return key, nil
+}
+
+// Parse parses JWK from the incoming []byte.
+//
+// For JWK sets, this is a convenience function. You could just as well
+// call `json.Unmarshal` against an empty set created by `jwk.NewSet()`
+// to parse a JSON buffer into a `jwk.Set`.
+//
+// This function exists because many times the user does not know beforehand
+// if a JWK resource at a remote location contains a single JWK key or
+// a JWK set, and `jwk.Parse()` can handle either case, returning a JWK set
+// even if the data only contains a single JWK key.
+//
+// If you are looking for more information on how JWKs are parsed, or if
+// you know for sure that you have a single key, please see the documentation
+// for `jwk.ParseKey()`.
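+//
+// A minimal sketch (illustrative only; `buf` is an assumed input that may
+// hold either a single JWK or a JWK set):
+//
+//	set, err := jwk.Parse(buf)
+//	if err != nil {
+//		// handle error
+//	}
+//	for i := 0; i < set.Len(); i++ {
+//		key, _ := set.Key(i)
+//		// use key
+//	}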
+func Parse(src []byte, options ...ParseOption) (Set, error) {
+	var parsePEM bool
+	var localReg *json.Registry
+	var ignoreParseError bool
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identPEM{}:
+			parsePEM = option.Value().(bool)
+		case identIgnoreParseError{}:
+			ignoreParseError = option.Value().(bool)
+		case identTypedField{}:
+			pair := option.Value().(typedFieldPair)
+			if localReg == nil {
+				localReg = json.NewRegistry()
+			}
+			localReg.Register(pair.Name, pair.Value)
+		}
+	}
+
+	s := NewSet()
+
+	if parsePEM {
+		src = bytes.TrimSpace(src)
+		for len(src) > 0 {
+			raw, rest, err := DecodePEM(src)
+			if err != nil {
+				return nil, fmt.Errorf(`failed to parse PEM encoded key: %w`, err)
+			}
+			key, err := FromRaw(raw)
+			if err != nil {
+				return nil, fmt.Errorf(`failed to create jwk.Key from %T: %w`, raw, err)
+			}
+			if err := s.AddKey(key); err != nil {
+				return nil, fmt.Errorf(`failed to add jwk.Key to set: %w`, err)
+			}
+			src = bytes.TrimSpace(rest)
+		}
+		return s, nil
+	}
+
+	if localReg != nil || ignoreParseError {
+		dcKs, ok := s.(KeyWithDecodeCtx)
+		if !ok {
+			return nil, fmt.Errorf(`typed field was requested, but the key set (%T) does not support DecodeCtx`, s)
+		}
+		dc := &setDecodeCtx{
+			DecodeCtx:        json.NewDecodeCtx(localReg),
+			ignoreParseError: ignoreParseError,
+		}
+		dcKs.SetDecodeCtx(dc)
+		defer func() { dcKs.SetDecodeCtx(nil) }()
+	}
+
+	if err := json.Unmarshal(src, s); err != nil {
+		return nil, fmt.Errorf(`failed to unmarshal JWK set: %w`, err)
+	}
+
+	return s, nil
+}
+
+// ParseReader parses a JWK set from the incoming byte buffer.
+func ParseReader(src io.Reader, options ...ParseOption) (Set, error) {
+	// meh, there's no way to tell where a single JWK or JWK set "ends"
+	// in a stream except by reading until EOF, so just... ReadAll
+	buf, err := io.ReadAll(src)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to read from io.Reader: %w`, err)
+	}
+
+	return Parse(buf, options...)
+}
+
+// ParseString parses a JWK set from the incoming string.
+func ParseString(s string, options ...ParseOption) (Set, error) {
+	return Parse([]byte(s), options...)
+}
+
+// AssignKeyID is a convenience function to automatically assign the "kid"
+// field of the key, if it doesn't already have one. It uses the Key.Thumbprint
+// method with crypto.SHA256 as the default hashing algorithm.
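+//
+// A minimal sketch (illustrative only):
+//
+//	if err := jwk.AssignKeyID(key); err != nil {
+//		// handle error
+//	}
+//	kid := key.KeyID() // thumbprint-based key ID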
+func AssignKeyID(key Key, options ...AssignKeyIDOption) error {
+	if _, ok := key.Get(KeyIDKey); ok {
+		return nil
+	}
+
+	hash := crypto.SHA256
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identThumbprintHash{}:
+			hash = option.Value().(crypto.Hash)
+		}
+	}
+
+	h, err := key.Thumbprint(hash)
+	if err != nil {
+		return fmt.Errorf(`failed to generate thumbprint: %w`, err)
+	}
+
+	if err := key.Set(KeyIDKey, base64.EncodeToString(h)); err != nil {
+		return fmt.Errorf(`failed to set "kid": %w`, err)
+	}
+
+	return nil
+}
+
+func cloneKey(src Key) (Key, error) {
+	var dst Key
+	switch src.(type) {
+	case RSAPrivateKey:
+		dst = newRSAPrivateKey()
+	case RSAPublicKey:
+		dst = newRSAPublicKey()
+	case ECDSAPrivateKey:
+		dst = newECDSAPrivateKey()
+	case ECDSAPublicKey:
+		dst = newECDSAPublicKey()
+	case OKPPrivateKey:
+		dst = newOKPPrivateKey()
+	case OKPPublicKey:
+		dst = newOKPPublicKey()
+	case SymmetricKey:
+		dst = newSymmetricKey()
+	default:
+		return nil, fmt.Errorf(`unknown key type %T`, src)
+	}
+
+	for _, pair := range src.makePairs() {
+		//nolint:forcetypeassert
+		key := pair.Key.(string)
+		if err := dst.Set(key, pair.Value); err != nil {
+			return nil, fmt.Errorf(`failed to set %q: %w`, key, err)
+		}
+	}
+	return dst, nil
+}
+
+// Pem serializes the given jwk.Key in PEM encoded ASN.1 DER format,
+// using PKCS8 for private keys and PKIX for public keys.
+// If you need to encode using PKCS1 or SEC1, you must do it yourself.
+//
+// # Argument must be of type jwk.Key or jwk.Set
+//
+// Currently only EC (including Ed25519) and RSA keys (and jwk.Set
+// objects comprised of these key types) are supported.
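+//
+// A minimal sketch (illustrative only; `set` is an assumed jwk.Set):
+//
+//	pemBytes, err := jwk.Pem(set)
+//	if err != nil {
+//		// handle error
+//	}
+//	// pemBytes holds one PEM block per key in the set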
+func Pem(v interface{}) ([]byte, error) {
+	var set Set
+	switch v := v.(type) {
+	case Key:
+		set = NewSet()
+		if err := set.AddKey(v); err != nil {
+			return nil, fmt.Errorf(`failed to add key to set: %w`, err)
+		}
+	case Set:
+		set = v
+	default:
+		return nil, fmt.Errorf(`argument to Pem must be either jwk.Key or jwk.Set: %T`, v)
+	}
+
+	var ret []byte
+	for i := 0; i < set.Len(); i++ {
+		key, _ := set.Key(i)
+		typ, buf, err := asnEncode(key)
+		if err != nil {
+			return nil, fmt.Errorf(`failed to encode content for key #%d: %w`, i, err)
+		}
+
+		var block pem.Block
+		block.Type = typ
+		block.Bytes = buf
+		ret = append(ret, pem.EncodeToMemory(&block)...)
+	}
+	return ret, nil
+}
+
+func asnEncode(key Key) (string, []byte, error) {
+	switch key := key.(type) {
+	case RSAPrivateKey, ECDSAPrivateKey, OKPPrivateKey:
+		var rawkey interface{}
+		if err := key.Raw(&rawkey); err != nil {
+			return "", nil, fmt.Errorf(`failed to get raw key from jwk.Key: %w`, err)
+		}
+		buf, err := x509.MarshalPKCS8PrivateKey(rawkey)
+		if err != nil {
+			return "", nil, fmt.Errorf(`failed to marshal PKCS8: %w`, err)
+		}
+		return pmPrivateKey, buf, nil
+	case RSAPublicKey, ECDSAPublicKey, OKPPublicKey:
+		var rawkey interface{}
+		if err := key.Raw(&rawkey); err != nil {
+			return "", nil, fmt.Errorf(`failed to get raw key from jwk.Key: %w`, err)
+		}
+		buf, err := x509.MarshalPKIXPublicKey(rawkey)
+		if err != nil {
+			return "", nil, fmt.Errorf(`failed to marshal PKIX: %w`, err)
+		}
+		return pmPublicKey, buf, nil
+	default:
+		return "", nil, fmt.Errorf(`unsupported key type %T`, key)
+	}
+}
+
+// RegisterCustomField allows users to specify that a private field
+// be decoded as an instance of the specified type. This option has
+// a global effect.
+//
+// For example, suppose you have a custom field `x-birthday`, which
+// you want to represent as a string formatted in RFC3339 in JSON,
+// but want it back as `time.Time`.
+//
+// In that case you would register a custom field as follows
+//
+//	jwk.RegisterCustomField(`x-birthday`, timeT)
+//
+// Then `key.Get("x-birthday")` will still return an `interface{}`,
+// but you can convert its type to `time.Time`
+//
+//	bdayif, _ := key.Get(`x-birthday`)
+//	bday := bdayif.(time.Time)
+func RegisterCustomField(name string, object interface{}) {
+	registry.Register(name, object)
+}
+
+func AvailableCurves() []elliptic.Curve {
+	return ecutil.AvailableCurves()
+}
+
+func CurveForAlgorithm(alg jwa.EllipticCurveAlgorithm) (elliptic.Curve, bool) {
+	return ecutil.CurveForAlgorithm(alg)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/key_ops.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/key_ops.go
new file mode 100644
index 0000000000..26fc2f28c8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/key_ops.go
@@ -0,0 +1,58 @@
+package jwk
+
+import "fmt"
+
+func (ops *KeyOperationList) Get() KeyOperationList {
+	if ops == nil {
+		return nil
+	}
+	return *ops
+}
+
+func (ops *KeyOperationList) Accept(v interface{}) error {
+	switch x := v.(type) {
+	case string:
+		return ops.Accept([]string{x})
+	case []interface{}:
+		l := make([]string, len(x))
+		for i, e := range x {
+			if es, ok := e.(string); ok {
+				l[i] = es
+			} else {
+				return fmt.Errorf(`invalid list element type: expected string, got %T`, v)
+			}
+		}
+		return ops.Accept(l)
+	case []string:
+		list := make(KeyOperationList, len(x))
+		for i, e := range x {
+			switch e := KeyOperation(e); e {
+			case KeyOpSign, KeyOpVerify, KeyOpEncrypt, KeyOpDecrypt, KeyOpWrapKey, KeyOpUnwrapKey, KeyOpDeriveKey, KeyOpDeriveBits:
+				list[i] = e
+			default:
+				return fmt.Errorf(`invalid keyoperation %v`, e)
+			}
+		}
+
+		*ops = list
+		return nil
+	case []KeyOperation:
+		list := make(KeyOperationList, len(x))
+		for i, e := range x {
+			switch e {
+			case KeyOpSign, KeyOpVerify, KeyOpEncrypt, KeyOpDecrypt, KeyOpWrapKey, KeyOpUnwrapKey, KeyOpDeriveKey, KeyOpDeriveBits:
+				list[i] = e
+			default:
+				return fmt.Errorf(`invalid keyoperation %v`, e)
+			}
+		}
+
+		*ops = list
+		return nil
+	case KeyOperationList:
+		*ops = x
+		return nil
+	default:
+		return fmt.Errorf(`invalid value %T`, v)
+	}
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/okp.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/okp.go
new file mode 100644
index 0000000000..2686ba516d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/okp.go
@@ -0,0 +1,183 @@
+package jwk
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ed25519"
+	"fmt"
+
+	"github.com/lestrrat-go/blackmagic"
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+	"github.com/lestrrat-go/jwx/v2/x25519"
+)
+
+func (k *okpPublicKey) FromRaw(rawKeyIf interface{}) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	var crv jwa.EllipticCurveAlgorithm
+	switch rawKey := rawKeyIf.(type) {
+	case ed25519.PublicKey:
+		k.x = rawKey
+		crv = jwa.Ed25519
+		k.crv = &crv
+	case x25519.PublicKey:
+		k.x = rawKey
+		crv = jwa.X25519
+		k.crv = &crv
+	default:
+		return fmt.Errorf(`unknown key type %T`, rawKeyIf)
+	}
+
+	return nil
+}
+
+func (k *okpPrivateKey) FromRaw(rawKeyIf interface{}) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	var crv jwa.EllipticCurveAlgorithm
+	switch rawKey := rawKeyIf.(type) {
+	case ed25519.PrivateKey:
+		k.d = rawKey.Seed()
+		k.x = rawKey.Public().(ed25519.PublicKey) //nolint:forcetypeassert
+		crv = jwa.Ed25519
+		k.crv = &crv
+	case x25519.PrivateKey:
+		k.d = rawKey.Seed()
+		k.x = rawKey.Public().(x25519.PublicKey) //nolint:forcetypeassert
+		crv = jwa.X25519
+		k.crv = &crv
+	default:
+		return fmt.Errorf(`unknown key type %T`, rawKeyIf)
+	}
+
+	return nil
+}
+
+func buildOKPPublicKey(alg jwa.EllipticCurveAlgorithm, xbuf []byte) (interface{}, error) {
+	switch alg {
+	case jwa.Ed25519:
+		return ed25519.PublicKey(xbuf), nil
+	case jwa.X25519:
+		return x25519.PublicKey(xbuf), nil
+	default:
+		return nil, fmt.Errorf(`invalid curve algorithm %s`, alg)
+	}
+}
+
+// Raw returns the OKP public key represented by this JWK
+func (k *okpPublicKey) Raw(v interface{}) error {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	pubk, err := buildOKPPublicKey(k.Crv(), k.x)
+	if err != nil {
+		return fmt.Errorf(`failed to build public key: %w`, err)
+	}
+
+	return blackmagic.AssignIfCompatible(v, pubk)
+}
+
+func buildOKPPrivateKey(alg jwa.EllipticCurveAlgorithm, xbuf []byte, dbuf []byte) (interface{}, error) {
+	switch alg {
+	case jwa.Ed25519:
+		ret := ed25519.NewKeyFromSeed(dbuf)
+		//nolint:forcetypeassert
+		if !bytes.Equal(xbuf, ret.Public().(ed25519.PublicKey)) {
+			return nil, fmt.Errorf(`invalid x value given d value`)
+		}
+		return ret, nil
+	case jwa.X25519:
+		ret, err := x25519.NewKeyFromSeed(dbuf)
+		if err != nil {
+			return nil, fmt.Errorf(`unable to construct x25519 private key from seed: %w`, err)
+		}
+		//nolint:forcetypeassert
+		if !bytes.Equal(xbuf, ret.Public().(x25519.PublicKey)) {
+			return nil, fmt.Errorf(`invalid x value given d value`)
+		}
+		return ret, nil
+	default:
+		return nil, fmt.Errorf(`invalid curve algorithm %s`, alg)
+	}
+}
+
+func (k *okpPrivateKey) Raw(v interface{}) error {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	privk, err := buildOKPPrivateKey(k.Crv(), k.x, k.d)
+	if err != nil {
+		return fmt.Errorf(`failed to build private key: %w`, err)
+	}
+
+	return blackmagic.AssignIfCompatible(v, privk)
+}
+
+func makeOKPPublicKey(v interface {
+	makePairs() []*HeaderPair
+}) (Key, error) {
+	newKey := newOKPPublicKey()
+
+	// Iterate and copy everything except for the bits that should not be in the public key
+	for _, pair := range v.makePairs() {
+		switch pair.Key {
+		case OKPDKey:
+			continue
+		default:
+			//nolint:forcetypeassert
+			key := pair.Key.(string)
+			if err := newKey.Set(key, pair.Value); err != nil {
+				return nil, fmt.Errorf(`failed to set field %q: %w`, key, err)
+			}
+		}
+	}
+
+	return newKey, nil
+}
+
+func (k *okpPrivateKey) PublicKey() (Key, error) {
+	return makeOKPPublicKey(k)
+}
+
+func (k *okpPublicKey) PublicKey() (Key, error) {
+	return makeOKPPublicKey(k)
+}
+
+func okpThumbprint(hash crypto.Hash, crv, x string) []byte {
+	h := hash.New()
+	fmt.Fprint(h, `{"crv":"`)
+	fmt.Fprint(h, crv)
+	fmt.Fprint(h, `","kty":"OKP","x":"`)
+	fmt.Fprint(h, x)
+	fmt.Fprint(h, `"}`)
+	return h.Sum(nil)
+}
+
+// Thumbprint returns the JWK thumbprint using the indicated
+// hashing algorithm, according to RFC 7638 / 8037
+func (k okpPublicKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	return okpThumbprint(
+		hash,
+		k.Crv().String(),
+		base64.EncodeToString(k.x),
+	), nil
+}
+
+// Thumbprint returns the JWK thumbprint using the indicated
+// hashing algorithm, according to RFC 7638 / 8037
+func (k okpPrivateKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	return okpThumbprint(
+		hash,
+		k.Crv().String(),
+		base64.EncodeToString(k.x),
+	), nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/okp_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/okp_gen.go
new file mode 100644
index 0000000000..832dc912d9
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/okp_gen.go
@@ -0,0 +1,1119 @@
+// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"sort"
+	"sync"
+
+	"github.com/lestrrat-go/iter/mapiter"
+	"github.com/lestrrat-go/jwx/v2/cert"
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+	"github.com/lestrrat-go/jwx/v2/internal/iter"
+	"github.com/lestrrat-go/jwx/v2/internal/json"
+	"github.com/lestrrat-go/jwx/v2/internal/pool"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+const (
+	OKPCrvKey = "crv"
+	OKPDKey   = "d"
+	OKPXKey   = "x"
+)
+
+type OKPPublicKey interface {
+	Key
+	FromRaw(interface{}) error
+	Crv() jwa.EllipticCurveAlgorithm
+	X() []byte
+}
+
+type okpPublicKey struct {
+	algorithm              *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+	crv                    *jwa.EllipticCurveAlgorithm
+	keyID                  *string           // https://tools.ietf.org/html/rfc7515#section-4.1.4
+	keyOps                 *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+	keyUsage               *string           // https://tools.ietf.org/html/rfc7517#section-4.2
+	x                      []byte
+	x509CertChain          *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+	x509CertThumbprint     *string     // https://tools.ietf.org/html/rfc7515#section-4.1.7
+	x509CertThumbprintS256 *string     // https://tools.ietf.org/html/rfc7515#section-4.1.8
+	x509URL                *string     // https://tools.ietf.org/html/rfc7515#section-4.1.5
+	privateParams          map[string]interface{}
+	mu                     *sync.RWMutex
+	dc                     json.DecodeCtx
+}
+
+var _ OKPPublicKey = &okpPublicKey{}
+var _ Key = &okpPublicKey{}
+
+func newOKPPublicKey() *okpPublicKey {
+	return &okpPublicKey{
+		mu:            &sync.RWMutex{},
+		privateParams: make(map[string]interface{}),
+	}
+}
+
+func (h okpPublicKey) KeyType() jwa.KeyType {
+	return jwa.OKP
+}
+
+func (h *okpPublicKey) Algorithm() jwa.KeyAlgorithm {
+	if h.algorithm != nil {
+		return *(h.algorithm)
+	}
+	return jwa.InvalidKeyAlgorithm("")
+}
+
+func (h *okpPublicKey) Crv() jwa.EllipticCurveAlgorithm {
+	if h.crv != nil {
+		return *(h.crv)
+	}
+	return jwa.InvalidEllipticCurve
+}
+
+func (h *okpPublicKey) KeyID() string {
+	if h.keyID != nil {
+		return *(h.keyID)
+	}
+	return ""
+}
+
+func (h *okpPublicKey) KeyOps() KeyOperationList {
+	if h.keyOps != nil {
+		return *(h.keyOps)
+	}
+	return nil
+}
+
+func (h *okpPublicKey) KeyUsage() string {
+	if h.keyUsage != nil {
+		return *(h.keyUsage)
+	}
+	return ""
+}
+
+func (h *okpPublicKey) X() []byte {
+	return h.x
+}
+
+func (h *okpPublicKey) X509CertChain() *cert.Chain {
+	return h.x509CertChain
+}
+
+func (h *okpPublicKey) X509CertThumbprint() string {
+	if h.x509CertThumbprint != nil {
+		return *(h.x509CertThumbprint)
+	}
+	return ""
+}
+
+func (h *okpPublicKey) X509CertThumbprintS256() string {
+	if h.x509CertThumbprintS256 != nil {
+		return *(h.x509CertThumbprintS256)
+	}
+	return ""
+}
+
+func (h *okpPublicKey) X509URL() string {
+	if h.x509URL != nil {
+		return *(h.x509URL)
+	}
+	return ""
+}
+
+func (h *okpPublicKey) makePairs() []*HeaderPair {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+
+	var pairs []*HeaderPair
+	pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.OKP})
+	if h.algorithm != nil {
+		pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)})
+	}
+	if h.crv != nil {
+		pairs = append(pairs, &HeaderPair{Key: OKPCrvKey, Value: *(h.crv)})
+	}
+	if h.keyID != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)})
+	}
+	if h.keyOps != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)})
+	}
+	if h.keyUsage != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)})
+	}
+	if h.x != nil {
+		pairs = append(pairs, &HeaderPair{Key: OKPXKey, Value: h.x})
+	}
+	if h.x509CertChain != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain})
+	}
+	if h.x509CertThumbprint != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)})
+	}
+	if h.x509CertThumbprintS256 != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)})
+	}
+	if h.x509URL != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)})
+	}
+	for k, v := range h.privateParams {
+		pairs = append(pairs, &HeaderPair{Key: k, Value: v})
+	}
+	return pairs
+}
+
+func (h *okpPublicKey) PrivateParams() map[string]interface{} {
+	return h.privateParams
+}
+
+func (h *okpPublicKey) Get(name string) (interface{}, bool) {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	switch name {
+	case KeyTypeKey:
+		return h.KeyType(), true
+	case AlgorithmKey:
+		if h.algorithm == nil {
+			return nil, false
+		}
+		return *(h.algorithm), true
+	case OKPCrvKey:
+		if h.crv == nil {
+			return nil, false
+		}
+		return *(h.crv), true
+	case KeyIDKey:
+		if h.keyID == nil {
+			return nil, false
+		}
+		return *(h.keyID), true
+	case KeyOpsKey:
+		if h.keyOps == nil {
+			return nil, false
+		}
+		return *(h.keyOps), true
+	case KeyUsageKey:
+		if h.keyUsage == nil {
+			return nil, false
+		}
+		return *(h.keyUsage), true
+	case OKPXKey:
+		if h.x == nil {
+			return nil, false
+		}
+		return h.x, true
+	case X509CertChainKey:
+		if h.x509CertChain == nil {
+			return nil, false
+		}
+		return h.x509CertChain, true
+	case X509CertThumbprintKey:
+		if h.x509CertThumbprint == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprint), true
+	case X509CertThumbprintS256Key:
+		if h.x509CertThumbprintS256 == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprintS256), true
+	case X509URLKey:
+		if h.x509URL == nil {
+			return nil, false
+		}
+		return *(h.x509URL), true
+	default:
+		v, ok := h.privateParams[name]
+		return v, ok
+	}
+}
+
+func (h *okpPublicKey) Set(name string, value interface{}) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	return h.setNoLock(name, value)
+}
+
+func (h *okpPublicKey) setNoLock(name string, value interface{}) error {
+	switch name {
+	case "kty":
+		return nil
+	case AlgorithmKey:
+		switch v := value.(type) {
+		case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm:
+			var tmp = jwa.KeyAlgorithmFrom(v)
+			h.algorithm = &tmp
+		case fmt.Stringer:
+			s := v.String()
+			var tmp = jwa.KeyAlgorithmFrom(s)
+			h.algorithm = &tmp
+		default:
+			return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value)
+		}
+		return nil
+	case OKPCrvKey:
+		if v, ok := value.(jwa.EllipticCurveAlgorithm); ok {
+			h.crv = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, OKPCrvKey, value)
+	case KeyIDKey:
+		if v, ok := value.(string); ok {
+			h.keyID = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+	case KeyOpsKey:
+		var acceptor KeyOperationList
+		if err := acceptor.Accept(value); err != nil {
+			return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+		}
+		h.keyOps = &acceptor
+		return nil
+	case KeyUsageKey:
+		switch v := value.(type) {
+		case KeyUsageType:
+			switch v {
+			case ForSignature, ForEncryption:
+				tmp := v.String()
+				h.keyUsage = &tmp
+			default:
+				return fmt.Errorf(`invalid key usage type %s`, v)
+			}
+		case string:
+			h.keyUsage = &v
+		default:
+			return fmt.Errorf(`invalid key usage type %s`, v)
+		}
+	case OKPXKey:
+		if v, ok := value.([]byte); ok {
+			h.x = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, OKPXKey, value)
+	case X509CertChainKey:
+		if v, ok := value.(*cert.Chain); ok {
+			h.x509CertChain = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+	case X509CertThumbprintKey:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprint = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+	case X509CertThumbprintS256Key:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprintS256 = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+	case X509URLKey:
+		if v, ok := value.(string); ok {
+			h.x509URL = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+	default:
+		if h.privateParams == nil {
+			h.privateParams = map[string]interface{}{}
+		}
+		h.privateParams[name] = value
+	}
+	return nil
+}
+
+func (k *okpPublicKey) Remove(key string) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	switch key {
+	case AlgorithmKey:
+		k.algorithm = nil
+	case OKPCrvKey:
+		k.crv = nil
+	case KeyIDKey:
+		k.keyID = nil
+	case KeyOpsKey:
+		k.keyOps = nil
+	case KeyUsageKey:
+		k.keyUsage = nil
+	case OKPXKey:
+		k.x = nil
+	case X509CertChainKey:
+		k.x509CertChain = nil
+	case X509CertThumbprintKey:
+		k.x509CertThumbprint = nil
+	case X509CertThumbprintS256Key:
+		k.x509CertThumbprintS256 = nil
+	case X509URLKey:
+		k.x509URL = nil
+	default:
+		delete(k.privateParams, key)
+	}
+	return nil
+}
+
+func (k *okpPublicKey) Clone() (Key, error) {
+	return cloneKey(k)
+}
+
+func (k *okpPublicKey) DecodeCtx() json.DecodeCtx {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+	return k.dc
+}
+
+func (k *okpPublicKey) SetDecodeCtx(dc json.DecodeCtx) {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	k.dc = dc
+}
+
+func (h *okpPublicKey) UnmarshalJSON(buf []byte) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.algorithm = nil
+	h.crv = nil
+	h.keyID = nil
+	h.keyOps = nil
+	h.keyUsage = nil
+	h.x = nil
+	h.x509CertChain = nil
+	h.x509CertThumbprint = nil
+	h.x509CertThumbprintS256 = nil
+	h.x509URL = nil
+	dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+	for {
+		tok, err := dec.Token()
+		if err != nil {
+			return fmt.Errorf(`error reading token: %w`, err)
+		}
+		switch tok := tok.(type) {
+		case json.Delim:
+			// Assuming we're doing everything correctly, we should ONLY
+			// get either '{' or '}' here.
+			if tok == '}' { // End of object
+				break LOOP
+			} else if tok != '{' {
+				return fmt.Errorf(`expected '{', but got '%c'`, tok)
+			}
+		case string: // Objects can only have string keys
+			switch tok {
+			case KeyTypeKey:
+				val, err := json.ReadNextStringToken(dec)
+				if err != nil {
+					return fmt.Errorf(`error reading token: %w`, err)
+				}
+				if val != jwa.OKP.String() {
+					return fmt.Errorf(`invalid kty value for OKPPublicKey (%s)`, val)
+				}
+			case AlgorithmKey:
+				var s string
+				if err := dec.Decode(&s); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+				}
+				alg := jwa.KeyAlgorithmFrom(s)
+				h.algorithm = &alg
+			case OKPCrvKey:
+				var decoded jwa.EllipticCurveAlgorithm
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, OKPCrvKey, err)
+				}
+				h.crv = &decoded
+			case KeyIDKey:
+				if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+				}
+			case KeyOpsKey:
+				var decoded KeyOperationList
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+				}
+				h.keyOps = &decoded
+			case KeyUsageKey:
+				if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+				}
+			case OKPXKey:
+				if err := json.AssignNextBytesToken(&h.x, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, OKPXKey, err)
+				}
+			case X509CertChainKey:
+				var decoded cert.Chain
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+				}
+				h.x509CertChain = &decoded
+			case X509CertThumbprintKey:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+				}
+			case X509CertThumbprintS256Key:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+				}
+			case X509URLKey:
+				if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+				}
+			default:
+				if dc := h.dc; dc != nil {
+					if localReg := dc.Registry(); localReg != nil {
+						decoded, err := localReg.Decode(dec, tok)
+						if err == nil {
+							h.setNoLock(tok, decoded)
+							continue
+						}
+					}
+				}
+				decoded, err := registry.Decode(dec, tok)
+				if err == nil {
+					h.setNoLock(tok, decoded)
+					continue
+				}
+				return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+			}
+		default:
+			return fmt.Errorf(`invalid token %T`, tok)
+		}
+	}
+	if h.crv == nil {
+		return fmt.Errorf(`required field crv is missing`)
+	}
+	if h.x == nil {
+		return fmt.Errorf(`required field x is missing`)
+	}
+	return nil
+}
+
+func (h okpPublicKey) MarshalJSON() ([]byte, error) {
+	data := make(map[string]interface{})
+	fields := make([]string, 0, 10)
+	for _, pair := range h.makePairs() {
+		fields = append(fields, pair.Key.(string))
+		data[pair.Key.(string)] = pair.Value
+	}
+
+	sort.Strings(fields)
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+	buf.WriteByte('{')
+	enc := json.NewEncoder(buf)
+	for i, f := range fields {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		buf.WriteRune('"')
+		buf.WriteString(f)
+		buf.WriteString(`":`)
+		v := data[f]
+		switch v := v.(type) {
+		case []byte:
+			buf.WriteRune('"')
+			buf.WriteString(base64.EncodeToString(v))
+			buf.WriteRune('"')
+		default:
+			if err := enc.Encode(v); err != nil {
+				return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+			}
+			buf.Truncate(buf.Len() - 1)
+		}
+	}
+	buf.WriteByte('}')
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+	return ret, nil
+}
+
+func (h *okpPublicKey) Iterate(ctx context.Context) HeaderIterator {
+	pairs := h.makePairs()
+	ch := make(chan *HeaderPair, len(pairs))
+	go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) {
+		defer close(ch)
+		for _, pair := range pairs {
+			select {
+			case <-ctx.Done():
+				return
+			case ch <- pair:
+			}
+		}
+	}(ctx, ch, pairs)
+	return mapiter.New(ch)
+}
+
+func (h *okpPublicKey) Walk(ctx context.Context, visitor HeaderVisitor) error {
+	return iter.WalkMap(ctx, h, visitor)
+}
+
+func (h *okpPublicKey) AsMap(ctx context.Context) (map[string]interface{}, error) {
+	return iter.AsMap(ctx, h)
+}
+
+type OKPPrivateKey interface {
+	Key
+	FromRaw(interface{}) error
+	Crv() jwa.EllipticCurveAlgorithm
+	D() []byte
+	X() []byte
+}
+
+type okpPrivateKey struct {
+	algorithm              *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+	crv                    *jwa.EllipticCurveAlgorithm
+	d                      []byte
+	keyID                  *string           // https://tools.ietf.org/html/rfc7515#section-4.1.4
+	keyOps                 *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+	keyUsage               *string           // https://tools.ietf.org/html/rfc7517#section-4.2
+	x                      []byte
+	x509CertChain          *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+	x509CertThumbprint     *string     // https://tools.ietf.org/html/rfc7515#section-4.1.7
+	x509CertThumbprintS256 *string     // https://tools.ietf.org/html/rfc7515#section-4.1.8
+	x509URL                *string     // https://tools.ietf.org/html/rfc7515#section-4.1.5
+	privateParams          map[string]interface{}
+	mu                     *sync.RWMutex
+	dc                     json.DecodeCtx
+}
+
+var _ OKPPrivateKey = &okpPrivateKey{}
+var _ Key = &okpPrivateKey{}
+
+func newOKPPrivateKey() *okpPrivateKey {
+	return &okpPrivateKey{
+		mu:            &sync.RWMutex{},
+		privateParams: make(map[string]interface{}),
+	}
+}
+
+func (h okpPrivateKey) KeyType() jwa.KeyType {
+	return jwa.OKP
+}
+
+func (h *okpPrivateKey) Algorithm() jwa.KeyAlgorithm {
+	if h.algorithm != nil {
+		return *(h.algorithm)
+	}
+	return jwa.InvalidKeyAlgorithm("")
+}
+
+func (h *okpPrivateKey) Crv() jwa.EllipticCurveAlgorithm {
+	if h.crv != nil {
+		return *(h.crv)
+	}
+	return jwa.InvalidEllipticCurve
+}
+
+func (h *okpPrivateKey) D() []byte {
+	return h.d
+}
+
+func (h *okpPrivateKey) KeyID() string {
+	if h.keyID != nil {
+		return *(h.keyID)
+	}
+	return ""
+}
+
+func (h *okpPrivateKey) KeyOps() KeyOperationList {
+	if h.keyOps != nil {
+		return *(h.keyOps)
+	}
+	return nil
+}
+
+func (h *okpPrivateKey) KeyUsage() string {
+	if h.keyUsage != nil {
+		return *(h.keyUsage)
+	}
+	return ""
+}
+
+func (h *okpPrivateKey) X() []byte {
+	return h.x
+}
+
+func (h *okpPrivateKey) X509CertChain() *cert.Chain {
+	return h.x509CertChain
+}
+
+func (h *okpPrivateKey) X509CertThumbprint() string {
+	if h.x509CertThumbprint != nil {
+		return *(h.x509CertThumbprint)
+	}
+	return ""
+}
+
+func (h *okpPrivateKey) X509CertThumbprintS256() string {
+	if h.x509CertThumbprintS256 != nil {
+		return *(h.x509CertThumbprintS256)
+	}
+	return ""
+}
+
+func (h *okpPrivateKey) X509URL() string {
+	if h.x509URL != nil {
+		return *(h.x509URL)
+	}
+	return ""
+}
+
+func (h *okpPrivateKey) makePairs() []*HeaderPair {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+
+	var pairs []*HeaderPair
+	pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.OKP})
+	if h.algorithm != nil {
+		pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)})
+	}
+	if h.crv != nil {
+		pairs = append(pairs, &HeaderPair{Key: OKPCrvKey, Value: *(h.crv)})
+	}
+	if h.d != nil {
+		pairs = append(pairs, &HeaderPair{Key: OKPDKey, Value: h.d})
+	}
+	if h.keyID != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)})
+	}
+	if h.keyOps != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)})
+	}
+	if h.keyUsage != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)})
+	}
+	if h.x != nil {
+		pairs = append(pairs, &HeaderPair{Key: OKPXKey, Value: h.x})
+	}
+	if h.x509CertChain != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain})
+	}
+	if h.x509CertThumbprint != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)})
+	}
+	if h.x509CertThumbprintS256 != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)})
+	}
+	if h.x509URL != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)})
+	}
+	for k, v := range h.privateParams {
+		pairs = append(pairs, &HeaderPair{Key: k, Value: v})
+	}
+	return pairs
+}
+
+func (h *okpPrivateKey) PrivateParams() map[string]interface{} {
+	return h.privateParams
+}
+
+func (h *okpPrivateKey) Get(name string) (interface{}, bool) {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	switch name {
+	case KeyTypeKey:
+		return h.KeyType(), true
+	case AlgorithmKey:
+		if h.algorithm == nil {
+			return nil, false
+		}
+		return *(h.algorithm), true
+	case OKPCrvKey:
+		if h.crv == nil {
+			return nil, false
+		}
+		return *(h.crv), true
+	case OKPDKey:
+		if h.d == nil {
+			return nil, false
+		}
+		return h.d, true
+	case KeyIDKey:
+		if h.keyID == nil {
+			return nil, false
+		}
+		return *(h.keyID), true
+	case KeyOpsKey:
+		if h.keyOps == nil {
+			return nil, false
+		}
+		return *(h.keyOps), true
+	case KeyUsageKey:
+		if h.keyUsage == nil {
+			return nil, false
+		}
+		return *(h.keyUsage), true
+	case OKPXKey:
+		if h.x == nil {
+			return nil, false
+		}
+		return h.x, true
+	case X509CertChainKey:
+		if h.x509CertChain == nil {
+			return nil, false
+		}
+		return h.x509CertChain, true
+	case X509CertThumbprintKey:
+		if h.x509CertThumbprint == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprint), true
+	case X509CertThumbprintS256Key:
+		if h.x509CertThumbprintS256 == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprintS256), true
+	case X509URLKey:
+		if h.x509URL == nil {
+			return nil, false
+		}
+		return *(h.x509URL), true
+	default:
+		v, ok := h.privateParams[name]
+		return v, ok
+	}
+}
+
+func (h *okpPrivateKey) Set(name string, value interface{}) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	return h.setNoLock(name, value)
+}
+
+func (h *okpPrivateKey) setNoLock(name string, value interface{}) error {
+	switch name {
+	case "kty":
+		return nil
+	case AlgorithmKey:
+		switch v := value.(type) {
+		case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm:
+			var tmp = jwa.KeyAlgorithmFrom(v)
+			h.algorithm = &tmp
+		case fmt.Stringer:
+			s := v.String()
+			var tmp = jwa.KeyAlgorithmFrom(s)
+			h.algorithm = &tmp
+		default:
+			return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value)
+		}
+		return nil
+	case OKPCrvKey:
+		if v, ok := value.(jwa.EllipticCurveAlgorithm); ok {
+			h.crv = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, OKPCrvKey, value)
+	case OKPDKey:
+		if v, ok := value.([]byte); ok {
+			h.d = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, OKPDKey, value)
+	case KeyIDKey:
+		if v, ok := value.(string); ok {
+			h.keyID = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+	case KeyOpsKey:
+		var acceptor KeyOperationList
+		if err := acceptor.Accept(value); err != nil {
+			return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+		}
+		h.keyOps = &acceptor
+		return nil
+	case KeyUsageKey:
+		switch v := value.(type) {
+		case KeyUsageType:
+			switch v {
+			case ForSignature, ForEncryption:
+				tmp := v.String()
+				h.keyUsage = &tmp
+			default:
+				return fmt.Errorf(`invalid key usage type %s`, v)
+			}
+		case string:
+			h.keyUsage = &v
+		default:
+			return fmt.Errorf(`invalid key usage type %s`, v)
+		}
+	case OKPXKey:
+		if v, ok := value.([]byte); ok {
+			h.x = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, OKPXKey, value)
+	case X509CertChainKey:
+		if v, ok := value.(*cert.Chain); ok {
+			h.x509CertChain = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+	case X509CertThumbprintKey:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprint = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+	case X509CertThumbprintS256Key:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprintS256 = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+	case X509URLKey:
+		if v, ok := value.(string); ok {
+			h.x509URL = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+	default:
+		if h.privateParams == nil {
+			h.privateParams = map[string]interface{}{}
+		}
+		h.privateParams[name] = value
+	}
+	return nil
+}
+
+func (k *okpPrivateKey) Remove(key string) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	switch key {
+	case AlgorithmKey:
+		k.algorithm = nil
+	case OKPCrvKey:
+		k.crv = nil
+	case OKPDKey:
+		k.d = nil
+	case KeyIDKey:
+		k.keyID = nil
+	case KeyOpsKey:
+		k.keyOps = nil
+	case KeyUsageKey:
+		k.keyUsage = nil
+	case OKPXKey:
+		k.x = nil
+	case X509CertChainKey:
+		k.x509CertChain = nil
+	case X509CertThumbprintKey:
+		k.x509CertThumbprint = nil
+	case X509CertThumbprintS256Key:
+		k.x509CertThumbprintS256 = nil
+	case X509URLKey:
+		k.x509URL = nil
+	default:
+		delete(k.privateParams, key)
+	}
+	return nil
+}
+
+func (k *okpPrivateKey) Clone() (Key, error) {
+	return cloneKey(k)
+}
+
+func (k *okpPrivateKey) DecodeCtx() json.DecodeCtx {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+	return k.dc
+}
+
+func (k *okpPrivateKey) SetDecodeCtx(dc json.DecodeCtx) {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	k.dc = dc
+}
+
+func (h *okpPrivateKey) UnmarshalJSON(buf []byte) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.algorithm = nil
+	h.crv = nil
+	h.d = nil
+	h.keyID = nil
+	h.keyOps = nil
+	h.keyUsage = nil
+	h.x = nil
+	h.x509CertChain = nil
+	h.x509CertThumbprint = nil
+	h.x509CertThumbprintS256 = nil
+	h.x509URL = nil
+	dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+	for {
+		tok, err := dec.Token()
+		if err != nil {
+			return fmt.Errorf(`error reading token: %w`, err)
+		}
+		switch tok := tok.(type) {
+		case json.Delim:
+			// Assuming we're doing everything correctly, we should ONLY
+			// get either '{' or '}' here.
+			if tok == '}' { // End of object
+				break LOOP
+			} else if tok != '{' {
+				return fmt.Errorf(`expected '{', but got '%c'`, tok)
+			}
+		case string: // Objects can only have string keys
+			switch tok {
+			case KeyTypeKey:
+				val, err := json.ReadNextStringToken(dec)
+				if err != nil {
+					return fmt.Errorf(`error reading token: %w`, err)
+				}
+				if val != jwa.OKP.String() {
+					return fmt.Errorf(`invalid kty value for OKPPrivateKey (%s)`, val)
+				}
+			case AlgorithmKey:
+				var s string
+				if err := dec.Decode(&s); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+				}
+				alg := jwa.KeyAlgorithmFrom(s)
+				h.algorithm = &alg
+			case OKPCrvKey:
+				var decoded jwa.EllipticCurveAlgorithm
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, OKPCrvKey, err)
+				}
+				h.crv = &decoded
+			case OKPDKey:
+				if err := json.AssignNextBytesToken(&h.d, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, OKPDKey, err)
+				}
+			case KeyIDKey:
+				if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+				}
+			case KeyOpsKey:
+				var decoded KeyOperationList
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+				}
+				h.keyOps = &decoded
+			case KeyUsageKey:
+				if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+				}
+			case OKPXKey:
+				if err := json.AssignNextBytesToken(&h.x, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, OKPXKey, err)
+				}
+			case X509CertChainKey:
+				var decoded cert.Chain
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+				}
+				h.x509CertChain = &decoded
+			case X509CertThumbprintKey:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+				}
+			case X509CertThumbprintS256Key:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+				}
+			case X509URLKey:
+				if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+				}
+			default:
+				if dc := h.dc; dc != nil {
+					if localReg := dc.Registry(); localReg != nil {
+						decoded, err := localReg.Decode(dec, tok)
+						if err == nil {
+							h.setNoLock(tok, decoded)
+							continue
+						}
+					}
+				}
+				decoded, err := registry.Decode(dec, tok)
+				if err == nil {
+					h.setNoLock(tok, decoded)
+					continue
+				}
+				return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+			}
+		default:
+			return fmt.Errorf(`invalid token %T`, tok)
+		}
+	}
+	if h.crv == nil {
+		return fmt.Errorf(`required field crv is missing`)
+	}
+	if h.d == nil {
+		return fmt.Errorf(`required field d is missing`)
+	}
+	if h.x == nil {
+		return fmt.Errorf(`required field x is missing`)
+	}
+	return nil
+}
+
+func (h okpPrivateKey) MarshalJSON() ([]byte, error) {
+	data := make(map[string]interface{})
+	fields := make([]string, 0, 11)
+	for _, pair := range h.makePairs() {
+		fields = append(fields, pair.Key.(string))
+		data[pair.Key.(string)] = pair.Value
+	}
+
+	sort.Strings(fields)
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+	buf.WriteByte('{')
+	enc := json.NewEncoder(buf)
+	for i, f := range fields {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		buf.WriteRune('"')
+		buf.WriteString(f)
+		buf.WriteString(`":`)
+		v := data[f]
+		switch v := v.(type) {
+		case []byte:
+			buf.WriteRune('"')
+			buf.WriteString(base64.EncodeToString(v))
+			buf.WriteRune('"')
+		default:
+			if err := enc.Encode(v); err != nil {
+				return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+			}
+			buf.Truncate(buf.Len() - 1)
+		}
+	}
+	buf.WriteByte('}')
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+	return ret, nil
+}
+
+func (h *okpPrivateKey) Iterate(ctx context.Context) HeaderIterator {
+	pairs := h.makePairs()
+	ch := make(chan *HeaderPair, len(pairs))
+	go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) {
+		defer close(ch)
+		for _, pair := range pairs {
+			select {
+			case <-ctx.Done():
+				return
+			case ch <- pair:
+			}
+		}
+	}(ctx, ch, pairs)
+	return mapiter.New(ch)
+}
+
+func (h *okpPrivateKey) Walk(ctx context.Context, visitor HeaderVisitor) error {
+	return iter.WalkMap(ctx, h, visitor)
+}
+
+func (h *okpPrivateKey) AsMap(ctx context.Context) (map[string]interface{}, error) {
+	return iter.AsMap(ctx, h)
+}
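
As a quick sanity check on the generated OKP marshal/unmarshal paths above, here is a minimal sketch (reviewer note, not part of the vendored diff) that round-trips an Ed25519 key through its JWK form. It assumes only the public jwk.FromRaw and jwk.ParseKey entry points of jwx v2.

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/json"
	"fmt"
	"log"

	"github.com/lestrrat-go/jwx/v2/jwk"
)

func main() {
	_, raw, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// FromRaw should yield an OKP private key with crv, d and x populated.
	key, err := jwk.FromRaw(raw)
	if err != nil {
		log.Fatal(err)
	}

	buf, err := json.Marshal(key) // exercises okpPrivateKey.MarshalJSON above
	if err != nil {
		log.Fatal(err)
	}

	parsed, err := jwk.ParseKey(buf) // exercises okpPrivateKey.UnmarshalJSON above
	if err != nil {
		log.Fatal(err)
	}
	okp, ok := parsed.(jwk.OKPPrivateKey)
	if !ok {
		log.Fatalf("expected an OKP private key, got %T", parsed)
	}
	fmt.Println(okp.Crv()) // expected: Ed25519
}
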
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/options.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/options.go
new file mode 100644
index 0000000000..98fcc4097a
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/options.go
@@ -0,0 +1,38 @@
+package jwk
+
+import (
+	"github.com/lestrrat-go/option"
+)
+
+type identTypedField struct{}
+
+type typedFieldPair struct {
+	Name  string
+	Value interface{}
+}
+
+// WithTypedField allows a private field to be parsed into the object type of
+// your choice. It works much like the RegisterCustomField, but the effect
+// is only applicable to the jwk.Parse function call which receives this option.
+//
+// While this can be extremely useful, this option should be used with caution:
+// There are many caveats that your entire team/user-base needs to be aware of,
+// and therefore in general its use is discouraged. Only use it when you know
+// what you are doing, and you document its use clearly for others.
+//
+// First and foremost, this is a "per-object" option. Meaning that given the same
+// serialized format, it is possible to generate two objects whose internal
+// representations may differ. That is, if you parse one _WITH_ the option,
+// and the other _WITHOUT_, their internal representation may completely differ.
+// This could potentially lead to problems.
+//
+// Second, specifying this option will slightly slow down the decoding process
+// as it needs to consult multiple definition sources (global and local), so
+// be careful if you are decoding a large number of tokens, as the effects will stack up.
+func WithTypedField(name string, object interface{}) ParseOption {
+	return &parseOption{
+		option.New(identTypedField{},
+			typedFieldPair{Name: name, Value: object},
+		),
+	}
+}
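
For context, a hedged usage sketch of WithTypedField follows (not part of the diff). The field name "x-birthday" is purely hypothetical, and the sketch assumes jwk.ParseKey accepts ParseOption values such as this one.

package example

import (
	"fmt"
	"time"

	"github.com/lestrrat-go/jwx/v2/jwk"
)

// parseWithBirthday parses a single JWK and asks for the hypothetical
// "x-birthday" private field to be decoded into a time.Time for this call only.
func parseWithBirthday(src []byte) error {
	key, err := jwk.ParseKey(src, jwk.WithTypedField("x-birthday", time.Time{}))
	if err != nil {
		return err
	}
	if v, ok := key.Get("x-birthday"); ok {
		// With the typed field registered, the value should be a time.Time
		// (possibly a pointer, depending on how the registry stores it)
		// rather than the raw JSON string.
		switch bday := v.(type) {
		case time.Time:
			fmt.Println("birthday year:", bday.Year())
		case *time.Time:
			fmt.Println("birthday year:", bday.Year())
		}
	}
	return nil
}
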
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/options.yaml b/vendor/github.com/lestrrat-go/jwx/v2/jwk/options.yaml
new file mode 100644
index 0000000000..3f7b6e2a16
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/options.yaml
@@ -0,0 +1,142 @@
+package_name: jwk
+output: jwk/options_gen.go
+interfaces:
+  - name: CacheOption
+    comment: |
+      CacheOption is a type of Option that can be passed to the
+      `jwk.Cache` object.
+  - name: AssignKeyIDOption
+  - name: FetchOption
+    methods:
+      - fetchOption
+      - parseOption
+      - registerOption
+    comment: |
+      FetchOption is a type of Option that can be passed to `jwk.Fetch()`
+      FetchOption also implements the `CacheOption`, and thus can
+      safely be passed to `(*jwk.Cache).Configure()`
+  - name: ParseOption
+    methods:
+      - fetchOption
+      - registerOption
+      - readFileOption
+    comment: |
+      ParseOption is a type of Option that can be passed to `jwk.Parse()`
+      ParseOption also implements the `ReadFileOption` and `CacheOption`,
+      and thus can safely be passed to `jwk.ReadFile` and `(*jwk.Cache).Configure()`
+  - name: ReadFileOption
+    comment: |
+      ReadFileOption is a type of `Option` that can be passed to `jwk.ReadFile`
+  - name: RegisterOption
+    comment: |
+      RegisterOption describes options that can be passed to `(jwk.Cache).Register()`
+options:
+  - ident: HTTPClient
+    interface: FetchOption
+    argument_type: HTTPClient
+    comment: |
+      WithHTTPClient allows users to specify the "net/http".Client object that
+      is used when fetching jwk.Set objects.
+  - ident: ThumbprintHash
+    interface: AssignKeyIDOption
+    argument_type: crypto.Hash
+  - ident: RefreshInterval
+    interface: RegisterOption
+    argument_type: time.Duration
+    comment: |
+      WithRefreshInterval specifies the static interval between refreshes
+      of jwk.Set objects controlled by jwk.Cache.
+      
+      Providing this option overrides the adaptive token refreshing based
+      on Cache-Control/Expires header (and jwk.WithMinRefreshInterval),
+      and refreshes will *always* happen in this interval.
+  - ident: MinRefreshInterval
+    interface: RegisterOption
+    argument_type: time.Duration
+    comment: |
+      WithMinRefreshInterval specifies the minimum refresh interval to be used
+      when using `jwk.Cache`. This value is ONLY used if you did not specify
+      a user-supplied static refresh interval via `WithRefreshInterval`.
+      
+      This value is used as a fallback value when tokens are refreshed.
+      
+      When we fetch the key from a remote URL, we first look at the max-age
+      directive from Cache-Control response header. If this value is present,
+      we compare the max-age value and the value specified by this option
+      and take the larger one.
+      
+      Next we check for the Expires header, and similarly if the header is
+      present, we compare it against the value specified by this option,
+      and take the larger one.
+      
+      Finally, if neither of the above headers are present, we use the
+      value specified by this option as the next refresh timing
+      
+      If unspecified, the minimum refresh interval is 1 hour
+  - ident: LocalRegistry
+    option_name: withLocalRegistry
+    interface: ParseOption
+    argument_type: '*json.Registry'
+    comment: This option is only available for internal code. Users don't get to play with it
+  - ident: PEM
+    interface: ParseOption
+    argument_type: bool
+    comment: WithPEM specifies that the input to `Parse()` is a PEM encoded key.
+  - ident: FetchWhitelist
+    interface: FetchOption
+    argument_type: Whitelist
+    comment: |
+      WithFetchWhitelist specifies the Whitelist object to use when
+      fetching JWKs from a remote source. This option can be passed
+      to `jwk.Fetch()`, `jwk.NewCache()`, and `(*jwk.Cache).Configure()`
+  - ident: IgnoreParseError
+    interface: ParseOption
+    argument_type: bool
+    comment: |
+      WithIgnoreParseError is only applicable when used with `jwk.Parse()`
+      (i.e. to parse JWK sets). If passed to `jwk.ParseKey()`, the function
+      will return an error no matter what the input is.
+      
+      DO NOT USE WITHOUT EXHAUSTING ALL OTHER ROUTES FIRST.
+      
+      The option specifies that errors found during parsing of individual
+      keys are ignored. For example, if you had keys A, B, C where B is
+      invalid (e.g. it does not contain the required fields), then the
+      resulting JWKS will contain keys A and C only.
+      
+      This option exists as an escape hatch for those times when a
+      key in a JWKS that is irrelevant for your use case is causing
+      your JWKS parsing to fail, and you want to get to the rest of the
+      keys in the JWKS.
+      
+      Again, DO NOT USE unless you have exhausted all other routes.
+      When you use this option, you will not be able to tell if you are
+      using a faulty JWKS, except for when there are JSON syntax errors.
+  - ident: FS
+    interface: ReadFileOption
+    argument_type: fs.FS
+    comment: |
+      WithFS specifies the source `fs.FS` object to read the file from.
+  - ident: PostFetcher
+    interface: RegisterOption
+    argument_type: PostFetcher
+    comment: |
+      WithPostFetcher specifies the PostFetcher object to be used on the
+      jwk.Set object obtained in `jwk.Cache`. This option can be used
+      to, for example, modify the jwk.Set to give it key IDs or algorithm
+      names after it has been fetched and parsed, but before it is cached.
+  - ident: RefreshWindow
+    interface: CacheOption
+    argument_type: time.Duration
+    comment: |
+      WithRefreshWindow specifies the interval between checks for refreshes.
+
+      See the documentation in `httprc.WithRefreshWindow` for more details.
+  - ident: ErrSink
+    interface: CacheOption
+    argument_type: ErrSink
+    comment: |
+      WithErrSink specifies the `httprc.ErrSink` object that handles errors
+      that occurred during the cache's execution.
+
+      See the documentation in `httprc.WithErrSink` for more details.
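
The refresh-related options declared above are easiest to see together. The sketch below is a reviewer note rather than part of the diff; the JWKS URL is a placeholder, and the sketch assumes the jwk.NewCache / Register / Get API of jwx v2.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/lestrrat-go/jwx/v2/jwk"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	const jwksURL = "https://example.com/.well-known/jwks.json" // placeholder

	// Sweep for entries needing a refresh roughly every 15 minutes.
	c := jwk.NewCache(ctx, jwk.WithRefreshWindow(15*time.Minute))

	// Never refresh this URL more often than hourly, regardless of what the
	// Cache-Control/Expires headers say (per the WithMinRefreshInterval docs above).
	if err := c.Register(jwksURL, jwk.WithMinRefreshInterval(time.Hour)); err != nil {
		log.Fatal(err)
	}

	set, err := c.Get(ctx, jwksURL)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("keys in set:", set.Len())
}
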
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/options_gen.go
new file mode 100644
index 0000000000..644b247114
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/options_gen.go
@@ -0,0 +1,274 @@
+// Code generated by tools/cmd/genoptions/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+	"crypto"
+	"io/fs"
+	"time"
+
+	"github.com/lestrrat-go/jwx/v2/internal/json"
+	"github.com/lestrrat-go/option"
+)
+
+type Option = option.Interface
+
+type AssignKeyIDOption interface {
+	Option
+	assignKeyIDOption()
+}
+
+type assignKeyIDOption struct {
+	Option
+}
+
+func (*assignKeyIDOption) assignKeyIDOption() {}
+
+// CacheOption is a type of Option that can be passed to the
+// `jwk.Cache` object.
+type CacheOption interface {
+	Option
+	cacheOption()
+}
+
+type cacheOption struct {
+	Option
+}
+
+func (*cacheOption) cacheOption() {}
+
+// FetchOption is a type of Option that can be passed to `jwk.Fetch()`
+// FetchOption also implements the `CacheOption`, and thus can
+// safely be passed to `(*jwk.Cache).Configure()`
+type FetchOption interface {
+	Option
+	fetchOption()
+	parseOption()
+	registerOption()
+}
+
+type fetchOption struct {
+	Option
+}
+
+func (*fetchOption) fetchOption() {}
+
+func (*fetchOption) parseOption() {}
+
+func (*fetchOption) registerOption() {}
+
+// ParseOption is a type of Option that can be passed to `jwk.Parse()`
+// ParseOption also implements the `ReadFileOption` and `CacheOption`,
+// and thus can safely be passed to `jwk.ReadFile` and `(*jwk.Cache).Configure()`
+type ParseOption interface {
+	Option
+	fetchOption()
+	registerOption()
+	readFileOption()
+}
+
+type parseOption struct {
+	Option
+}
+
+func (*parseOption) fetchOption() {}
+
+func (*parseOption) registerOption() {}
+
+func (*parseOption) readFileOption() {}
+
+// ReadFileOption is a type of `Option` that can be passed to `jwk.ReadFile`
+type ReadFileOption interface {
+	Option
+	readFileOption()
+}
+
+type readFileOption struct {
+	Option
+}
+
+func (*readFileOption) readFileOption() {}
+
+// RegisterOption describes options that can be passed to `(jwk.Cache).Register()`
+type RegisterOption interface {
+	Option
+	registerOption()
+}
+
+type registerOption struct {
+	Option
+}
+
+func (*registerOption) registerOption() {}
+
+type identErrSink struct{}
+type identFS struct{}
+type identFetchWhitelist struct{}
+type identHTTPClient struct{}
+type identIgnoreParseError struct{}
+type identLocalRegistry struct{}
+type identMinRefreshInterval struct{}
+type identPEM struct{}
+type identPostFetcher struct{}
+type identRefreshInterval struct{}
+type identRefreshWindow struct{}
+type identThumbprintHash struct{}
+
+func (identErrSink) String() string {
+	return "WithErrSink"
+}
+
+func (identFS) String() string {
+	return "WithFS"
+}
+
+func (identFetchWhitelist) String() string {
+	return "WithFetchWhitelist"
+}
+
+func (identHTTPClient) String() string {
+	return "WithHTTPClient"
+}
+
+func (identIgnoreParseError) String() string {
+	return "WithIgnoreParseError"
+}
+
+func (identLocalRegistry) String() string {
+	return "withLocalRegistry"
+}
+
+func (identMinRefreshInterval) String() string {
+	return "WithMinRefreshInterval"
+}
+
+func (identPEM) String() string {
+	return "WithPEM"
+}
+
+func (identPostFetcher) String() string {
+	return "WithPostFetcher"
+}
+
+func (identRefreshInterval) String() string {
+	return "WithRefreshInterval"
+}
+
+func (identRefreshWindow) String() string {
+	return "WithRefreshWindow"
+}
+
+func (identThumbprintHash) String() string {
+	return "WithThumbprintHash"
+}
+
+// WithErrSink specifies the `httprc.ErrSink` object that handles errors
+// that occurred during the cache's execution.
+//
+// See the documentation in `httprc.WithErrSink` for more details.
+func WithErrSink(v ErrSink) CacheOption {
+	return &cacheOption{option.New(identErrSink{}, v)}
+}
+
+// WithFS specifies the source `fs.FS` object to read the file from.
+func WithFS(v fs.FS) ReadFileOption {
+	return &readFileOption{option.New(identFS{}, v)}
+}
+
+// WithFetchWhitelist specifies the Whitelist object to use when
+// fetching JWKs from a remote source. This option can be passed
+// to `jwk.Fetch()`, `jwk.NewCache()`, and `(*jwk.Cache).Configure()`
+func WithFetchWhitelist(v Whitelist) FetchOption {
+	return &fetchOption{option.New(identFetchWhitelist{}, v)}
+}
+
+// WithHTTPClient allows users to specify the "net/http".Client object that
+// is used when fetching jwk.Set objects.
+func WithHTTPClient(v HTTPClient) FetchOption {
+	return &fetchOption{option.New(identHTTPClient{}, v)}
+}
+
+// WithIgnoreParseError is only applicable when used with `jwk.Parse()`
+// (i.e. to parse JWK sets). If passed to `jwk.ParseKey()`, the function
+// will return an error no matter what the input is.
+//
+// DO NOT USE WITHOUT EXHAUSTING ALL OTHER ROUTES FIRST.
+//
+// The option specifies that errors found during parsing of individual
+// keys are ignored. For example, if you had keys A, B, C where B is
+// invalid (e.g. it does not contain the required fields), then the
+// resulting JWKS will contain keys A and C only.
+//
+// This option exists as an escape hatch for those times when a
+// key in a JWKS that is irrelevant for your use case is causing
+// your JWKS parsing to fail, and you want to get to the rest of the
+// keys in the JWKS.
+//
+// Again, DO NOT USE unless you have exhausted all other routes.
+// When you use this option, you will not be able to tell if you are
+// using a faulty JWKS, except for when there are JSON syntax errors.
+func WithIgnoreParseError(v bool) ParseOption {
+	return &parseOption{option.New(identIgnoreParseError{}, v)}
+}
+
+// This option is only available for internal code. Users don't get to play with it
+func withLocalRegistry(v *json.Registry) ParseOption {
+	return &parseOption{option.New(identLocalRegistry{}, v)}
+}
+
+// WithMinRefreshInterval specifies the minimum refresh interval to be used
+// when using `jwk.Cache`. This value is ONLY used if you did not specify
+// a user-supplied static refresh interval via `WithRefreshInterval`.
+//
+// This value is used as a fallback value when tokens are refreshed.
+//
+// When we fetch the key from a remote URL, we first look at the max-age
+// directive from Cache-Control response header. If this value is present,
+// we compare the max-age value and the value specified by this option
+// and take the larger one.
+//
+// Next we check for the Expires header, and similarly if the header is
+// present, we compare it against the value specified by this option,
+// and take the larger one.
+//
+// Finally, if neither of the above headers are present, we use the
+// value specified by this option as the next refresh timing
+//
+// If unspecified, the minimum refresh interval is 1 hour
+func WithMinRefreshInterval(v time.Duration) RegisterOption {
+	return &registerOption{option.New(identMinRefreshInterval{}, v)}
+}
+
+// WithPEM specifies that the input to `Parse()` is a PEM encoded key.
+func WithPEM(v bool) ParseOption {
+	return &parseOption{option.New(identPEM{}, v)}
+}
+
+// WithPostFetcher specifies the PostFetcher object to be used on the
+// jwk.Set object obtained in `jwk.Cache`. This option can be used
+// to, for example, modify the jwk.Set to give it key IDs or algorithm
+// names after it has been fetched and parsed, but before it is cached.
+func WithPostFetcher(v PostFetcher) RegisterOption {
+	return &registerOption{option.New(identPostFetcher{}, v)}
+}
+
+// WithRefreshInterval specifies the static interval between refreshes
+// of jwk.Set objects controlled by jwk.Cache.
+//
+// Providing this option overrides the adaptive token refreshing based
+// on Cache-Control/Expires header (and jwk.WithMinRefreshInterval),
+// and refreshes will *always* happen in this interval.
+func WithRefreshInterval(v time.Duration) RegisterOption {
+	return &registerOption{option.New(identRefreshInterval{}, v)}
+}
+
+// WithRefreshWindow specifies the interval between checks for refreshes.
+//
+// See the documentation in `httprc.WithRefreshWindow` for more details.
+func WithRefreshWindow(v time.Duration) CacheOption {
+	return &cacheOption{option.New(identRefreshWindow{}, v)}
+}
+
+func WithThumbprintHash(v crypto.Hash) AssignKeyIDOption {
+	return &assignKeyIDOption{option.New(identThumbprintHash{}, v)}
+}
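
Given how strongly the WithIgnoreParseError documentation above warns against casual use, a small sketch may help make the trade-off concrete. This is a hedged example, not part of the diff; the file path is a placeholder, and keys that fail to parse are silently dropped while JSON syntax errors still fail the whole parse.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/lestrrat-go/jwx/v2/jwk"
)

func main() {
	buf, err := os.ReadFile("jwks.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	// Escape hatch only: individual keys that cannot be parsed are omitted
	// from the resulting set instead of failing the call.
	set, err := jwk.Parse(buf, jwk.WithIgnoreParseError(true))
	if err != nil {
		log.Fatal(err) // still returned for JSON syntax errors
	}
	fmt.Println("usable keys:", set.Len())
}
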
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/rsa.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/rsa.go
new file mode 100644
index 0000000000..5de6b6358e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/rsa.go
@@ -0,0 +1,243 @@
+package jwk
+
+import (
+	"crypto"
+	"crypto/rsa"
+	"encoding/binary"
+	"fmt"
+	"math/big"
+
+	"github.com/lestrrat-go/blackmagic"
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+	"github.com/lestrrat-go/jwx/v2/internal/pool"
+)
+
+func (k *rsaPrivateKey) FromRaw(rawKey *rsa.PrivateKey) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	d, err := bigIntToBytes(rawKey.D)
+	if err != nil {
+		return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err)
+	}
+	k.d = d
+
+	l := len(rawKey.Primes)
+
+	if l < 0 /* I know, I'm being paranoid */ || l > 2 {
+		return fmt.Errorf(`invalid number of primes in rsa.PrivateKey: need 0 to 2, but got %d`, len(rawKey.Primes))
+	}
+
+	if l > 0 {
+		p, err := bigIntToBytes(rawKey.Primes[0])
+		if err != nil {
+			return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err)
+		}
+		k.p = p
+	}
+
+	if l > 1 {
+		q, err := bigIntToBytes(rawKey.Primes[1])
+		if err != nil {
+			return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err)
+		}
+		k.q = q
+	}
+
+	// dp, dq, qi are optional values
+	if v, err := bigIntToBytes(rawKey.Precomputed.Dp); err == nil {
+		k.dp = v
+	}
+	if v, err := bigIntToBytes(rawKey.Precomputed.Dq); err == nil {
+		k.dq = v
+	}
+	if v, err := bigIntToBytes(rawKey.Precomputed.Qinv); err == nil {
+		k.qi = v
+	}
+
+	// public key part
+	n, e, err := rsaPublicKeyByteValuesFromRaw(&rawKey.PublicKey)
+	if err != nil {
+		return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err)
+	}
+	k.n = n
+	k.e = e
+
+	return nil
+}
+
+func rsaPublicKeyByteValuesFromRaw(rawKey *rsa.PublicKey) ([]byte, []byte, error) {
+	n, err := bigIntToBytes(rawKey.N)
+	if err != nil {
+		return nil, nil, fmt.Errorf(`invalid rsa.PublicKey: %w`, err)
+	}
+
+	data := make([]byte, 8)
+	binary.BigEndian.PutUint64(data, uint64(rawKey.E))
+	i := 0
+	for ; i < len(data); i++ {
+		if data[i] != 0x0 {
+			break
+		}
+	}
+	return n, data[i:], nil
+}
+
+func (k *rsaPublicKey) FromRaw(rawKey *rsa.PublicKey) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	n, e, err := rsaPublicKeyByteValuesFromRaw(rawKey)
+	if err != nil {
+		return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err)
+	}
+	k.n = n
+	k.e = e
+
+	return nil
+}
+
+func (k *rsaPrivateKey) Raw(v interface{}) error {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	var d, q, p big.Int // note: do not use from sync.Pool
+
+	d.SetBytes(k.d)
+	q.SetBytes(k.q)
+	p.SetBytes(k.p)
+
+	// optional fields
+	var dp, dq, qi *big.Int
+	if len(k.dp) > 0 {
+		dp = &big.Int{} // note: do not use from sync.Pool
+		dp.SetBytes(k.dp)
+	}
+
+	if len(k.dq) > 0 {
+		dq = &big.Int{} // note: do not use from sync.Pool
+		dq.SetBytes(k.dq)
+	}
+
+	if len(k.qi) > 0 {
+		qi = &big.Int{} // note: do not use from sync.Pool
+		qi.SetBytes(k.qi)
+	}
+
+	var key rsa.PrivateKey
+
+	pubk := newRSAPublicKey()
+	pubk.n = k.n
+	pubk.e = k.e
+	if err := pubk.Raw(&key.PublicKey); err != nil {
+		return fmt.Errorf(`failed to materialize RSA public key: %w`, err)
+	}
+
+	key.D = &d
+	key.Primes = []*big.Int{&p, &q}
+
+	if dp != nil {
+		key.Precomputed.Dp = dp
+	}
+	if dq != nil {
+		key.Precomputed.Dq = dq
+	}
+	if qi != nil {
+		key.Precomputed.Qinv = qi
+	}
+	key.Precomputed.CRTValues = []rsa.CRTValue{}
+
+	return blackmagic.AssignIfCompatible(v, &key)
+}
+
+// Raw takes the values stored in the Key object, and creates the
+// corresponding *rsa.PublicKey object.
+func (k *rsaPublicKey) Raw(v interface{}) error {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	var key rsa.PublicKey
+
+	n := pool.GetBigInt()
+	e := pool.GetBigInt()
+	defer pool.ReleaseBigInt(e)
+
+	n.SetBytes(k.n)
+	e.SetBytes(k.e)
+
+	key.N = n
+	key.E = int(e.Int64())
+
+	return blackmagic.AssignIfCompatible(v, &key)
+}
+
+func makeRSAPublicKey(v interface {
+	makePairs() []*HeaderPair
+}) (Key, error) {
+	newKey := newRSAPublicKey()
+
+	// Iterate and copy everything except for the bits that should not be in the public key
+	for _, pair := range v.makePairs() {
+		switch pair.Key {
+		case RSADKey, RSADPKey, RSADQKey, RSAPKey, RSAQKey, RSAQIKey:
+			continue
+		default:
+			//nolint:forcetypeassert
+			key := pair.Key.(string)
+			if err := newKey.Set(key, pair.Value); err != nil {
+				return nil, fmt.Errorf(`failed to set field %q: %w`, key, err)
+			}
+		}
+	}
+
+	return newKey, nil
+}
+
+func (k *rsaPrivateKey) PublicKey() (Key, error) {
+	return makeRSAPublicKey(k)
+}
+
+func (k *rsaPublicKey) PublicKey() (Key, error) {
+	return makeRSAPublicKey(k)
+}
+
+// Thumbprint returns the JWK thumbprint using the indicated
+// hashing algorithm, according to RFC 7638
+func (k rsaPrivateKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	var key rsa.PrivateKey
+	if err := k.Raw(&key); err != nil {
+		return nil, fmt.Errorf(`failed to materialize RSA private key: %w`, err)
+	}
+	return rsaThumbprint(hash, &key.PublicKey)
+}
+
+func (k rsaPublicKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	var key rsa.PublicKey
+	if err := k.Raw(&key); err != nil {
+		return nil, fmt.Errorf(`failed to materialize RSA public key: %w`, err)
+	}
+	return rsaThumbprint(hash, &key)
+}
+
+func rsaThumbprint(hash crypto.Hash, key *rsa.PublicKey) ([]byte, error) {
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+
+	buf.WriteString(`{"e":"`)
+	buf.WriteString(base64.EncodeUint64ToString(uint64(key.E)))
+	buf.WriteString(`","kty":"RSA","n":"`)
+	buf.WriteString(base64.EncodeToString(key.N.Bytes()))
+	buf.WriteString(`"}`)
+
+	h := hash.New()
+	if _, err := buf.WriteTo(h); err != nil {
+		return nil, fmt.Errorf(`failed to write rsaThumbprint: %w`, err)
+	}
+	return h.Sum(nil), nil
+}
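
rsaThumbprint above hashes the canonical {"e":…,"kty":"RSA","n":…} form described in RFC 7638. The sketch below (a reviewer note, not part of the diff) shows that path being reached through the public Key.Thumbprint method for a freshly generated RSA key.

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	_ "crypto/sha256" // registers SHA-256 for crypto.SHA256.New
	"encoding/base64"
	"fmt"
	"log"

	"github.com/lestrrat-go/jwx/v2/jwk"
)

func main() {
	raw, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	key, err := jwk.FromRaw(raw)
	if err != nil {
		log.Fatal(err)
	}

	// Thumbprint of the private key is defined over its public parameters only.
	tp, err := key.Thumbprint(crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(base64.RawURLEncoding.EncodeToString(tp))
}
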
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/rsa_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/rsa_gen.go
new file mode 100644
index 0000000000..659624297f
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/rsa_gen.go
@@ -0,0 +1,1250 @@
+// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+	"bytes"
+	"context"
+	"crypto/rsa"
+	"fmt"
+	"sort"
+	"sync"
+
+	"github.com/lestrrat-go/iter/mapiter"
+	"github.com/lestrrat-go/jwx/v2/cert"
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+	"github.com/lestrrat-go/jwx/v2/internal/iter"
+	"github.com/lestrrat-go/jwx/v2/internal/json"
+	"github.com/lestrrat-go/jwx/v2/internal/pool"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+const (
+	RSADKey  = "d"
+	RSADPKey = "dp"
+	RSADQKey = "dq"
+	RSAEKey  = "e"
+	RSANKey  = "n"
+	RSAPKey  = "p"
+	RSAQIKey = "qi"
+	RSAQKey  = "q"
+)
+
+type RSAPublicKey interface {
+	Key
+	FromRaw(*rsa.PublicKey) error
+	E() []byte
+	N() []byte
+}
+
+type rsaPublicKey struct {
+	algorithm              *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+	e                      []byte
+	keyID                  *string           // https://tools.ietf.org/html/rfc7515#section-4.1.4
+	keyOps                 *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+	keyUsage               *string           // https://tools.ietf.org/html/rfc7517#section-4.2
+	n                      []byte
+	x509CertChain          *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+	x509CertThumbprint     *string     // https://tools.ietf.org/html/rfc7515#section-4.1.7
+	x509CertThumbprintS256 *string     // https://tools.ietf.org/html/rfc7515#section-4.1.8
+	x509URL                *string     // https://tools.ietf.org/html/rfc7515#section-4.1.5
+	privateParams          map[string]interface{}
+	mu                     *sync.RWMutex
+	dc                     json.DecodeCtx
+}
+
+var _ RSAPublicKey = &rsaPublicKey{}
+var _ Key = &rsaPublicKey{}
+
+func newRSAPublicKey() *rsaPublicKey {
+	return &rsaPublicKey{
+		mu:            &sync.RWMutex{},
+		privateParams: make(map[string]interface{}),
+	}
+}
+
+func (h rsaPublicKey) KeyType() jwa.KeyType {
+	return jwa.RSA
+}
+
+func (h *rsaPublicKey) Algorithm() jwa.KeyAlgorithm {
+	if h.algorithm != nil {
+		return *(h.algorithm)
+	}
+	return jwa.InvalidKeyAlgorithm("")
+}
+
+func (h *rsaPublicKey) E() []byte {
+	return h.e
+}
+
+func (h *rsaPublicKey) KeyID() string {
+	if h.keyID != nil {
+		return *(h.keyID)
+	}
+	return ""
+}
+
+func (h *rsaPublicKey) KeyOps() KeyOperationList {
+	if h.keyOps != nil {
+		return *(h.keyOps)
+	}
+	return nil
+}
+
+func (h *rsaPublicKey) KeyUsage() string {
+	if h.keyUsage != nil {
+		return *(h.keyUsage)
+	}
+	return ""
+}
+
+func (h *rsaPublicKey) N() []byte {
+	return h.n
+}
+
+func (h *rsaPublicKey) X509CertChain() *cert.Chain {
+	return h.x509CertChain
+}
+
+func (h *rsaPublicKey) X509CertThumbprint() string {
+	if h.x509CertThumbprint != nil {
+		return *(h.x509CertThumbprint)
+	}
+	return ""
+}
+
+func (h *rsaPublicKey) X509CertThumbprintS256() string {
+	if h.x509CertThumbprintS256 != nil {
+		return *(h.x509CertThumbprintS256)
+	}
+	return ""
+}
+
+func (h *rsaPublicKey) X509URL() string {
+	if h.x509URL != nil {
+		return *(h.x509URL)
+	}
+	return ""
+}
+
+func (h *rsaPublicKey) makePairs() []*HeaderPair {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+
+	var pairs []*HeaderPair
+	pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.RSA})
+	if h.algorithm != nil {
+		pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)})
+	}
+	if h.e != nil {
+		pairs = append(pairs, &HeaderPair{Key: RSAEKey, Value: h.e})
+	}
+	if h.keyID != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)})
+	}
+	if h.keyOps != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)})
+	}
+	if h.keyUsage != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)})
+	}
+	if h.n != nil {
+		pairs = append(pairs, &HeaderPair{Key: RSANKey, Value: h.n})
+	}
+	if h.x509CertChain != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain})
+	}
+	if h.x509CertThumbprint != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)})
+	}
+	if h.x509CertThumbprintS256 != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)})
+	}
+	if h.x509URL != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)})
+	}
+	for k, v := range h.privateParams {
+		pairs = append(pairs, &HeaderPair{Key: k, Value: v})
+	}
+	return pairs
+}
+
+func (h *rsaPublicKey) PrivateParams() map[string]interface{} {
+	return h.privateParams
+}
+
+func (h *rsaPublicKey) Get(name string) (interface{}, bool) {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	switch name {
+	case KeyTypeKey:
+		return h.KeyType(), true
+	case AlgorithmKey:
+		if h.algorithm == nil {
+			return nil, false
+		}
+		return *(h.algorithm), true
+	case RSAEKey:
+		if h.e == nil {
+			return nil, false
+		}
+		return h.e, true
+	case KeyIDKey:
+		if h.keyID == nil {
+			return nil, false
+		}
+		return *(h.keyID), true
+	case KeyOpsKey:
+		if h.keyOps == nil {
+			return nil, false
+		}
+		return *(h.keyOps), true
+	case KeyUsageKey:
+		if h.keyUsage == nil {
+			return nil, false
+		}
+		return *(h.keyUsage), true
+	case RSANKey:
+		if h.n == nil {
+			return nil, false
+		}
+		return h.n, true
+	case X509CertChainKey:
+		if h.x509CertChain == nil {
+			return nil, false
+		}
+		return h.x509CertChain, true
+	case X509CertThumbprintKey:
+		if h.x509CertThumbprint == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprint), true
+	case X509CertThumbprintS256Key:
+		if h.x509CertThumbprintS256 == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprintS256), true
+	case X509URLKey:
+		if h.x509URL == nil {
+			return nil, false
+		}
+		return *(h.x509URL), true
+	default:
+		v, ok := h.privateParams[name]
+		return v, ok
+	}
+}
+
+func (h *rsaPublicKey) Set(name string, value interface{}) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	return h.setNoLock(name, value)
+}
+
+func (h *rsaPublicKey) setNoLock(name string, value interface{}) error {
+	switch name {
+	case "kty":
+		return nil
+	case AlgorithmKey:
+		switch v := value.(type) {
+		case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm:
+			var tmp = jwa.KeyAlgorithmFrom(v)
+			h.algorithm = &tmp
+		case fmt.Stringer:
+			s := v.String()
+			var tmp = jwa.KeyAlgorithmFrom(s)
+			h.algorithm = &tmp
+		default:
+			return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value)
+		}
+		return nil
+	case RSAEKey:
+		if v, ok := value.([]byte); ok {
+			h.e = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, RSAEKey, value)
+	case KeyIDKey:
+		if v, ok := value.(string); ok {
+			h.keyID = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+	case KeyOpsKey:
+		var acceptor KeyOperationList
+		if err := acceptor.Accept(value); err != nil {
+			return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+		}
+		h.keyOps = &acceptor
+		return nil
+	case KeyUsageKey:
+		switch v := value.(type) {
+		case KeyUsageType:
+			switch v {
+			case ForSignature, ForEncryption:
+				tmp := v.String()
+				h.keyUsage = &tmp
+			default:
+				return fmt.Errorf(`invalid key usage type %s`, v)
+			}
+		case string:
+			h.keyUsage = &v
+		default:
+			return fmt.Errorf(`invalid key usage type %s`, v)
+		}
+	case RSANKey:
+		if v, ok := value.([]byte); ok {
+			h.n = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, RSANKey, value)
+	case X509CertChainKey:
+		if v, ok := value.(*cert.Chain); ok {
+			h.x509CertChain = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+	case X509CertThumbprintKey:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprint = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+	case X509CertThumbprintS256Key:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprintS256 = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+	case X509URLKey:
+		if v, ok := value.(string); ok {
+			h.x509URL = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+	default:
+		if h.privateParams == nil {
+			h.privateParams = map[string]interface{}{}
+		}
+		h.privateParams[name] = value
+	}
+	return nil
+}
+
+func (k *rsaPublicKey) Remove(key string) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	switch key {
+	case AlgorithmKey:
+		k.algorithm = nil
+	case RSAEKey:
+		k.e = nil
+	case KeyIDKey:
+		k.keyID = nil
+	case KeyOpsKey:
+		k.keyOps = nil
+	case KeyUsageKey:
+		k.keyUsage = nil
+	case RSANKey:
+		k.n = nil
+	case X509CertChainKey:
+		k.x509CertChain = nil
+	case X509CertThumbprintKey:
+		k.x509CertThumbprint = nil
+	case X509CertThumbprintS256Key:
+		k.x509CertThumbprintS256 = nil
+	case X509URLKey:
+		k.x509URL = nil
+	default:
+		delete(k.privateParams, key)
+	}
+	return nil
+}
+
+func (k *rsaPublicKey) Clone() (Key, error) {
+	return cloneKey(k)
+}
+
+func (k *rsaPublicKey) DecodeCtx() json.DecodeCtx {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+	return k.dc
+}
+
+func (k *rsaPublicKey) SetDecodeCtx(dc json.DecodeCtx) {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	k.dc = dc
+}
+
+func (h *rsaPublicKey) UnmarshalJSON(buf []byte) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.algorithm = nil
+	h.e = nil
+	h.keyID = nil
+	h.keyOps = nil
+	h.keyUsage = nil
+	h.n = nil
+	h.x509CertChain = nil
+	h.x509CertThumbprint = nil
+	h.x509CertThumbprintS256 = nil
+	h.x509URL = nil
+	dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+	for {
+		tok, err := dec.Token()
+		if err != nil {
+			return fmt.Errorf(`error reading token: %w`, err)
+		}
+		switch tok := tok.(type) {
+		case json.Delim:
+			// Assuming we're doing everything correctly, we should ONLY
+			// get either '{' or '}' here.
+			if tok == '}' { // End of object
+				break LOOP
+			} else if tok != '{' {
+				return fmt.Errorf(`expected '{', but got '%c'`, tok)
+			}
+		case string: // Objects can only have string keys
+			switch tok {
+			case KeyTypeKey:
+				val, err := json.ReadNextStringToken(dec)
+				if err != nil {
+					return fmt.Errorf(`error reading token: %w`, err)
+				}
+				if val != jwa.RSA.String() {
+					return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val)
+				}
+			case AlgorithmKey:
+				var s string
+				if err := dec.Decode(&s); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+				}
+				alg := jwa.KeyAlgorithmFrom(s)
+				h.algorithm = &alg
+			case RSAEKey:
+				if err := json.AssignNextBytesToken(&h.e, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, RSAEKey, err)
+				}
+			case KeyIDKey:
+				if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+				}
+			case KeyOpsKey:
+				var decoded KeyOperationList
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+				}
+				h.keyOps = &decoded
+			case KeyUsageKey:
+				if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+				}
+			case RSANKey:
+				if err := json.AssignNextBytesToken(&h.n, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, RSANKey, err)
+				}
+			case X509CertChainKey:
+				var decoded cert.Chain
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+				}
+				h.x509CertChain = &decoded
+			case X509CertThumbprintKey:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+				}
+			case X509CertThumbprintS256Key:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+				}
+			case X509URLKey:
+				if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+				}
+			default:
+				if dc := h.dc; dc != nil {
+					if localReg := dc.Registry(); localReg != nil {
+						decoded, err := localReg.Decode(dec, tok)
+						if err == nil {
+							h.setNoLock(tok, decoded)
+							continue
+						}
+					}
+				}
+				decoded, err := registry.Decode(dec, tok)
+				if err == nil {
+					h.setNoLock(tok, decoded)
+					continue
+				}
+				return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+			}
+		default:
+			return fmt.Errorf(`invalid token %T`, tok)
+		}
+	}
+	if h.e == nil {
+		return fmt.Errorf(`required field e is missing`)
+	}
+	if h.n == nil {
+		return fmt.Errorf(`required field n is missing`)
+	}
+	return nil
+}
+
+func (h rsaPublicKey) MarshalJSON() ([]byte, error) {
+	data := make(map[string]interface{})
+	fields := make([]string, 0, 10)
+	for _, pair := range h.makePairs() {
+		fields = append(fields, pair.Key.(string))
+		data[pair.Key.(string)] = pair.Value
+	}
+
+	sort.Strings(fields)
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+	buf.WriteByte('{')
+	enc := json.NewEncoder(buf)
+	for i, f := range fields {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		buf.WriteRune('"')
+		buf.WriteString(f)
+		buf.WriteString(`":`)
+		v := data[f]
+		switch v := v.(type) {
+		case []byte:
+			buf.WriteRune('"')
+			buf.WriteString(base64.EncodeToString(v))
+			buf.WriteRune('"')
+		default:
+			if err := enc.Encode(v); err != nil {
+				return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+			}
+			buf.Truncate(buf.Len() - 1)
+		}
+	}
+	buf.WriteByte('}')
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+	return ret, nil
+}
+
+func (h *rsaPublicKey) Iterate(ctx context.Context) HeaderIterator {
+	pairs := h.makePairs()
+	ch := make(chan *HeaderPair, len(pairs))
+	go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) {
+		defer close(ch)
+		for _, pair := range pairs {
+			select {
+			case <-ctx.Done():
+				return
+			case ch <- pair:
+			}
+		}
+	}(ctx, ch, pairs)
+	return mapiter.New(ch)
+}
+
+func (h *rsaPublicKey) Walk(ctx context.Context, visitor HeaderVisitor) error {
+	return iter.WalkMap(ctx, h, visitor)
+}
+
+func (h *rsaPublicKey) AsMap(ctx context.Context) (map[string]interface{}, error) {
+	return iter.AsMap(ctx, h)
+}
+
+type RSAPrivateKey interface {
+	Key
+	FromRaw(*rsa.PrivateKey) error
+	D() []byte
+	DP() []byte
+	DQ() []byte
+	E() []byte
+	N() []byte
+	P() []byte
+	Q() []byte
+	QI() []byte
+}
+
+type rsaPrivateKey struct {
+	algorithm              *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+	d                      []byte
+	dp                     []byte
+	dq                     []byte
+	e                      []byte
+	keyID                  *string           // https://tools.ietf.org/html/rfc7515#section-4.1.4
+	keyOps                 *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+	keyUsage               *string           // https://tools.ietf.org/html/rfc7517#section-4.2
+	n                      []byte
+	p                      []byte
+	q                      []byte
+	qi                     []byte
+	x509CertChain          *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+	x509CertThumbprint     *string     // https://tools.ietf.org/html/rfc7515#section-4.1.7
+	x509CertThumbprintS256 *string     // https://tools.ietf.org/html/rfc7515#section-4.1.8
+	x509URL                *string     // https://tools.ietf.org/html/rfc7515#section-4.1.5
+	privateParams          map[string]interface{}
+	mu                     *sync.RWMutex
+	dc                     json.DecodeCtx
+}
+
+var _ RSAPrivateKey = &rsaPrivateKey{}
+var _ Key = &rsaPrivateKey{}
+
+func newRSAPrivateKey() *rsaPrivateKey {
+	return &rsaPrivateKey{
+		mu:            &sync.RWMutex{},
+		privateParams: make(map[string]interface{}),
+	}
+}
+
+func (h rsaPrivateKey) KeyType() jwa.KeyType {
+	return jwa.RSA
+}
+
+func (h *rsaPrivateKey) Algorithm() jwa.KeyAlgorithm {
+	if h.algorithm != nil {
+		return *(h.algorithm)
+	}
+	return jwa.InvalidKeyAlgorithm("")
+}
+
+func (h *rsaPrivateKey) D() []byte {
+	return h.d
+}
+
+func (h *rsaPrivateKey) DP() []byte {
+	return h.dp
+}
+
+func (h *rsaPrivateKey) DQ() []byte {
+	return h.dq
+}
+
+func (h *rsaPrivateKey) E() []byte {
+	return h.e
+}
+
+func (h *rsaPrivateKey) KeyID() string {
+	if h.keyID != nil {
+		return *(h.keyID)
+	}
+	return ""
+}
+
+func (h *rsaPrivateKey) KeyOps() KeyOperationList {
+	if h.keyOps != nil {
+		return *(h.keyOps)
+	}
+	return nil
+}
+
+func (h *rsaPrivateKey) KeyUsage() string {
+	if h.keyUsage != nil {
+		return *(h.keyUsage)
+	}
+	return ""
+}
+
+func (h *rsaPrivateKey) N() []byte {
+	return h.n
+}
+
+func (h *rsaPrivateKey) P() []byte {
+	return h.p
+}
+
+func (h *rsaPrivateKey) Q() []byte {
+	return h.q
+}
+
+func (h *rsaPrivateKey) QI() []byte {
+	return h.qi
+}
+
+func (h *rsaPrivateKey) X509CertChain() *cert.Chain {
+	return h.x509CertChain
+}
+
+func (h *rsaPrivateKey) X509CertThumbprint() string {
+	if h.x509CertThumbprint != nil {
+		return *(h.x509CertThumbprint)
+	}
+	return ""
+}
+
+func (h *rsaPrivateKey) X509CertThumbprintS256() string {
+	if h.x509CertThumbprintS256 != nil {
+		return *(h.x509CertThumbprintS256)
+	}
+	return ""
+}
+
+func (h *rsaPrivateKey) X509URL() string {
+	if h.x509URL != nil {
+		return *(h.x509URL)
+	}
+	return ""
+}
+
+func (h *rsaPrivateKey) makePairs() []*HeaderPair {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+
+	var pairs []*HeaderPair
+	pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.RSA})
+	if h.algorithm != nil {
+		pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)})
+	}
+	if h.d != nil {
+		pairs = append(pairs, &HeaderPair{Key: RSADKey, Value: h.d})
+	}
+	if h.dp != nil {
+		pairs = append(pairs, &HeaderPair{Key: RSADPKey, Value: h.dp})
+	}
+	if h.dq != nil {
+		pairs = append(pairs, &HeaderPair{Key: RSADQKey, Value: h.dq})
+	}
+	if h.e != nil {
+		pairs = append(pairs, &HeaderPair{Key: RSAEKey, Value: h.e})
+	}
+	if h.keyID != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)})
+	}
+	if h.keyOps != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)})
+	}
+	if h.keyUsage != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)})
+	}
+	if h.n != nil {
+		pairs = append(pairs, &HeaderPair{Key: RSANKey, Value: h.n})
+	}
+	if h.p != nil {
+		pairs = append(pairs, &HeaderPair{Key: RSAPKey, Value: h.p})
+	}
+	if h.q != nil {
+		pairs = append(pairs, &HeaderPair{Key: RSAQKey, Value: h.q})
+	}
+	if h.qi != nil {
+		pairs = append(pairs, &HeaderPair{Key: RSAQIKey, Value: h.qi})
+	}
+	if h.x509CertChain != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain})
+	}
+	if h.x509CertThumbprint != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)})
+	}
+	if h.x509CertThumbprintS256 != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)})
+	}
+	if h.x509URL != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)})
+	}
+	for k, v := range h.privateParams {
+		pairs = append(pairs, &HeaderPair{Key: k, Value: v})
+	}
+	return pairs
+}
+
+func (h *rsaPrivateKey) PrivateParams() map[string]interface{} {
+	return h.privateParams
+}
+
+func (h *rsaPrivateKey) Get(name string) (interface{}, bool) {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	switch name {
+	case KeyTypeKey:
+		return h.KeyType(), true
+	case AlgorithmKey:
+		if h.algorithm == nil {
+			return nil, false
+		}
+		return *(h.algorithm), true
+	case RSADKey:
+		if h.d == nil {
+			return nil, false
+		}
+		return h.d, true
+	case RSADPKey:
+		if h.dp == nil {
+			return nil, false
+		}
+		return h.dp, true
+	case RSADQKey:
+		if h.dq == nil {
+			return nil, false
+		}
+		return h.dq, true
+	case RSAEKey:
+		if h.e == nil {
+			return nil, false
+		}
+		return h.e, true
+	case KeyIDKey:
+		if h.keyID == nil {
+			return nil, false
+		}
+		return *(h.keyID), true
+	case KeyOpsKey:
+		if h.keyOps == nil {
+			return nil, false
+		}
+		return *(h.keyOps), true
+	case KeyUsageKey:
+		if h.keyUsage == nil {
+			return nil, false
+		}
+		return *(h.keyUsage), true
+	case RSANKey:
+		if h.n == nil {
+			return nil, false
+		}
+		return h.n, true
+	case RSAPKey:
+		if h.p == nil {
+			return nil, false
+		}
+		return h.p, true
+	case RSAQKey:
+		if h.q == nil {
+			return nil, false
+		}
+		return h.q, true
+	case RSAQIKey:
+		if h.qi == nil {
+			return nil, false
+		}
+		return h.qi, true
+	case X509CertChainKey:
+		if h.x509CertChain == nil {
+			return nil, false
+		}
+		return h.x509CertChain, true
+	case X509CertThumbprintKey:
+		if h.x509CertThumbprint == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprint), true
+	case X509CertThumbprintS256Key:
+		if h.x509CertThumbprintS256 == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprintS256), true
+	case X509URLKey:
+		if h.x509URL == nil {
+			return nil, false
+		}
+		return *(h.x509URL), true
+	default:
+		v, ok := h.privateParams[name]
+		return v, ok
+	}
+}
+
+func (h *rsaPrivateKey) Set(name string, value interface{}) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	return h.setNoLock(name, value)
+}
+
+func (h *rsaPrivateKey) setNoLock(name string, value interface{}) error {
+	switch name {
+	case "kty":
+		return nil
+	case AlgorithmKey:
+		switch v := value.(type) {
+		case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm:
+			var tmp = jwa.KeyAlgorithmFrom(v)
+			h.algorithm = &tmp
+		case fmt.Stringer:
+			s := v.String()
+			var tmp = jwa.KeyAlgorithmFrom(s)
+			h.algorithm = &tmp
+		default:
+			return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value)
+		}
+		return nil
+	case RSADKey:
+		if v, ok := value.([]byte); ok {
+			h.d = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, RSADKey, value)
+	case RSADPKey:
+		if v, ok := value.([]byte); ok {
+			h.dp = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, RSADPKey, value)
+	case RSADQKey:
+		if v, ok := value.([]byte); ok {
+			h.dq = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, RSADQKey, value)
+	case RSAEKey:
+		if v, ok := value.([]byte); ok {
+			h.e = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, RSAEKey, value)
+	case KeyIDKey:
+		if v, ok := value.(string); ok {
+			h.keyID = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+	case KeyOpsKey:
+		var acceptor KeyOperationList
+		if err := acceptor.Accept(value); err != nil {
+			return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+		}
+		h.keyOps = &acceptor
+		return nil
+	case KeyUsageKey:
+		switch v := value.(type) {
+		case KeyUsageType:
+			switch v {
+			case ForSignature, ForEncryption:
+				tmp := v.String()
+				h.keyUsage = &tmp
+			default:
+				return fmt.Errorf(`invalid key usage type %s`, v)
+			}
+		case string:
+			h.keyUsage = &v
+		default:
+			return fmt.Errorf(`invalid key usage type %s`, v)
+		}
+	case RSANKey:
+		if v, ok := value.([]byte); ok {
+			h.n = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, RSANKey, value)
+	case RSAPKey:
+		if v, ok := value.([]byte); ok {
+			h.p = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, RSAPKey, value)
+	case RSAQKey:
+		if v, ok := value.([]byte); ok {
+			h.q = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, RSAQKey, value)
+	case RSAQIKey:
+		if v, ok := value.([]byte); ok {
+			h.qi = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, RSAQIKey, value)
+	case X509CertChainKey:
+		if v, ok := value.(*cert.Chain); ok {
+			h.x509CertChain = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+	case X509CertThumbprintKey:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprint = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+	case X509CertThumbprintS256Key:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprintS256 = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+	case X509URLKey:
+		if v, ok := value.(string); ok {
+			h.x509URL = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+	default:
+		if h.privateParams == nil {
+			h.privateParams = map[string]interface{}{}
+		}
+		h.privateParams[name] = value
+	}
+	return nil
+}
+
+func (k *rsaPrivateKey) Remove(key string) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	switch key {
+	case AlgorithmKey:
+		k.algorithm = nil
+	case RSADKey:
+		k.d = nil
+	case RSADPKey:
+		k.dp = nil
+	case RSADQKey:
+		k.dq = nil
+	case RSAEKey:
+		k.e = nil
+	case KeyIDKey:
+		k.keyID = nil
+	case KeyOpsKey:
+		k.keyOps = nil
+	case KeyUsageKey:
+		k.keyUsage = nil
+	case RSANKey:
+		k.n = nil
+	case RSAPKey:
+		k.p = nil
+	case RSAQKey:
+		k.q = nil
+	case RSAQIKey:
+		k.qi = nil
+	case X509CertChainKey:
+		k.x509CertChain = nil
+	case X509CertThumbprintKey:
+		k.x509CertThumbprint = nil
+	case X509CertThumbprintS256Key:
+		k.x509CertThumbprintS256 = nil
+	case X509URLKey:
+		k.x509URL = nil
+	default:
+		delete(k.privateParams, key)
+	}
+	return nil
+}
+
+func (k *rsaPrivateKey) Clone() (Key, error) {
+	return cloneKey(k)
+}
+
+func (k *rsaPrivateKey) DecodeCtx() json.DecodeCtx {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+	return k.dc
+}
+
+func (k *rsaPrivateKey) SetDecodeCtx(dc json.DecodeCtx) {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	k.dc = dc
+}
+
+func (h *rsaPrivateKey) UnmarshalJSON(buf []byte) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.algorithm = nil
+	h.d = nil
+	h.dp = nil
+	h.dq = nil
+	h.e = nil
+	h.keyID = nil
+	h.keyOps = nil
+	h.keyUsage = nil
+	h.n = nil
+	h.p = nil
+	h.q = nil
+	h.qi = nil
+	h.x509CertChain = nil
+	h.x509CertThumbprint = nil
+	h.x509CertThumbprintS256 = nil
+	h.x509URL = nil
+	dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+	for {
+		tok, err := dec.Token()
+		if err != nil {
+			return fmt.Errorf(`error reading token: %w`, err)
+		}
+		switch tok := tok.(type) {
+		case json.Delim:
+			// Assuming we're doing everything correctly, we should ONLY
+			// get either '{' or '}' here.
+			if tok == '}' { // End of object
+				break LOOP
+			} else if tok != '{' {
+				return fmt.Errorf(`expected '{', but got '%c'`, tok)
+			}
+		case string: // Objects can only have string keys
+			switch tok {
+			case KeyTypeKey:
+				val, err := json.ReadNextStringToken(dec)
+				if err != nil {
+					return fmt.Errorf(`error reading token: %w`, err)
+				}
+				if val != jwa.RSA.String() {
+					return fmt.Errorf(`invalid kty value for RSAPrivateKey (%s)`, val)
+				}
+			case AlgorithmKey:
+				var s string
+				if err := dec.Decode(&s); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+				}
+				alg := jwa.KeyAlgorithmFrom(s)
+				h.algorithm = &alg
+			case RSADKey:
+				if err := json.AssignNextBytesToken(&h.d, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, RSADKey, err)
+				}
+			case RSADPKey:
+				if err := json.AssignNextBytesToken(&h.dp, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, RSADPKey, err)
+				}
+			case RSADQKey:
+				if err := json.AssignNextBytesToken(&h.dq, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, RSADQKey, err)
+				}
+			case RSAEKey:
+				if err := json.AssignNextBytesToken(&h.e, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, RSAEKey, err)
+				}
+			case KeyIDKey:
+				if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+				}
+			case KeyOpsKey:
+				var decoded KeyOperationList
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+				}
+				h.keyOps = &decoded
+			case KeyUsageKey:
+				if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+				}
+			case RSANKey:
+				if err := json.AssignNextBytesToken(&h.n, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, RSANKey, err)
+				}
+			case RSAPKey:
+				if err := json.AssignNextBytesToken(&h.p, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, RSAPKey, err)
+				}
+			case RSAQKey:
+				if err := json.AssignNextBytesToken(&h.q, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, RSAQKey, err)
+				}
+			case RSAQIKey:
+				if err := json.AssignNextBytesToken(&h.qi, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, RSAQIKey, err)
+				}
+			case X509CertChainKey:
+				var decoded cert.Chain
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+				}
+				h.x509CertChain = &decoded
+			case X509CertThumbprintKey:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+				}
+			case X509CertThumbprintS256Key:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+				}
+			case X509URLKey:
+				if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+				}
+			default:
+				if dc := h.dc; dc != nil {
+					if localReg := dc.Registry(); localReg != nil {
+						decoded, err := localReg.Decode(dec, tok)
+						if err == nil {
+							h.setNoLock(tok, decoded)
+							continue
+						}
+					}
+				}
+				decoded, err := registry.Decode(dec, tok)
+				if err == nil {
+					h.setNoLock(tok, decoded)
+					continue
+				}
+				return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+			}
+		default:
+			return fmt.Errorf(`invalid token %T`, tok)
+		}
+	}
+	if h.d == nil {
+		return fmt.Errorf(`required field d is missing`)
+	}
+	if h.e == nil {
+		return fmt.Errorf(`required field e is missing`)
+	}
+	if h.n == nil {
+		return fmt.Errorf(`required field n is missing`)
+	}
+	return nil
+}
+
+func (h rsaPrivateKey) MarshalJSON() ([]byte, error) {
+	data := make(map[string]interface{})
+	fields := make([]string, 0, 16)
+	for _, pair := range h.makePairs() {
+		fields = append(fields, pair.Key.(string))
+		data[pair.Key.(string)] = pair.Value
+	}
+
+	sort.Strings(fields)
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+	buf.WriteByte('{')
+	enc := json.NewEncoder(buf)
+	for i, f := range fields {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		buf.WriteRune('"')
+		buf.WriteString(f)
+		buf.WriteString(`":`)
+		v := data[f]
+		switch v := v.(type) {
+		case []byte:
+			buf.WriteRune('"')
+			buf.WriteString(base64.EncodeToString(v))
+			buf.WriteRune('"')
+		default:
+			if err := enc.Encode(v); err != nil {
+				return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+			}
+			buf.Truncate(buf.Len() - 1)
+		}
+	}
+	buf.WriteByte('}')
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+	return ret, nil
+}
+
+func (h *rsaPrivateKey) Iterate(ctx context.Context) HeaderIterator {
+	pairs := h.makePairs()
+	ch := make(chan *HeaderPair, len(pairs))
+	go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) {
+		defer close(ch)
+		for _, pair := range pairs {
+			select {
+			case <-ctx.Done():
+				return
+			case ch <- pair:
+			}
+		}
+	}(ctx, ch, pairs)
+	return mapiter.New(ch)
+}
+
+func (h *rsaPrivateKey) Walk(ctx context.Context, visitor HeaderVisitor) error {
+	return iter.WalkMap(ctx, h, visitor)
+}
+
+func (h *rsaPrivateKey) AsMap(ctx context.Context) (map[string]interface{}, error) {
+	return iter.AsMap(ctx, h)
+}
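
The vendored `rsaPrivateKey` type above is normally built and serialized through the public `jwk` API rather than constructed directly. A minimal sketch of that flow, assuming only the exported `jwk.FromRaw` and `jwk.KeyIDKey` helpers from this package (the key size and `kid` value are illustrative):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"encoding/json"
	"fmt"
	"log"

	"github.com/lestrrat-go/jwx/v2/jwk"
)

func main() {
	raw, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatalf("failed to generate RSA key: %s", err)
	}

	// jwk.FromRaw selects the RSA private key implementation above.
	key, err := jwk.FromRaw(raw)
	if err != nil {
		log.Fatalf("failed to create jwk.Key: %s", err)
	}
	// "kid" goes through Set/setNoLock, just like any other field.
	_ = key.Set(jwk.KeyIDKey, "example-kid")

	// MarshalJSON emits the []byte fields (n, e, d, p, q, ...) as
	// unpadded base64url strings.
	buf, err := json.Marshal(key)
	if err != nil {
		log.Fatalf("failed to marshal key: %s", err)
	}
	fmt.Printf("%s\n", buf)
}
```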
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/set.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/set.go
new file mode 100644
index 0000000000..ab535104db
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/set.go
@@ -0,0 +1,338 @@
+package jwk
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"sort"
+
+	"github.com/lestrrat-go/iter/arrayiter"
+	"github.com/lestrrat-go/iter/mapiter"
+	"github.com/lestrrat-go/jwx/v2/internal/json"
+	"github.com/lestrrat-go/jwx/v2/internal/pool"
+)
+
+const keysKey = `keys` // appease linter
+
+// NewSet creates an empty `jwk.Set` object
+func NewSet() Set {
+	return &set{
+		privateParams: make(map[string]interface{}),
+	}
+}
+
+func (s *set) Set(n string, v interface{}) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if n == keysKey {
+		vl, ok := v.([]Key)
+		if !ok {
+			return fmt.Errorf(`value for field "keys" must be []jwk.Key`)
+		}
+		s.keys = vl
+		return nil
+	}
+
+	s.privateParams[n] = v
+	return nil
+}
+
+func (s *set) Get(n string) (interface{}, bool) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	v, ok := s.privateParams[n]
+	return v, ok
+}
+
+func (s *set) Key(idx int) (Key, bool) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	if idx >= 0 && idx < len(s.keys) {
+		return s.keys[idx], true
+	}
+	return nil, false
+}
+
+func (s *set) Len() int {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	return len(s.keys)
+}
+
+// indexNL is Index(), but without the locking
+func (s *set) indexNL(key Key) int {
+	for i, k := range s.keys {
+		if k == key {
+			return i
+		}
+	}
+	return -1
+}
+
+func (s *set) Index(key Key) int {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	return s.indexNL(key)
+}
+
+func (s *set) AddKey(key Key) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if i := s.indexNL(key); i > -1 {
+		return fmt.Errorf(`(jwk.Set).AddKey: key already exists`)
+	}
+	s.keys = append(s.keys, key)
+	return nil
+}
+
+func (s *set) Remove(name string) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	delete(s.privateParams, name)
+	return nil
+}
+
+func (s *set) RemoveKey(key Key) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	for i, k := range s.keys {
+		if k == key {
+			switch i {
+			case 0:
+				s.keys = s.keys[1:]
+			case len(s.keys) - 1:
+				s.keys = s.keys[:i]
+			default:
+				s.keys = append(s.keys[:i], s.keys[i+1:]...)
+			}
+			return nil
+		}
+	}
+	return fmt.Errorf(`(jwk.Set).RemoveKey: specified key does not exist in set`)
+}
+
+func (s *set) Clear() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.keys = nil
+	s.privateParams = make(map[string]interface{})
+	return nil
+}
+
+func (s *set) Keys(ctx context.Context) KeyIterator {
+	ch := make(chan *KeyPair, s.Len())
+	go iterate(ctx, s.keys, ch)
+	return arrayiter.New(ch)
+}
+
+func iterate(ctx context.Context, keys []Key, ch chan *KeyPair) {
+	defer close(ch)
+
+	for i, key := range keys {
+		pair := &KeyPair{Index: i, Value: key}
+		select {
+		case <-ctx.Done():
+			return
+		case ch <- pair:
+		}
+	}
+}
+
+func (s *set) MarshalJSON() ([]byte, error) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+	enc := json.NewEncoder(buf)
+
+	fields := []string{keysKey}
+	for k := range s.privateParams {
+		fields = append(fields, k)
+	}
+	sort.Strings(fields)
+
+	buf.WriteByte('{')
+	for i, field := range fields {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		fmt.Fprintf(buf, `%q:`, field)
+		if field != keysKey {
+			if err := enc.Encode(s.privateParams[field]); err != nil {
+				return nil, fmt.Errorf(`failed to marshal field %q: %w`, field, err)
+			}
+		} else {
+			buf.WriteByte('[')
+			for j, k := range s.keys {
+				if j > 0 {
+					buf.WriteByte(',')
+				}
+				if err := enc.Encode(k); err != nil {
+					return nil, fmt.Errorf(`failed to marshal key #%d: %w`, j, err)
+				}
+			}
+			buf.WriteByte(']')
+		}
+	}
+	buf.WriteByte('}')
+
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+	return ret, nil
+}
+
+func (s *set) UnmarshalJSON(data []byte) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.privateParams = make(map[string]interface{})
+	s.keys = nil
+
+	var options []ParseOption
+	var ignoreParseError bool
+	if dc := s.dc; dc != nil {
+		if localReg := dc.Registry(); localReg != nil {
+			options = append(options, withLocalRegistry(localReg))
+		}
+		ignoreParseError = dc.IgnoreParseError()
+	}
+
+	var sawKeysField bool
+	dec := json.NewDecoder(bytes.NewReader(data))
+LOOP:
+	for {
+		tok, err := dec.Token()
+		if err != nil {
+			return fmt.Errorf(`error reading token: %w`, err)
+		}
+
+		switch tok := tok.(type) {
+		case json.Delim:
+			// Assuming we're doing everything correctly, we should ONLY
+			// get either '{' or '}' here.
+			if tok == '}' { // End of object
+				break LOOP
+			} else if tok != '{' {
+				return fmt.Errorf(`expected '{', but got '%c'`, tok)
+			}
+		case string:
+			switch tok {
+			case "keys":
+				sawKeysField = true
+				var list []json.RawMessage
+				if err := dec.Decode(&list); err != nil {
+					return fmt.Errorf(`failed to decode "keys": %w`, err)
+				}
+
+				for i, keysrc := range list {
+					key, err := ParseKey(keysrc, options...)
+					if err != nil {
+						if !ignoreParseError {
+							return fmt.Errorf(`failed to decode key #%d in "keys": %w`, i, err)
+						}
+						continue
+					}
+					s.keys = append(s.keys, key)
+				}
+			default:
+				var v interface{}
+				if err := dec.Decode(&v); err != nil {
+					return fmt.Errorf(`failed to decode value for key %q: %w`, tok, err)
+				}
+				s.privateParams[tok] = v
+			}
+		}
+	}
+
+	// This is really silly, but we can only detect the
+	// lack of the "keys" field after going through the
+	// entire object once
+	// Not checking for len(s.keys) == 0, because it could be
+	// an empty key set
+	if !sawKeysField {
+		key, err := ParseKey(data, options...)
+		if err != nil {
+			return fmt.Errorf(`failed to parse sole key in key set: %w`, err)
+		}
+		s.keys = append(s.keys, key)
+	}
+	return nil
+}
+
+func (s *set) LookupKeyID(kid string) (Key, bool) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	n := s.Len()
+	for i := 0; i < n; i++ {
+		key, ok := s.Key(i)
+		if !ok {
+			return nil, false
+		}
+		if key.KeyID() == kid {
+			return key, true
+		}
+	}
+	return nil, false
+}
+
+func (s *set) DecodeCtx() DecodeCtx {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.dc
+}
+
+func (s *set) SetDecodeCtx(dc DecodeCtx) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.dc = dc
+}
+
+func (s *set) Clone() (Set, error) {
+	s2 := &set{}
+
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	s2.keys = make([]Key, len(s.keys))
+	copy(s2.keys, s.keys)
+	return s2, nil
+}
+
+func (s *set) makePairs() []*HeaderPair {
+	pairs := make([]*HeaderPair, 0, len(s.privateParams))
+	for k, v := range s.privateParams {
+		pairs = append(pairs, &HeaderPair{Key: k, Value: v})
+	}
+	sort.Slice(pairs, func(i, j int) bool {
+		//nolint:forcetypeassert
+		return pairs[i].Key.(string) < pairs[j].Key.(string)
+	})
+	return pairs
+}
+
+func (s *set) Iterate(ctx context.Context) HeaderIterator {
+	pairs := s.makePairs()
+	ch := make(chan *HeaderPair, len(pairs))
+	go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) {
+		defer close(ch)
+		for _, pair := range pairs {
+			select {
+			case <-ctx.Done():
+				return
+			case ch <- pair:
+			}
+		}
+	}(ctx, ch, pairs)
+	return mapiter.New(ch)
+}
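
A short usage sketch for the `jwk.Set` implementation vendored above; the key material and `kid` values are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/lestrrat-go/jwx/v2/jwk"
)

func main() {
	set := jwk.NewSet()

	k1, err := jwk.FromRaw([]byte("illustrative-secret"))
	if err != nil {
		log.Fatalf("failed to create key: %s", err)
	}
	_ = k1.Set(jwk.KeyIDKey, "key-1")

	// AddKey returns an error if the exact key is already in the set.
	if err := set.AddKey(k1); err != nil {
		log.Fatalf("failed to add key: %s", err)
	}

	// LookupKeyID walks the keys and matches on the "kid" field.
	if found, ok := set.LookupKeyID("key-1"); ok {
		fmt.Printf("found key with kid %q\n", found.KeyID())
	}
	fmt.Printf("set holds %d key(s)\n", set.Len())
}
```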
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/symmetric.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/symmetric.go
new file mode 100644
index 0000000000..d2498e3341
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/symmetric.go
@@ -0,0 +1,60 @@
+package jwk
+
+import (
+	"crypto"
+	"fmt"
+
+	"github.com/lestrrat-go/blackmagic"
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+)
+
+func (k *symmetricKey) FromRaw(rawKey []byte) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	if len(rawKey) == 0 {
+		return fmt.Errorf(`non-empty []byte key required`)
+	}
+
+	k.octets = rawKey
+
+	return nil
+}
+
+// Raw returns the octets for this symmetric key.
+// Since this is a symmetric key, this simply returns the raw octets.
+func (k *symmetricKey) Raw(v interface{}) error {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+	return blackmagic.AssignIfCompatible(v, k.octets)
+}
+
+// Thumbprint returns the JWK thumbprint using the indicated
+// hashing algorithm, according to RFC 7638
+func (k *symmetricKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+	var octets []byte
+	if err := k.Raw(&octets); err != nil {
+		return nil, fmt.Errorf(`failed to materialize symmetric key: %w`, err)
+	}
+
+	h := hash.New()
+	fmt.Fprint(h, `{"k":"`)
+	fmt.Fprint(h, base64.EncodeToString(octets))
+	fmt.Fprint(h, `","kty":"oct"}`)
+	return h.Sum(nil), nil
+}
+
+func (k *symmetricKey) PublicKey() (Key, error) {
+	newKey := newSymmetricKey()
+
+	for _, pair := range k.makePairs() {
+		//nolint:forcetypeassert
+		key := pair.Key.(string)
+		if err := newKey.Set(key, pair.Value); err != nil {
+			return nil, fmt.Errorf(`failed to set field %q: %w`, key, err)
+		}
+	}
+	return newKey, nil
+}
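
`Thumbprint` above hashes the canonical RFC 7638 representation of an `oct` key, i.e. `{"k":"<base64url octets>","kty":"oct"}`. A hedged standard-library sketch of the same computation (key material illustrative, assuming the internal base64 helper is unpadded base64url, as JWK requires):

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	octets := []byte("illustrative-shared-secret")

	// Canonical RFC 7638 form for an "oct" key: only "k" and "kty",
	// members in lexicographic order, "k" as unpadded base64url.
	k := base64.RawURLEncoding.EncodeToString(octets)
	canonical := fmt.Sprintf(`{"k":%q,"kty":"oct"}`, k)

	sum := sha256.Sum256([]byte(canonical))
	fmt.Printf("thumbprint: %x\n", sum)
}
```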
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/symmetric_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/symmetric_gen.go
new file mode 100644
index 0000000000..fc96c78d7c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/symmetric_gen.go
@@ -0,0 +1,520 @@
+// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT.
+
+package jwk
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"sort"
+	"sync"
+
+	"github.com/lestrrat-go/iter/mapiter"
+	"github.com/lestrrat-go/jwx/v2/cert"
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+	"github.com/lestrrat-go/jwx/v2/internal/iter"
+	"github.com/lestrrat-go/jwx/v2/internal/json"
+	"github.com/lestrrat-go/jwx/v2/internal/pool"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+const (
+	SymmetricOctetsKey = "k"
+)
+
+type SymmetricKey interface {
+	Key
+	FromRaw([]byte) error
+	Octets() []byte
+}
+
+type symmetricKey struct {
+	algorithm              *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4
+	keyID                  *string           // https://tools.ietf.org/html/rfc7515#section-4.1.4
+	keyOps                 *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3
+	keyUsage               *string           // https://tools.ietf.org/html/rfc7517#section-4.2
+	octets                 []byte
+	x509CertChain          *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6
+	x509CertThumbprint     *string     // https://tools.ietf.org/html/rfc7515#section-4.1.7
+	x509CertThumbprintS256 *string     // https://tools.ietf.org/html/rfc7515#section-4.1.8
+	x509URL                *string     // https://tools.ietf.org/html/rfc7515#section-4.1.5
+	privateParams          map[string]interface{}
+	mu                     *sync.RWMutex
+	dc                     json.DecodeCtx
+}
+
+var _ SymmetricKey = &symmetricKey{}
+var _ Key = &symmetricKey{}
+
+func newSymmetricKey() *symmetricKey {
+	return &symmetricKey{
+		mu:            &sync.RWMutex{},
+		privateParams: make(map[string]interface{}),
+	}
+}
+
+func (h symmetricKey) KeyType() jwa.KeyType {
+	return jwa.OctetSeq
+}
+
+func (h *symmetricKey) Algorithm() jwa.KeyAlgorithm {
+	if h.algorithm != nil {
+		return *(h.algorithm)
+	}
+	return jwa.InvalidKeyAlgorithm("")
+}
+
+func (h *symmetricKey) KeyID() string {
+	if h.keyID != nil {
+		return *(h.keyID)
+	}
+	return ""
+}
+
+func (h *symmetricKey) KeyOps() KeyOperationList {
+	if h.keyOps != nil {
+		return *(h.keyOps)
+	}
+	return nil
+}
+
+func (h *symmetricKey) KeyUsage() string {
+	if h.keyUsage != nil {
+		return *(h.keyUsage)
+	}
+	return ""
+}
+
+func (h *symmetricKey) Octets() []byte {
+	return h.octets
+}
+
+func (h *symmetricKey) X509CertChain() *cert.Chain {
+	return h.x509CertChain
+}
+
+func (h *symmetricKey) X509CertThumbprint() string {
+	if h.x509CertThumbprint != nil {
+		return *(h.x509CertThumbprint)
+	}
+	return ""
+}
+
+func (h *symmetricKey) X509CertThumbprintS256() string {
+	if h.x509CertThumbprintS256 != nil {
+		return *(h.x509CertThumbprintS256)
+	}
+	return ""
+}
+
+func (h *symmetricKey) X509URL() string {
+	if h.x509URL != nil {
+		return *(h.x509URL)
+	}
+	return ""
+}
+
+func (h *symmetricKey) makePairs() []*HeaderPair {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+
+	var pairs []*HeaderPair
+	pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.OctetSeq})
+	if h.algorithm != nil {
+		pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)})
+	}
+	if h.keyID != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)})
+	}
+	if h.keyOps != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)})
+	}
+	if h.keyUsage != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)})
+	}
+	if h.octets != nil {
+		pairs = append(pairs, &HeaderPair{Key: SymmetricOctetsKey, Value: h.octets})
+	}
+	if h.x509CertChain != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain})
+	}
+	if h.x509CertThumbprint != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)})
+	}
+	if h.x509CertThumbprintS256 != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)})
+	}
+	if h.x509URL != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)})
+	}
+	for k, v := range h.privateParams {
+		pairs = append(pairs, &HeaderPair{Key: k, Value: v})
+	}
+	return pairs
+}
+
+func (h *symmetricKey) PrivateParams() map[string]interface{} {
+	return h.privateParams
+}
+
+func (h *symmetricKey) Get(name string) (interface{}, bool) {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	switch name {
+	case KeyTypeKey:
+		return h.KeyType(), true
+	case AlgorithmKey:
+		if h.algorithm == nil {
+			return nil, false
+		}
+		return *(h.algorithm), true
+	case KeyIDKey:
+		if h.keyID == nil {
+			return nil, false
+		}
+		return *(h.keyID), true
+	case KeyOpsKey:
+		if h.keyOps == nil {
+			return nil, false
+		}
+		return *(h.keyOps), true
+	case KeyUsageKey:
+		if h.keyUsage == nil {
+			return nil, false
+		}
+		return *(h.keyUsage), true
+	case SymmetricOctetsKey:
+		if h.octets == nil {
+			return nil, false
+		}
+		return h.octets, true
+	case X509CertChainKey:
+		if h.x509CertChain == nil {
+			return nil, false
+		}
+		return h.x509CertChain, true
+	case X509CertThumbprintKey:
+		if h.x509CertThumbprint == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprint), true
+	case X509CertThumbprintS256Key:
+		if h.x509CertThumbprintS256 == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprintS256), true
+	case X509URLKey:
+		if h.x509URL == nil {
+			return nil, false
+		}
+		return *(h.x509URL), true
+	default:
+		v, ok := h.privateParams[name]
+		return v, ok
+	}
+}
+
+func (h *symmetricKey) Set(name string, value interface{}) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	return h.setNoLock(name, value)
+}
+
+func (h *symmetricKey) setNoLock(name string, value interface{}) error {
+	switch name {
+	case "kty":
+		return nil
+	case AlgorithmKey:
+		switch v := value.(type) {
+		case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm:
+			var tmp = jwa.KeyAlgorithmFrom(v)
+			h.algorithm = &tmp
+		case fmt.Stringer:
+			s := v.String()
+			var tmp = jwa.KeyAlgorithmFrom(s)
+			h.algorithm = &tmp
+		default:
+			return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value)
+		}
+		return nil
+	case KeyIDKey:
+		if v, ok := value.(string); ok {
+			h.keyID = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+	case KeyOpsKey:
+		var acceptor KeyOperationList
+		if err := acceptor.Accept(value); err != nil {
+			return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err)
+		}
+		h.keyOps = &acceptor
+		return nil
+	case KeyUsageKey:
+		switch v := value.(type) {
+		case KeyUsageType:
+			switch v {
+			case ForSignature, ForEncryption:
+				tmp := v.String()
+				h.keyUsage = &tmp
+			default:
+				return fmt.Errorf(`invalid key usage type %s`, v)
+			}
+		case string:
+			h.keyUsage = &v
+		default:
+			return fmt.Errorf(`invalid key usage type %s`, v)
+		}
+	case SymmetricOctetsKey:
+		if v, ok := value.([]byte); ok {
+			h.octets = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, SymmetricOctetsKey, value)
+	case X509CertChainKey:
+		if v, ok := value.(*cert.Chain); ok {
+			h.x509CertChain = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+	case X509CertThumbprintKey:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprint = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+	case X509CertThumbprintS256Key:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprintS256 = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+	case X509URLKey:
+		if v, ok := value.(string); ok {
+			h.x509URL = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+	default:
+		if h.privateParams == nil {
+			h.privateParams = map[string]interface{}{}
+		}
+		h.privateParams[name] = value
+	}
+	return nil
+}
+
+func (k *symmetricKey) Remove(key string) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	switch key {
+	case AlgorithmKey:
+		k.algorithm = nil
+	case KeyIDKey:
+		k.keyID = nil
+	case KeyOpsKey:
+		k.keyOps = nil
+	case KeyUsageKey:
+		k.keyUsage = nil
+	case SymmetricOctetsKey:
+		k.octets = nil
+	case X509CertChainKey:
+		k.x509CertChain = nil
+	case X509CertThumbprintKey:
+		k.x509CertThumbprint = nil
+	case X509CertThumbprintS256Key:
+		k.x509CertThumbprintS256 = nil
+	case X509URLKey:
+		k.x509URL = nil
+	default:
+		delete(k.privateParams, key)
+	}
+	return nil
+}
+
+func (k *symmetricKey) Clone() (Key, error) {
+	return cloneKey(k)
+}
+
+func (k *symmetricKey) DecodeCtx() json.DecodeCtx {
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+	return k.dc
+}
+
+func (k *symmetricKey) SetDecodeCtx(dc json.DecodeCtx) {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	k.dc = dc
+}
+
+func (h *symmetricKey) UnmarshalJSON(buf []byte) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.algorithm = nil
+	h.keyID = nil
+	h.keyOps = nil
+	h.keyUsage = nil
+	h.octets = nil
+	h.x509CertChain = nil
+	h.x509CertThumbprint = nil
+	h.x509CertThumbprintS256 = nil
+	h.x509URL = nil
+	dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+	for {
+		tok, err := dec.Token()
+		if err != nil {
+			return fmt.Errorf(`error reading token: %w`, err)
+		}
+		switch tok := tok.(type) {
+		case json.Delim:
+			// Assuming we're doing everything correctly, we should ONLY
+			// get either '{' or '}' here.
+			if tok == '}' { // End of object
+				break LOOP
+			} else if tok != '{' {
+				return fmt.Errorf(`expected '{', but got '%c'`, tok)
+			}
+		case string: // Objects can only have string keys
+			switch tok {
+			case KeyTypeKey:
+				val, err := json.ReadNextStringToken(dec)
+				if err != nil {
+					return fmt.Errorf(`error reading token: %w`, err)
+				}
+				if val != jwa.OctetSeq.String() {
+					return fmt.Errorf(`invalid kty value for symmetricKey (%s)`, val)
+				}
+			case AlgorithmKey:
+				var s string
+				if err := dec.Decode(&s); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+				}
+				alg := jwa.KeyAlgorithmFrom(s)
+				h.algorithm = &alg
+			case KeyIDKey:
+				if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+				}
+			case KeyOpsKey:
+				var decoded KeyOperationList
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err)
+				}
+				h.keyOps = &decoded
+			case KeyUsageKey:
+				if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err)
+				}
+			case SymmetricOctetsKey:
+				if err := json.AssignNextBytesToken(&h.octets, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, SymmetricOctetsKey, err)
+				}
+			case X509CertChainKey:
+				var decoded cert.Chain
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+				}
+				h.x509CertChain = &decoded
+			case X509CertThumbprintKey:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+				}
+			case X509CertThumbprintS256Key:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+				}
+			case X509URLKey:
+				if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+				}
+			default:
+				if dc := h.dc; dc != nil {
+					if localReg := dc.Registry(); localReg != nil {
+						decoded, err := localReg.Decode(dec, tok)
+						if err == nil {
+							h.setNoLock(tok, decoded)
+							continue
+						}
+					}
+				}
+				decoded, err := registry.Decode(dec, tok)
+				if err == nil {
+					h.setNoLock(tok, decoded)
+					continue
+				}
+				return fmt.Errorf(`could not decode field %s: %w`, tok, err)
+			}
+		default:
+			return fmt.Errorf(`invalid token %T`, tok)
+		}
+	}
+	if h.octets == nil {
+		return fmt.Errorf(`required field k is missing`)
+	}
+	return nil
+}
+
+func (h symmetricKey) MarshalJSON() ([]byte, error) {
+	data := make(map[string]interface{})
+	fields := make([]string, 0, 9)
+	for _, pair := range h.makePairs() {
+		fields = append(fields, pair.Key.(string))
+		data[pair.Key.(string)] = pair.Value
+	}
+
+	sort.Strings(fields)
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+	buf.WriteByte('{')
+	enc := json.NewEncoder(buf)
+	for i, f := range fields {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		buf.WriteRune('"')
+		buf.WriteString(f)
+		buf.WriteString(`":`)
+		v := data[f]
+		switch v := v.(type) {
+		case []byte:
+			buf.WriteRune('"')
+			buf.WriteString(base64.EncodeToString(v))
+			buf.WriteRune('"')
+		default:
+			if err := enc.Encode(v); err != nil {
+				return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err)
+			}
+			buf.Truncate(buf.Len() - 1)
+		}
+	}
+	buf.WriteByte('}')
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+	return ret, nil
+}
+
+func (h *symmetricKey) Iterate(ctx context.Context) HeaderIterator {
+	pairs := h.makePairs()
+	ch := make(chan *HeaderPair, len(pairs))
+	go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) {
+		defer close(ch)
+		for _, pair := range pairs {
+			select {
+			case <-ctx.Done():
+				return
+			case ch <- pair:
+			}
+		}
+	}(ctx, ch, pairs)
+	return mapiter.New(ch)
+}
+
+func (h *symmetricKey) Walk(ctx context.Context, visitor HeaderVisitor) error {
+	return iter.WalkMap(ctx, h, visitor)
+}
+
+func (h *symmetricKey) AsMap(ctx context.Context) (map[string]interface{}, error) {
+	return iter.AsMap(ctx, h)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/usage.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/usage.go
new file mode 100644
index 0000000000..c21892395d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/usage.go
@@ -0,0 +1,30 @@
+package jwk
+
+import "fmt"
+
+func (k KeyUsageType) String() string {
+	return string(k)
+}
+
+func (k *KeyUsageType) Accept(v interface{}) error {
+	switch v := v.(type) {
+	case KeyUsageType:
+		switch v {
+		case ForSignature, ForEncryption:
+			*k = v
+			return nil
+		default:
+			return fmt.Errorf("invalid key usage type %s", v)
+		}
+	case string:
+		switch v {
+		case ForSignature.String(), ForEncryption.String():
+			*k = KeyUsageType(v)
+			return nil
+		default:
+			return fmt.Errorf("invalid key usage type %s", v)
+		}
+	}
+
+	return fmt.Errorf("invalid value for key usage type %s", v)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/whitelist.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/whitelist.go
new file mode 100644
index 0000000000..6b0180d307
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/whitelist.go
@@ -0,0 +1,69 @@
+package jwk
+
+import "regexp"
+
+// InsecureWhitelist allows any URLs to be fetched. This is the default
+// behavior of `jwk.Fetch()`, but this exists to allow other libraries
+// (such as jws, via jws.VerifyAuto) and users to be able to explicitly
+// state that they intend to not check the URLs that are being fetched
+type InsecureWhitelist struct{}
+
+func (InsecureWhitelist) IsAllowed(string) bool {
+	return true
+}
+
+// RegexpWhitelist is a jwk.Whitelist object comprised of a list of *regexp.Regexp
+// objects. All entries in the list are tried until one matches. If none of the
+// *regexp.Regexp objects match, then the URL is deemed unallowed.
+type RegexpWhitelist struct {
+	patterns []*regexp.Regexp
+}
+
+func NewRegexpWhitelist() *RegexpWhitelist {
+	return &RegexpWhitelist{}
+}
+
+func (w *RegexpWhitelist) Add(pat *regexp.Regexp) *RegexpWhitelist {
+	w.patterns = append(w.patterns, pat)
+	return w
+}
+
+// IsAllowed returns true if any of the patterns in the whitelist
+// returns true.
+func (w *RegexpWhitelist) IsAllowed(u string) bool {
+	for _, pat := range w.patterns {
+		if pat.MatchString(u) {
+			return true
+		}
+	}
+	return false
+}
+
+// MapWhitelist is a jwk.Whitelist object comprised of a map of strings.
+// If the URL exists in the map, then the URL is allowed to be fetched.
+type MapWhitelist struct {
+	store map[string]struct{}
+}
+
+func NewMapWhitelist() *MapWhitelist {
+	return &MapWhitelist{store: make(map[string]struct{})}
+}
+
+func (w *MapWhitelist) Add(pat string) *MapWhitelist {
+	w.store[pat] = struct{}{}
+	return w
+}
+
+func (w *MapWhitelist) IsAllowed(u string) bool {
+	_, b := w.store[u]
+	return b
+}
+
+// WhitelistFunc is a jwk.Whitelist object based on a function.
+// You can perform any sort of check against the given URL to determine
+// if it can be fetched or not.
+type WhitelistFunc func(string) bool
+
+func (w WhitelistFunc) IsAllowed(u string) bool {
+	return w(u)
+}
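
A short sketch exercising the whitelist types defined above; the URLs are illustrative. Such a whitelist is typically handed to the `jwk` fetching entry points (or to `jws.VerifyAuto`) so that only vetted JWKS URLs are retrieved:

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/lestrrat-go/jwx/v2/jwk"
)

func main() {
	// Allow only JWKS endpoints under a trusted issuer host.
	re := jwk.NewRegexpWhitelist().
		Add(regexp.MustCompile(`^https://idp\.example\.com/`))

	fmt.Println(re.IsAllowed("https://idp.example.com/.well-known/jwks.json")) // true
	fmt.Println(re.IsAllowed("https://attacker.example.net/jwks.json"))        // false

	// WhitelistFunc adapts an arbitrary predicate to the Whitelist interface.
	var wl jwk.Whitelist = jwk.WhitelistFunc(func(u string) bool {
		return u == "https://idp.example.com/jwks.json"
	})
	fmt.Println(wl.IsAllowed("https://idp.example.com/jwks.json")) // true
}
```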
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v2/jws/BUILD.bazel
new file mode 100644
index 0000000000..859c183b36
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/BUILD.bazel
@@ -0,0 +1,69 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "jws",
+    srcs = [
+        "ecdsa.go",
+        "eddsa.go",
+        "headers.go",
+        "headers_gen.go",
+        "hmac.go",
+        "interface.go",
+        "io.go",
+        "jws.go",
+        "key_provider.go",
+        "message.go",
+        "options.go",
+        "options_gen.go",
+        "rsa.go",
+        "signer.go",
+        "verifier.go",
+    ],
+    importpath = "github.com/lestrrat-go/jwx/v2/jws",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//cert",
+        "//internal/base64",
+        "//internal/iter",
+        "//internal/json",
+        "//internal/keyconv",
+        "//internal/pool",
+        "//jwa",
+        "//jwk",
+        "//x25519",
+        "@com_github_lestrrat_go_blackmagic//:go_default_library",
+        "@com_github_lestrrat_go_iter//mapiter:go_default_library",
+        "@com_github_lestrrat_go_option//:option",
+    ],
+)
+
+go_test(
+    name = "jws_test",
+    srcs = [
+        "headers_test.go",
+        "jws_test.go",
+        "message_test.go",
+        "options_gen_test.go",
+        "signer_test.go",
+    ],
+    embed = [":jws"],
+    deps = [
+        "//cert",
+        "//internal/base64",
+        "//internal/json",
+        "//internal/jwxtest",
+        "//jwa",
+        "//jwk",
+        "//jwt",
+        "//x25519",
+        "@com_github_lestrrat_go_httprc//:go_default_library",
+        "@com_github_stretchr_testify//assert",
+        "@com_github_stretchr_testify//require",
+    ],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":jws",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/README.md b/vendor/github.com/lestrrat-go/jwx/v2/jws/README.md
new file mode 100644
index 0000000000..470842ef38
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/README.md
@@ -0,0 +1,111 @@
+# JWS [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v2/jws.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v2/jws)
+
+Package jws implements JWS as described in [RFC7515](https://tools.ietf.org/html/rfc7515) and [RFC7797](https://tools.ietf.org/html/rfc7797).
+
+* Parse and generate compact or JSON serializations
+* Sign and verify arbitrary payload
+* Use any of the keys supported in [github.com/lestrrat-go/jwx/v2/jwk](../jwk)
+* Add arbitrary fields in the JWS object
+* Ability to add/replace existing signature methods
+* Respect "b64" settings for RFC7797
+
+How-to style documentation can be found in the [docs directory](../docs).
+
+Examples are located in the examples directory ([jws_example_test.go](../examples/jws_example_test.go))
+
+Supported signature algorithms:
+
+| Algorithm                               | Supported? | Constant in [jwa](../jwa) |
+|:----------------------------------------|:-----------|:-------------------------|
+| HMAC using SHA-256                      | YES        | jwa.HS256                |
+| HMAC using SHA-384                      | YES        | jwa.HS384                |
+| HMAC using SHA-512                      | YES        | jwa.HS512                |
+| RSASSA-PKCS-v1.5 using SHA-256          | YES        | jwa.RS256                |
+| RSASSA-PKCS-v1.5 using SHA-384          | YES        | jwa.RS384                |
+| RSASSA-PKCS-v1.5 using SHA-512          | YES        | jwa.RS512                |
+| ECDSA using P-256 and SHA-256           | YES        | jwa.ES256                |
+| ECDSA using P-384 and SHA-384           | YES        | jwa.ES384                |
+| ECDSA using P-521 and SHA-512           | YES        | jwa.ES512                |
+| ECDSA using secp256k1 and SHA-256 (2)   | YES        | jwa.ES256K               |
+| RSASSA-PSS using SHA256 and MGF1-SHA256 | YES        | jwa.PS256                |
+| RSASSA-PSS using SHA384 and MGF1-SHA384 | YES        | jwa.PS384                |
+| RSASSA-PSS using SHA512 and MGF1-SHA512 | YES        | jwa.PS512                |
+| EdDSA (1)                               | YES        | jwa.EdDSA                |
+
+* Note 1: Experimental
+* Note 2: Experimental, and must be toggled using `-tags jwx_es256k` build tag
+
+# SYNOPSIS
+
+## Sign and verify arbitrary data
+
+```go
+import (
+  "crypto/rand"
+  "crypto/rsa"
+  "log"
+
+  "github.com/lestrrat-go/jwx/v2/jwa"
+  "github.com/lestrrat-go/jwx/v2/jws"
+)
+
+func main() {
+  privkey, err := rsa.GenerateKey(rand.Reader, 2048)
+  if err != nil {
+    log.Printf("failed to generate private key: %s", err)
+    return
+  }
+
+  buf, err := jws.Sign([]byte("Lorem ipsum"), jws.WithKey(jwa.RS256, privkey))
+  if err != nil {
+    log.Printf("failed to created JWS message: %s", err)
+    return
+  }
+
+  // When you receive a JWS message, you can verify the signature
+  // and grab the payload sent in the message in one go:
+  verified, err := jws.Verify(buf, jws.WithKey(jwa.RS256, &privkey.PublicKey))
+  if err != nil {
+    log.Printf("failed to verify message: %s", err)
+    return
+  }
+
+  log.Printf("signed message verified! -> %s", verified)
+}
+```
+
+## Programmatically manipulate `jws.Message`
+
+```go
+func ExampleMessage() {
+  // initialization for the following variables have been omitted.
+  // please see jws_example_test.go for details
+  var decodedPayload, decodedSig1, decodedSig2 []byte
+  var public1, protected1, public2, protected2 jws.Header
+
+  // Construct a message. DO NOT use values that are base64 encoded
+  m := jws.NewMessage().
+    SetPayload(decodedPayload).
+    AppendSignature(
+      jws.NewSignature().
+        SetSignature(decodedSig1).
+        SetProtectedHeaders(protected1).
+        SetPublicHeaders(public1),
+    ).
+    AppendSignature(
+      jws.NewSignature().
+        SetSignature(decodedSig2).
+        SetProtectedHeaders(protected2).
+        SetPublicHeaders(public2),
+    )
+
+  buf, err := json.MarshalIndent(m, "", "  ")
+  if err != nil {
+    fmt.Printf("%s\n", err)
+    return
+  }
+
+  _ = buf
+}
+```
+
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/ecdsa.go
new file mode 100644
index 0000000000..a2d644e438
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/ecdsa.go
@@ -0,0 +1,197 @@
+package jws
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"encoding/asn1"
+	"fmt"
+	"math/big"
+
+	"github.com/lestrrat-go/jwx/v2/internal/keyconv"
+	"github.com/lestrrat-go/jwx/v2/internal/pool"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+var ecdsaSigners map[jwa.SignatureAlgorithm]*ecdsaSigner
+var ecdsaVerifiers map[jwa.SignatureAlgorithm]*ecdsaVerifier
+
+func init() {
+	algs := map[jwa.SignatureAlgorithm]crypto.Hash{
+		jwa.ES256:  crypto.SHA256,
+		jwa.ES384:  crypto.SHA384,
+		jwa.ES512:  crypto.SHA512,
+		jwa.ES256K: crypto.SHA256,
+	}
+	ecdsaSigners = make(map[jwa.SignatureAlgorithm]*ecdsaSigner)
+	ecdsaVerifiers = make(map[jwa.SignatureAlgorithm]*ecdsaVerifier)
+
+	for alg, hash := range algs {
+		ecdsaSigners[alg] = &ecdsaSigner{
+			alg:  alg,
+			hash: hash,
+		}
+		ecdsaVerifiers[alg] = &ecdsaVerifier{
+			alg:  alg,
+			hash: hash,
+		}
+	}
+}
+
+func newECDSASigner(alg jwa.SignatureAlgorithm) Signer {
+	return ecdsaSigners[alg]
+}
+
+// ecdsaSigners are immutable.
+type ecdsaSigner struct {
+	alg  jwa.SignatureAlgorithm
+	hash crypto.Hash
+}
+
+func (es ecdsaSigner) Algorithm() jwa.SignatureAlgorithm {
+	return es.alg
+}
+
+func (es *ecdsaSigner) Sign(payload []byte, key interface{}) ([]byte, error) {
+	if key == nil {
+		return nil, fmt.Errorf(`missing private key while signing payload`)
+	}
+
+	h := es.hash.New()
+	if _, err := h.Write(payload); err != nil {
+		return nil, fmt.Errorf(`failed to write payload using ecdsa: %w`, err)
+	}
+
+	signer, ok := key.(crypto.Signer)
+	if ok {
+		switch key.(type) {
+		case ecdsa.PrivateKey, *ecdsa.PrivateKey:
+			// if it's an ecdsa.PrivateKey, it's more efficient to
+			// go through the non-crypto.Signer route. Set ok to false
+			ok = false
+		}
+	}
+
+	var r, s *big.Int
+	var curveBits int
+	if ok {
+		signed, err := signer.Sign(rand.Reader, h.Sum(nil), es.hash)
+		if err != nil {
+			return nil, err
+		}
+
+		var p struct {
+			R *big.Int
+			S *big.Int
+		}
+		if _, err := asn1.Unmarshal(signed, &p); err != nil {
+			return nil, fmt.Errorf(`failed to unmarshal ASN1 encoded signature: %w`, err)
+		}
+
+		// Okay, this is silly, but hear me out. When we use the
+		// crypto.Signer interface, the PrivateKey is hidden.
+		// But we need some information about the key (its bit size).
+		//
+		// So while silly, we're going to have to make another call
+		// here and fetch the Public key.
+		// This probably means that this should be cached somewhere.
+		cpub := signer.Public()
+		pubkey, ok := cpub.(*ecdsa.PublicKey)
+		if !ok {
+			return nil, fmt.Errorf(`expected *ecdsa.PublicKey, got %T`, cpub)
+		}
+		curveBits = pubkey.Curve.Params().BitSize
+
+		r = p.R
+		s = p.S
+	} else {
+		var privkey ecdsa.PrivateKey
+		if err := keyconv.ECDSAPrivateKey(&privkey, key); err != nil {
+			return nil, fmt.Errorf(`failed to retrieve ecdsa.PrivateKey out of %T: %w`, key, err)
+		}
+		curveBits = privkey.Curve.Params().BitSize
+		rtmp, stmp, err := ecdsa.Sign(rand.Reader, &privkey, h.Sum(nil))
+		if err != nil {
+			return nil, fmt.Errorf(`failed to sign payload using ecdsa: %w`, err)
+		}
+		r = rtmp
+		s = stmp
+	}
+
+	keyBytes := curveBits / 8
+	// Curve bits do not need to be a multiple of 8.
+	if curveBits%8 > 0 {
+		keyBytes++
+	}
+
+	rBytes := r.Bytes()
+	rBytesPadded := make([]byte, keyBytes)
+	copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+	sBytes := s.Bytes()
+	sBytesPadded := make([]byte, keyBytes)
+	copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+	out := append(rBytesPadded, sBytesPadded...)
+	return out, nil
+}
+
+// ecdsaVerifiers are immutable.
+type ecdsaVerifier struct {
+	alg  jwa.SignatureAlgorithm
+	hash crypto.Hash
+}
+
+func newECDSAVerifier(alg jwa.SignatureAlgorithm) Verifier {
+	return ecdsaVerifiers[alg]
+}
+
+func (v ecdsaVerifier) Algorithm() jwa.SignatureAlgorithm {
+	return v.alg
+}
+
+func (v *ecdsaVerifier) Verify(payload []byte, signature []byte, key interface{}) error {
+	if key == nil {
+		return fmt.Errorf(`missing public key while verifying payload`)
+	}
+
+	var pubkey ecdsa.PublicKey
+	if cs, ok := key.(crypto.Signer); ok {
+		cpub := cs.Public()
+		switch cpub := cpub.(type) {
+		case ecdsa.PublicKey:
+			pubkey = cpub
+		case *ecdsa.PublicKey:
+			pubkey = *cpub
+		default:
+			return fmt.Errorf(`failed to retrieve ecdsa.PublicKey out of crypto.Signer %T`, key)
+		}
+	} else {
+		if err := keyconv.ECDSAPublicKey(&pubkey, key); err != nil {
+			return fmt.Errorf(`failed to retrieve ecdsa.PublicKey out of %T: %w`, key, err)
+		}
+	}
+
+	if !pubkey.Curve.IsOnCurve(pubkey.X, pubkey.Y) {
+		return fmt.Errorf(`public key used does not contain a point (X,Y) on the curve`)
+	}
+
+	r := pool.GetBigInt()
+	s := pool.GetBigInt()
+	defer pool.ReleaseBigInt(r)
+	defer pool.ReleaseBigInt(s)
+
+	n := len(signature) / 2
+	r.SetBytes(signature[:n])
+	s.SetBytes(signature[n:])
+
+	h := v.hash.New()
+	if _, err := h.Write(payload); err != nil {
+		return fmt.Errorf(`failed to write payload using ecdsa: %w`, err)
+	}
+
+	if !ecdsa.Verify(&pubkey, h.Sum(nil), r, s) {
+		return fmt.Errorf(`failed to verify signature using ecdsa`)
+	}
+	return nil
+}
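
The signer above emits the JWS form of an ECDSA signature: `r` and `s` each left-padded to the curve's byte size and concatenated, rather than ASN.1 DER. A small sketch of how the verifier splits that fixed-width layout (values are placeholders; sizes shown are for ES256 / P-256):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	const keyBytes = 32 // ceil(256/8) for P-256

	// A JWS ES256 signature is exactly 2*keyBytes long: r then s, each
	// left-padded with zeros to keyBytes.
	sig := make([]byte, 2*keyBytes) // placeholder signature bytes
	sig[keyBytes-1] = 0x2a          // pretend r = 42
	sig[2*keyBytes-1] = 0x07        // pretend s = 7

	r := new(big.Int).SetBytes(sig[:len(sig)/2])
	s := new(big.Int).SetBytes(sig[len(sig)/2:])
	fmt.Printf("r=%v s=%v len=%d\n", r, s, len(sig))
}
```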
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/eddsa.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/eddsa.go
new file mode 100644
index 0000000000..78c1a2d68d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/eddsa.go
@@ -0,0 +1,73 @@
+package jws
+
+import (
+	"crypto"
+	"crypto/ed25519"
+	"crypto/rand"
+	"fmt"
+
+	"github.com/lestrrat-go/jwx/v2/internal/keyconv"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+type eddsaSigner struct{}
+
+func newEdDSASigner() Signer {
+	return &eddsaSigner{}
+}
+
+func (s eddsaSigner) Algorithm() jwa.SignatureAlgorithm {
+	return jwa.EdDSA
+}
+
+func (s eddsaSigner) Sign(payload []byte, key interface{}) ([]byte, error) {
+	if key == nil {
+		return nil, fmt.Errorf(`missing private key while signing payload`)
+	}
+
+	// The ed25519.PrivateKey object implements crypto.Signer, so we should
+	// simply accept a crypto.Signer here.
+	signer, ok := key.(crypto.Signer)
+	if !ok {
+		// This fallback exists for cases when jwk.Key was passed, or
+		// users gave us a pointer instead of non-pointer, etc.
+		var privkey ed25519.PrivateKey
+		if err := keyconv.Ed25519PrivateKey(&privkey, key); err != nil {
+			return nil, fmt.Errorf(`failed to retrieve ed25519.PrivateKey out of %T: %w`, key, err)
+		}
+		signer = privkey
+	}
+	return signer.Sign(rand.Reader, payload, crypto.Hash(0))
+}
+
+type eddsaVerifier struct{}
+
+func newEdDSAVerifier() Verifier {
+	return &eddsaVerifier{}
+}
+
+func (v eddsaVerifier) Verify(payload, signature []byte, key interface{}) (err error) {
+	if key == nil {
+		return fmt.Errorf(`missing public key while verifying payload`)
+	}
+
+	var pubkey ed25519.PublicKey
+	signer, ok := key.(crypto.Signer)
+	if ok {
+		v := signer.Public()
+		pubkey, ok = v.(ed25519.PublicKey)
+		if !ok {
+			return fmt.Errorf(`expected crypto.Signer.Public() to return ed25519.PublicKey, but got %T`, v)
+		}
+	} else {
+		if err := keyconv.Ed25519PublicKey(&pubkey, key); err != nil {
+			return fmt.Errorf(`failed to retrieve ed25519.PublicKey out of %T: %w`, key, err)
+		}
+	}
+
+	if !ed25519.Verify(pubkey, payload, signature) {
+		return fmt.Errorf(`failed to match EdDSA signature`)
+	}
+
+	return nil
+}
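
A hedged end-to-end sketch of the EdDSA path above via the public `jws` API; because `ed25519.PrivateKey` implements `crypto.Signer`, the signer uses it directly:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/lestrrat-go/jwx/v2/jwa"
	"github.com/lestrrat-go/jwx/v2/jws"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatalf("failed to generate ed25519 key: %s", err)
	}

	// Sign the payload as a compact JWS using the EdDSA signer above.
	signed, err := jws.Sign([]byte("Lorem ipsum"), jws.WithKey(jwa.EdDSA, priv))
	if err != nil {
		log.Fatalf("failed to sign payload: %s", err)
	}

	// Verify with the corresponding public key and recover the payload.
	payload, err := jws.Verify(signed, jws.WithKey(jwa.EdDSA, pub))
	if err != nil {
		log.Fatalf("failed to verify message: %s", err)
	}
	fmt.Printf("verified payload: %s\n", payload)
}
```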
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/es256k.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/es256k.go
new file mode 100644
index 0000000000..c5043805a6
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/es256k.go
@@ -0,0 +1,12 @@
+//go:build jwx_es256k
+// +build jwx_es256k
+
+package jws
+
+import (
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+func init() {
+	addAlgorithmForKeyType(jwa.EC, jwa.ES256K)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/headers.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/headers.go
new file mode 100644
index 0000000000..dce72895e8
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/headers.go
@@ -0,0 +1,71 @@
+package jws
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/lestrrat-go/iter/mapiter"
+	"github.com/lestrrat-go/jwx/v2/internal/iter"
+)
+
+// Iterate returns a channel that successively returns all the
+// header name and values.
+func (h *stdHeaders) Iterate(ctx context.Context) Iterator {
+	pairs := h.makePairs()
+	ch := make(chan *HeaderPair, len(pairs))
+	go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) {
+		defer close(ch)
+		for _, pair := range pairs {
+			select {
+			case <-ctx.Done():
+				return
+			case ch <- pair:
+			}
+		}
+	}(ctx, ch, pairs)
+	return mapiter.New(ch)
+}
+
+func (h *stdHeaders) Walk(ctx context.Context, visitor Visitor) error {
+	return iter.WalkMap(ctx, h, visitor)
+}
+
+func (h *stdHeaders) AsMap(ctx context.Context) (map[string]interface{}, error) {
+	return iter.AsMap(ctx, h)
+}
+
+func (h *stdHeaders) Copy(ctx context.Context, dst Headers) error {
+	for _, pair := range h.makePairs() {
+		//nolint:forcetypeassert
+		key := pair.Key.(string)
+		if err := dst.Set(key, pair.Value); err != nil {
+			return fmt.Errorf(`failed to set header %q: %w`, key, err)
+		}
+	}
+	return nil
+}
+
+// mergeHeaders merges two headers, and works even if the first Header
+// object is nil. This is not exported because, at the moment, this
+// function is not frequently used and MergeHeaders seemed like a clunky name.
+func mergeHeaders(ctx context.Context, h1, h2 Headers) (Headers, error) {
+	h3 := NewHeaders()
+
+	if h1 != nil {
+		if err := h1.Copy(ctx, h3); err != nil {
+			return nil, fmt.Errorf(`failed to copy headers from first Header: %w`, err)
+		}
+	}
+
+	if h2 != nil {
+		if err := h2.Copy(ctx, h3); err != nil {
+			return nil, fmt.Errorf(`failed to copy headers from second Header: %w`, err)
+		}
+	}
+
+	return h3, nil
+}
+
+func (h *stdHeaders) Merge(ctx context.Context, h2 Headers) (Headers, error) {
+	return mergeHeaders(ctx, h, h2)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/headers_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/headers_gen.go
new file mode 100644
index 0000000000..8855d06753
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/headers_gen.go
@@ -0,0 +1,565 @@
+// Code generated by tools/cmd/genjws/main.go. DO NOT EDIT.
+
+package jws
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"sort"
+	"sync"
+
+	"github.com/lestrrat-go/jwx/v2/cert"
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+	"github.com/lestrrat-go/jwx/v2/internal/json"
+	"github.com/lestrrat-go/jwx/v2/internal/pool"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+	"github.com/lestrrat-go/jwx/v2/jwk"
+)
+
+const (
+	AlgorithmKey              = "alg"
+	ContentTypeKey            = "cty"
+	CriticalKey               = "crit"
+	JWKKey                    = "jwk"
+	JWKSetURLKey              = "jku"
+	KeyIDKey                  = "kid"
+	TypeKey                   = "typ"
+	X509CertChainKey          = "x5c"
+	X509CertThumbprintKey     = "x5t"
+	X509CertThumbprintS256Key = "x5t#S256"
+	X509URLKey                = "x5u"
+)
+
+// Headers describe a standard Header set.
+type Headers interface {
+	json.Marshaler
+	json.Unmarshaler
+	Algorithm() jwa.SignatureAlgorithm
+	ContentType() string
+	Critical() []string
+	JWK() jwk.Key
+	JWKSetURL() string
+	KeyID() string
+	Type() string
+	X509CertChain() *cert.Chain
+	X509CertThumbprint() string
+	X509CertThumbprintS256() string
+	X509URL() string
+	Iterate(ctx context.Context) Iterator
+	Walk(context.Context, Visitor) error
+	AsMap(context.Context) (map[string]interface{}, error)
+	Copy(context.Context, Headers) error
+	Merge(context.Context, Headers) (Headers, error)
+	Get(string) (interface{}, bool)
+	Set(string, interface{}) error
+	Remove(string) error
+
+	// PrivateParams returns the non-standard elements in the source structure
+	// WARNING: DO NOT USE PrivateParams() IF YOU HAVE CONCURRENT CODE ACCESSING THEM.
+	// Use AsMap() to get a copy of the entire header instead
+	PrivateParams() map[string]interface{}
+}
+
+type stdHeaders struct {
+	algorithm              *jwa.SignatureAlgorithm // https://tools.ietf.org/html/rfc7515#section-4.1.1
+	contentType            *string                 // https://tools.ietf.org/html/rfc7515#section-4.1.10
+	critical               []string                // https://tools.ietf.org/html/rfc7515#section-4.1.11
+	jwk                    jwk.Key                 // https://tools.ietf.org/html/rfc7515#section-4.1.3
+	jwkSetURL              *string                 // https://tools.ietf.org/html/rfc7515#section-4.1.2
+	keyID                  *string                 // https://tools.ietf.org/html/rfc7515#section-4.1.4
+	typ                    *string                 // https://tools.ietf.org/html/rfc7515#section-4.1.9
+	x509CertChain          *cert.Chain             // https://tools.ietf.org/html/rfc7515#section-4.1.6
+	x509CertThumbprint     *string                 // https://tools.ietf.org/html/rfc7515#section-4.1.7
+	x509CertThumbprintS256 *string                 // https://tools.ietf.org/html/rfc7515#section-4.1.8
+	x509URL                *string                 // https://tools.ietf.org/html/rfc7515#section-4.1.5
+	privateParams          map[string]interface{}
+	mu                     *sync.RWMutex
+	dc                     DecodeCtx
+	raw                    []byte // stores the raw version of the header so it can be used later
+}
+
+func NewHeaders() Headers {
+	return &stdHeaders{
+		mu: &sync.RWMutex{},
+	}
+}
+
+func (h *stdHeaders) Algorithm() jwa.SignatureAlgorithm {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	if h.algorithm == nil {
+		return ""
+	}
+	return *(h.algorithm)
+}
+
+func (h *stdHeaders) ContentType() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	if h.contentType == nil {
+		return ""
+	}
+	return *(h.contentType)
+}
+
+func (h *stdHeaders) Critical() []string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.critical
+}
+
+func (h *stdHeaders) JWK() jwk.Key {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.jwk
+}
+
+func (h *stdHeaders) JWKSetURL() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	if h.jwkSetURL == nil {
+		return ""
+	}
+	return *(h.jwkSetURL)
+}
+
+func (h *stdHeaders) KeyID() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	if h.keyID == nil {
+		return ""
+	}
+	return *(h.keyID)
+}
+
+func (h *stdHeaders) Type() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	if h.typ == nil {
+		return ""
+	}
+	return *(h.typ)
+}
+
+func (h *stdHeaders) X509CertChain() *cert.Chain {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.x509CertChain
+}
+
+func (h *stdHeaders) X509CertThumbprint() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	if h.x509CertThumbprint == nil {
+		return ""
+	}
+	return *(h.x509CertThumbprint)
+}
+
+func (h *stdHeaders) X509CertThumbprintS256() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	if h.x509CertThumbprintS256 == nil {
+		return ""
+	}
+	return *(h.x509CertThumbprintS256)
+}
+
+func (h *stdHeaders) X509URL() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	if h.x509URL == nil {
+		return ""
+	}
+	return *(h.x509URL)
+}
+
+func (h *stdHeaders) clear() {
+	h.algorithm = nil
+	h.contentType = nil
+	h.critical = nil
+	h.jwk = nil
+	h.jwkSetURL = nil
+	h.keyID = nil
+	h.typ = nil
+	h.x509CertChain = nil
+	h.x509CertThumbprint = nil
+	h.x509CertThumbprintS256 = nil
+	h.x509URL = nil
+	h.privateParams = nil
+	h.raw = nil
+}
+
+func (h *stdHeaders) DecodeCtx() DecodeCtx {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.dc
+}
+
+func (h *stdHeaders) SetDecodeCtx(dc DecodeCtx) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.dc = dc
+}
+
+func (h *stdHeaders) rawBuffer() []byte {
+	return h.raw
+}
+
+func (h *stdHeaders) makePairs() []*HeaderPair {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	var pairs []*HeaderPair
+	if h.algorithm != nil {
+		pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)})
+	}
+	if h.contentType != nil {
+		pairs = append(pairs, &HeaderPair{Key: ContentTypeKey, Value: *(h.contentType)})
+	}
+	if h.critical != nil {
+		pairs = append(pairs, &HeaderPair{Key: CriticalKey, Value: h.critical})
+	}
+	if h.jwk != nil {
+		pairs = append(pairs, &HeaderPair{Key: JWKKey, Value: h.jwk})
+	}
+	if h.jwkSetURL != nil {
+		pairs = append(pairs, &HeaderPair{Key: JWKSetURLKey, Value: *(h.jwkSetURL)})
+	}
+	if h.keyID != nil {
+		pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)})
+	}
+	if h.typ != nil {
+		pairs = append(pairs, &HeaderPair{Key: TypeKey, Value: *(h.typ)})
+	}
+	if h.x509CertChain != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain})
+	}
+	if h.x509CertThumbprint != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)})
+	}
+	if h.x509CertThumbprintS256 != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)})
+	}
+	if h.x509URL != nil {
+		pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)})
+	}
+	for k, v := range h.privateParams {
+		pairs = append(pairs, &HeaderPair{Key: k, Value: v})
+	}
+	sort.Slice(pairs, func(i, j int) bool {
+		return pairs[i].Key.(string) < pairs[j].Key.(string)
+	})
+	return pairs
+}
+
+func (h *stdHeaders) PrivateParams() map[string]interface{} {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.privateParams
+}
+
+func (h *stdHeaders) Get(name string) (interface{}, bool) {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	switch name {
+	case AlgorithmKey:
+		if h.algorithm == nil {
+			return nil, false
+		}
+		return *(h.algorithm), true
+	case ContentTypeKey:
+		if h.contentType == nil {
+			return nil, false
+		}
+		return *(h.contentType), true
+	case CriticalKey:
+		if h.critical == nil {
+			return nil, false
+		}
+		return h.critical, true
+	case JWKKey:
+		if h.jwk == nil {
+			return nil, false
+		}
+		return h.jwk, true
+	case JWKSetURLKey:
+		if h.jwkSetURL == nil {
+			return nil, false
+		}
+		return *(h.jwkSetURL), true
+	case KeyIDKey:
+		if h.keyID == nil {
+			return nil, false
+		}
+		return *(h.keyID), true
+	case TypeKey:
+		if h.typ == nil {
+			return nil, false
+		}
+		return *(h.typ), true
+	case X509CertChainKey:
+		if h.x509CertChain == nil {
+			return nil, false
+		}
+		return h.x509CertChain, true
+	case X509CertThumbprintKey:
+		if h.x509CertThumbprint == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprint), true
+	case X509CertThumbprintS256Key:
+		if h.x509CertThumbprintS256 == nil {
+			return nil, false
+		}
+		return *(h.x509CertThumbprintS256), true
+	case X509URLKey:
+		if h.x509URL == nil {
+			return nil, false
+		}
+		return *(h.x509URL), true
+	default:
+		v, ok := h.privateParams[name]
+		return v, ok
+	}
+}
+
+func (h *stdHeaders) Set(name string, value interface{}) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	return h.setNoLock(name, value)
+}
+
+func (h *stdHeaders) setNoLock(name string, value interface{}) error {
+	switch name {
+	case AlgorithmKey:
+		var acceptor jwa.SignatureAlgorithm
+		if err := acceptor.Accept(value); err != nil {
+			return fmt.Errorf(`invalid value for %s key: %w`, AlgorithmKey, err)
+		}
+		h.algorithm = &acceptor
+		return nil
+	case ContentTypeKey:
+		if v, ok := value.(string); ok {
+			h.contentType = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, ContentTypeKey, value)
+	case CriticalKey:
+		if v, ok := value.([]string); ok {
+			h.critical = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, CriticalKey, value)
+	case JWKKey:
+		if v, ok := value.(jwk.Key); ok {
+			h.jwk = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, JWKKey, value)
+	case JWKSetURLKey:
+		if v, ok := value.(string); ok {
+			h.jwkSetURL = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, JWKSetURLKey, value)
+	case KeyIDKey:
+		if v, ok := value.(string); ok {
+			h.keyID = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+	case TypeKey:
+		if v, ok := value.(string); ok {
+			h.typ = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, TypeKey, value)
+	case X509CertChainKey:
+		if v, ok := value.(*cert.Chain); ok {
+			h.x509CertChain = v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value)
+	case X509CertThumbprintKey:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprint = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value)
+	case X509CertThumbprintS256Key:
+		if v, ok := value.(string); ok {
+			h.x509CertThumbprintS256 = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value)
+	case X509URLKey:
+		if v, ok := value.(string); ok {
+			h.x509URL = &v
+			return nil
+		}
+		return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value)
+	default:
+		if h.privateParams == nil {
+			h.privateParams = map[string]interface{}{}
+		}
+		h.privateParams[name] = value
+	}
+	return nil
+}
+
+func (h *stdHeaders) Remove(key string) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	switch key {
+	case AlgorithmKey:
+		h.algorithm = nil
+	case ContentTypeKey:
+		h.contentType = nil
+	case CriticalKey:
+		h.critical = nil
+	case JWKKey:
+		h.jwk = nil
+	case JWKSetURLKey:
+		h.jwkSetURL = nil
+	case KeyIDKey:
+		h.keyID = nil
+	case TypeKey:
+		h.typ = nil
+	case X509CertChainKey:
+		h.x509CertChain = nil
+	case X509CertThumbprintKey:
+		h.x509CertThumbprint = nil
+	case X509CertThumbprintS256Key:
+		h.x509CertThumbprintS256 = nil
+	case X509URLKey:
+		h.x509URL = nil
+	default:
+		delete(h.privateParams, key)
+	}
+	return nil
+}
+
+func (h *stdHeaders) UnmarshalJSON(buf []byte) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.clear()
+	dec := json.NewDecoder(bytes.NewReader(buf))
+LOOP:
+	for {
+		tok, err := dec.Token()
+		if err != nil {
+			return fmt.Errorf(`error reading token: %w`, err)
+		}
+		switch tok := tok.(type) {
+		case json.Delim:
+			// Assuming we're doing everything correctly, we should ONLY
+			// get either '{' or '}' here.
+			if tok == '}' { // End of object
+				break LOOP
+			} else if tok != '{' {
+				return fmt.Errorf(`expected '{', but got '%c'`, tok)
+			}
+		case string: // Objects can only have string keys
+			switch tok {
+			case AlgorithmKey:
+				var decoded jwa.SignatureAlgorithm
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err)
+				}
+				h.algorithm = &decoded
+			case ContentTypeKey:
+				if err := json.AssignNextStringToken(&h.contentType, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, ContentTypeKey, err)
+				}
+			case CriticalKey:
+				var decoded []string
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, CriticalKey, err)
+				}
+				h.critical = decoded
+			case JWKKey:
+				var buf json.RawMessage
+				if err := dec.Decode(&buf); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, JWKKey, err)
+				}
+				key, err := jwk.ParseKey(buf)
+				if err != nil {
+					return fmt.Errorf(`failed to parse JWK for key %s: %w`, JWKKey, err)
+				}
+				h.jwk = key
+			case JWKSetURLKey:
+				if err := json.AssignNextStringToken(&h.jwkSetURL, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, JWKSetURLKey, err)
+				}
+			case KeyIDKey:
+				if err := json.AssignNextStringToken(&h.keyID, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err)
+				}
+			case TypeKey:
+				if err := json.AssignNextStringToken(&h.typ, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, TypeKey, err)
+				}
+			case X509CertChainKey:
+				var decoded cert.Chain
+				if err := dec.Decode(&decoded); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err)
+				}
+				h.x509CertChain = &decoded
+			case X509CertThumbprintKey:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err)
+				}
+			case X509CertThumbprintS256Key:
+				if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err)
+				}
+			case X509URLKey:
+				if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil {
+					return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err)
+				}
+			default:
+				decoded, err := registry.Decode(dec, tok)
+				if err != nil {
+					return err
+				}
+				h.setNoLock(tok, decoded)
+			}
+		default:
+			return fmt.Errorf(`invalid token %T`, tok)
+		}
+	}
+	h.raw = buf
+	return nil
+}
+
+func (h stdHeaders) MarshalJSON() ([]byte, error) {
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+	buf.WriteByte('{')
+	enc := json.NewEncoder(buf)
+	for i, p := range h.makePairs() {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		buf.WriteRune('"')
+		buf.WriteString(p.Key.(string))
+		buf.WriteString(`":`)
+		v := p.Value
+		switch v := v.(type) {
+		case []byte:
+			buf.WriteRune('"')
+			buf.WriteString(base64.EncodeToString(v))
+			buf.WriteRune('"')
+		default:
+			if err := enc.Encode(v); err != nil {
+				return nil, fmt.Errorf(`failed to encode value for field %s: %w`, p.Key, err)
+			}
+			buf.Truncate(buf.Len() - 1)
+		}
+	}
+	buf.WriteByte('}')
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+	return ret, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/hmac.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/hmac.go
new file mode 100644
index 0000000000..247ebc76dd
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/hmac.go
@@ -0,0 +1,77 @@
+package jws
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"crypto/sha512"
+	"fmt"
+	"hash"
+
+	"github.com/lestrrat-go/jwx/v2/internal/keyconv"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+var hmacSignFuncs = map[jwa.SignatureAlgorithm]hmacSignFunc{}
+
+func init() {
+	algs := map[jwa.SignatureAlgorithm]func() hash.Hash{
+		jwa.HS256: sha256.New,
+		jwa.HS384: sha512.New384,
+		jwa.HS512: sha512.New,
+	}
+
+	for alg, h := range algs {
+		hmacSignFuncs[alg] = makeHMACSignFunc(h)
+	}
+}
+
+func newHMACSigner(alg jwa.SignatureAlgorithm) Signer {
+	return &HMACSigner{
+		alg:  alg,
+		sign: hmacSignFuncs[alg], // we know this will succeed
+	}
+}
+
+func makeHMACSignFunc(hfunc func() hash.Hash) hmacSignFunc {
+	return func(payload []byte, key []byte) ([]byte, error) {
+		h := hmac.New(hfunc, key)
+		if _, err := h.Write(payload); err != nil {
+			return nil, fmt.Errorf(`failed to write payload using hmac: %w`, err)
+		}
+		return h.Sum(nil), nil
+	}
+}
+
+func (s HMACSigner) Algorithm() jwa.SignatureAlgorithm {
+	return s.alg
+}
+
+func (s HMACSigner) Sign(payload []byte, key interface{}) ([]byte, error) {
+	var hmackey []byte
+	if err := keyconv.ByteSliceKey(&hmackey, key); err != nil {
+		return nil, fmt.Errorf(`invalid key type %T. []byte is required: %w`, key, err)
+	}
+
+	if len(hmackey) == 0 {
+		return nil, fmt.Errorf(`missing key while signing payload`)
+	}
+
+	return s.sign(payload, hmackey)
+}
+
+func newHMACVerifier(alg jwa.SignatureAlgorithm) Verifier {
+	s := newHMACSigner(alg)
+	return &HMACVerifier{signer: s}
+}
+
+func (v HMACVerifier) Verify(payload, signature []byte, key interface{}) (err error) {
+	expected, err := v.signer.Sign(payload, key)
+	if err != nil {
+		return fmt.Errorf(`failed to generate signature: %w`, err)
+	}
+
+	if !hmac.Equal(signature, expected) {
+		return fmt.Errorf(`failed to match hmac signature`)
+	}
+	return nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/interface.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/interface.go
new file mode 100644
index 0000000000..9df909a7da
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/interface.go
@@ -0,0 +1,106 @@
+package jws
+
+import (
+	"github.com/lestrrat-go/iter/mapiter"
+	"github.com/lestrrat-go/jwx/v2/internal/iter"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+type DecodeCtx interface {
+	CollectRaw() bool
+}
+
+// Message represents a full JWS encoded message. Flattened serialization
+// is not supported as a struct, but rather it's represented as a
+// Message struct with only one `signature` element.
+//
+// Do not expect to use the Message object to verify or construct a
+// signed payload with. You should only use this when you want to actually
+// programmatically view the contents of the full JWS payload.
+//
+// As of this version, there is one big incompatibility when using Message
+// objects to convert between compact and JSON representations.
+// The protected header is sometimes encoded differently from the original
+// message and the JSON serialization that we use in Go.
+//
+// For example, the protected header `eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9`
+// decodes to
+//
+//	{"typ":"JWT",
+//	  "alg":"HS256"}
+//
+// However, when we parse this into a message, we create a jws.Header object,
+// which, when we marshal into a JSON object again, becomes
+//
+//	{"typ":"JWT","alg":"HS256"}
+//
+// Notice that serialization lacks a line break and a space between `"JWT",`
+// and `"alg"`. This causes a problem when verifying the signatures AFTER
+// a compact JWS message has been unmarshaled into a jws.Message.
+//
+// jws.Verify() doesn't go through this step, and therefore this does not
+// manifest itself. However, you may see this discrepancy when you manually
+// go through these conversions, and/or use the `jwx` tool like so:
+//
+//	jwx jws parse message.jws | jwx jws verify --key somekey.jwk --stdin
+//
+// In this scenario, the first `jwx jws parse` outputs a parsed jws.Message
+// which is marshaled into JSON. At this point the message's protected
+// headers and the signatures don't match.
+//
+// To sign and verify, use the appropriate `Sign()` and `Verify()` functions.
+type Message struct {
+	dc         DecodeCtx
+	payload    []byte
+	signatures []*Signature
+	b64        bool // true if payload should be base64 encoded
+}
+
+type Signature struct {
+	dc        DecodeCtx
+	headers   Headers // Unprotected Headers
+	protected Headers // Protected Headers
+	signature []byte  // Signature
+	detached  bool
+}
+
+type Visitor = iter.MapVisitor
+type VisitorFunc = iter.MapVisitorFunc
+type HeaderPair = mapiter.Pair
+type Iterator = mapiter.Iterator
+
+// Signer generates the signature for a given payload.
+type Signer interface {
+	// Sign creates a signature for the given payload.
+	// The second argument is the key used for signing the payload, and is usually
+	// the private key type associated with the signature method. For example,
+	// for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the
+	// `*"crypto/rsa".PrivateKey` type.
+	// Check the documentation for each signer for details
+	Sign([]byte, interface{}) ([]byte, error)
+
+	Algorithm() jwa.SignatureAlgorithm
+}
+
+type hmacSignFunc func([]byte, []byte) ([]byte, error)
+
+// HMACSigner uses crypto/hmac to sign the payloads.
+type HMACSigner struct {
+	alg  jwa.SignatureAlgorithm
+	sign hmacSignFunc
+}
+
+type Verifier interface {
+	// Verify checks whether the payload and signature are valid for
+	// the given key.
+	// `key` is the key used for verifying the payload, and is usually
+	// the public key associated with the signature method. For example,
+	// for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the
+	// `*"crypto/rsa".PublicKey` type.
+	// Check the documentation for each verifier for details
+	Verify(payload []byte, signature []byte, key interface{}) error
+}
+
+type HMACVerifier struct {
+	signer Signer
+}
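
The `Message` and `Signature` types above are mainly for inspection. A minimal sketch of that use, assuming a placeholder compact token: parse the message (no verification happens here) and read each signature's protected headers via the accessors defined in this package.

```go
package main

import (
	"fmt"

	"github.com/lestrrat-go/jwx/v2/jws"
)

func main() {
	// Placeholder compact JWS ({"alg":"HS256"} header, "hello" payload, dummy signature).
	token := []byte("eyJhbGciOiJIUzI1NiJ9.aGVsbG8.c2lnbmF0dXJl")

	// Parse only decodes the message structure; it does not verify anything.
	msg, err := jws.Parse(token)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}

	for _, sig := range msg.Signatures() {
		hdr := sig.ProtectedHeaders()
		fmt.Println("alg:", hdr.Algorithm(), "kid:", hdr.KeyID())
	}
}
```
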
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/io.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/io.go
new file mode 100644
index 0000000000..0d9dbd6cc3
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/io.go
@@ -0,0 +1,33 @@
+// Code generated by tools/cmd/genreadfile/main.go. DO NOT EDIT.
+
+package jws
+
+import (
+	"io/fs"
+	"os"
+)
+
+type sysFS struct{}
+
+func (sysFS) Open(path string) (fs.File, error) {
+	return os.Open(path)
+}
+
+func ReadFile(path string, options ...ReadFileOption) (*Message, error) {
+
+	var srcFS fs.FS = sysFS{}
+	for _, option := range options {
+		switch option.Ident() {
+		case identFS{}:
+			srcFS = option.Value().(fs.FS)
+		}
+	}
+
+	f, err := srcFS.Open(path)
+	if err != nil {
+		return nil, err
+	}
+
+	defer f.Close()
+	return ParseReader(f)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/jws.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/jws.go
new file mode 100644
index 0000000000..09100ec17d
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/jws.go
@@ -0,0 +1,749 @@
+//go:generate ../tools/cmd/genjws.sh
+
+// Package jws implements the digital signature on JSON based data
+// structures as described in https://tools.ietf.org/html/rfc7515
+//
+// If you do not care about the details, the only things that you
+// would need to use are the following functions:
+//
+//	jws.Sign(payload, jws.WithKey(algorithm, key))
+//	jws.Verify(serialized, jws.WithKey(algorithm, key))
+//
+// To sign, simply use `jws.Sign`. `payload` is a []byte buffer that
+// contains whatever data you want to sign. `algorithm` is one of the
+// jwa.SignatureAlgorithm constants from package jwa. For the RSA and
+// ECDSA families of algorithms, you will need to prepare a private key.
+// For the HMAC family, you just need a []byte value. The `jws.Sign`
+// function will return the encoded JWS message on success.
+//
+// To verify, use `jws.Verify`. It will parse the `serialized` buffer
+// and verify the result using `algorithm` and `key`. Upon successful
+// verification, the original payload is returned, so you can work on it.
+package jws
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/rsa"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/lestrrat-go/blackmagic"
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+	"github.com/lestrrat-go/jwx/v2/internal/json"
+	"github.com/lestrrat-go/jwx/v2/internal/pool"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+	"github.com/lestrrat-go/jwx/v2/jwk"
+	"github.com/lestrrat-go/jwx/v2/x25519"
+)
+
+var registry = json.NewRegistry()
+
+type payloadSigner struct {
+	signer    Signer
+	key       interface{}
+	protected Headers
+	public    Headers
+}
+
+func (s *payloadSigner) Sign(payload []byte) ([]byte, error) {
+	return s.signer.Sign(payload, s.key)
+}
+
+func (s *payloadSigner) Algorithm() jwa.SignatureAlgorithm {
+	return s.signer.Algorithm()
+}
+
+func (s *payloadSigner) ProtectedHeader() Headers {
+	return s.protected
+}
+
+func (s *payloadSigner) PublicHeader() Headers {
+	return s.public
+}
+
+var signers = make(map[jwa.SignatureAlgorithm]Signer)
+var muSigner = &sync.Mutex{}
+
+func makeSigner(alg jwa.SignatureAlgorithm, key interface{}, public, protected Headers) (*payloadSigner, error) {
+	muSigner.Lock()
+	signer, ok := signers[alg]
+	if !ok {
+		v, err := NewSigner(alg)
+		if err != nil {
+			muSigner.Unlock()
+			return nil, fmt.Errorf(`failed to create payload signer: %w`, err)
+		}
+		signers[alg] = v
+		signer = v
+	}
+	muSigner.Unlock()
+
+	return &payloadSigner{
+		signer:    signer,
+		key:       key,
+		public:    public,
+		protected: protected,
+	}, nil
+}
+
+const (
+	fmtInvalid = iota
+	fmtCompact
+	fmtJSON
+	fmtJSONPretty
+	fmtMax
+)
+
+// silence linters
+var _ = fmtInvalid
+var _ = fmtMax
+
+// Sign generates a JWS message for the given payload and returns
+// it in serialized form, which can be in either compact or
+// JSON format. Default is compact.
+//
+// You must pass at least one key to `jws.Sign()` by using `jws.WithKey()`
+// option.
+//
+//	jws.Sign(payload, jws.WithKey(alg, key))
+//	jws.Sign(payload, jws.WithJSON(), jws.WithKey(alg1, key1), jws.WithKey(alg2, key2))
+//
+// Note that in the second example the `jws.WithJSON()` option is
+// specified as well. This is because the compact serialization
+// format does not support multiple signatures, and users must
+// specifically ask for the JSON serialization format.
+//
+// Read the documentation for `jws.WithKey()` to learn more about the
+// possible values that can be used for `alg` and `key`.
+//
+// You may create JWS messages with the "none" (jwa.NoSignature) algorithm
+// if you use the `jws.WithInsecureNoSignature()` option. This option
+// can be combined with one or more signature keys, as well as the
+// `jws.WithJSON()` option to generate multiple signatures (though
+// the usefulness of such constructs is highly debatable).
+//
+// Note that this library does not allow you to successfully call `jws.Verify()` on
+// signatures with the "none" algorithm. To parse these, use `jws.Parse()` instead.
+//
+// If you want to use a detached payload, use `jws.WithDetachedPayload()` as
+// one of the options. When you use this option, you must always set the
+// first parameter (`payload`) to `nil`, or the function will return an error.
+//
+// You may also want to look at how to pass protected headers to the
+// signing process, as you will likely be required to set the `b64` field
+// when using detached payload.
+//
+// Look for options that return `jws.SignOption` or `jws.SignVerifyOption`
+// for a complete list of options that can be passed to this function.
+func Sign(payload []byte, options ...SignOption) ([]byte, error) {
+	format := fmtCompact
+	var signers []*payloadSigner
+	var detached bool
+	var noneSignature *payloadSigner
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identSerialization{}:
+			format = option.Value().(int)
+		case identInsecureNoSignature{}:
+			data := option.Value().(*withInsecureNoSignature)
+			// only the last one is used (we overwrite previous values)
+			noneSignature = &payloadSigner{
+				signer:    noneSigner{},
+				protected: data.protected,
+			}
+		case identKey{}:
+			data := option.Value().(*withKey)
+
+			alg, ok := data.alg.(jwa.SignatureAlgorithm)
+			if !ok {
+				return nil, fmt.Errorf(`jws.Sign: expected algorithm to be of type jwa.SignatureAlgorithm but got (%[1]q, %[1]T)`, data.alg)
+			}
+
+			// No, we don't accept "none" here.
+			if alg == jwa.NoSignature {
+				return nil, fmt.Errorf(`jws.Sign: "none" (jwa.NoSignature) cannot be used with jws.WithKey`)
+			}
+
+			signer, err := makeSigner(alg, data.key, data.public, data.protected)
+			if err != nil {
+				return nil, fmt.Errorf(`jws.Sign: failed to create signer: %w`, err)
+			}
+			signers = append(signers, signer)
+		case identDetachedPayload{}:
+			detached = true
+			if payload != nil {
+				return nil, fmt.Errorf(`jws.Sign: payload must be nil when jws.WithDetachedPayload() is specified`)
+			}
+			payload = option.Value().([]byte)
+		}
+	}
+
+	if noneSignature != nil {
+		signers = append(signers, noneSignature)
+	}
+
+	lsigner := len(signers)
+	if lsigner == 0 {
+		return nil, fmt.Errorf(`jws.Sign: no signers available. Specify an algorithm and a key using jws.WithKey()`)
+	}
+
+	// Design note: while we could have easily set format = fmtJSON when
+	// lsigner > 1, I believe the decision to change serialization formats
+	// must be explicitly stated by the caller. Otherwise I'm pretty sure
+	// there would be people filing issues saying "I get JSON when I expected
+	// compact serialization".
+	//
+	// Therefore, instead of making implicit format conversions, we force the
+	// user to spell it out as `jws.Sign(..., jws.WithJSON(), jws.WithKey(...), jws.WithKey(...))`
+	if format == fmtCompact && lsigner != 1 {
+		return nil, fmt.Errorf(`jws.Sign: cannot have multiple signers (keys) specified for compact serialization. Use only one jws.WithKey()`)
+	}
+
+	// Create a Message object with all the bits and bobs, and we'll
+	// serialize it in the end
+	var result Message
+
+	result.payload = payload
+
+	result.signatures = make([]*Signature, 0, len(signers))
+	for i, signer := range signers {
+		protected := signer.ProtectedHeader()
+		if protected == nil {
+			protected = NewHeaders()
+		}
+
+		if err := protected.Set(AlgorithmKey, signer.Algorithm()); err != nil {
+			return nil, fmt.Errorf(`failed to set "alg" header: %w`, err)
+		}
+
+		if key, ok := signer.key.(jwk.Key); ok {
+			if kid := key.KeyID(); kid != "" {
+				if err := protected.Set(KeyIDKey, kid); err != nil {
+					return nil, fmt.Errorf(`failed to set "kid" header: %w`, err)
+				}
+			}
+		}
+		sig := &Signature{
+			headers:   signer.PublicHeader(),
+			protected: protected,
+			// cheat. FIXXXXXXMEEEEEE
+			detached: detached,
+		}
+		_, _, err := sig.Sign(payload, signer.signer, signer.key)
+		if err != nil {
+			return nil, fmt.Errorf(`failed to generate signature for signer #%d (alg=%s): %w`, i, signer.Algorithm(), err)
+		}
+
+		result.signatures = append(result.signatures, sig)
+	}
+
+	switch format {
+	case fmtJSON:
+		return json.Marshal(result)
+	case fmtJSONPretty:
+		return json.MarshalIndent(result, "", "  ")
+	case fmtCompact:
+		// Take the only signature object, and convert it into a Compact
+		// serialization format
+		var compactOpts []CompactOption
+		if detached {
+			compactOpts = append(compactOpts, WithDetached(detached))
+		}
+		return Compact(&result, compactOpts...)
+	default:
+		return nil, fmt.Errorf(`jws.Sign: invalid serialization format`)
+	}
+}
+
+var allowNoneWhitelist = jwk.WhitelistFunc(func(string) bool {
+	return false
+})
+
+// Verify checks if the given JWS message is verifiable using `alg` and `key`.
+// `key` may be a "raw" key (e.g. rsa.PublicKey) or a jwk.Key
+//
+// If the verification is successful, `err` is nil, and the content of the
+// payload that was signed is returned. If you need more fine-grained
+// control of the verification process, manually generate a
+// `Verifier` in `verify` subpackage, and call `Verify` method on it.
+// If you need to access signatures and JOSE headers in a JWS message,
+// use `Parse` function to get `Message` object.
+//
+// Because the use of "none" (jwa.NoSignature) algorithm is strongly discouraged,
+// this function DOES NOT consider it a success when `{"alg":"none"}` is
+// encountered in the message (it would also be counter intuitive when the code says
+// you _verified_ something when in fact it did no such thing). If you want to
+// accept messages with "none" signature algorithm, use `jws.Parse` to get the
+// raw JWS message.
+func Verify(buf []byte, options ...VerifyOption) ([]byte, error) {
+	var dst *Message
+	var detachedPayload []byte
+	var keyProviders []KeyProvider
+	var keyUsed interface{}
+
+	ctx := context.Background()
+
+	//nolint:forcetypeassert
+	for _, option := range options {
+		switch option.Ident() {
+		case identMessage{}:
+			dst = option.Value().(*Message)
+		case identDetachedPayload{}:
+			detachedPayload = option.Value().([]byte)
+		case identKey{}:
+			pair := option.Value().(*withKey)
+			alg, ok := pair.alg.(jwa.SignatureAlgorithm)
+			if !ok {
+				return nil, fmt.Errorf(`WithKey() option must be specified using jwa.SignatureAlgorithm (got %T)`, pair.alg)
+			}
+			keyProviders = append(keyProviders, &staticKeyProvider{
+				alg: alg,
+				key: pair.key,
+			})
+		case identKeyProvider{}:
+			keyProviders = append(keyProviders, option.Value().(KeyProvider))
+		case identKeyUsed{}:
+			keyUsed = option.Value()
+		case identContext{}:
+			ctx = option.Value().(context.Context)
+		default:
+			return nil, fmt.Errorf(`invalid jws.VerifyOption %q passed`, `With`+strings.TrimPrefix(fmt.Sprintf(`%T`, option.Ident()), `jws.ident`))
+		}
+	}
+
+	if len(keyProviders) < 1 {
+		return nil, fmt.Errorf(`jws.Verify: no key providers have been provided (see jws.WithKey(), jws.WithKeySet(), jws.WithVerifyAuto(), and jws.WithKeyProvider())`)
+	}
+
+	msg, err := Parse(buf)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to parse jws: %w`, err)
+	}
+	defer msg.clearRaw()
+
+	if detachedPayload != nil {
+		if len(msg.payload) != 0 {
+			return nil, fmt.Errorf(`can't specify detached payload for JWS with payload`)
+		}
+
+		msg.payload = detachedPayload
+	}
+
+	// Pre-compute the base64 encoded version of payload
+	var payload string
+	if msg.b64 {
+		payload = base64.EncodeToString(msg.payload)
+	} else {
+		payload = string(msg.payload)
+	}
+
+	verifyBuf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(verifyBuf)
+
+	for i, sig := range msg.signatures {
+		verifyBuf.Reset()
+
+		var encodedProtectedHeader string
+		if rbp, ok := sig.protected.(interface{ rawBuffer() []byte }); ok {
+			if raw := rbp.rawBuffer(); raw != nil {
+				encodedProtectedHeader = base64.EncodeToString(raw)
+			}
+		}
+
+		if encodedProtectedHeader == "" {
+			protected, err := json.Marshal(sig.protected)
+			if err != nil {
+				return nil, fmt.Errorf(`failed to marshal "protected" for signature #%d: %w`, i+1, err)
+			}
+
+			encodedProtectedHeader = base64.EncodeToString(protected)
+		}
+
+		verifyBuf.WriteString(encodedProtectedHeader)
+		verifyBuf.WriteByte('.')
+		verifyBuf.WriteString(payload)
+
+		for i, kp := range keyProviders {
+			var sink algKeySink
+			if err := kp.FetchKeys(ctx, &sink, sig, msg); err != nil {
+				return nil, fmt.Errorf(`key provider %d failed: %w`, i, err)
+			}
+
+			for _, pair := range sink.list {
+				// alg is converted here because pair.alg is of type jwa.KeyAlgorithm.
+				// this may seem ugly, but we're trying to avoid declaring separate
+				// structs for `alg jwa.KeyAlgorithm` and `alg jwa.SignatureAlgorithm`
+				//nolint:forcetypeassert
+				alg := pair.alg.(jwa.SignatureAlgorithm)
+				key := pair.key
+				verifier, err := NewVerifier(alg)
+				if err != nil {
+					return nil, fmt.Errorf(`failed to create verifier for algorithm %q: %w`, alg, err)
+				}
+
+				if err := verifier.Verify(verifyBuf.Bytes(), sig.signature, key); err != nil {
+					continue
+				}
+
+				if keyUsed != nil {
+					if err := blackmagic.AssignIfCompatible(keyUsed, key); err != nil {
+						return nil, fmt.Errorf(`failed to assign used key (%T) to %T: %w`, key, keyUsed, err)
+					}
+				}
+
+				if dst != nil {
+					*(dst) = *msg
+				}
+
+				return msg.payload, nil
+			}
+		}
+	}
+	return nil, fmt.Errorf(`could not verify message using any of the signatures or keys`)
+}
+
+// get the value of b64 header field.
+// If the field does not exist, returns true (default)
+// Otherwise return the value specified by the header field.
+func getB64Value(hdr Headers) bool {
+	b64raw, ok := hdr.Get("b64")
+	if !ok {
+		return true // default
+	}
+
+	b64, ok := b64raw.(bool) // default
+	if !ok {
+		return false
+	}
+	return b64
+}
+
+// This is an "optimized" io.ReadAll(). It will attempt to read
+// all of the contents from the reader IF the reader is of a certain
+// concrete type.
+func readAll(rdr io.Reader) ([]byte, bool) {
+	switch rdr.(type) {
+	case *bytes.Reader, *bytes.Buffer, *strings.Reader:
+		data, err := io.ReadAll(rdr)
+		if err != nil {
+			return nil, false
+		}
+		return data, true
+	default:
+		return nil, false
+	}
+}
+
+// Parse parses contents from the given source and creates a jws.Message
+// struct. The input can be in either compact or full JSON serialization.
+//
+// Parse() currently does not take any options, but the API accepts it
+// in anticipation of future addition.
+func Parse(src []byte, _ ...ParseOption) (*Message, error) {
+	for i := 0; i < len(src); i++ {
+		r := rune(src[i])
+		if r >= utf8.RuneSelf {
+			r, _ = utf8.DecodeRune(src)
+		}
+		if !unicode.IsSpace(r) {
+			if r == '{' {
+				return parseJSON(src)
+			}
+			return parseCompact(src)
+		}
+	}
+	return nil, fmt.Errorf(`invalid byte sequence`)
+}
+
+// ParseString parses contents from the given source and creates a jws.Message
+// struct. The input can be in either compact or full JSON serialization.
+func ParseString(src string) (*Message, error) {
+	return Parse([]byte(src))
+}
+
+// ParseReader parses contents from the given source and creates a jws.Message
+// struct. The input can be in either compact or full JSON serialization.
+func ParseReader(src io.Reader) (*Message, error) {
+	if data, ok := readAll(src); ok {
+		return Parse(data)
+	}
+
+	rdr := bufio.NewReader(src)
+	var first rune
+	for {
+		r, _, err := rdr.ReadRune()
+		if err != nil {
+			return nil, fmt.Errorf(`failed to read rune: %w`, err)
+		}
+		if !unicode.IsSpace(r) {
+			first = r
+			if err := rdr.UnreadRune(); err != nil {
+				return nil, fmt.Errorf(`failed to unread rune: %w`, err)
+			}
+
+			break
+		}
+	}
+
+	var parser func(io.Reader) (*Message, error)
+	if first == '{' {
+		parser = parseJSONReader
+	} else {
+		parser = parseCompactReader
+	}
+
+	m, err := parser(rdr)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to parse jws message: %w`, err)
+	}
+
+	return m, nil
+}
+
+func parseJSONReader(src io.Reader) (result *Message, err error) {
+	var m Message
+	if err := json.NewDecoder(src).Decode(&m); err != nil {
+		return nil, fmt.Errorf(`failed to unmarshal jws message: %w`, err)
+	}
+	return &m, nil
+}
+
+func parseJSON(data []byte) (result *Message, err error) {
+	var m Message
+	if err := json.Unmarshal(data, &m); err != nil {
+		return nil, fmt.Errorf(`failed to unmarshal jws message: %w`, err)
+	}
+	return &m, nil
+}
+
+// SplitCompact splits a JWT and returns its three parts
+// separately: protected headers, payload and signature.
+func SplitCompact(src []byte) ([]byte, []byte, []byte, error) {
+	parts := bytes.Split(src, []byte("."))
+	if len(parts) < 3 {
+		return nil, nil, nil, fmt.Errorf(`invalid number of segments`)
+	}
+	return parts[0], parts[1], parts[2], nil
+}
+
+// SplitCompactString splits a JWT and returns its three parts
+// separately: protected headers, payload and signature.
+func SplitCompactString(src string) ([]byte, []byte, []byte, error) {
+	parts := strings.Split(src, ".")
+	if len(parts) < 3 {
+		return nil, nil, nil, fmt.Errorf(`invalid number of segments`)
+	}
+	return []byte(parts[0]), []byte(parts[1]), []byte(parts[2]), nil
+}
+
+// SplitCompactReader splits a JWT and returns its three parts
+// separately: protected headers, payload and signature.
+func SplitCompactReader(rdr io.Reader) ([]byte, []byte, []byte, error) {
+	if data, ok := readAll(rdr); ok {
+		return SplitCompact(data)
+	}
+
+	var protected []byte
+	var payload []byte
+	var signature []byte
+	var periods int
+	var state int
+
+	buf := make([]byte, 4096)
+	var sofar []byte
+
+	for {
+		// read next bytes
+		n, err := rdr.Read(buf)
+		// return on unexpected read error
+		if err != nil && err != io.EOF {
+			return nil, nil, nil, fmt.Errorf(`unexpected end of input: %w`, err)
+		}
+
+		// append to current buffer
+		sofar = append(sofar, buf[:n]...)
+		// loop to capture multiple '.' in current buffer
+		for loop := true; loop; {
+			var i = bytes.IndexByte(sofar, '.')
+			if i == -1 && err != io.EOF {
+				// no '.' found -> exit and read next bytes (outer loop)
+				loop = false
+				continue
+			} else if i == -1 && err == io.EOF {
+				// no '.' found -> process rest and exit
+				i = len(sofar)
+				loop = false
+			} else {
+				// '.' found
+				periods++
+			}
+
+			// Reaching this point means we have found a '.' or EOF and process the rest of the buffer
+			switch state {
+			case 0:
+				protected = sofar[:i]
+				state++
+			case 1:
+				payload = sofar[:i]
+				state++
+			case 2:
+				signature = sofar[:i]
+			}
+			// Shorten current buffer
+			if len(sofar) > i {
+				sofar = sofar[i+1:]
+			}
+		}
+		// Exit on EOF
+		if err == io.EOF {
+			break
+		}
+	}
+	if periods != 2 {
+		return nil, nil, nil, fmt.Errorf(`invalid number of segments`)
+	}
+
+	return protected, payload, signature, nil
+}
+
+// parseCompactReader parses a JWS value serialized via compact serialization.
+func parseCompactReader(rdr io.Reader) (m *Message, err error) {
+	protected, payload, signature, err := SplitCompactReader(rdr)
+	if err != nil {
+		return nil, fmt.Errorf(`invalid compact serialization format: %w`, err)
+	}
+	return parse(protected, payload, signature)
+}
+
+func parseCompact(data []byte) (m *Message, err error) {
+	protected, payload, signature, err := SplitCompact(data)
+	if err != nil {
+		return nil, fmt.Errorf(`invalid compact serialization format: %w`, err)
+	}
+	return parse(protected, payload, signature)
+}
+
+func parse(protected, payload, signature []byte) (*Message, error) {
+	decodedHeader, err := base64.Decode(protected)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to decode protected headers: %w`, err)
+	}
+
+	hdr := NewHeaders()
+	if err := json.Unmarshal(decodedHeader, hdr); err != nil {
+		return nil, fmt.Errorf(`failed to parse JOSE headers: %w`, err)
+	}
+
+	var decodedPayload []byte
+	b64 := getB64Value(hdr)
+	if !b64 {
+		decodedPayload = payload
+	} else {
+		v, err := base64.Decode(payload)
+		if err != nil {
+			return nil, fmt.Errorf(`failed to decode payload: %w`, err)
+		}
+		decodedPayload = v
+	}
+
+	decodedSignature, err := base64.Decode(signature)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to decode signature: %w`, err)
+	}
+
+	var msg Message
+	msg.payload = decodedPayload
+	msg.signatures = append(msg.signatures, &Signature{
+		protected: hdr,
+		signature: decodedSignature,
+	})
+	msg.b64 = b64
+	return &msg, nil
+}
+
+// RegisterCustomField allows users to specify that a private field
+// be decoded as an instance of the specified type. This option has
+// a global effect.
+//
+// For example, suppose you have a custom field `x-birthday`, which
+// you want to represent as a string formatted in RFC3339 in JSON,
+// but want it back as `time.Time`.
+//
+// In that case you would register a custom field as follows
+//
+//	jws.RegisterCustomField(`x-birthday`, time.Time{})
+//
+// Then `hdr.Get("x-birthday")` will still return an `interface{}`,
+// but you can convert its type to `time.Time`
+//
+//	bdayif, _ := hdr.Get(`x-birthday`)
+//	bday := bdayif.(time.Time)
+func RegisterCustomField(name string, object interface{}) {
+	registry.Register(name, object)
+}
+
+// Helpers for signature verification
+var rawKeyToKeyType = make(map[reflect.Type]jwa.KeyType)
+var keyTypeToAlgorithms = make(map[jwa.KeyType][]jwa.SignatureAlgorithm)
+
+func init() {
+	rawKeyToKeyType[reflect.TypeOf([]byte(nil))] = jwa.OctetSeq
+	rawKeyToKeyType[reflect.TypeOf(ed25519.PublicKey(nil))] = jwa.OKP
+	rawKeyToKeyType[reflect.TypeOf(rsa.PublicKey{})] = jwa.RSA
+	rawKeyToKeyType[reflect.TypeOf((*rsa.PublicKey)(nil))] = jwa.RSA
+	rawKeyToKeyType[reflect.TypeOf(ecdsa.PublicKey{})] = jwa.EC
+	rawKeyToKeyType[reflect.TypeOf((*ecdsa.PublicKey)(nil))] = jwa.EC
+
+	addAlgorithmForKeyType(jwa.OKP, jwa.EdDSA)
+	for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256, jwa.HS384, jwa.HS512} {
+		addAlgorithmForKeyType(jwa.OctetSeq, alg)
+	}
+	for _, alg := range []jwa.SignatureAlgorithm{jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512} {
+		addAlgorithmForKeyType(jwa.RSA, alg)
+	}
+	for _, alg := range []jwa.SignatureAlgorithm{jwa.ES256, jwa.ES384, jwa.ES512} {
+		addAlgorithmForKeyType(jwa.EC, alg)
+	}
+}
+
+func addAlgorithmForKeyType(kty jwa.KeyType, alg jwa.SignatureAlgorithm) {
+	keyTypeToAlgorithms[kty] = append(keyTypeToAlgorithms[kty], alg)
+}
+
+// AlgorithmsForKey returns the possible signature algorithms that can
+// be used for a given key. It only takes in consideration keys/algorithms
+// for verification purposes, as this is the only usage where one may need
+// dynamically figure out which method to use.
+func AlgorithmsForKey(key interface{}) ([]jwa.SignatureAlgorithm, error) {
+	var kty jwa.KeyType
+	switch key := key.(type) {
+	case jwk.Key:
+		kty = key.KeyType()
+	case rsa.PublicKey, *rsa.PublicKey, rsa.PrivateKey, *rsa.PrivateKey:
+		kty = jwa.RSA
+	case ecdsa.PublicKey, *ecdsa.PublicKey, ecdsa.PrivateKey, *ecdsa.PrivateKey:
+		kty = jwa.EC
+	case ed25519.PublicKey, ed25519.PrivateKey, x25519.PublicKey, x25519.PrivateKey:
+		kty = jwa.OKP
+	case []byte:
+		kty = jwa.OctetSeq
+	default:
+		return nil, fmt.Errorf(`invalid key %T`, key)
+	}
+
+	algs, ok := keyTypeToAlgorithms[kty]
+	if !ok {
+		return nil, fmt.Errorf(`invalid key type %q`, kty)
+	}
+	return algs, nil
+}
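
As a concrete sketch of the `jws.Sign()` / `jws.Verify()` round trip documented above, using HS256 with a []byte key; the secret and payload below are placeholders, not values from this repository.

```go
package main

import (
	"fmt"

	"github.com/lestrrat-go/jwx/v2/jwa"
	"github.com/lestrrat-go/jwx/v2/jws"
)

func main() {
	secret := []byte("placeholder-shared-secret")
	payload := []byte(`{"hello":"world"}`)

	// Compact serialization is the default; use jws.WithJSON() for JSON output.
	signed, err := jws.Sign(payload, jws.WithKey(jwa.HS256, secret))
	if err != nil {
		panic(err)
	}

	// Verify returns the original payload on success.
	verified, err := jws.Verify(signed, jws.WithKey(jwa.HS256, secret))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", verified)
}
```
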
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/key_provider.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/key_provider.go
new file mode 100644
index 0000000000..7d7518af1e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/key_provider.go
@@ -0,0 +1,276 @@
+package jws
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"sync"
+
+	"github.com/lestrrat-go/jwx/v2/jwa"
+	"github.com/lestrrat-go/jwx/v2/jwk"
+)
+
+// KeyProvider is responsible for providing key(s) to sign or verify a payload.
+// Multiple `jws.KeyProvider`s can be passed to `jws.Verify()` or `jws.Sign()`
+//
+// `jws.Sign()` can only accept static key providers via `jws.WithKey()`,
+// while `jws.Verify()` can accept `jws.WithKey()`, `jws.WithKeySet()`,
+// `jws.WithVerifyAuto()`, and `jws.WithKeyProvider()`.
+//
+// Understanding how this works is crucial to learn how this package works.
+//
+// `jws.Sign()` is straightforward: signatures are created for each
+// provided key.
+//
+// `jws.Verify()` is a bit more involved, because there are cases you
+// will want to compute/deduce/guess the keys that you would like to
+// use for verification.
+//
+// The first thing that `jws.Verify()` does is to collect the
+// KeyProviders from the option list that the user provided (presented in pseudocode):
+//
+//	keyProviders := filterKeyProviders(options)
+//
+// Then, remember that a JWS message may contain multiple signatures in the
+// message. For each signature, we call on the KeyProviders to give us
+// the key(s) to use on this signature:
+//
+//	for sig in msg.Signatures {
+//	  for kp in keyProviders {
+//	    kp.FetchKeys(ctx, sink, sig, msg)
+//	    ...
+//	  }
+//	}
+//
+// The `sink` argument passed to the KeyProvider is a temporary storage
+// for the keys (either a jwk.Key or a "raw" key). The `KeyProvider`
+// is responsible for sending keys into the `sink`.
+//
+// When called, the `KeyProvider` created by `jws.WithKey()` sends the same key,
+// `jws.WithKeySet()` sends keys that matches a particular `kid` and `alg`,
+// `jws.WithVerifyAuto()` fetches a JWK from the `jku` URL,
+// and finally `jws.WithKeyProvider()` allows you to execute arbitrary
+// logic to provide keys. If you are providing a custom `KeyProvider`,
+// you should execute the necessary checks or retrieval of keys, and
+// then send the key(s) to the sink:
+//
+//	sink.Key(alg, key)
+//
+// These keys are then retrieved and tried for each signature, until
+// a match is found:
+//
+//	keys := sink.Keys()
+//	for key in keys {
+//	  if givenSignature == makeSignature(key, payload, ...) {
+//	    return OK
+//	  }
+//	}
+type KeyProvider interface {
+	FetchKeys(context.Context, KeySink, *Signature, *Message) error
+}
+
+// KeySink is a data storage where `jws.KeyProvider` objects should
+// send their keys to.
+type KeySink interface {
+	Key(jwa.SignatureAlgorithm, interface{})
+}
+
+type algKeyPair struct {
+	alg jwa.KeyAlgorithm
+	key interface{}
+}
+
+type algKeySink struct {
+	mu   sync.Mutex
+	list []algKeyPair
+}
+
+func (s *algKeySink) Key(alg jwa.SignatureAlgorithm, key interface{}) {
+	s.mu.Lock()
+	s.list = append(s.list, algKeyPair{alg, key})
+	s.mu.Unlock()
+}
+
+type staticKeyProvider struct {
+	alg jwa.SignatureAlgorithm
+	key interface{}
+}
+
+func (kp *staticKeyProvider) FetchKeys(_ context.Context, sink KeySink, _ *Signature, _ *Message) error {
+	sink.Key(kp.alg, kp.key)
+	return nil
+}
+
+type keySetProvider struct {
+	set                  jwk.Set
+	requireKid           bool // true if `kid` must be specified
+	useDefault           bool // true if the first key should be used iff there's exactly one key in set
+	inferAlgorithm       bool // true if the algorithm should be inferred from key type
+	multipleKeysPerKeyID bool // true if we should attempt to match multiple keys per key ID. if false we assume that only one key exists for a given key ID
+}
+
+func (kp *keySetProvider) selectKey(sink KeySink, key jwk.Key, sig *Signature, _ *Message) error {
+	if usage := key.KeyUsage(); usage != "" && usage != jwk.ForSignature.String() {
+		return nil
+	}
+
+	if v := key.Algorithm(); v.String() != "" {
+		var alg jwa.SignatureAlgorithm
+		if err := alg.Accept(v); err != nil {
+			return fmt.Errorf(`invalid signature algorithm %s: %w`, key.Algorithm(), err)
+		}
+
+		sink.Key(alg, key)
+		return nil
+	}
+
+	if kp.inferAlgorithm {
+		algs, err := AlgorithmsForKey(key)
+		if err != nil {
+			return fmt.Errorf(`failed to get a list of signature methods for key type %s: %w`, key.KeyType(), err)
+		}
+
+		// bail out if the JWT has a `alg` field, and it doesn't match
+		if tokAlg := sig.ProtectedHeaders().Algorithm(); tokAlg != "" {
+			for _, alg := range algs {
+				if tokAlg == alg {
+					sink.Key(alg, key)
+					return nil
+				}
+			}
+			return fmt.Errorf(`algorithm in the message does not match any of the inferred algorithms`)
+		}
+
+		// Yes, you get to try them all!!!!!!!
+		for _, alg := range algs {
+			sink.Key(alg, key)
+		}
+		return nil
+	}
+	return nil
+}
+
+func (kp *keySetProvider) FetchKeys(_ context.Context, sink KeySink, sig *Signature, msg *Message) error {
+	if kp.requireKid {
+		wantedKid := sig.ProtectedHeaders().KeyID()
+		if wantedKid == "" {
+			// If the kid is NOT specified... kp.useDefault needs to be true, and the
+			// JWKs must have exactly one key in it
+			if !kp.useDefault {
+				return fmt.Errorf(`failed to find matching key: no key ID ("kid") specified in token`)
+			} else if kp.useDefault && kp.set.Len() > 1 {
+				return fmt.Errorf(`failed to find matching key: no key ID ("kid") specified in token but multiple keys available in key set`)
+			}
+
+			// if we got here, then useDefault == true AND there is exactly
+			// one key in the set.
+			key, _ := kp.set.Key(0)
+			return kp.selectKey(sink, key, sig, msg)
+		}
+
+		// Otherwise we better be able to look up the key.
+		// <= v2.0.3 backwards compatible case: only match a single key
+		// whose key ID matches `wantedKid`
+		if !kp.multipleKeysPerKeyID {
+			key, ok := kp.set.LookupKeyID(wantedKid)
+			if !ok {
+				return fmt.Errorf(`failed to find key with key ID %q in key set`, wantedKid)
+			}
+			return kp.selectKey(sink, key, sig, msg)
+		}
+
+		// if multipleKeysPerKeyID is true, we attempt all keys whose key ID matches
+		// the wantedKey
+		var ok bool
+		for i := 0; i < kp.set.Len(); i++ {
+			key, _ := kp.set.Key(i)
+			if key.KeyID() != wantedKid {
+				continue
+			}
+
+			if err := kp.selectKey(sink, key, sig, msg); err != nil {
+				continue
+			}
+			ok = true
+			// continue processing so that we try all keys with the same key ID
+		}
+		if !ok {
+			return fmt.Errorf(`failed to find key with key ID %q in key set`, wantedKid)
+		}
+		return nil
+	}
+
+	// Otherwise just try all keys
+	for i := 0; i < kp.set.Len(); i++ {
+		key, _ := kp.set.Key(i)
+		if err := kp.selectKey(sink, key, sig, msg); err != nil {
+			continue
+		}
+	}
+	return nil
+}
+
+type jkuProvider struct {
+	fetcher jwk.Fetcher
+	options []jwk.FetchOption
+}
+
+func (kp jkuProvider) FetchKeys(ctx context.Context, sink KeySink, sig *Signature, _ *Message) error {
+	kid := sig.ProtectedHeaders().KeyID()
+	if kid == "" {
+		return fmt.Errorf(`use of "jku" requires that the payload contain a "kid" field in the protected header`)
+	}
+
+	// errors here can't be reliably passed to the consumers.
+	// it's unfortunate, but if you need this control, you are
+	// going to have to write your own fetcher
+	u := sig.ProtectedHeaders().JWKSetURL()
+	if u == "" {
+		return fmt.Errorf(`use of "jku" field specified, but the field is empty`)
+	}
+	uo, err := url.Parse(u)
+	if err != nil {
+		return fmt.Errorf(`failed to parse "jku": %w`, err)
+	}
+	if uo.Scheme != "https" {
+		return fmt.Errorf(`url in "jku" must be HTTPS`)
+	}
+
+	set, err := kp.fetcher.Fetch(ctx, u, kp.options...)
+	if err != nil {
+		return fmt.Errorf(`failed to fetch %q: %w`, u, err)
+	}
+
+	key, ok := set.LookupKeyID(kid)
+	if !ok {
+		// It is not an error if the key with the kid doesn't exist
+		return nil
+	}
+
+	algs, err := AlgorithmsForKey(key)
+	if err != nil {
+		return fmt.Errorf(`failed to get a list of signature methods for key type %s: %w`, key.KeyType(), err)
+	}
+
+	hdrAlg := sig.ProtectedHeaders().Algorithm()
+	for _, alg := range algs {
+		// if we have a "alg" field in the JWS, we can only proceed if
+		// the inferred algorithm matches
+		if hdrAlg != "" && hdrAlg != alg {
+			continue
+		}
+
+		sink.Key(alg, key)
+		break
+	}
+	return nil
+}
+
+// KeyProviderFunc is a type of KeyProvider that is implemented by
+// a single function. You can use this to create ad-hoc `KeyProvider`
+// instances.
+type KeyProviderFunc func(context.Context, KeySink, *Signature, *Message) error
+
+func (kp KeyProviderFunc) FetchKeys(ctx context.Context, sink KeySink, sig *Signature, msg *Message) error {
+	return kp(ctx, sink, sig, msg)
+}
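
The `KeyProviderFunc` hook above makes ad-hoc verification logic possible. As a hedged sketch (not part of the vendored code; the HS256 secret and payload are invented for illustration), a provider can offer a key to the sink only when the protected header advertises the expected algorithm:

```go
package main

import (
	"context"
	"fmt"

	"github.com/lestrrat-go/jwx/v2/jwa"
	"github.com/lestrrat-go/jwx/v2/jws"
)

func main() {
	secret := []byte("illustrative-hmac-secret") // made-up key for the sketch

	// Sign something first so the verification below has input to work with.
	signed, err := jws.Sign([]byte("hello"), jws.WithKey(jwa.HS256, secret))
	if err != nil {
		panic(err)
	}

	// Ad-hoc provider: only offer the secret when the token claims HS256.
	provider := jws.KeyProviderFunc(func(_ context.Context, sink jws.KeySink, sig *jws.Signature, _ *jws.Message) error {
		if sig.ProtectedHeaders().Algorithm() == jwa.HS256 {
			sink.Key(jwa.HS256, secret)
		}
		return nil
	})

	payload, err := jws.Verify(signed, jws.WithKeyProvider(provider))
	if err != nil {
		panic(err)
	}
	fmt.Printf("verified payload: %s\n", payload)
}
```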
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/message.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/message.go
new file mode 100644
index 0000000000..adec8445c7
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/message.go
@@ -0,0 +1,503 @@
+package jws
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+
+	"github.com/lestrrat-go/jwx/v2/internal/base64"
+	"github.com/lestrrat-go/jwx/v2/internal/json"
+	"github.com/lestrrat-go/jwx/v2/internal/pool"
+	"github.com/lestrrat-go/jwx/v2/jwk"
+)
+
+func NewSignature() *Signature {
+	return &Signature{}
+}
+
+func (s *Signature) DecodeCtx() DecodeCtx {
+	return s.dc
+}
+
+func (s *Signature) SetDecodeCtx(dc DecodeCtx) {
+	s.dc = dc
+}
+
+func (s Signature) PublicHeaders() Headers {
+	return s.headers
+}
+
+func (s *Signature) SetPublicHeaders(v Headers) *Signature {
+	s.headers = v
+	return s
+}
+
+func (s Signature) ProtectedHeaders() Headers {
+	return s.protected
+}
+
+func (s *Signature) SetProtectedHeaders(v Headers) *Signature {
+	s.protected = v
+	return s
+}
+
+func (s Signature) Signature() []byte {
+	return s.signature
+}
+
+func (s *Signature) SetSignature(v []byte) *Signature {
+	s.signature = v
+	return s
+}
+
+type signatureUnmarshalProbe struct {
+	Header    Headers `json:"header,omitempty"`
+	Protected *string `json:"protected,omitempty"`
+	Signature *string `json:"signature,omitempty"`
+}
+
+func (s *Signature) UnmarshalJSON(data []byte) error {
+	var sup signatureUnmarshalProbe
+	sup.Header = NewHeaders()
+	if err := json.Unmarshal(data, &sup); err != nil {
+		return fmt.Errorf(`failed to unmarshal signature into temporary struct: %w`, err)
+	}
+
+	s.headers = sup.Header
+	if buf := sup.Protected; buf != nil {
+		src := []byte(*buf)
+		if !bytes.HasPrefix(src, []byte{'{'}) {
+			decoded, err := base64.Decode(src)
+			if err != nil {
+				return fmt.Errorf(`failed to base64 decode protected headers: %w`, err)
+			}
+			src = decoded
+		}
+
+		prt := NewHeaders()
+		//nolint:forcetypeassert
+		prt.(*stdHeaders).SetDecodeCtx(s.DecodeCtx())
+		if err := json.Unmarshal(src, prt); err != nil {
+			return fmt.Errorf(`failed to unmarshal protected headers: %w`, err)
+		}
+		//nolint:forcetypeassert
+		prt.(*stdHeaders).SetDecodeCtx(nil)
+		s.protected = prt
+	}
+
+	if sup.Signature != nil {
+		decoded, err := base64.DecodeString(*sup.Signature)
+		if err != nil {
+			return fmt.Errorf(`failed to base64 decode signature: %w`, err)
+		}
+		s.signature = decoded
+	}
+	return nil
+}
+
+// Sign populates the signature field, with a signature generated by
+// given the signer object and payload.
+//
+// The first return value is the raw signature in binary format.
+// The second return value is the full three-segment signature
+// (e.g. "eyXXXX.XXXXX.XXXX")
+func (s *Signature) Sign(payload []byte, signer Signer, key interface{}) ([]byte, []byte, error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	hdrs, err := mergeHeaders(ctx, s.headers, s.protected)
+	if err != nil {
+		return nil, nil, fmt.Errorf(`failed to merge headers: %w`, err)
+	}
+
+	if err := hdrs.Set(AlgorithmKey, signer.Algorithm()); err != nil {
+		return nil, nil, fmt.Errorf(`failed to set "alg": %w`, err)
+	}
+
+	// If the key is a jwk.Key instance, obtain the raw key
+	if jwkKey, ok := key.(jwk.Key); ok {
+		// If we have a key ID specified by this jwk.Key, use that in the header
+		if kid := jwkKey.KeyID(); kid != "" {
+			if err := hdrs.Set(jwk.KeyIDKey, kid); err != nil {
+				return nil, nil, fmt.Errorf(`set key ID from jwk.Key: %w`, err)
+			}
+		}
+	}
+	hdrbuf, err := json.Marshal(hdrs)
+	if err != nil {
+		return nil, nil, fmt.Errorf(`failed to marshal headers: %w`, err)
+	}
+
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+
+	buf.WriteString(base64.EncodeToString(hdrbuf))
+	buf.WriteByte('.')
+
+	var plen int
+	b64 := getB64Value(hdrs)
+	if b64 {
+		encoded := base64.EncodeToString(payload)
+		plen = len(encoded)
+		buf.WriteString(encoded)
+	} else {
+		if !s.detached {
+			if bytes.Contains(payload, []byte{'.'}) {
+				return nil, nil, fmt.Errorf(`payload must not contain a "."`)
+			}
+		}
+		plen = len(payload)
+		buf.Write(payload)
+	}
+
+	signature, err := signer.Sign(buf.Bytes(), key)
+	if err != nil {
+		return nil, nil, fmt.Errorf(`failed to sign payload: %w`, err)
+	}
+	s.signature = signature
+
+	// Detached payload, this should be removed from the end result
+	if s.detached {
+		buf.Truncate(buf.Len() - plen)
+	}
+
+	buf.WriteByte('.')
+	buf.WriteString(base64.EncodeToString(signature))
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+
+	return signature, ret, nil
+}
+
+func NewMessage() *Message {
+	return &Message{}
+}
+
+// Clears the internal raw buffer that was accumulated during
+// the verify phase
+func (m *Message) clearRaw() {
+	for _, sig := range m.signatures {
+		if protected := sig.protected; protected != nil {
+			if cr, ok := protected.(*stdHeaders); ok {
+				cr.raw = nil
+			}
+		}
+	}
+}
+
+func (m *Message) SetDecodeCtx(dc DecodeCtx) {
+	m.dc = dc
+}
+
+func (m *Message) DecodeCtx() DecodeCtx {
+	return m.dc
+}
+
+// Payload returns the decoded payload
+func (m Message) Payload() []byte {
+	return m.payload
+}
+
+func (m *Message) SetPayload(v []byte) *Message {
+	m.payload = v
+	return m
+}
+
+func (m Message) Signatures() []*Signature {
+	return m.signatures
+}
+
+func (m *Message) AppendSignature(v *Signature) *Message {
+	m.signatures = append(m.signatures, v)
+	return m
+}
+
+func (m *Message) ClearSignatures() *Message {
+	m.signatures = nil
+	return m
+}
+
+// LookupSignature looks up a particular signature entry using
+// the `kid` value
+func (m Message) LookupSignature(kid string) []*Signature {
+	var sigs []*Signature
+	for _, sig := range m.signatures {
+		if hdr := sig.PublicHeaders(); hdr != nil {
+			hdrKeyID := hdr.KeyID()
+			if hdrKeyID == kid {
+				sigs = append(sigs, sig)
+				continue
+			}
+		}
+
+		if hdr := sig.ProtectedHeaders(); hdr != nil {
+			hdrKeyID := hdr.KeyID()
+			if hdrKeyID == kid {
+				sigs = append(sigs, sig)
+				continue
+			}
+		}
+	}
+	return sigs
+}
+
+// This struct is used to first probe for the structure of the
+// incoming JSON object. We then decide how to parse it
+// from the fields that are populated.
+type messageUnmarshalProbe struct {
+	Payload    *string           `json:"payload"`
+	Signatures []json.RawMessage `json:"signatures,omitempty"`
+	Header     Headers           `json:"header,omitempty"`
+	Protected  *string           `json:"protected,omitempty"`
+	Signature  *string           `json:"signature,omitempty"`
+}
+
+func (m *Message) UnmarshalJSON(buf []byte) error {
+	m.payload = nil
+	m.signatures = nil
+	m.b64 = true
+
+	var mup messageUnmarshalProbe
+	mup.Header = NewHeaders()
+	if err := json.Unmarshal(buf, &mup); err != nil {
+		return fmt.Errorf(`failed to unmarshal into temporary structure: %w`, err)
+	}
+
+	b64 := true
+	if mup.Signature == nil { // flattened signature is NOT present
+		if len(mup.Signatures) == 0 {
+			return fmt.Errorf(`required field "signatures" not present`)
+		}
+
+		m.signatures = make([]*Signature, 0, len(mup.Signatures))
+		for i, rawsig := range mup.Signatures {
+			var sig Signature
+			sig.SetDecodeCtx(m.DecodeCtx())
+			if err := json.Unmarshal(rawsig, &sig); err != nil {
+				return fmt.Errorf(`failed to unmarshal signature #%d: %w`, i+1, err)
+			}
+			sig.SetDecodeCtx(nil)
+
+			if i == 0 {
+				if !getB64Value(sig.protected) {
+					b64 = false
+				}
+			} else {
+				if b64 != getB64Value(sig.protected) {
+					return fmt.Errorf(`b64 value must be the same for all signatures`)
+				}
+			}
+
+			m.signatures = append(m.signatures, &sig)
+		}
+	} else { // .signature is present, it's a flattened structure
+		if len(mup.Signatures) != 0 {
+			return fmt.Errorf(`invalid format ("signatures" and "signature" keys cannot both be present)`)
+		}
+
+		var sig Signature
+		sig.headers = mup.Header
+		if src := mup.Protected; src != nil {
+			decoded, err := base64.DecodeString(*src)
+			if err != nil {
+				return fmt.Errorf(`failed to base64 decode flattened protected headers: %w`, err)
+			}
+			prt := NewHeaders()
+			//nolint:forcetypeassert
+			prt.(*stdHeaders).SetDecodeCtx(m.DecodeCtx())
+			if err := json.Unmarshal(decoded, prt); err != nil {
+				return fmt.Errorf(`failed to unmarshal flattened protected headers: %w`, err)
+			}
+			//nolint:forcetypeassert
+			prt.(*stdHeaders).SetDecodeCtx(nil)
+			sig.protected = prt
+		}
+
+		decoded, err := base64.DecodeString(*mup.Signature)
+		if err != nil {
+			return fmt.Errorf(`failed to base64 decode flattened signature: %w`, err)
+		}
+		sig.signature = decoded
+
+		m.signatures = []*Signature{&sig}
+		b64 = getB64Value(sig.protected)
+	}
+
+	if mup.Payload != nil {
+		if !b64 { // NOT base64 encoded
+			m.payload = []byte(*mup.Payload)
+		} else {
+			decoded, err := base64.DecodeString(*mup.Payload)
+			if err != nil {
+				return fmt.Errorf(`failed to base64 decode payload: %w`, err)
+			}
+			m.payload = decoded
+		}
+	}
+	m.b64 = b64
+	return nil
+}
+
+func (m Message) MarshalJSON() ([]byte, error) {
+	if len(m.signatures) == 1 {
+		return m.marshalFlattened()
+	}
+	return m.marshalFull()
+}
+
+func (m Message) marshalFlattened() ([]byte, error) {
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+
+	sig := m.signatures[0]
+
+	buf.WriteRune('{')
+	var wrote bool
+
+	if hdr := sig.headers; hdr != nil {
+		hdrjs, err := hdr.MarshalJSON()
+		if err != nil {
+			return nil, fmt.Errorf(`failed to marshal "header" (flattened format): %w`, err)
+		}
+		buf.WriteString(`"header":`)
+		buf.Write(hdrjs)
+		wrote = true
+	}
+
+	if wrote {
+		buf.WriteRune(',')
+	}
+	buf.WriteString(`"payload":"`)
+	buf.WriteString(base64.EncodeToString(m.payload))
+	buf.WriteRune('"')
+
+	if protected := sig.protected; protected != nil {
+		protectedbuf, err := protected.MarshalJSON()
+		if err != nil {
+			return nil, fmt.Errorf(`failed to marshal "protected" (flattened format): %w`, err)
+		}
+		buf.WriteString(`,"protected":"`)
+		buf.WriteString(base64.EncodeToString(protectedbuf))
+		buf.WriteRune('"')
+	}
+
+	buf.WriteString(`,"signature":"`)
+	buf.WriteString(base64.EncodeToString(sig.signature))
+	buf.WriteRune('"')
+	buf.WriteRune('}')
+
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+	return ret, nil
+}
+
+func (m Message) marshalFull() ([]byte, error) {
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+
+	buf.WriteString(`{"payload":"`)
+	buf.WriteString(base64.EncodeToString(m.payload))
+	buf.WriteString(`","signatures":[`)
+	for i, sig := range m.signatures {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+
+		buf.WriteRune('{')
+		var wrote bool
+		if hdr := sig.headers; hdr != nil {
+			hdrbuf, err := hdr.MarshalJSON()
+			if err != nil {
+				return nil, fmt.Errorf(`failed to marshal "header" for signature #%d: %w`, i+1, err)
+			}
+			buf.WriteString(`"header":`)
+			buf.Write(hdrbuf)
+			wrote = true
+		}
+
+		if protected := sig.protected; protected != nil {
+			protectedbuf, err := protected.MarshalJSON()
+			if err != nil {
+				return nil, fmt.Errorf(`failed to marshal "protected" for signature #%d: %w`, i+1, err)
+			}
+			if wrote {
+				buf.WriteRune(',')
+			}
+			buf.WriteString(`"protected":"`)
+			buf.WriteString(base64.EncodeToString(protectedbuf))
+			buf.WriteRune('"')
+			wrote = true
+		}
+
+		if len(sig.signature) > 0 {
+			// If InsecureNoSignature is enabled, signature may not exist
+			if wrote {
+				buf.WriteRune(',')
+			}
+			buf.WriteString(`"signature":"`)
+			buf.WriteString(base64.EncodeToString(sig.signature))
+			buf.WriteString(`"`)
+		}
+		buf.WriteString(`}`)
+	}
+	buf.WriteString(`]}`)
+
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+	return ret, nil
+}
+
+// Compact generates a JWS message in compact serialization format from
+// `*jws.Message` object. The object must contain exactly one signature, or
+// an error is returned.
+//
+// If using a detached payload, the payload must already be stored in
+// the `*jws.Message` object, and the `jws.WithDetached()` option
+// must be passed to the function.
+func Compact(msg *Message, options ...CompactOption) ([]byte, error) {
+	if l := len(msg.signatures); l != 1 {
+		return nil, fmt.Errorf(`jws.Compact: cannot serialize message with %d signatures (must be one)`, l)
+	}
+
+	var detached bool
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identDetached{}:
+			detached = option.Value().(bool)
+		}
+	}
+
+	s := msg.signatures[0]
+	// XXX check if this is correct
+	hdrs := s.ProtectedHeaders()
+
+	hdrbuf, err := json.Marshal(hdrs)
+	if err != nil {
+		return nil, fmt.Errorf(`jws.Compact: failed to marshal headers: %w`, err)
+	}
+
+	buf := pool.GetBytesBuffer()
+	defer pool.ReleaseBytesBuffer(buf)
+
+	buf.WriteString(base64.EncodeToString(hdrbuf))
+	buf.WriteByte('.')
+
+	if !detached {
+		if getB64Value(hdrs) {
+			encoded := base64.EncodeToString(msg.payload)
+			buf.WriteString(encoded)
+		} else {
+			if bytes.Contains(msg.payload, []byte{'.'}) {
+				return nil, fmt.Errorf(`jws.Compact: payload must not contain a "."`)
+			}
+			buf.Write(msg.payload)
+		}
+	}
+
+	buf.WriteByte('.')
+	buf.WriteString(base64.EncodeToString(s.signature))
+	ret := make([]byte, buf.Len())
+	copy(ret, buf.Bytes())
+	return ret, nil
+}
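
To illustrate how `Parse`, `Message`, and the `Compact` function above fit together, here is a hedged sketch (not part of the vendored code; the HMAC secret is invented): sign in JSON serialization, parse the result back into a `*jws.Message`, and re-serialize it in compact form.

```go
package main

import (
	"fmt"

	"github.com/lestrrat-go/jwx/v2/jwa"
	"github.com/lestrrat-go/jwx/v2/jws"
)

func main() {
	secret := []byte("illustrative-hmac-secret") // made-up key

	// Produce a JSON-serialized JWS with a single signature.
	signed, err := jws.Sign([]byte("hello"), jws.WithKey(jwa.HS256, secret), jws.WithJSON())
	if err != nil {
		panic(err)
	}

	// Parse it back into a *jws.Message ...
	msg, err := jws.Parse(signed)
	if err != nil {
		panic(err)
	}
	fmt.Println("signatures:", len(msg.Signatures()))

	// ... and re-serialize in compact form (requires exactly one signature).
	compact, err := jws.Compact(msg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("compact: %s\n", compact)
}
```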
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/options.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/options.go
new file mode 100644
index 0000000000..9eb04e8371
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/options.go
@@ -0,0 +1,204 @@
+package jws
+
+import (
+	"github.com/lestrrat-go/jwx/v2/jwa"
+	"github.com/lestrrat-go/jwx/v2/jwk"
+	"github.com/lestrrat-go/option"
+)
+
+type identHeaders struct{}
+type identInsecureNoSignature struct{}
+
+// WithHeaders allows you to specify extra header values to include in the
+// final JWS message
+func WithHeaders(h Headers) SignOption {
+	return &signOption{option.New(identHeaders{}, h)}
+}
+
+// WithJSON specifies that the result of `jws.Sign()` is serialized in
+// JSON format.
+//
+// If you pass multiple keys to `jws.Sign()`, it will fail unless
+// you also pass this option.
+func WithJSON(options ...WithJSONSuboption) SignOption {
+	var pretty bool
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identPretty{}:
+			pretty = option.Value().(bool)
+		}
+	}
+
+	format := fmtJSON
+	if pretty {
+		format = fmtJSONPretty
+	}
+	return &signOption{option.New(identSerialization{}, format)}
+}
+
+type withKey struct {
+	alg       jwa.KeyAlgorithm
+	key       interface{}
+	protected Headers
+	public    Headers
+}
+
+// This exists as an escape hatch to modify the header values after the fact
+func (w *withKey) Protected(v Headers) Headers {
+	if w.protected == nil && v != nil {
+		w.protected = v
+	}
+	return w.protected
+}
+
+// WithKey is used to pass a static algorithm/key pair to either `jws.Sign()` or `jws.Verify()`.
+//
+// The `alg` parameter is the identifier for the signature algorithm that should be used.
+// It is of type `jwa.KeyAlgorithm` but in reality you can only pass `jwa.SignatureAlgorithm`
+// types. It is this way so that the value in `(jwk.Key).Algorithm()` can be directly
+// passed to the option. If you specify other algorithm types such as `jwa.ContentEncryptionAlgorithm`,
+// then you will get an error when `jws.Sign()` or `jws.Verify()` is executed.
+//
+// The `alg` parameter cannot be "none" (jwa.NoSignature) for security reasons.
+// You will have to use a separate, more explicit option to allow the use of "none"
+// algorithm.
+//
+// The algorithm specified in the `alg` parameter must be able to support
+// the type of key you provided, otherwise an error is returned.
+//
+// Any of the following is accepted for the `key` parameter:
+// * A "raw" key (e.g. rsa.PrivateKey, ecdsa.PrivateKey, etc)
+// * A crypto.Signer
+// * A jwk.Key
+//
+// A `crypto.Signer` is used when the private part of a key is
+// kept in an inaccessible location, such as hardware.
+// `crypto.Signer` is currently supported for RSA, ECDSA, and EdDSA
+// family of algorithms. You may consider using `github.com/jwx-go/crypto-signer`
+// if you would like to use keys stored in GCP/AWS KMS services.
+//
+// If the key is a jwk.Key and the key contains a key ID (`kid` field),
+// then it is added to the protected header generated by the signature.
+//
+// `jws.WithKey()` can further accept suboptions to change signing behavior
+// when used with `jws.Sign()`. `jws.WithProtected()` and `jws.WithPublic()`
+// can be passed to specify JWS headers that should be used when signing.
+//
+// If the protected headers contain "b64" field, then the boolean value for the field
+// is respected when serializing. That is, if you specify a header with
+// `{"b64": false}`, then the payload is not base64 encoded.
+//
+// These suboptions are ignored when the `jws.WithKey()` option is used with `jws.Verify()`.
+func WithKey(alg jwa.KeyAlgorithm, key interface{}, options ...WithKeySuboption) SignVerifyOption {
+	// Implementation note: this option is shared between Sign() and
+	// Verify(). As such we don't create a KeyProvider here because
+	// if used in Sign() we would be doing something else.
+	var protected, public Headers
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identProtectedHeaders{}:
+			protected = option.Value().(Headers)
+		case identPublicHeaders{}:
+			public = option.Value().(Headers)
+		}
+	}
+
+	return &signVerifyOption{
+		option.New(identKey{}, &withKey{
+			alg:       alg,
+			key:       key,
+			protected: protected,
+			public:    public,
+		}),
+	}
+}
+
+// WithKeySet specifies a JWKS (jwk.Set) to use for verification.
+//
+// By default both `alg` and `kid` fields in the JWS _and_ the
+// key must match for a key in the JWKS to be used.
+//
+// The behavior can be tweaked by using the `jws.WithKeySetSuboption`
+// suboption types.
+func WithKeySet(set jwk.Set, options ...WithKeySetSuboption) VerifyOption {
+	requireKid := true
+	var useDefault, inferAlgorithm, multipleKeysPerKeyID bool
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identRequireKid{}:
+			requireKid = option.Value().(bool)
+		case identUseDefault{}:
+			useDefault = option.Value().(bool)
+		case identMultipleKeysPerKeyID{}:
+			multipleKeysPerKeyID = option.Value().(bool)
+		case identInferAlgorithmFromKey{}:
+			inferAlgorithm = option.Value().(bool)
+		}
+	}
+
+	return WithKeyProvider(&keySetProvider{
+		set:                  set,
+		requireKid:           requireKid,
+		useDefault:           useDefault,
+		multipleKeysPerKeyID: multipleKeysPerKeyID,
+		inferAlgorithm:       inferAlgorithm,
+	})
+}
+
+func WithVerifyAuto(f jwk.Fetcher, options ...jwk.FetchOption) VerifyOption {
+	if f == nil {
+		f = jwk.FetchFunc(jwk.Fetch)
+	}
+
+	// the option MUST start with a "disallow no whitelist" to force
+	// users to provide a whitelist
+	options = append(append([]jwk.FetchOption(nil), jwk.WithFetchWhitelist(allowNoneWhitelist)), options...)
+
+	return WithKeyProvider(jkuProvider{
+		fetcher: f,
+		options: options,
+	})
+}
+
+type withInsecureNoSignature struct {
+	protected Headers
+}
+
+// This exists as an escape hatch to modify the header values after the fact
+func (w *withInsecureNoSignature) Protected(v Headers) Headers {
+	if w.protected == nil && v != nil {
+		w.protected = v
+	}
+	return w.protected
+}
+
+// WithInsecureNoSignature creates an option that allows the user to use the
+// "none" signature algorithm.
+//
+// Please note that this is insecure, and should never be used in production
+// (this is exactly why specifying "none"/jwa.NoSignature to `jws.WithKey()`
+// results in an error when `jws.Sign()` is called -- we do not allow using
+// "none" by accident)
+//
+// TODO: create specific sub-option set for this option
+func WithInsecureNoSignature(options ...WithKeySuboption) SignOption {
+	var protected Headers
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identProtectedHeaders{}:
+			protected = option.Value().(Headers)
+		}
+	}
+
+	return &signOption{
+		option.New(identInsecureNoSignature{},
+			&withInsecureNoSignature{
+				protected: protected,
+			},
+		),
+	}
+}
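
The `WithKeySet` option above is the usual entry point for JWKS-based verification. A hedged sketch follows (the JWKS URL is hypothetical; substitute the issuer you actually trust), combining the `WithMultipleKeysPerKeyID` and `WithInferAlgorithmFromKey` suboptions:

```go
package example

import (
	"context"
	"fmt"

	"github.com/lestrrat-go/jwx/v2/jwk"
	"github.com/lestrrat-go/jwx/v2/jws"
)

func verifyToken(ctx context.Context, token []byte) ([]byte, error) {
	// Hypothetical JWKS endpoint for illustration only.
	set, err := jwk.Fetch(ctx, "https://issuer.example.com/.well-known/jwks.json")
	if err != nil {
		return nil, fmt.Errorf("failed to fetch JWKS: %w", err)
	}

	// Try keys from the set; tolerate duplicate key IDs and keys without "alg".
	return jws.Verify(token,
		jws.WithKeySet(set,
			jws.WithMultipleKeysPerKeyID(true),
			jws.WithInferAlgorithmFromKey(true),
		),
	)
}
```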
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/options.yaml b/vendor/github.com/lestrrat-go/jwx/v2/jws/options.yaml
new file mode 100644
index 0000000000..5e1b5b2adc
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/options.yaml
@@ -0,0 +1,167 @@
+package_name: jws
+output: jws/options_gen.go
+interfaces:
+  - name: CompactOption
+    comment: |
+      CompactOption describes options that can be passed to `jws.Compact`
+  - name: VerifyOption
+    comment: |
+      VerifyOption describes options that can be passed to `jws.Verify`
+  - name: SignOption
+    comment: |
+      SignOption describes options that can be passed to `jws.Sign`
+  - name: SignVerifyOption
+    methods:
+      - signOption
+      - verifyOption
+    comment: |
+      SignVerifyOption describes options that can be passed to either `jws.Verify` or `jws.Sign`
+  - name: WithJSONSuboption
+    concrete_type: withJSONSuboption
+    comment: |
+      WithJSONSuboption describes suboptions that can be passed to the `jws.WithJSON()` option
+  - name: WithKeySuboption
+    comment: |
+      WithKeySuboption describes option types that can be passed to the `jws.WithKey()`
+      option.
+  - name: WithKeySetSuboption
+    comment: |
+      WithKeySetSuboption is a suboption passed to the `jws.WithKeySet()` option
+  - name: ParseOption
+    methods:
+      - readFileOption
+    comment: |
+      ParseOption is a type of `Option` that can be passed to `jws.Parse`
+  - name: ReadFileOption
+    comment: |
+      ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile`
+options:
+  - ident: Key
+    skip_option: true
+  - ident: Serialization
+    skip_option: true
+  - ident: Serialization
+    option_name: WithCompact
+    interface: SignOption
+    constant_value: fmtCompact
+    comment: |
+      WithCompact specifies that the result of `jws.Sign()` is serialized in
+      compact format.
+      
+      By default `jws.Sign()` will opt to use compact format, so you usually
+      do not need to specify this option other than to be explicit about it
+  - ident: Detached
+    interface: CompactOption
+    argument_type: bool
+    comment: |
+      WithDetached specifies that the `jws.Message` should be serialized in
+      JWS compact serialization with detached payload. The resulting octet
+      sequence will not contain the payload section.
+  - ident: DetachedPayload
+    interface: SignVerifyOption
+    argument_type: '[]byte'
+    comment: |
+       WithDetachedPayload can be used to both sign or verify a JWS message with a
+       detached payload.
+       
+       When this option is used for `jws.Sign()`, the first parameter (normally the payload)
+       must be set to `nil`.
+       
+       If you have to verify using this option, you should know exactly how and why this works.
+  - ident: Message
+    interface: VerifyOption
+    argument_type: '*Message'
+    comment: |
+      WithMessage can be passed to Verify() to obtain the jws.Message upon
+      a successful verification.
+  - ident: KeyUsed
+    interface: VerifyOption
+    argument_type: 'interface{}'
+    comment: |
+      WithKeyUsed allows you to specify the `jws.Verify()` function to
+      return the key used for verification. This may be useful when
+      you specify multiple key sources or if you pass a `jwk.Set`
+      and you want to know which key was successful at verifying the
+      signature.
+      
+      `v` must be a pointer to an empty `interface{}`. Do not use
+      `jwk.Key` here unless you are 100% sure that all keys that you
+      have provided are instances of `jwk.Key` (remember that the
+      jwx API allows users to specify a raw key such as *rsa.PublicKey)
+  - ident: InferAlgorithmFromKey
+    interface: WithKeySetSuboption
+    argument_type: bool
+    comment: |
+      WithInferAlgorithmFromKey specifies whether the JWS signing algorithm name
+      should be inferred by looking at the provided key, in case the JWS
+      message or the key does not have a proper `alg` header.
+      
+      Compared to providing an explicit `alg` from the key this is slower, and
+      verification may fail if our heuristics are somehow wrong
+      or outdated.
+      
+      Also, automatic detection of signature verification methods is always
+      more vulnerable to potential attack vectors.
+      
+      It is highly recommended that you fix your key to contain a proper `alg`
+      header field instead of resorting to using this option, but sometimes
+      it just needs to happen.
+  - ident: UseDefault
+    interface: WithKeySetSuboption
+    argument_type: bool
+    comment: |
+      WithUseDefault specifies that if and only if a jwk.Set contains
+      exactly one jwk.Key, that key should be used.
+      (I think this should be removed)
+  - ident: RequireKid
+    interface: WithKeySetSuboption
+    argument_type: bool
+    comment: |
+      WithRequireKid specifies whether the keys in the jwk.Set should
+      only be matched if the target JWS message's Key ID and the Key ID
+      in the given key match.
+  - ident: MultipleKeysPerKeyID
+    interface: WithKeySetSuboption
+    argument_type: bool
+    comment: |
+      WithMultipleKeysPerKeyID specifies if we should expect multiple keys
+      to match against a key ID. By default it is assumed that key IDs are
+      unique, i.e. for a given key ID, the key set only contains a single
+      key that has the matching ID. When this option is set to true,
+      multiple keys that match the same key ID in the set can be tried.
+  - ident: Pretty
+    interface: WithJSONSuboption
+    argument_type: bool
+    comment: |
+      WithPretty specifies whether the JSON output should be formatted and
+      indented
+  - ident: KeyProvider
+    interface: VerifyOption
+    argument_type: KeyProvider
+  - ident: Context
+    interface: VerifyOption
+    argument_type: context.Context
+  - ident: ProtectedHeaders
+    interface: WithKeySuboption
+    argument_type: Headers
+    comment: |
+      WithProtected is used with `jws.WithKey()` option when used with `jws.Sign()`
+      to specify a protected header to be attached to the JWS signature.
+      
+      It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()`
+  - ident: PublicHeaders
+    interface: WithKeySuboption
+    argument_type: Headers
+    comment: |
+      WithPublic is used with `jws.WithKey()` option when used with `jws.Sign()`
+      to specify a public header to be attached to the JWS signature.
+      
+      It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()`
+      
+      `jws.Sign()` will result in an error if `jws.WithPublic()` is used
+      and the serialization format is compact serialization.
+  - ident: FS
+    interface: ReadFileOption
+    argument_type: fs.FS
+    comment: |
+      WithFS specifies the source `fs.FS` object to read the file from.
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/options_gen.go
new file mode 100644
index 0000000000..3a7df7e4e5
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/options_gen.go
@@ -0,0 +1,331 @@
+// Code generated by tools/cmd/genoptions/main.go. DO NOT EDIT.
+
+package jws
+
+import (
+	"context"
+	"io/fs"
+
+	"github.com/lestrrat-go/option"
+)
+
+type Option = option.Interface
+
+// CompactOption describes options that can be passed to `jws.Compact`
+type CompactOption interface {
+	Option
+	compactOption()
+}
+
+type compactOption struct {
+	Option
+}
+
+func (*compactOption) compactOption() {}
+
+// ParseOption is a type of `Option` that can be passed to `jws.Parse`
+type ParseOption interface {
+	Option
+	readFileOption()
+}
+
+type parseOption struct {
+	Option
+}
+
+func (*parseOption) readFileOption() {}
+
+// ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile`
+type ReadFileOption interface {
+	Option
+	readFileOption()
+}
+
+type readFileOption struct {
+	Option
+}
+
+func (*readFileOption) readFileOption() {}
+
+// SignOption describes options that can be passed to `jws.Sign`
+type SignOption interface {
+	Option
+	signOption()
+}
+
+type signOption struct {
+	Option
+}
+
+func (*signOption) signOption() {}
+
+// SignVerifyOption describes options that can be passed to either `jws.Verify` or `jws.Sign`
+type SignVerifyOption interface {
+	Option
+	signOption()
+	verifyOption()
+}
+
+type signVerifyOption struct {
+	Option
+}
+
+func (*signVerifyOption) signOption() {}
+
+func (*signVerifyOption) verifyOption() {}
+
+// VerifyOption describes options that can be passed to `jws.Verify`
+type VerifyOption interface {
+	Option
+	verifyOption()
+}
+
+type verifyOption struct {
+	Option
+}
+
+func (*verifyOption) verifyOption() {}
+
+// WithJSONSuboption describes suboptions that can be passed to the `jws.WithJSON()` option
+type WithJSONSuboption interface {
+	Option
+	withJSONSuboption()
+}
+
+type withJSONSuboption struct {
+	Option
+}
+
+func (*withJSONSuboption) withJSONSuboption() {}
+
+// WithKeySetSuboption is a suboption passed to the `jws.WithKeySet()` option
+type WithKeySetSuboption interface {
+	Option
+	withKeySetSuboption()
+}
+
+type withKeySetSuboption struct {
+	Option
+}
+
+func (*withKeySetSuboption) withKeySetSuboption() {}
+
+// WithKeySuboption describes option types that can be passed to the `jws.WithKey()`
+// option.
+type WithKeySuboption interface {
+	Option
+	withKeySuboption()
+}
+
+type withKeySuboption struct {
+	Option
+}
+
+func (*withKeySuboption) withKeySuboption() {}
+
+type identContext struct{}
+type identDetached struct{}
+type identDetachedPayload struct{}
+type identFS struct{}
+type identInferAlgorithmFromKey struct{}
+type identKey struct{}
+type identKeyProvider struct{}
+type identKeyUsed struct{}
+type identMessage struct{}
+type identMultipleKeysPerKeyID struct{}
+type identPretty struct{}
+type identProtectedHeaders struct{}
+type identPublicHeaders struct{}
+type identRequireKid struct{}
+type identSerialization struct{}
+type identUseDefault struct{}
+
+func (identContext) String() string {
+	return "WithContext"
+}
+
+func (identDetached) String() string {
+	return "WithDetached"
+}
+
+func (identDetachedPayload) String() string {
+	return "WithDetachedPayload"
+}
+
+func (identFS) String() string {
+	return "WithFS"
+}
+
+func (identInferAlgorithmFromKey) String() string {
+	return "WithInferAlgorithmFromKey"
+}
+
+func (identKey) String() string {
+	return "WithKey"
+}
+
+func (identKeyProvider) String() string {
+	return "WithKeyProvider"
+}
+
+func (identKeyUsed) String() string {
+	return "WithKeyUsed"
+}
+
+func (identMessage) String() string {
+	return "WithMessage"
+}
+
+func (identMultipleKeysPerKeyID) String() string {
+	return "WithMultipleKeysPerKeyID"
+}
+
+func (identPretty) String() string {
+	return "WithPretty"
+}
+
+func (identProtectedHeaders) String() string {
+	return "WithProtectedHeaders"
+}
+
+func (identPublicHeaders) String() string {
+	return "WithPublicHeaders"
+}
+
+func (identRequireKid) String() string {
+	return "WithRequireKid"
+}
+
+func (identSerialization) String() string {
+	return "WithSerialization"
+}
+
+func (identUseDefault) String() string {
+	return "WithUseDefault"
+}
+
+func WithContext(v context.Context) VerifyOption {
+	return &verifyOption{option.New(identContext{}, v)}
+}
+
+// WithDetached specifies that the `jws.Message` should be serialized in
+// JWS compact serialization with detached payload. The resulting octet
+// sequence will not contain the payload section.
+func WithDetached(v bool) CompactOption {
+	return &compactOption{option.New(identDetached{}, v)}
+}
+
+// WithDetachedPayload can be used to both sign or verify a JWS message with a
+// detached payload.
+//
+// When this option is used for `jws.Sign()`, the first parameter (normally the payload)
+// must be set to `nil`.
+//
+// If you have to verify using this option, you should know exactly how and why this works.
+func WithDetachedPayload(v []byte) SignVerifyOption {
+	return &signVerifyOption{option.New(identDetachedPayload{}, v)}
+}
+
+// WithFS specifies the source `fs.FS` object to read the file from.
+func WithFS(v fs.FS) ReadFileOption {
+	return &readFileOption{option.New(identFS{}, v)}
+}
+
+// WithInferAlgorithmFromKey specifies whether the JWS signing algorithm name
+// should be inferred by looking at the provided key, in case the JWS
+// message or the key does not have a proper `alg` header.
+//
+// Compared to providing an explicit `alg` from the key this is slower, and
+// verification may fail if our heuristics are somehow wrong
+// or outdated.
+//
+// Also, automatic detection of signature verification methods is always
+// more vulnerable to potential attack vectors.
+//
+// It is highly recommended that you fix your key to contain a proper `alg`
+// header field instead of resorting to using this option, but sometimes
+// it just needs to happen.
+func WithInferAlgorithmFromKey(v bool) WithKeySetSuboption {
+	return &withKeySetSuboption{option.New(identInferAlgorithmFromKey{}, v)}
+}
+
+func WithKeyProvider(v KeyProvider) VerifyOption {
+	return &verifyOption{option.New(identKeyProvider{}, v)}
+}
+
+// WithKeyUsed allows you to specify the `jws.Verify()` function to
+// return the key used for verification. This may be useful when
+// you specify multiple key sources or if you pass a `jwk.Set`
+// and you want to know which key was successful at verifying the
+// signature.
+//
+// `v` must be a pointer to an empty `interface{}`. Do not use
+// `jwk.Key` here unless you are 100% sure that all keys that you
+// have provided are instances of `jwk.Key` (remember that the
+// jwx API allows users to specify a raw key such as *rsa.PublicKey)
+func WithKeyUsed(v interface{}) VerifyOption {
+	return &verifyOption{option.New(identKeyUsed{}, v)}
+}
+
+// WithMessage can be passed to Verify() to obtain the jws.Message upon
+// a successful verification.
+func WithMessage(v *Message) VerifyOption {
+	return &verifyOption{option.New(identMessage{}, v)}
+}
+
+// WithMultipleKeysPerKeyID specifies if we should expect multiple keys
+// to match against a key ID. By default it is assumed that key IDs are
+// unique, i.e. for a given key ID, the key set only contains a single
+// key that has the matching ID. When this option is set to true,
+// multiple keys that match the same key ID in the set can be tried.
+func WithMultipleKeysPerKeyID(v bool) WithKeySetSuboption {
+	return &withKeySetSuboption{option.New(identMultipleKeysPerKeyID{}, v)}
+}
+
+// WithPretty specifies whether the JSON output should be formatted and
+// indented
+func WithPretty(v bool) WithJSONSuboption {
+	return &withJSONSuboption{option.New(identPretty{}, v)}
+}
+
+// WithProtected is used with `jws.WithKey()` option when used with `jws.Sign()`
+// to specify a protected header to be attached to the JWS signature.
+//
+// It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()`
+func WithProtectedHeaders(v Headers) WithKeySuboption {
+	return &withKeySuboption{option.New(identProtectedHeaders{}, v)}
+}
+
+// WithPublic is used with `jws.WithKey()` option when used with `jws.Sign()`
+// to specify a public header to be attached to the JWS signature.
+//
+// It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()`
+//
+// `jws.Sign()` will result in an error if `jws.WithPublic()` is used
+// and the serialization format is compact serialization.
+func WithPublicHeaders(v Headers) WithKeySuboption {
+	return &withKeySuboption{option.New(identPublicHeaders{}, v)}
+}
+
+// WithRequireKid specifies whether the keys in the jwk.Set should
+// only be matched if the target JWS message's Key ID and the Key ID
+// in the given key match.
+func WithRequireKid(v bool) WithKeySetSuboption {
+	return &withKeySetSuboption{option.New(identRequireKid{}, v)}
+}
+
+// WithCompact specifies that the result of `jws.Sign()` is serialized in
+// compact format.
+//
+// By default `jws.Sign()` will opt to use compact format, so you usually
+// do not need to specify this option other than to be explicit about it
+func WithCompact() SignOption {
+	return &signOption{option.New(identSerialization{}, fmtCompact)}
+}
+
+// WithUseDefault specifies that if and only if a jwk.Set contains
+// exactly one jwk.Key, that key should be used.
+// (I think this should be removed)
+func WithUseDefault(v bool) WithKeySetSuboption {
+	return &withKeySetSuboption{option.New(identUseDefault{}, v)}
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/rsa.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/rsa.go
new file mode 100644
index 0000000000..e239330a23
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/rsa.go
@@ -0,0 +1,142 @@
+package jws
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"fmt"
+
+	"github.com/lestrrat-go/jwx/v2/internal/keyconv"
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+var rsaSigners map[jwa.SignatureAlgorithm]*rsaSigner
+var rsaVerifiers map[jwa.SignatureAlgorithm]*rsaVerifier
+
+func init() {
+	algs := map[jwa.SignatureAlgorithm]struct {
+		Hash crypto.Hash
+		PSS  bool
+	}{
+		jwa.RS256: {
+			Hash: crypto.SHA256,
+		},
+		jwa.RS384: {
+			Hash: crypto.SHA384,
+		},
+		jwa.RS512: {
+			Hash: crypto.SHA512,
+		},
+		jwa.PS256: {
+			Hash: crypto.SHA256,
+			PSS:  true,
+		},
+		jwa.PS384: {
+			Hash: crypto.SHA384,
+			PSS:  true,
+		},
+		jwa.PS512: {
+			Hash: crypto.SHA512,
+			PSS:  true,
+		},
+	}
+
+	rsaSigners = make(map[jwa.SignatureAlgorithm]*rsaSigner)
+	rsaVerifiers = make(map[jwa.SignatureAlgorithm]*rsaVerifier)
+	for alg, item := range algs {
+		rsaSigners[alg] = &rsaSigner{
+			alg:  alg,
+			hash: item.Hash,
+			pss:  item.PSS,
+		}
+		rsaVerifiers[alg] = &rsaVerifier{
+			alg:  alg,
+			hash: item.Hash,
+			pss:  item.PSS,
+		}
+	}
+}
+
+type rsaSigner struct {
+	alg  jwa.SignatureAlgorithm
+	hash crypto.Hash
+	pss  bool
+}
+
+func newRSASigner(alg jwa.SignatureAlgorithm) Signer {
+	return rsaSigners[alg]
+}
+
+func (rs *rsaSigner) Algorithm() jwa.SignatureAlgorithm {
+	return rs.alg
+}
+
+func (rs *rsaSigner) Sign(payload []byte, key interface{}) ([]byte, error) {
+	if key == nil {
+		return nil, fmt.Errorf(`missing private key while signing payload`)
+	}
+
+	signer, ok := key.(crypto.Signer)
+	if !ok {
+		var privkey rsa.PrivateKey
+		if err := keyconv.RSAPrivateKey(&privkey, key); err != nil {
+			return nil, fmt.Errorf(`failed to retrieve rsa.PrivateKey out of %T: %w`, key, err)
+		}
+		signer = &privkey
+	}
+
+	h := rs.hash.New()
+	if _, err := h.Write(payload); err != nil {
+		return nil, fmt.Errorf(`failed to write payload to hash: %w`, err)
+	}
+	if rs.pss {
+		return signer.Sign(rand.Reader, h.Sum(nil), &rsa.PSSOptions{
+			Hash:       rs.hash,
+			SaltLength: rsa.PSSSaltLengthEqualsHash,
+		})
+	}
+	return signer.Sign(rand.Reader, h.Sum(nil), rs.hash)
+}
+
+type rsaVerifier struct {
+	alg  jwa.SignatureAlgorithm
+	hash crypto.Hash
+	pss  bool
+}
+
+func newRSAVerifier(alg jwa.SignatureAlgorithm) Verifier {
+	return rsaVerifiers[alg]
+}
+
+func (rv *rsaVerifier) Verify(payload, signature []byte, key interface{}) error {
+	if key == nil {
+		return fmt.Errorf(`missing public key while verifying payload`)
+	}
+
+	var pubkey rsa.PublicKey
+	if cs, ok := key.(crypto.Signer); ok {
+		cpub := cs.Public()
+		switch cpub := cpub.(type) {
+		case rsa.PublicKey:
+			pubkey = cpub
+		case *rsa.PublicKey:
+			pubkey = *cpub
+		default:
+			return fmt.Errorf(`failed to retrieve rsa.PublicKey out of crypto.Signer %T`, key)
+		}
+	} else {
+		if err := keyconv.RSAPublicKey(&pubkey, key); err != nil {
+			return fmt.Errorf(`failed to retrieve rsa.PublicKey out of %T: %w`, key, err)
+		}
+	}
+
+	h := rv.hash.New()
+	if _, err := h.Write(payload); err != nil {
+		return fmt.Errorf(`failed to write payload to hash: %w`, err)
+	}
+
+	if rv.pss {
+		return rsa.VerifyPSS(&pubkey, rv.hash, h.Sum(nil), signature, nil)
+	}
+	return rsa.VerifyPKCS1v15(&pubkey, rv.hash, h.Sum(nil), signature)
+}
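
The PSS/PKCS#1 v1.5 split above is selected purely by the algorithm identifier. As a hedged sketch (not part of the vendored code; the throwaway key is generated inline), signing with `jwa.PS256` exercises the PSS branch:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/lestrrat-go/jwx/v2/jwa"
	"github.com/lestrrat-go/jwx/v2/jws"
)

func main() {
	// Throwaway key for the sketch; real deployments load a managed key.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// PS256 goes through the PSS branch of the signer above.
	signed, err := jws.Sign([]byte("hello"), jws.WithKey(jwa.PS256, key))
	if err != nil {
		panic(err)
	}

	// Verification only needs the public half.
	payload, err := jws.Verify(signed, jws.WithKey(jwa.PS256, key.Public()))
	if err != nil {
		panic(err)
	}
	fmt.Printf("verified payload: %s\n", payload)
}
```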
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/signer.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/signer.go
new file mode 100644
index 0000000000..44c8bfb76b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/signer.go
@@ -0,0 +1,106 @@
+package jws
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+type SignerFactory interface {
+	Create() (Signer, error)
+}
+type SignerFactoryFn func() (Signer, error)
+
+func (fn SignerFactoryFn) Create() (Signer, error) {
+	return fn()
+}
+
+var muSignerDB sync.RWMutex
+var signerDB map[jwa.SignatureAlgorithm]SignerFactory
+
+// RegisterSigner is used to register a factory object that creates
+// Signer objects based on the given algorithm.
+//
+// For example, if you would like to provide a custom signer for
+// jwa.EdDSA, use this function to register a `SignerFactory`
+// (probably in your `init()`)
+//
+// Unlike the `UnregisterSigner` function, this function automatically
+// calls `jwa.RegisterSignatureAlgorithm` to register the algorithm
+// in the known algorithms database.
+func RegisterSigner(alg jwa.SignatureAlgorithm, f SignerFactory) {
+	jwa.RegisterSignatureAlgorithm(alg)
+	muSignerDB.Lock()
+	signerDB[alg] = f
+	muSignerDB.Unlock()
+}
+
+// UnregisterSigner removes the signer factory associated with
+// the given algorithm.
+//
+// Note that when you call this function, the algorithm itself is
+// not automatically unregistered from the known algorithms database.
+// This is because the algorithm may still be required for verification or
+// some other operation (however unlikely, it is still possible).
+// Therefore, in order to completely remove the algorithm, you must
+// call `jwa.UnregisterSignatureAlgorithm` yourself.
+func UnregisterSigner(alg jwa.SignatureAlgorithm) {
+	muSignerDB.Lock()
+	delete(signerDB, alg)
+	muSignerDB.Unlock()
+}
+
+func init() {
+	signerDB = make(map[jwa.SignatureAlgorithm]SignerFactory)
+
+	for _, alg := range []jwa.SignatureAlgorithm{jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512} {
+		RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory {
+			return SignerFactoryFn(func() (Signer, error) {
+				return newRSASigner(alg), nil
+			})
+		}(alg))
+	}
+
+	for _, alg := range []jwa.SignatureAlgorithm{jwa.ES256, jwa.ES384, jwa.ES512, jwa.ES256K} {
+		RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory {
+			return SignerFactoryFn(func() (Signer, error) {
+				return newECDSASigner(alg), nil
+			})
+		}(alg))
+	}
+
+	for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256, jwa.HS384, jwa.HS512} {
+		RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory {
+			return SignerFactoryFn(func() (Signer, error) {
+				return newHMACSigner(alg), nil
+			})
+		}(alg))
+	}
+
+	RegisterSigner(jwa.EdDSA, SignerFactoryFn(func() (Signer, error) {
+		return newEdDSASigner(), nil
+	}))
+}
+
+// NewSigner creates a signer that signs payloads using the given signature algorithm.
+func NewSigner(alg jwa.SignatureAlgorithm) (Signer, error) {
+	muSignerDB.RLock()
+	f, ok := signerDB[alg]
+	muSignerDB.RUnlock()
+
+	if ok {
+		return f.Create()
+	}
+	return nil, fmt.Errorf(`unsupported signature algorithm "%s"`, alg)
+}
+
+type noneSigner struct{}
+
+func (noneSigner) Algorithm() jwa.SignatureAlgorithm {
+	return jwa.NoSignature
+}
+
+func (noneSigner) Sign([]byte, interface{}) ([]byte, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/verifier.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/verifier.go
new file mode 100644
index 0000000000..2dd29c8485
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/verifier.go
@@ -0,0 +1,96 @@
+package jws
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/lestrrat-go/jwx/v2/jwa"
+)
+
+type VerifierFactory interface {
+	Create() (Verifier, error)
+}
+type VerifierFactoryFn func() (Verifier, error)
+
+func (fn VerifierFactoryFn) Create() (Verifier, error) {
+	return fn()
+}
+
+var muVerifierDB sync.RWMutex
+var verifierDB map[jwa.SignatureAlgorithm]VerifierFactory
+
+// RegisterVerifier is used to register a factory object that creates
+// Verifier objects based on the given algorithm.
+//
+// For example, if you would like to provide a custom verifier for
+// jwa.EdDSA, use this function to register a `VerifierFactory`
+// (probably in your `init()`)
+//
+// Unlike the `UnregisterVerifier` function, this function automatically
+// calls `jwa.RegisterSignatureAlgorithm` to register the algorithm
+// in the known algorithms database.
+func RegisterVerifier(alg jwa.SignatureAlgorithm, f VerifierFactory) {
+	jwa.RegisterSignatureAlgorithm(alg)
+	muVerifierDB.Lock()
+	verifierDB[alg] = f
+	muVerifierDB.Unlock()
+}
+
+// UnregisterVerifier removes the signer factory associated with
+// the given algorithm.
+//
+// Note that when you call this function, the algorithm itself is
+// not automatically unregistered from the known algorithms database.
+// This is because the algorithm may still be required for signing or
+// some other operation (however unlikely, it is still possible).
+// Therefore, in order to completely remove the algorithm, you must
+// call `jwa.UnregisterSignatureAlgorithm` yourself.
+func UnregisterVerifier(alg jwa.SignatureAlgorithm) {
+	muVerifierDB.Lock()
+	delete(verifierDB, alg)
+	muVerifierDB.Unlock()
+}
+
+func init() {
+	verifierDB = make(map[jwa.SignatureAlgorithm]VerifierFactory)
+
+	for _, alg := range []jwa.SignatureAlgorithm{jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512} {
+		RegisterVerifier(alg, func(alg jwa.SignatureAlgorithm) VerifierFactory {
+			return VerifierFactoryFn(func() (Verifier, error) {
+				return newRSAVerifier(alg), nil
+			})
+		}(alg))
+	}
+
+	for _, alg := range []jwa.SignatureAlgorithm{jwa.ES256, jwa.ES384, jwa.ES512, jwa.ES256K} {
+		RegisterVerifier(alg, func(alg jwa.SignatureAlgorithm) VerifierFactory {
+			return VerifierFactoryFn(func() (Verifier, error) {
+				return newECDSAVerifier(alg), nil
+			})
+		}(alg))
+	}
+
+	for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256, jwa.HS384, jwa.HS512} {
+		RegisterVerifier(alg, func(alg jwa.SignatureAlgorithm) VerifierFactory {
+			return VerifierFactoryFn(func() (Verifier, error) {
+				return newHMACVerifier(alg), nil
+			})
+		}(alg))
+	}
+
+	RegisterVerifier(jwa.EdDSA, VerifierFactoryFn(func() (Verifier, error) {
+		return newEdDSAVerifier(), nil
+	}))
+}
+
+// NewVerifier creates a verifier that verifies payloads using the given signature algorithm.
+func NewVerifier(alg jwa.SignatureAlgorithm) (Verifier, error) {
+	muVerifierDB.RLock()
+	f, ok := verifierDB[alg]
+	muVerifierDB.RUnlock()
+
+	if ok {
+		return f.Create()
+	}
+	return nil, fmt.Errorf(`unsupported signature algorithm "%s"`, alg)
+}
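
The registration functions above allow plugging in custom algorithms. The sketch below is illustrative only (the `TOY256` algorithm name and the HMAC construction are invented; do not use it in production); it shows a type that satisfies both the `Signer` and `Verifier` interfaces being registered:

```go
package example

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"

	"github.com/lestrrat-go/jwx/v2/jwa"
	"github.com/lestrrat-go/jwx/v2/jws"
)

// toyAlg is an invented algorithm identifier used only for this sketch.
var toyAlg = jwa.SignatureAlgorithm("TOY256")

// toyHMAC implements both jws.Signer and jws.Verifier using plain HMAC-SHA256.
type toyHMAC struct{}

func (toyHMAC) Algorithm() jwa.SignatureAlgorithm { return toyAlg }

func (toyHMAC) Sign(payload []byte, key interface{}) ([]byte, error) {
	k, ok := key.([]byte)
	if !ok {
		return nil, fmt.Errorf("toyHMAC: key must be []byte, got %T", key)
	}
	h := hmac.New(sha256.New, k)
	h.Write(payload)
	return h.Sum(nil), nil
}

func (t toyHMAC) Verify(payload, signature []byte, key interface{}) error {
	expected, err := t.Sign(payload, key)
	if err != nil {
		return err
	}
	if !hmac.Equal(expected, signature) {
		return fmt.Errorf("toyHMAC: signature mismatch")
	}
	return nil
}

func init() {
	// Registration also records the algorithm with jwa, per the docs above.
	jws.RegisterSigner(toyAlg, jws.SignerFactoryFn(func() (jws.Signer, error) { return toyHMAC{}, nil }))
	jws.RegisterVerifier(toyAlg, jws.VerifierFactoryFn(func() (jws.Verifier, error) { return toyHMAC{}, nil }))
}
```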
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/x25519/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v2/x25519/BUILD.bazel
new file mode 100644
index 0000000000..bfcc136bf3
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/x25519/BUILD.bazel
@@ -0,0 +1,24 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "x25519",
+    srcs = ["x25519.go"],
+    importpath = "github.com/lestrrat-go/jwx/v2/x25519",
+    visibility = ["//visibility:public"],
+    deps = ["@org_golang_x_crypto//curve25519"],
+)
+
+go_test(
+    name = "x25519_test",
+    srcs = ["x25519_test.go"],
+    deps = [
+        ":x25519",
+        "@com_github_stretchr_testify//assert",
+    ],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":x25519",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/x25519/x25519.go b/vendor/github.com/lestrrat-go/jwx/v2/x25519/x25519.go
new file mode 100644
index 0000000000..0f9e32cbc3
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/x25519/x25519.go
@@ -0,0 +1,115 @@
+package x25519
+
+import (
+	"bytes"
+	"crypto"
+	cryptorand "crypto/rand"
+	"fmt"
+	"io"
+
+	"golang.org/x/crypto/curve25519"
+)
+
+// This mirrors ed25519's structure for private/public "keys". jwx
+// requires dedicated types for these as they drive
+// serialization/deserialization logic, as well as encryption types.
+//
+// Note that with the x25519 scheme, the private key is a sequence of
+// 32 bytes, while the public key is the result of X25519(private,
+// basepoint).
+//
+// Portions of this file are from Go's ed25519.go, which is
+// Copyright 2016 The Go Authors. All rights reserved.
+
+const (
+	// PublicKeySize is the size, in bytes, of public keys as used in this package.
+	PublicKeySize = 32
+	// PrivateKeySize is the size, in bytes, of private keys as used in this package.
+	PrivateKeySize = 64
+	// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
+	SeedSize = 32
+)
+
+// PublicKey is the type of X25519 public keys
+type PublicKey []byte
+
+// Any methods implemented on PublicKey might need to also be implemented on
+// PrivateKey, as the latter embeds the former and will expose its methods.
+
+// Equal reports whether pub and x have the same value.
+func (pub PublicKey) Equal(x crypto.PublicKey) bool {
+	xx, ok := x.(PublicKey)
+	if !ok {
+		return false
+	}
+	return bytes.Equal(pub, xx)
+}
+
+// PrivateKey is the type of X25519 private key
+type PrivateKey []byte
+
+// Public returns the PublicKey corresponding to priv.
+func (priv PrivateKey) Public() crypto.PublicKey {
+	publicKey := make([]byte, PublicKeySize)
+	copy(publicKey, priv[SeedSize:])
+	return PublicKey(publicKey)
+}
+
+// Equal reports whether priv and x have the same value.
+func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
+	xx, ok := x.(PrivateKey)
+	if !ok {
+		return false
+	}
+	return bytes.Equal(priv, xx)
+}
+
+// Seed returns the private key seed corresponding to priv. It is provided for
+// interoperability with RFC 7748. RFC 7748's private keys correspond to seeds
+// in this package.
+func (priv PrivateKey) Seed() []byte {
+	seed := make([]byte, SeedSize)
+	copy(seed, priv[:SeedSize])
+	return seed
+}
+
+// NewKeyFromSeed calculates a private key from a seed. It will return
+// an error if len(seed) is not SeedSize. This function is provided
+// for interoperability with RFC 7748. RFC 7748's private keys
+// correspond to seeds in this package.
+func NewKeyFromSeed(seed []byte) (PrivateKey, error) {
+	privateKey := make([]byte, PrivateKeySize)
+	if len(seed) != SeedSize {
+		return nil, fmt.Errorf("unexpected seed size: %d", len(seed))
+	}
+	copy(privateKey, seed)
+	public, err := curve25519.X25519(seed, curve25519.Basepoint)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to compute public key: %w`, err)
+	}
+	copy(privateKey[SeedSize:], public)
+
+	return privateKey, nil
+}
+
+// GenerateKey generates a public/private key pair using entropy from rand.
+// If rand is nil, crypto/rand.Reader will be used.
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
+	if rand == nil {
+		rand = cryptorand.Reader
+	}
+
+	seed := make([]byte, SeedSize)
+	if _, err := io.ReadFull(rand, seed); err != nil {
+		return nil, nil, err
+	}
+
+	privateKey, err := NewKeyFromSeed(seed)
+	if err != nil {
+		return nil, nil, err
+	}
+	publicKey := make([]byte, PublicKeySize)
+	copy(publicKey, privateKey[SeedSize:])
+
+	return publicKey, privateKey, nil
+}
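
A hedged usage sketch for the key type above (not part of the vendored code): generate a key pair, recover the public half from the private key, and rebuild the private key from its seed.

```go
package main

import (
	"fmt"

	"github.com/lestrrat-go/jwx/v2/x25519"
)

func main() {
	// A nil reader falls back to crypto/rand.Reader.
	pub, priv, err := x25519.GenerateKey(nil)
	if err != nil {
		panic(err)
	}

	// The public half can be recovered from the private key ...
	derived := priv.Public().(x25519.PublicKey)
	fmt.Println("public matches:", derived.Equal(pub))

	// ... and the private key can be reconstructed from its 32-byte seed.
	again, err := x25519.NewKeyFromSeed(priv.Seed())
	if err != nil {
		panic(err)
	}
	fmt.Println("seed round-trip matches:", priv.Equal(again))
}
```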
diff --git a/vendor/github.com/lestrrat-go/option/.gitignore b/vendor/github.com/lestrrat-go/option/.gitignore
new file mode 100644
index 0000000000..66fd13c903
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/option/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
diff --git a/vendor/github.com/lestrrat-go/option/LICENSE b/vendor/github.com/lestrrat-go/option/LICENSE
new file mode 100644
index 0000000000..188ea7685c
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/option/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 lestrrat-go
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/lestrrat-go/option/README.md b/vendor/github.com/lestrrat-go/option/README.md
new file mode 100644
index 0000000000..cab0044ed3
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/option/README.md
@@ -0,0 +1,245 @@
+# option
+
+Base object for the "Optional Parameters Pattern".
+
+# DESCRIPTION
+
+The beauty of this pattern is that you can achieve a method that can
+take the following simple calling style
+
+```go
+obj.Method(mandatory1, mandatory2)
+```
+
+or the following, if you want to modify its behavior with optional parameters
+
+```go
+obj.Method(mandatory1, mandatory2, optional1, optional2, optional3)
+```
+
+Instead of the more clunky zero value for optionals style
+
+```go
+obj.Method(mandatory1, mandatory2, nil, "", 0)
+```
+
+or the equally clunky config object style, which requires you to create a
+struct with `NamesThatLookReallyLongBecauseItNeedsToIncludeMethodNamesConfig`.
+
+```go
+cfg := &ConfigForMethod{
+ Optional1: ...,
+ Optional2: ...,
+ Optional3: ...,
+}
+obj.Method(mandatory1, mandatory2, &cfg)
+```
+
+# SYNOPSIS 
+
+Create an "identifier" for the option. We recommend using an unexported empty struct,
+because
+
+1. It is uniquely identifiable globally
+1. Takes minimal space
+1. Since it's unexported, you do not have to worry about it leaking elsewhere or having it changed by consumers
+
+```go
+// an unexported empty struct
+type identFeatureX struct{} 
+```
+
+Then define a method to create an option using this identifier. Here we assume
+that the option will be a boolean option.
+
+```go
+// this is optional, but for readability we usually use a wrapper
+// around option.Interface, or a type alias.
+type Option = option.Interface
+func WithFeatureX(v bool) Option {
+  // use the constructor to create a new option
+  return option.New(identFeatureX{}, v)
+}
+```
+
+Now you can create an option, which is essentially a two-element tuple consisting
+of an identifier and its associated value.
+
+To consume this, you will need to create a function with variadic parameters,
+and iterate over the list looking for a particular identifier:
+
+```go
+func MyAwesomeFunc( /* mandatory parameters omitted */ options ...Option) {
+  var enableFeatureX bool
+  // The nolint directive is recommended if you are using linters such
+  // as golangci-lint
+  //nolint:forcetypeassert 
+  for _, option := range options {
+    switch option.Ident() {
+    case identFeatureX{}:
+      enableFeatureX = option.Value().(bool)
+    // other cases omitted
+    }
+  }
+  if enableFeatureX {
+    ....
+  }
+}
+```
+
+# Option objects
+
+Option objects take two arguments, its identifier and the value it contains.
+
+The identifier can be anything, but it's usually better to use an unexported
+empty struct so that only you have the ability to generate said option:
+
+```go
+type identOptionalParamOne struct{}
+type identOptionalParamTwo struct{}
+type identOptionalParamThree struct{}
+
+func WithOptionOne(v ...) Option {
+	return option.New(identOptionalParamOne{}, v)
+}
+```
+
+Then you can call the method we described above as
+
+```go
+obj.Method(m1, m2, WithOptionOne(...), WithOptionTwo(...), WithOptionThree(...))
+```
+
+Options should be parsed with code that looks somewhat like this
+
+```go
+func (obj *Object) Method(m1 Type1, m2 Type2, options ...Option) {
+  paramOne := defaultValueParamOne
+  for _, option := range options {
+    switch option.Ident() {
+    case identOptionalParamOne{}:
+      paramOne = option.Value().(...)
+    }
+  }
+  ...
+}
+```
+
+The loop requires a bit of boilerplate, and admittedly, this is the main downside
+of this module. However, if you think you want to use the Option as a Function pattern,
+please check the FAQ below for rationale.
+
+# Simple usage
+
+Most of the time, all you need to do is declare the Option type as an alias
+in your code:
+
+```go
+package myawesomepkg
+
+import "github.com/lestrrat-go/option"
+
+type Option = option.Interface
+```
+
+Then you can start defining options like they are described in the SYNOPSIS section.
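+
+For instance, a minimal sketch using the alias might look like the following
+(`identTimeout` and `WithTimeout` are illustrative names, not part of this
+module):
+
+```go
+package myawesomepkg
+
+import "github.com/lestrrat-go/option"
+
+type Option = option.Interface
+
+type identTimeout struct{}
+
+// WithTimeout wraps a timeout value (in seconds) into an Option.
+func WithTimeout(seconds int) Option {
+	return option.New(identTimeout{}, seconds)
+}
+```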
+
+# Differentiating Options
+
+When you have multiple methods and options, and certain options can only be passed to
+certain methods, it's hard to see which options should be passed to which method.
+
+```go
+func WithX() Option { ... }
+func WithY() Option { ... }
+
+// Now, which of WithX/WithY go to which method?
+func (*Obj) Method1(options ...Option) {}
+func (*Obj) Method2(options ...Option) {}
+```
+
+In this case the easiest way to make it obvious is to put an extra layer around
+the options so that they have different types
+
+```go
+type Method1Option interface {
+  Option
+  method1Option()
+}
+
+type method1Option struct { Option }
+func (*method1Option) method1Option() {}
+
+func WithX() Method1Option {
+  return &method1Option{option.New(...)}
+}
+
+func (*Obj) Method1(options ...Method1Option) {}
+```
+
+This way the compiler knows if an option can be passed to a given method.
+
+# FAQ
+
+## Why aren't these function-based?
+
+Using a base option type like `type Option func(ctx interface{})` is certainly one way to achieve the same goal. In this case, you are giving the option itself the ability to "configure" the main object. For example:
+
+```go
+type Foo struct {
+  optionalValue bool
+}
+
+type Option func(*Foo) error
+
+func WithOptionalValue(v bool) Option {
+  return Option(func(f *Foo) error {
+    f.optionalValue = v
+    return nil
+  })
+}
+
+func NewFoo(options ...Option) (*Foo, error) {
+  var f Foo
+  for _, o := range options {
+    if err := o(&f); err != nil {
+      return nil, err
+    }
+  }
+  return &f, nil
+}
+```
+
+This in itself is fine, but we think there are a few problems:
+
+### 1. It's hard to create a reusable "Option" type
+
+We create many libraries using this optional pattern. We would like to provide a default base object. However, this function-based approach is not reusable because each "Option" type requires a context-specific input type. For example, if the "Option" type in the previous example were `func(interface{}) error`, then its usability would significantly decrease because of the type conversion.
+
+This is not to say that this library's approach is better as it also requires type conversion to convert the _value_ of the option. However, part of the beauty of the original function based approach was the ease of its use, and we claim that this significantly decreases the merits of the function based approach.
+
+### 2. The receiver requires exported fields
+
+Part of the appeal of a function-based option pattern is that, by giving the option itself the ability to do what it wants, you open up the possibility of third parties creating options that do things the library authors did not think about.
+
+```go
+package thirdparty
+
+func WithMyAwesomeOption( ... ) mypkg.Option {
+  return mypkg.Option(func(f *mypkg.Foo) error {
+    f.X = ...
+    f.Y = ...
+    f.Z = ...
+    return nil
+  })
+}
+```
+
+However, for any third party code to access and set field values, these fields (`X`, `Y`, `Z`) must be exported. Basically you will need an "open" struct.
+
+Exported fields are absolutely no problem when you have a struct that represents data alone (e.g., payloads for API calls that read or change state), but we think that casually exposing fields on a library struct is a sure way to maintenance hell in the future. What happens when you want to change the API? What happens when you realize that you want to use the field as state (i.e. use it for more than configuration)? What if users keep referring to that field, and then you have concurrent code accessing it?
+
+Giving third parties complete access to exported fields is like handing out a loaded weapon to the users, and you are at their mercy.
+
+Of course, providing public APIs for everything so you can validate and control concurrency is an option, but then ... it's a lot of work, and you may have to provide APIs _only_ so that users can refer to them in the option-configuration phase. That sounds like a lot of extra work.
+
diff --git a/vendor/github.com/lestrrat-go/option/option.go b/vendor/github.com/lestrrat-go/option/option.go
new file mode 100644
index 0000000000..bfdbb118c0
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/option/option.go
@@ -0,0 +1,38 @@
+package option
+
+import "fmt"
+
+// Interface defines the minimum interface that an option must fulfill
+type Interface interface {
+	// Ident returns the "identity" of this option, a unique identifier that
+	// can be used to differentiate between options
+	Ident() interface{}
+
+	// Value returns the corresponding value.
+	Value() interface{}
+}
+
+type pair struct {
+	ident interface{}
+	value interface{}
+}
+
+// New creates a new Option
+func New(ident, value interface{}) Interface {
+	return &pair{
+		ident: ident,
+		value: value,
+	}
+}
+
+func (p *pair) Ident() interface{} {
+	return p.ident
+}
+
+func (p *pair) Value() interface{} {
+	return p.value
+}
+
+func (p *pair) String() string {
+	return fmt.Sprintf(`%v(%v)`, p.ident, p.value)
+}
diff --git a/vendor/github.com/mattn/go-runewidth/.travis.yml b/vendor/github.com/mattn/go-runewidth/.travis.yml
deleted file mode 100644
index 6a21813a3e..0000000000
--- a/vendor/github.com/mattn/go-runewidth/.travis.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-language: go
-sudo: false
-go:
-  - 1.13.x
-  - tip
-
-before_install:
-  - go get -t -v ./...
-
-script:
-  - go generate
-  - git diff --cached --exit-code
-  - ./go.test.sh
-
-after_success:
-  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/mattn/go-runewidth/README.md b/vendor/github.com/mattn/go-runewidth/README.md
index aa56ab96c2..5e2cfd98cb 100644
--- a/vendor/github.com/mattn/go-runewidth/README.md
+++ b/vendor/github.com/mattn/go-runewidth/README.md
@@ -1,7 +1,7 @@
 go-runewidth
 ============
 
-[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
+[![Build Status](https://github.com/mattn/go-runewidth/workflows/test/badge.svg?branch=master)](https://github.com/mattn/go-runewidth/actions?query=workflow%3Atest)
 [![Codecov](https://codecov.io/gh/mattn/go-runewidth/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-runewidth)
 [![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth)
 [![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth)
diff --git a/vendor/github.com/mattn/go-runewidth/go.test.sh b/vendor/github.com/mattn/go-runewidth/go.test.sh
deleted file mode 100644
index 012162b077..0000000000
--- a/vendor/github.com/mattn/go-runewidth/go.test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(go list ./... | grep -v vendor); do
-    go test -race -coverprofile=profile.out -covermode=atomic "$d"
-    if [ -f profile.out ]; then
-        cat profile.out >> coverage.txt
-        rm profile.out
-    fi
-done
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go
index 3d7fa560b8..7dfbb3be91 100644
--- a/vendor/github.com/mattn/go-runewidth/runewidth.go
+++ b/vendor/github.com/mattn/go-runewidth/runewidth.go
@@ -2,6 +2,7 @@ package runewidth
 
 import (
 	"os"
+	"strings"
 
 	"github.com/rivo/uniseg"
 )
@@ -34,7 +35,13 @@ func handleEnv() {
 		EastAsianWidth = env == "1"
 	}
 	// update DefaultCondition
-	DefaultCondition.EastAsianWidth = EastAsianWidth
+	if DefaultCondition.EastAsianWidth != EastAsianWidth {
+		DefaultCondition.EastAsianWidth = EastAsianWidth
+		if len(DefaultCondition.combinedLut) > 0 {
+			DefaultCondition.combinedLut = DefaultCondition.combinedLut[:0]
+			CreateLUT()
+		}
+	}
 }
 
 type interval struct {
@@ -89,6 +96,7 @@ var nonprint = table{
 
 // Condition have flag EastAsianWidth whether the current locale is CJK or not.
 type Condition struct {
+	combinedLut        []byte
 	EastAsianWidth     bool
 	StrictEmojiNeutral bool
 }
@@ -104,10 +112,16 @@ func NewCondition() *Condition {
 // RuneWidth returns the number of cells in r.
 // See http://www.unicode.org/reports/tr11/
 func (c *Condition) RuneWidth(r rune) int {
+	if r < 0 || r > 0x10FFFF {
+		return 0
+	}
+	if len(c.combinedLut) > 0 {
+		return int(c.combinedLut[r>>1]>>(uint(r&1)*4)) & 3
+	}
 	// optimized version, verified by TestRuneWidthChecksums()
 	if !c.EastAsianWidth {
 		switch {
-		case r < 0x20 || r > 0x10FFFF:
+		case r < 0x20:
 			return 0
 		case (r >= 0x7F && r <= 0x9F) || r == 0xAD: // nonprint
 			return 0
@@ -124,7 +138,7 @@ func (c *Condition) RuneWidth(r rune) int {
 		}
 	} else {
 		switch {
-		case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining):
+		case inTables(r, nonprint, combining):
 			return 0
 		case inTable(r, narrow):
 			return 1
@@ -138,6 +152,27 @@ func (c *Condition) RuneWidth(r rune) int {
 	}
 }
 
+// CreateLUT will create an in-memory lookup table of 557056 bytes for faster operation.
+// This should not be called concurrently with other operations on c.
+// If options in c is changed, CreateLUT should be called again.
+func (c *Condition) CreateLUT() {
+	const max = 0x110000
+	lut := c.combinedLut
+	if len(c.combinedLut) != 0 {
+		// Remove so we don't use it.
+		c.combinedLut = nil
+	} else {
+		lut = make([]byte, max/2)
+	}
+	for i := range lut {
+		i32 := int32(i * 2)
+		x0 := c.RuneWidth(i32)
+		x1 := c.RuneWidth(i32 + 1)
+		lut[i] = uint8(x0) | uint8(x1)<<4
+	}
+	c.combinedLut = lut
+}
+
 // StringWidth return width as you can see
 func (c *Condition) StringWidth(s string) (width int) {
 	g := uniseg.NewGraphemes(s)
@@ -180,11 +215,47 @@ func (c *Condition) Truncate(s string, w int, tail string) string {
 	return s[:pos] + tail
 }
 
+// TruncateLeft cuts w cells from the beginning of the `s`.
+func (c *Condition) TruncateLeft(s string, w int, prefix string) string {
+	if c.StringWidth(s) <= w {
+		return prefix
+	}
+
+	var width int
+	pos := len(s)
+
+	g := uniseg.NewGraphemes(s)
+	for g.Next() {
+		var chWidth int
+		for _, r := range g.Runes() {
+			chWidth = c.RuneWidth(r)
+			if chWidth > 0 {
+				break // See StringWidth() for details.
+			}
+		}
+
+		if width+chWidth > w {
+			if width < w {
+				_, pos = g.Positions()
+				prefix += strings.Repeat(" ", width+chWidth-w)
+			} else {
+				pos, _ = g.Positions()
+			}
+
+			break
+		}
+
+		width += chWidth
+	}
+
+	return prefix + s[pos:]
+}
+
 // Wrap return string wrapped with w cells
 func (c *Condition) Wrap(s string, w int) string {
 	width := 0
 	out := ""
-	for _, r := range []rune(s) {
+	for _, r := range s {
 		cw := c.RuneWidth(r)
 		if r == '\n' {
 			out += string(r)
@@ -257,6 +328,11 @@ func Truncate(s string, w int, tail string) string {
 	return DefaultCondition.Truncate(s, w, tail)
 }
 
+// TruncateLeft cuts w cells from the beginning of the `s`.
+func TruncateLeft(s string, w int, prefix string) string {
+	return DefaultCondition.TruncateLeft(s, w, prefix)
+}
+
 // Wrap return string wrapped with w cells
 func Wrap(s string, w int) string {
 	return DefaultCondition.Wrap(s, w)
@@ -271,3 +347,12 @@ func FillLeft(s string, w int) string {
 func FillRight(s string, w int) string {
 	return DefaultCondition.FillRight(s, w)
 }
+
+// CreateLUT will create an in-memory lookup table of 557056 bytes for faster operation.
+// This should not be called concurrently with other operations.
+func CreateLUT() {
+	if len(DefaultCondition.combinedLut) > 0 {
+		return
+	}
+	DefaultCondition.CreateLUT()
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
index 7d99f6e521..84b6528dfe 100644
--- a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
@@ -1,3 +1,4 @@
+//go:build appengine
 // +build appengine
 
 package runewidth
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
index c5fdf40baa..c2abbc2db3 100644
--- a/vendor/github.com/mattn/go-runewidth/runewidth_js.go
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
@@ -1,5 +1,5 @@
-// +build js
-// +build !appengine
+//go:build js && !appengine
+// +build js,!appengine
 
 package runewidth
 
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
index 480ad74853..5a31d738ec 100644
--- a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
@@ -1,6 +1,5 @@
-// +build !windows
-// +build !js
-// +build !appengine
+//go:build !windows && !js && !appengine
+// +build !windows,!js,!appengine
 
 package runewidth
 
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
index d6a61777d7..5f987a310f 100644
--- a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
@@ -1,5 +1,5 @@
-// +build windows
-// +build !appengine
+//go:build windows && !appengine
+// +build windows,!appengine
 
 package runewidth
 
diff --git a/vendor/github.com/oleiade/reflections/.gitignore b/vendor/github.com/oleiade/reflections/.gitignore
new file mode 100644
index 0000000000..00268614f0
--- /dev/null
+++ b/vendor/github.com/oleiade/reflections/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/oleiade/reflections/AUTHORS.md b/vendor/github.com/oleiade/reflections/AUTHORS.md
new file mode 100644
index 0000000000..1038a43cd8
--- /dev/null
+++ b/vendor/github.com/oleiade/reflections/AUTHORS.md
@@ -0,0 +1,9 @@
+## Creator
+
+* Oleiade <tcrevon@gmail.com>
+
+## Contributors
+
+* Cengle <https://github.com/cengle>
+* Tomo Krajina <https://github.com/tkrajina>
+* Seth Shelnutt <https://github.com/Shelnutt2>
diff --git a/vendor/github.com/oleiade/reflections/LICENSE b/vendor/github.com/oleiade/reflections/LICENSE
new file mode 100644
index 0000000000..f642242427
--- /dev/null
+++ b/vendor/github.com/oleiade/reflections/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Théo Crevon
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/oleiade/reflections/README.md b/vendor/github.com/oleiade/reflections/README.md
new file mode 100644
index 0000000000..8f6b38d705
--- /dev/null
+++ b/vendor/github.com/oleiade/reflections/README.md
@@ -0,0 +1,235 @@
+Reflections
+===========
+
+Package reflections provides high level abstractions above the golang reflect library.
+
+The reflect library is very low-level and can be quite complex when it comes to doing simple things like accessing a structure field value or a field tag...
+
+The purpose of the reflections package is to make developers' lives easier when it comes to introspecting structures at runtime.
+Its API is inspired by the Python language (getattr, setattr, hasattr...) and provides simplified access to structure fields and tags.
+
+*Reflections is an open source library under the MIT license. Any hackers are welcome to supply ideas, feature requests, patches, pull requests and so on: see [Contribute](#contribute)*
+
+#### Documentation
+
+Documentation is available at http://godoc.org/github.com/oleiade/reflections
+
+## Usage
+
+#### Accessing structure fields
+
+##### GetField
+
+*GetField* returns the content of a structure field. It can be very useful when
+you want to iterate over specific field values of a struct, for example. You can
+provide *GetField* either a structure or a pointer to a structure as its first argument.
+
+```go
+    s := MyStruct {
+        FirstField: "first value",
+        SecondField: 2,
+        ThirdField: "third value",
+    }
+
+    fieldsToExtract := []string{"FirstField", "ThirdField"}
+
+    for _, fieldName := range fieldsToExtract {
+        value, err := reflections.GetField(s, fieldName)
+        DoWhatEverWithThatValue(value)
+    }
+```
+
+##### GetFieldKind
+
+*GetFieldKind* returns the [reflect.Kind](http://golang.org/src/pkg/reflect/type.go?s=6916:6930#L189) of a structure field. It can be used to perform type assertions over structure fields at runtime. You can provide *GetFieldKind* either a structure or a pointer to a structure as its first argument.
+
+```go
+    s := MyStruct{
+        FirstField:  "first value",
+        SecondField: 2,
+        ThirdField:  "third value",
+    }
+
+    var firstFieldKind reflect.Kind
+    var secondFieldKind reflect.Kind
+    var err error
+
+    firstFieldKind, err = GetFieldKind(s, "FirstField")
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    secondFieldKind, err = GetFieldKind(s, "SecondField")
+    if err != nil {
+        log.Fatal(err)
+    }
+```
+
+##### GetFieldType
+
+*GetFieldType* returns the string representation of a structure field's type. It can be used to perform type assertions over structure fields at runtime. You can provide *GetFieldType* either a structure or a pointer to a structure as its first argument.
+
+```go
+    s := MyStruct{
+        FirstField:  "first value",
+        SecondField: 2,
+        ThirdField:  "third value",
+    }
+
+    var firstFieldType string
+    var secondFieldType string
+    var err error
+
+    firstFieldType, err = GetFieldType(s, "FirstField")
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    secondFieldType, err = GetFieldType(s, "SecondField")
+    if err != nil {
+        log.Fatal(err)
+    }
+```
+
+##### GetFieldTag
+
+*GetFieldTag* extracts a specific structure field tag. You can provide *GetFieldTag* either a structure or a pointer to a structure as its first argument.
+
+
+```go
+    s := MyStruct{}
+
+    tag, err := reflections.GetFieldTag(s, "FirstField", "matched")
+    if err != nil {
+        log.Fatal(err)
+    }
+    fmt.Println(tag)
+
+    tag, err = reflections.GetFieldTag(s, "ThirdField", "unmatched")
+    if err != nil {
+        log.Fatal(err)
+    }
+    fmt.Println(tag)
+```
+
+##### HasField
+
+*HasField* checks whether a field exists in a structure. You can provide *HasField* either a structure or a pointer to a structure as its first argument.
+
+
+```go
+    s := MyStruct {
+        FirstField: "first value",
+        SecondField: 2,
+        ThirdField: "third value",
+    }
+
+    // has == true
+    has, _ := reflections.HasField(s, "FirstField")
+
+    // has == false
+    has, _ := reflections.HasField(s, "FourthField")
+```
+
+##### Fields
+
+*Fields* returns the list of a structure's field names, so you can access or modify them later on. You can provide *Fields* either a structure or a pointer to a structure as its first argument.
+
+
+```go
+    s := MyStruct {
+        FirstField: "first value",
+        SecondField: 2,
+        ThirdField: "third value",
+    }
+
+    var fields []string
+
+    // Fields will list every exportable field of the structure.
+    // Here, its content would be equal to:
+    // []string{"FirstField", "SecondField", "ThirdField"}
+    fields, _ = reflections.Fields(s)
+```
+
+##### Items
+
+*Items* returns the structure's field name to value map. You can provide *Items* either a structure or a pointer to a structure as its first argument.
+
+
+```go
+    s := MyStruct {
+        FirstField: "first value",
+        SecondField: 2,
+        ThirdField: "third value",
+    }
+
+    var structItems map[string]interface{}
+
+    // Items will return a field name to
+    // field value map
+    structItems, _ = reflections.Items(s)
+```
+
+##### Tags
+
+*Tags* returns the structure's field tags matching the provided key. You can provide *Tags* either a structure or a pointer to a structure as its first argument.
+
+
+```go
+    type MyStruct struct {
+        FirstField  string `matched:"first tag"`
+        SecondField int    `matched:"second tag"`
+        ThirdField  string `unmatched:"third tag"`
+    }
+
+    s := MyStruct{
+        FirstField:  "first value",
+        SecondField: 2,
+        ThirdField:  "third value",
+    }
+
+    var structTags map[string]string
+
+    // Tags will return a field name to tag content
+    // map. Note that only fields with the tag name
+    // you've provided will be matched.
+    // Here structTags will contain:
+    // {
+    //     "FirstField": "first tag",
+    //     "SecondField": "second tag",
+    // }
+    structTags, _ = reflections.Tags(s, "matched")
+```
+
+#### Set a structure field value
+
+*SetField* updates a structure's field value with the one provided. Note that
+unexported fields cannot be set, and that field type and value type have to match.
+
+```go
+    s := MyStruct {
+        FirstField: "first value",
+        SecondField: 2,
+        ThirdField: "third value",
+    }
+
+    // In order to be able to set the structure's values,
+    // a pointer to the structure has to be passed.
+    err := reflections.SetField(&s, "FirstField", "new value")
+
+    // If you try to set a field's value using the wrong type,
+    // an error will be returned
+    err = reflections.SetField(&s, "FirstField", 123) // err != nil
+```
+
+## Important notes
+
+* **Unexported fields** cannot be accessed or set using the reflections library: the golang reflect library intentionally prohibits access to or modification of unexported field values.
+
+
+## Contribute
+
+* Check for open issues or open a fresh issue to start a discussion around a feature idea or a bug.
+* Fork [the repository](http://github.com/oleiade/reflections) on GitHub to start making your changes to the **master** branch (or branch off of it).
+* Write tests which show that the bug was fixed or that the feature works as expected.
+* Send a pull request and bug the maintainer until it gets merged and published. :) Make sure to add yourself to [AUTHORS](https://github.com/oleiade/reflections/blob/master/AUTHORS.md).
+
+
+[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/oleiade/reflections/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
diff --git a/vendor/github.com/oleiade/reflections/reflections.go b/vendor/github.com/oleiade/reflections/reflections.go
new file mode 100644
index 0000000000..b25b423bb1
--- /dev/null
+++ b/vendor/github.com/oleiade/reflections/reflections.go
@@ -0,0 +1,301 @@
+// Copyright (c) 2013 Théo Crevon
+//
+// See the file LICENSE for copying permission.
+
+/*
+Package reflections provides high level abstractions above the
+reflect library.
+
+The reflect library is very low-level and can be quite complex when it comes to doing simple things like accessing a structure field value or a field tag...
+
+The purpose of the reflections package is to make developers' lives easier when it comes to introspecting structures at runtime.
+Its API is freely inspired by the Python language (getattr, setattr, hasattr...) and provides simplified access to structure fields and tags.
+*/
+package reflections
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+// GetField returns the value of the provided obj field. obj can either
+// be a structure or a pointer to a structure.
+func GetField(obj interface{}, name string) (interface{}, error) {
+	if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
+		return nil, errors.New("Cannot use GetField on a non-struct interface")
+	}
+
+	objValue := reflectValue(obj)
+	field := objValue.FieldByName(name)
+	if !field.IsValid() {
+		return nil, fmt.Errorf("No such field: %s in obj", name)
+	}
+
+	return field.Interface(), nil
+}
+
+// GetFieldKind returns the kind of the provided obj field. obj can either
+// be a structure or a pointer to a structure.
+func GetFieldKind(obj interface{}, name string) (reflect.Kind, error) {
+	if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
+		return reflect.Invalid, errors.New("Cannot use GetField on a non-struct interface")
+	}
+
+	objValue := reflectValue(obj)
+	field := objValue.FieldByName(name)
+
+	if !field.IsValid() {
+		return reflect.Invalid, fmt.Errorf("No such field: %s in obj", name)
+	}
+
+	return field.Type().Kind(), nil
+}
+
+// GetFieldType returns the string representation of the type of the provided
+// obj field. obj can either be a structure or a pointer to a structure.
+func GetFieldType(obj interface{}, name string) (string, error) {
+	if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
+		return "", errors.New("Cannot use GetField on a non-struct interface")
+	}
+
+	objValue := reflectValue(obj)
+	field := objValue.FieldByName(name)
+
+	if !field.IsValid() {
+		return "", fmt.Errorf("No such field: %s in obj", name)
+	}
+
+	return field.Type().String(), nil
+}
+
+// GetFieldTag returns the provided obj field tag value. obj can either
+// be a structure or a pointer to a structure.
+func GetFieldTag(obj interface{}, fieldName, tagKey string) (string, error) {
+	if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
+		return "", errors.New("Cannot use GetField on a non-struct interface")
+	}
+
+	objValue := reflectValue(obj)
+	objType := objValue.Type()
+
+	field, ok := objType.FieldByName(fieldName)
+	if !ok {
+		return "", fmt.Errorf("No such field: %s in obj", fieldName)
+	}
+
+	if !isExportableField(field) {
+		return "", errors.New("Cannot GetFieldTag on a non-exported struct field")
+	}
+
+	return field.Tag.Get(tagKey), nil
+}
+
+// SetField sets the provided obj field with provided value. obj param has
+// to be a pointer to a struct, otherwise it will soundly fail. Provided
+// value type should match with the struct field you're trying to set.
+func SetField(obj interface{}, name string, value interface{}) error {
+	// Fetch the field reflect.Value
+	structValue := reflect.ValueOf(obj).Elem()
+	structFieldValue := structValue.FieldByName(name)
+
+	if !structFieldValue.IsValid() {
+		return fmt.Errorf("No such field: %s in obj", name)
+	}
+
+	// If obj field value is not settable an error is thrown
+	if !structFieldValue.CanSet() {
+		return fmt.Errorf("Cannot set %s field value", name)
+	}
+
+	structFieldType := structFieldValue.Type()
+	val := reflect.ValueOf(value)
+	if structFieldType != val.Type() {
+		invalidTypeError := errors.New("Provided value type didn't match obj field type")
+		return invalidTypeError
+	}
+
+	structFieldValue.Set(val)
+	return nil
+}
+
+// HasField checks if the provided field name is part of a struct. obj can either
+// be a structure or a pointer to a structure.
+func HasField(obj interface{}, name string) (bool, error) {
+	if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
+		return false, errors.New("Cannot use GetField on a non-struct interface")
+	}
+
+	objValue := reflectValue(obj)
+	objType := objValue.Type()
+	field, ok := objType.FieldByName(name)
+	if !ok || !isExportableField(field) {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// Fields returns the struct field names as a list. obj can either
+// be a structure or a pointer to a structure.
+func Fields(obj interface{}) ([]string, error) {
+	return fields(obj, false)
+}
+
+// FieldsDeep returns "flattened" fields (fields from anonymous
+// inner structs are treated as normal fields)
+func FieldsDeep(obj interface{}) ([]string, error) {
+	return fields(obj, true)
+}
+
+func fields(obj interface{}, deep bool) ([]string, error) {
+	if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
+		return nil, errors.New("Cannot use GetField on a non-struct interface")
+	}
+
+	objValue := reflectValue(obj)
+	objType := objValue.Type()
+	fieldsCount := objType.NumField()
+
+	var allFields []string
+	for i := 0; i < fieldsCount; i++ {
+		field := objType.Field(i)
+		if isExportableField(field) {
+			if deep && field.Anonymous {
+				fieldValue := objValue.Field(i)
+				subFields, err := fields(fieldValue.Interface(), deep)
+				if err != nil {
+					return nil, fmt.Errorf("Cannot get fields in %s: %s", field.Name, err.Error())
+				}
+				allFields = append(allFields, subFields...)
+			} else {
+				allFields = append(allFields, field.Name)
+			}
+		}
+	}
+
+	return allFields, nil
+}
+
+// Items returns the field - value struct pairs as a map. obj can either
+// be a structure or a pointer to a structure.
+func Items(obj interface{}) (map[string]interface{}, error) {
+	return items(obj, false)
+}
+
+// FieldsDeep returns "flattened" items (fields from anonymous
+// inner structs are treated as normal fields)
+func ItemsDeep(obj interface{}) (map[string]interface{}, error) {
+	return items(obj, true)
+}
+
+func items(obj interface{}, deep bool) (map[string]interface{}, error) {
+	if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
+		return nil, errors.New("Cannot use GetField on a non-struct interface")
+	}
+
+	objValue := reflectValue(obj)
+	objType := objValue.Type()
+	fieldsCount := objType.NumField()
+
+	allItems := make(map[string]interface{})
+
+	for i := 0; i < fieldsCount; i++ {
+		field := objType.Field(i)
+		fieldValue := objValue.Field(i)
+		if isExportableField(field) {
+			if deep && field.Anonymous {
+				if m, err := items(fieldValue.Interface(), deep); err == nil {
+					for k, v := range m {
+						allItems[k] = v
+					}
+				} else {
+					return nil, fmt.Errorf("Cannot get items in %s: %s", field.Name, err.Error())
+				}
+			} else {
+				allItems[field.Name] = fieldValue.Interface()
+			}
+		}
+	}
+
+	return allItems, nil
+}
+
+// Tags lists the struct tag fields. obj can either
+// be a structure or a pointer to a structure.
+func Tags(obj interface{}, key string) (map[string]string, error) {
+	return tags(obj, key, false)
+}
+
+// FieldsDeep returns "flattened" tags (fields from anonymous
+// inner structs are treated as normal fields)
+func TagsDeep(obj interface{}, key string) (map[string]string, error) {
+	return tags(obj, key, true)
+}
+
+func tags(obj interface{}, key string, deep bool) (map[string]string, error) {
+	if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
+		return nil, errors.New("Cannot use GetField on a non-struct interface")
+	}
+
+	objValue := reflectValue(obj)
+	objType := objValue.Type()
+	fieldsCount := objType.NumField()
+
+	allTags := make(map[string]string)
+
+	for i := 0; i < fieldsCount; i++ {
+		structField := objType.Field(i)
+		if isExportableField(structField) {
+			if deep && structField.Anonymous {
+				fieldValue := objValue.Field(i)
+				if m, err := tags(fieldValue.Interface(), key, deep); err == nil {
+					for k, v := range m {
+						allTags[k] = v
+					}
+				} else {
+					return nil, fmt.Errorf("Cannot get items in %s: %s", structField.Name, err.Error())
+				}
+			} else {
+				allTags[structField.Name] = structField.Tag.Get(key)
+			}
+		}
+	}
+
+	return allTags, nil
+}
+
+func reflectValue(obj interface{}) reflect.Value {
+	var val reflect.Value
+
+	if reflect.TypeOf(obj).Kind() == reflect.Ptr {
+		val = reflect.ValueOf(obj).Elem()
+	} else {
+		val = reflect.ValueOf(obj)
+	}
+
+	return val
+}
+
+func isExportableField(field reflect.StructField) bool {
+	// PkgPath is empty for exported fields.
+	return field.PkgPath == ""
+}
+
+func hasValidType(obj interface{}, types []reflect.Kind) bool {
+	for _, t := range types {
+		if reflect.TypeOf(obj).Kind() == t {
+			return true
+		}
+	}
+
+	return false
+}
+
+func isStruct(obj interface{}) bool {
+	return reflect.TypeOf(obj).Kind() == reflect.Struct
+}
+
+func isPointer(obj interface{}) bool {
+	return reflect.TypeOf(obj).Kind() == reflect.Ptr
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/.deepsource.toml b/vendor/github.com/outcaste-io/ristretto/.deepsource.toml
new file mode 100644
index 0000000000..40609eff3f
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/.deepsource.toml
@@ -0,0 +1,17 @@
+version = 1
+
+test_patterns = [
+  '**/*_test.go'
+]
+
+exclude_patterns = [
+  
+]
+
+[[analyzers]]
+name = 'go'
+enabled = true
+
+
+  [analyzers.meta]
+  import_path = 'github.com/dgraph-io/ristretto'
diff --git a/vendor/github.com/outcaste-io/ristretto/.mailmap b/vendor/github.com/outcaste-io/ristretto/.mailmap
new file mode 100644
index 0000000000..8ea0986d41
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/.mailmap
@@ -0,0 +1 @@
+Manish R Jain <manishrjain@gmail.com> <manish@dgraph.io>
diff --git a/vendor/github.com/outcaste-io/ristretto/CHANGELOG.md b/vendor/github.com/outcaste-io/ristretto/CHANGELOG.md
new file mode 100644
index 0000000000..da964bc0c8
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/CHANGELOG.md
@@ -0,0 +1,172 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project will adhere to [Semantic Versioning](http://semver.org/spec/v2.0.0.html) starting v1.0.0.
+
+## Unreleased
+
+## [0.1.0] - 2021-06-03
+
+[0.1.0]: https://github.com/dgraph-io/ristretto/compare/v0.1.0..v0.0.3
+This release contains bug fixes and improvements to Ristretto. It also contains
+major updates to the z package. The z package contains types such as Tree (B+
+tree), Buffer, Mmap file, etc. All these types are used in Badger and Dgraph to
+improve performance and reduce memory requirements.
+
+### Changed
+- Make item public. Add a new onReject call for rejected items. (#180)
+
+### Added
+- Use z.Buffer backing for B+ tree (#268)
+- expose GetTTL function (#270)
+- docs(README): Ristretto is production-ready. (#267)
+- Add IterateKV (#265)
+- feat(super-flags): Add GetPath method in superflags (#258)
+- add GetDuration to SuperFlag (#248)
+- add Has, GetFloat64, and GetInt64 to SuperFlag (#247)
+- move SuperFlag to Ristretto (#246)
+- add SuperFlagHelp tool to generate flag help text (#251)
+- allow empty defaults in SuperFlag (#254)
+- add mmaped b+ tree (#207)
+- Add API to allow the MaxCost of an existing cache to be updated. (#200)
+- Add OnExit handler which can be used for manual memory management (#183)
+- Add life expectancy histogram (#182)
+- Add mechanism to wait for items to be processed. (#184)
+
+### Fixed
+- change expiration type from int64 to time.Time (#277)
+- fix(buffer): make buffer capacity atleast defaultCapacity (#273)
+- Fixes for z.PersistentTree (#272)
+- Initialize persistent tree correctly (#271)
+- use xxhash v2 (#266)
+- update comments to correctly reflect counter space usage (#189)
+- enable riscv64 builds (#264)
+- Switch from log to glog (#263)
+- Use Fibonacci for latency numbers
+- cache: fix race when clearning a cache (#261)
+- Check for keys without values in superflags (#259)
+- chore(perf): using tags instead of runtime callers to improve the performance of leak detection (#255)
+- fix(Flags): panic on user errors (#256)
+- fix SuperFlagHelp newline (#252)
+- fix(arm): Fix crashing under ARMv6 due to memory mis-alignment (#239)
+- Fix incorrect unit test coverage depiction (#245)
+- chore(histogram): adding percentile in histogram (#241)
+- fix(windows): use filepath instead of path (#244)
+- fix(MmapFile): Close the fd before deleting the file (#242)
+- Fixes CGO_ENABLED=0 compilation error (#240)
+- fix(build): fix build on non-amd64 architectures (#238)
+- fix(b+tree): Do not double the size of btree (#237)
+- fix(jemalloc): Fix the stats of jemalloc (#236)
+- Don't print stuff, only return strings.
+- Bring memclrNoHeapPointers to z (#235)
+- increase number of buffers from 32 to 64 in allocator (#234)
+- Set minSize to 1MB.
+- Opt(btree): Use Go memory instead of mmap files
+- Opt(btree): Lightweight stats calculation
+- Put padding internally to z.Buffer
+- Chore(z): Add SetTmpDir API to set the temp directory (#233)
+- Add a BufferFrom
+- Bring z.Allocator and z.AllocatorPool back
+- Fix(z.Allocator): Make Allocator use Go memory
+- Updated ZeroOut to use a simple for loop.  (#231)
+- Add concurrency back
+- Add a test to check concurrency of Allocator.
+- Fix(buffer): Expose padding by z.Buffer's APIs and fix test (#222)
+- AllocateSlice should Truncate if the file is not big enough (#226)
+- Zero out allocations for structs now that we're reusing Allocators.
+- Fix the ristretto substring
+- Deal with nil z.AllocatorPool
+- Create an AllocatorPool class.
+- chore(btree): clean NewTree API (#225)
+- fix(MmapFile): Don't error out if fileSize > sz (#224)
+- feat(btree): allow option to reset btree and mmaping it to specified file. (#223)
+- Use mremap on Linux instead of munmap+mmap (#221)
+- Reuse pages in B+ tree (#220)
+- fix(allocator): make nil allocator return go byte slice (#217)
+- fix(buffer): Make padding internal to z.buffer (#216)
+- chore(buffer): add a parent directory field in z.Buffer (#215)
+- Make Allocator concurrent
+- Fix infinite loop in allocator (#214)
+- Add trim func
+- Use allocator pool. Turn off freelist.
+- Add freelists to Allocator to reuse.
+- make DeleteBelow delete values that are less than lo (#211)
+- Avoid an unnecessary Load procedure in IncrementOffset.
+- Add Stats method in Btree.
+- chore(script): fix local test script (#210)
+- fix(btree): Increase buffer size if needed. (#209)
+- chore(btree): add occupancy ratio, search benchmark and compact bug fix (#208)
+- Add licenses, remove prints, and fix a bug in compact
+- Add IncrementOffset API for z.buffers (#206)
+- Show count when printing histogram (#201)
+- Zbuffer: Add LenNoPadding and make padding 8 bytes (#204)
+- Allocate Go memory in case allocator is nil.
+- Add leak detection via leak build flag and fix a leak during cache.Close.
+- Add some APIs for allocator and buffer
+- Sync before truncation or close.
+- Handle nil MmapFile for Sync.
+- Public methods must not panic after Close() (#202)
+- Check for RD_ONLY correctly.
+- Modify MmapFile APIs
+- Add a bunch of APIs around MmapFile
+- Move APIs for mmapfile creation over to z package.
+- Add ZeroOut func
+- Add SliceOffsets
+- z: Add TotalSize method on bloom filter (#197)
+- Add Msync func
+- Buffer: Use 256 GB mmap size instead of MaxInt64 (#198)
+- Add a simple test to check next2Pow
+- Improve memory performance (#195)
+- Have a way to automatically mmap a growing buffer (#196)
+- Introduce Mmapped buffers and Merge Sort (#194)
+- Add a way to access an allocator via reference.
+- Use jemalloc.a to ensure compilation with the Go binary
+- Fix up a build issue with ReadMemStats
+- Add ReadMemStats function (#193)
+- Allocator helps allocate memory to be used by unsafe structs (#192)
+- Improve histogram output
+- Move Closer from y to z (#191)
+- Add histogram.Mean() method (#188)
+- Introduce Calloc: Manual Memory Management via jemalloc (#186)
+
+## [0.0.3] - 2020-07-06
+
+[0.0.3]: https://github.com/dgraph-io/ristretto/compare/v0.0.2..v0.0.3
+
+### Changed
+
+### Added
+
+### Fixed
+
+- z: use MemHashString and xxhash.Sum64String ([#153][])
+- Check conflict key before updating expiration map. ([#154][])
+- Fix race condition in Cache.Clear ([#133][])
+- Improve handling of updated items ([#168][])
+- Fix droppedSets count while updating the item ([#171][])
+
+## [0.0.2] - 2020-02-24
+
+[0.0.2]: https://github.com/dgraph-io/ristretto/compare/v0.0.1..v0.0.2
+
+### Added
+
+- Sets with TTL. ([#122][])
+
+### Fixed
+
+- Fix the way metrics are handled for deletions. ([#111][])
+- Support nil `*Cache` values in `Clear` and `Close`. ([#119][]) 
+- Delete item immediately. ([#113][])
+- Remove key from policy after TTL eviction. ([#130][])
+
+[#111]: https://github.com/dgraph-io/ristretto/issues/111
+[#113]: https://github.com/dgraph-io/ristretto/issues/113
+[#119]: https://github.com/dgraph-io/ristretto/issues/119
+[#122]: https://github.com/dgraph-io/ristretto/issues/122
+[#130]: https://github.com/dgraph-io/ristretto/issues/130
+
+## 0.0.1
+
+First release. Basic cache functionality based on a LFU policy.
diff --git a/vendor/github.com/outcaste-io/ristretto/LICENSE b/vendor/github.com/outcaste-io/ristretto/LICENSE
new file mode 100644
index 0000000000..d9a10c0d8e
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/LICENSE
@@ -0,0 +1,176 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
diff --git a/vendor/github.com/outcaste-io/ristretto/README.md b/vendor/github.com/outcaste-io/ristretto/README.md
new file mode 100644
index 0000000000..80a43ec147
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/README.md
@@ -0,0 +1,237 @@
+# Ristretto
+[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/outcaste-io/ristretto)
+[![Go Report Card](https://img.shields.io/badge/go%20report-A%2B-brightgreen)](https://goreportcard.com/report/github.com/outcaste-io/ristretto)
+[![Coverage](https://gocover.io/_badge/github.com/outcaste-io/ristretto)](https://gocover.io/github.com/outcaste-io/ristretto)
+![Tests](https://github.com/outcaste-io/ristretto/workflows/tests/badge.svg)
+
+**This is a fork of dgraph-io/ristretto, maintained by @manishrjain.**
+
+Ristretto is a fast, concurrent cache library built with a focus on performance and correctness.
+
+The motivation to build Ristretto comes from the need for a contention-free
+cache.
+
+[issues]: https://github.com/outcaste-io/issues
+
+## Features
+
+* **High Hit Ratios** - with our unique admission/eviction policy pairing, Ristretto's performance is best in class.
+	* **Eviction: SampledLFU** - on par with exact LRU and better performance on Search and Database traces.
+	* **Admission: TinyLFU** - extra performance with little memory overhead (12 bits per counter).
+* **Fast Throughput** - we use a variety of techniques for managing contention and the result is excellent throughput.
+* **Cost-Based Eviction** - any large new item deemed valuable can evict multiple smaller items (cost could be anything).
+* **Fully Concurrent** - you can use as many goroutines as you want with little throughput degradation.
+* **Metrics** - optional performance metrics for throughput, hit ratios, and other stats.
+* **Simple API** - just figure out your ideal `Config` values and you're off and running.
+
+## Note on jemalloc
+
+We have been using jemalloc v5.2.1.
+To use jemalloc, please configure jemalloc with these flags:
+
+```
+./configure --with-install-suffix='_outcaste' --with-jemalloc-prefix='je_' --with-malloc-conf='background_thread:true,metadata_thp:auto'; \
+make
+make install_lib install_include # Use sudo if needed in this step.
+```
+
+outserv/outserv Makefile has these build steps already present. You can run
+`make jemalloc` to install it. This jemalloc would not interfere with any other
+jemalloc installation that might already be present on the system.
+
+
+## Status
+
+Ristretto is production-ready. See [Projects using Ristretto](#projects-using-ristretto).
+
+## Table of Contents
+
+* [Usage](#Usage)
+	* [Example](#Example)
+	* [Config](#Config)
+		* [NumCounters](#Config)
+		* [MaxCost](#Config)
+		* [BufferItems](#Config)
+		* [Metrics](#Config)
+		* [OnEvict](#Config)
+		* [KeyToHash](#Config)
+        * [Cost](#Config)
+* [Benchmarks](#Benchmarks)
+	* [Hit Ratios](#Hit-Ratios)
+		* [Search](#Search)
+		* [Database](#Database)
+		* [Looping](#Looping)
+		* [CODASYL](#CODASYL)
+	* [Throughput](#Throughput)
+		* [Mixed](#Mixed)
+		* [Read](#Read)
+		* [Write](#Write)
+* [Projects using Ristretto](#projects-using-ristretto)
+* [FAQ](#FAQ)
+
+## Usage
+
+### Example
+
+```go
+func main() {
+	cache, err := ristretto.NewCache(&ristretto.Config{
+		NumCounters: 1e7,     // number of keys to track frequency of (10M).
+		MaxCost:     1 << 30, // maximum cost of cache (1GB).
+		BufferItems: 64,      // number of keys per Get buffer.
+	})
+	if err != nil {
+		panic(err)
+	}
+
+	// set a value with a cost of 1
+	cache.Set("key", "value", 1)
+
+	// wait for value to pass through buffers
+	cache.Wait()
+
+	value, found := cache.Get("key")
+	if !found {
+		panic("missing value")
+	}
+	fmt.Println(value)
+	cache.Del("key")
+}
+```
+
+### Config
+
+The `Config` struct is passed to `NewCache` when creating Ristretto instances (see the example above).
+
+**NumCounters** `int64`
+
+NumCounters is the number of 4-bit access counters to keep for admission and eviction. We've seen good performance in setting this to 10x the number of items you expect to keep in the cache when full.
+
+For example, if you expect each item to have a cost of 1 and MaxCost is 100, set NumCounters to 1,000. Or, if you use variable cost values but expect the cache to hold around 10,000 items when full, set NumCounters to 100,000. The important thing is the *number of unique items* in the full cache, not necessarily the MaxCost value.
+
+**MaxCost** `int64`
+
+MaxCost is the capacity budget that drives eviction decisions. For example, if MaxCost is 100 and a new item with a cost of 1 increases total cache cost to 101, 1 item will be evicted.
+
+MaxCost can also be used to denote the max size in bytes. For example, if MaxCost is 1,000,000 (1MB) and the cache is full with 1,000 1KB items, a newly accepted item with a cost of 5,000 (5KB) would cause five of the 1KB items to be evicted.
+
+MaxCost could be anything as long as it matches how you're using the cost values when calling Set.
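+
+A minimal sketch of byte-sized costs (assuming the cache was created with MaxCost expressed in bytes):
+
+```go
+entry := []byte("some value")
+// The cost is the payload size in bytes, matching a MaxCost given in bytes.
+cache.Set("key", entry, int64(len(entry)))
+```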
+
+**BufferItems** `int64`
+
+BufferItems is the size of the Get buffers. The best value we've found for this is 64.
+
+If for some reason you see Get performance decreasing with lots of contention (you shouldn't), try increasing this value in increments of 64. This is a fine-tuning mechanism and you probably won't have to touch this.
+
+**Metrics** `bool`
+
+Metrics is true when you want real-time logging of a variety of stats. This is a Config flag because collecting metrics costs roughly 10% of throughput.
+
+**OnEvict** `func(item *Item)`
+
+OnEvict is called for every eviction and receives the evicted item, including its hashed key, conflict hash, value, and cost.
+
+**KeyToHash** `func(key interface{}) (uint64, uint64)`
+
+KeyToHash is the hashing algorithm used for every key. If this is nil, Ristretto has a variety of [defaults depending on the underlying interface type](https://github.com/outcaste-io/ristretto/blob/master/z/z.go#L19-L41).
+
+Note that if you want 128-bit hashes you should use both `uint64` return
+values; otherwise fill only the first value and return `0` for the second, and
+it will behave like any 64-bit hash.
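+
+A minimal sketch of a custom hash for string-only keys (`stringKeyToHash` is a hypothetical helper using FNV-1a, with no conflict hash):
+
+```go
+import "hash/fnv"
+
+func stringKeyToHash(key interface{}) (uint64, uint64) {
+	h := fnv.New64a()
+	h.Write([]byte(key.(string)))
+	// The second value is the conflict hash; returning 0 disables conflict detection.
+	return h.Sum64(), 0
+}
+```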
+
+**Cost** `func(value interface{}) int64`
+
+Cost is an optional function you can pass to the Config in order to evaluate
+item cost at runtime, and only for the Set calls that aren't dropped (this is
+useful if calculating item cost is particularly expensive and you don't want to
+waste time on items that will be dropped anyways).
+
+To signal to Ristretto that you'd like to use this Cost function:
+
+1. Set the Cost field to a non-nil function.
+2. When calling Set for new items or item updates, use a `cost` of 0.
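+
+A minimal sketch of a runtime cost function (assuming string values):
+
+```go
+cache, err := ristretto.NewCache(&ristretto.Config{
+	NumCounters: 1e7,
+	MaxCost:     1 << 30,
+	BufferItems: 64,
+	// Cost runs only for Sets that aren't dropped, so an expensive
+	// calculation isn't wasted on items that never enter the cache.
+	Cost: func(value interface{}) int64 {
+		return int64(len(value.(string)))
+	},
+})
+if err != nil {
+	panic(err)
+}
+// Pass a cost of 0 so the Cost function is consulted instead.
+cache.Set("key", "some value", 0)
+```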
+
+## Benchmarks
+
+The benchmarks can be found at https://github.com/dgraph-io/benchmarks/tree/master/cachebench/ristretto.
+
+### Hit Ratios
+
+#### Search
+
+This trace is described as "disk read accesses initiated by a large commercial
+search engine in response to various web search requests."
+
+<p align="center">
+	<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Hit%20Ratios%20-%20Search%20(ARC-S3).svg">
+</p>
+
+#### Database
+
+This trace is described as "a database server running at a commercial site
+running an ERP application on top of a commercial database."
+
+<p align="center">
+	<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Hit%20Ratios%20-%20Database%20(ARC-DS1).svg">
+</p>
+
+#### Looping
+
+This trace demonstrates a looping access pattern.
+
+<p align="center">
+	<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Hit%20Ratios%20-%20Glimpse%20(LIRS-GLI).svg">
+</p>
+
+#### CODASYL
+
+This trace is described as "references to a CODASYL database for a one hour
+period."
+
+<p align="center">
+	<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Hit%20Ratios%20-%20CODASYL%20(ARC-OLTP).svg">
+</p>
+
+### Throughput
+
+All throughput benchmarks were run on an Intel Core i7-8700K (3.7GHz) with 16GB
+of RAM.
+
+#### Mixed
+
+<p align="center">
+	<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Throughput%20-%20Mixed.svg">
+</p>
+
+#### Read
+
+<p align="center">
+	<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Throughput%20-%20Read%20(Zipfian).svg">
+</p>
+
+#### Write
+
+<p align="center">
+	<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Throughput%20-%20Write%20(Zipfian).svg">
+</p>
+
+## Projects Using Ristretto
+
+Below is a list of known projects that use Ristretto:
+
+- [Badger](https://github.com/dgraph-io/badger) - Embeddable key-value DB in Go
+- [Dgraph](https://github.com/dgraph-io/dgraph) - Horizontally scalable and distributed GraphQL database with a graph backend
+- [Vitess](https://github.com/vitessio/vitess) - Database clustering system for horizontal scaling of MySQL
+- [SpiceDB](https://github.com/authzed/spicedb) - Horizontally scalable permissions database
+
+## FAQ
+
+### How are you achieving this performance? What shortcuts are you taking?
+
+We go into detail in the [Ristretto blog post](https://blog.dgraph.io/post/introducing-ristretto-high-perf-go-cache/), but in short: our throughput performance can be attributed to a mix of batching and eventual consistency. Our hit ratio performance is mostly due to an excellent [admission policy](https://arxiv.org/abs/1512.00727) and SampledLFU eviction policy.
+
+As for "shortcuts," the only thing Ristretto does that could be construed as one is dropping some Set calls. That means a Set call for a new item (updates are guaranteed) isn't guaranteed to make it into the cache. The new item could be dropped at two points: when passing through the Set buffer or when passing through the admission policy. However, this doesn't affect hit ratios much at all as we expect the most popular items to be Set multiple times and eventually make it in the cache.
+
+### Is Ristretto distributed?
+
+No, it's just like any other Go library that you can import into your project and use in a single process.
diff --git a/vendor/github.com/outcaste-io/ristretto/cache.go b/vendor/github.com/outcaste-io/ristretto/cache.go
new file mode 100644
index 0000000000..18e3647e8c
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/cache.go
@@ -0,0 +1,520 @@
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Ristretto is a fast, fixed size, in-memory cache with a dual focus on
+// throughput and hit ratio performance. You can easily add Ristretto to an
+// existing system and keep the most valuable data where you need it.
+package ristretto
+
+import (
+	"errors"
+	"sync"
+	"time"
+	"unsafe"
+
+	"github.com/outcaste-io/ristretto/z"
+	"go.uber.org/atomic"
+)
+
+var (
+	// TODO: find the optimal value for this or make it configurable
+	setBufSize = 32 * 1024
+)
+
+type itemCallback func(*Item)
+
+const itemSize = int64(unsafe.Sizeof(storeItem{}))
+
+// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission
+// policy and a Sampled LFU eviction policy. You can use the same Cache instance
+// from as many goroutines as you want.
+type Cache struct {
+	// store is the central concurrent hashmap where key-value items are stored.
+	store *shardedMap
+	// policy determines what gets let in to the cache and what gets kicked out.
+	policy *lfuPolicy
+	// getBuf is a custom ring buffer implementation that gets pushed to when
+	// keys are read.
+	getBuf *ringBuffer
+	// setBuf is a buffer allowing us to batch/drop Sets during times of high
+	// contention.
+	setBuf chan *Item
+	// onEvict is called for item evictions.
+	onEvict itemCallback
+	// onReject is called when an item is rejected via admission policy.
+	onReject itemCallback
+	// onExit is called whenever a value goes out of scope from the cache.
+	onExit (func(interface{}))
+	// KeyToHash function is used to customize the key hashing algorithm.
+	// Each key will be hashed using the provided function. If keyToHash value
+	// is not set, the default keyToHash function is used.
+	keyToHash func(interface{}) (uint64, uint64)
+	// stop is used to stop the processItems goroutine.
+	stop chan struct{}
+	// indicates whether cache is closed.
+	isClosed atomic.Bool
+	// cost calculates cost from a value.
+	cost func(value interface{}) int64
+	// ignoreInternalCost dictates whether to ignore the cost of internally storing
+	// the item in the cost calculation.
+	ignoreInternalCost bool
+	// cleanupTicker is used to periodically check for entries whose TTL has passed.
+	cleanupTicker *time.Ticker
+	// Metrics contains a running log of important statistics like hits, misses,
+	// and dropped items.
+	Metrics *Metrics
+}
+
+// Config is passed to NewCache for creating new Cache instances.
+type Config struct {
+	// NumCounters determines the number of counters (keys) to keep that hold
+	// access frequency information. It's generally a good idea to have more
+	// counters than the max cache capacity, as this will improve eviction
+	// accuracy and subsequent hit ratios.
+	//
+	// For example, if you expect your cache to hold 1,000,000 items when full,
+	// NumCounters should be 10,000,000 (10x). Each counter takes up roughly
+	// 3 bytes (4 bits for each counter * 4 copies plus about a byte per
+	// counter for the bloom filter). Note that the number of counters is
+	// internally rounded up to the nearest power of 2, so the space usage
+	// may be a little larger than 3 bytes * NumCounters.
+	NumCounters int64
+	// MaxCost can be considered as the cache capacity, in whatever units you
+	// choose to use.
+	//
+	// For example, if you want the cache to have a max capacity of 100MB, you
+	// would set MaxCost to 100,000,000 and pass an item's number of bytes as
+	// the `cost` parameter for calls to Set. If new items are accepted, the
+	// eviction process will take care of making room for the new item and not
+	// overflowing the MaxCost value.
+	MaxCost int64
+	// BufferItems determines the size of Get buffers.
+	//
+	// Unless you have a rare use case, using `64` as the BufferItems value
+	// results in good performance.
+	BufferItems int64
+	// Metrics determines whether cache statistics are kept during the cache's
+	// lifetime. There *is* some overhead to keeping statistics, so you should
+	// only set this flag to true when testing or throughput performance isn't a
+	// major factor.
+	Metrics bool
+	// OnEvict is called for every eviction and passes the hashed key, value,
+	// and cost to the function.
+	OnEvict func(item *Item)
+	// OnReject is called for every rejection done via the policy.
+	OnReject func(item *Item)
+	// OnExit is called whenever a value is removed from cache. This can be
+	// used to do manual memory deallocation. Would also be called on eviction
+	// and rejection of the value.
+	OnExit func(val interface{})
+	// KeyToHash function is used to customize the key hashing algorithm.
+	// Each key will be hashed using the provided function. If keyToHash value
+	// is not set, the default keyToHash function is used.
+	KeyToHash func(key interface{}) (uint64, uint64)
+	// ShouldUpdate is called when a value already exists in the cache and is being updated.
+	ShouldUpdate func(prev, cur interface{}) bool
+	// Cost evaluates a value and outputs a corresponding cost. This function
+	// is run after Set is called for a new item or an item update with a cost
+	// param of 0.
+	Cost func(value interface{}) int64
+	// IgnoreInternalCost set to true indicates to the cache that the cost of
+	// internally storing the value should be ignored. This is useful when the
+	// cost passed to set is not using bytes as units. Keep in mind that setting
+	// this to true will increase the memory usage.
+	IgnoreInternalCost bool
+}
+
+type itemFlag byte
+
+const (
+	itemNew itemFlag = iota
+	itemDelete
+	itemUpdate
+)
+
+// Item is passed to setBuf so items can eventually be added to the cache.
+type Item struct {
+	flag       itemFlag
+	Key        uint64
+	Conflict   uint64
+	Value      interface{}
+	Cost       int64
+	Expiration time.Time
+	wg         *sync.WaitGroup
+}
+
+// NewCache returns a new Cache instance, or an error if the configuration is invalid.
+func NewCache(config *Config) (*Cache, error) {
+	switch {
+	case config.NumCounters == 0:
+		return nil, errors.New("NumCounters can't be zero")
+	case config.MaxCost == 0:
+		return nil, errors.New("MaxCost can't be zero")
+	case config.BufferItems == 0:
+		return nil, errors.New("BufferItems can't be zero")
+	}
+	policy := newPolicy(config.NumCounters, config.MaxCost)
+	cache := &Cache{
+		store:              newShardedMap(config.ShouldUpdate),
+		policy:             policy,
+		getBuf:             newRingBuffer(policy, config.BufferItems),
+		setBuf:             make(chan *Item, setBufSize),
+		keyToHash:          config.KeyToHash,
+		stop:               make(chan struct{}),
+		cost:               config.Cost,
+		ignoreInternalCost: config.IgnoreInternalCost,
+		cleanupTicker:      time.NewTicker(time.Duration(bucketDurationSecs) * time.Second / 2),
+	}
+	cache.onExit = func(val interface{}) {
+		if config.OnExit != nil && val != nil {
+			config.OnExit(val)
+		}
+	}
+	cache.onEvict = func(item *Item) {
+		if config.OnEvict != nil {
+			config.OnEvict(item)
+		}
+		cache.onExit(item.Value)
+	}
+	cache.onReject = func(item *Item) {
+		if config.OnReject != nil {
+			config.OnReject(item)
+		}
+		cache.onExit(item.Value)
+	}
+	cache.store.shouldUpdate = func(prev, cur interface{}) bool {
+		if config.ShouldUpdate != nil {
+			return config.ShouldUpdate(prev, cur)
+		}
+		return true
+	}
+	if cache.keyToHash == nil {
+		cache.keyToHash = z.KeyToHash
+	}
+	if config.Metrics {
+		cache.collectMetrics()
+	}
+	// NOTE: benchmarks seem to show that performance decreases the more
+	//       goroutines we have running cache.processItems(), so 1 should
+	//       usually be sufficient
+	go cache.processItems()
+	return cache, nil
+}
+
+func (c *Cache) Wait() {
+	if c == nil || c.isClosed.Load() {
+		return
+	}
+	wg := &sync.WaitGroup{}
+	wg.Add(1)
+	c.setBuf <- &Item{wg: wg}
+	wg.Wait()
+}
+
+// Get returns the value (if any) and a boolean representing whether the
+// value was found or not. The value can be nil and the boolean can be true at
+// the same time.
+func (c *Cache) Get(key interface{}) (interface{}, bool) {
+	if c == nil || c.isClosed.Load() || key == nil {
+		return nil, false
+	}
+	keyHash, conflictHash := c.keyToHash(key)
+	c.getBuf.Push(keyHash)
+	value, ok := c.store.Get(keyHash, conflictHash)
+	if ok {
+		c.Metrics.add(hit, keyHash, 1)
+	} else {
+		c.Metrics.add(miss, keyHash, 1)
+	}
+	return value, ok
+}
+
+// Set attempts to add the key-value item to the cache. If it returns false,
+// then the Set was dropped and the key-value item isn't added to the cache. If
+// it returns true, there's still a chance it could be dropped by the policy if
+// it's determined that the key-value item isn't worth keeping, but otherwise the
+// item will be added and other items will be evicted in order to make room.
+//
+// To dynamically evaluate the item's cost using the Config.Cost function, set
+// the cost parameter to 0 and Cost will be run when needed in order to find
+// the item's true cost.
+func (c *Cache) Set(key, value interface{}, cost int64) bool {
+	return c.SetWithTTL(key, value, cost, 0*time.Second)
+}
+
+// SetWithTTL works like Set but adds a key-value pair to the cache that will expire
+// after the specified TTL (time to live) has passed. A zero value means the value never
+// expires, which is identical to calling Set. A negative value is a no-op and the value
+// is discarded.
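+//
+// For example (a sketch):
+//
+//	cache.SetWithTTL("key", "value", 1, 10*time.Minute)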
+func (c *Cache) SetWithTTL(key, value interface{}, cost int64, ttl time.Duration) bool {
+	return c.setInternal(key, value, cost, ttl, false)
+}
+
+// SetIfPresent is like Set, but only updates the value of an existing key. It
+// does NOT add the key to cache if it's absent.
+func (c *Cache) SetIfPresent(key, value interface{}, cost int64) bool {
+	return c.setInternal(key, value, cost, 0*time.Second, true)
+}
+
+func (c *Cache) setInternal(key, value interface{},
+	cost int64, ttl time.Duration, onlyUpdate bool) bool {
+	if c == nil || c.isClosed.Load() || key == nil {
+		return false
+	}
+
+	var expiration time.Time
+	switch {
+	case ttl == 0:
+		// No expiration.
+		break
+	case ttl < 0:
+		// Treat this as a no-op.
+		return false
+	default:
+		expiration = time.Now().Add(ttl)
+	}
+
+	keyHash, conflictHash := c.keyToHash(key)
+	i := &Item{
+		flag:       itemNew,
+		Key:        keyHash,
+		Conflict:   conflictHash,
+		Value:      value,
+		Cost:       cost,
+		Expiration: expiration,
+	}
+	if onlyUpdate {
+		i.flag = itemUpdate
+	}
+	// cost is eventually updated. The expiration must also be immediately updated
+	// to prevent items from being prematurely removed from the map.
+	if prev, ok := c.store.Update(i); ok {
+		c.onExit(prev)
+		i.flag = itemUpdate
+	} else if onlyUpdate {
+		// The instruction was to update the key, but store.Update failed. So,
+		// this is a NOOP.
+		return false
+	}
+	// Attempt to send item to policy.
+	select {
+	case c.setBuf <- i:
+		return true
+	default:
+		if i.flag == itemUpdate {
+			// Return true if this was an update operation since we've already
+			// updated the store. For all the other operations (set/delete), we
+			// return false which means the item was not inserted.
+			return true
+		}
+		c.Metrics.add(dropSets, keyHash, 1)
+		return false
+	}
+}
+
+// Del deletes the key-value item from the cache if it exists.
+func (c *Cache) Del(key interface{}) {
+	if c == nil || c.isClosed.Load() || key == nil {
+		return
+	}
+	keyHash, conflictHash := c.keyToHash(key)
+	// Delete immediately.
+	_, prev := c.store.Del(keyHash, conflictHash)
+	c.onExit(prev)
+	// If we've set an item, it would be applied slightly later.
+	// So we must push the same item to `setBuf` with the deletion flag.
+	// This ensures that if a set is followed by a delete, it will be
+	// applied in the correct order.
+	c.setBuf <- &Item{
+		flag:     itemDelete,
+		Key:      keyHash,
+		Conflict: conflictHash,
+	}
+}
+
+// GetTTL returns the TTL for the specified key and a bool that is true if the
+// item was found and is not expired.
+func (c *Cache) GetTTL(key interface{}) (time.Duration, bool) {
+	if c == nil || key == nil {
+		return 0, false
+	}
+
+	keyHash, conflictHash := c.keyToHash(key)
+	if _, ok := c.store.Get(keyHash, conflictHash); !ok {
+		// not found
+		return 0, false
+	}
+
+	expiration := c.store.Expiration(keyHash)
+	if expiration.IsZero() {
+		// found but no expiration
+		return 0, true
+	}
+
+	if time.Now().After(expiration) {
+		// found but expired
+		return 0, false
+	}
+
+	return time.Until(expiration), true
+}
+
+// Close stops all goroutines and closes all channels.
+func (c *Cache) Close() {
+	if c == nil || c.isClosed.Load() {
+		return
+	}
+	c.Clear()
+
+	// Block until processItems goroutine is returned.
+	c.stop <- struct{}{}
+	close(c.stop)
+	close(c.setBuf)
+	c.policy.Close()
+	c.isClosed.Store(true)
+}
+
+// Clear empties the hashmap and zeroes all policy counters. Note that this is
+// not an atomic operation (but that shouldn't be a problem as it's assumed that
+// Set/Get calls won't be occurring until after this).
+func (c *Cache) Clear() {
+	if c == nil || c.isClosed.Load() {
+		return
+	}
+	// Block until processItems goroutine is returned.
+	c.stop <- struct{}{}
+
+	// Clear out the setBuf channel.
+loop:
+	for {
+		select {
+		case i := <-c.setBuf:
+			if i.wg != nil {
+				i.wg.Done()
+				continue
+			}
+			if i.flag != itemUpdate {
+				// In itemUpdate, the value is already set in the store.  So, no need to call
+				// onEvict here.
+				c.onEvict(i)
+			}
+		default:
+			break loop
+		}
+	}
+
+	// Clear value hashmap and policy data.
+	c.policy.Clear()
+	c.store.Clear(c.onEvict)
+	// Only reset metrics if they're enabled.
+	if c.Metrics != nil {
+		c.Metrics.Clear()
+	}
+	// Restart processItems goroutine.
+	go c.processItems()
+}
+
+// MaxCost returns the max cost of the cache.
+func (c *Cache) MaxCost() int64 {
+	if c == nil {
+		return 0
+	}
+	return c.policy.MaxCost()
+}
+
+// UpdateMaxCost updates the maxCost of an existing cache.
+func (c *Cache) UpdateMaxCost(maxCost int64) {
+	if c == nil {
+		return
+	}
+	c.policy.UpdateMaxCost(maxCost)
+}
+
+// processItems is run by goroutines processing the Set buffer.
+func (c *Cache) processItems() {
+	startTs := make(map[uint64]time.Time)
+	numToKeep := 100000 // TODO: Make this configurable via options.
+
+	trackAdmission := func(key uint64) {
+		if c.Metrics == nil {
+			return
+		}
+		startTs[key] = time.Now()
+		if len(startTs) > numToKeep {
+			for k := range startTs {
+				if len(startTs) <= numToKeep {
+					break
+				}
+				delete(startTs, k)
+			}
+		}
+	}
+	onEvict := func(i *Item) {
+		if ts, has := startTs[i.Key]; has {
+			c.Metrics.trackEviction(int64(time.Since(ts) / time.Second))
+			delete(startTs, i.Key)
+		}
+		if c.onEvict != nil {
+			c.onEvict(i)
+		}
+	}
+
+	for {
+		select {
+		case i := <-c.setBuf:
+			if i.wg != nil {
+				i.wg.Done()
+				continue
+			}
+			// Calculate item cost value if new or update.
+			if i.Cost == 0 && c.cost != nil && i.flag != itemDelete {
+				i.Cost = c.cost(i.Value)
+			}
+			if !c.ignoreInternalCost {
+				// Add the cost of internally storing the object.
+				i.Cost += itemSize
+			}
+
+			switch i.flag {
+			case itemNew:
+				victims, added := c.policy.Add(i.Key, i.Cost)
+				if added {
+					c.store.Set(i)
+					c.Metrics.add(keyAdd, i.Key, 1)
+					trackAdmission(i.Key)
+				} else {
+					c.onReject(i)
+				}
+				for _, victim := range victims {
+					victim.Conflict, victim.Value = c.store.Del(victim.Key, 0)
+					onEvict(victim)
+				}
+
+			case itemUpdate:
+				c.policy.Update(i.Key, i.Cost)
+
+			case itemDelete:
+				c.policy.Del(i.Key) // Deals with metrics updates.
+				_, val := c.store.Del(i.Key, i.Conflict)
+				c.onExit(val)
+			}
+		case <-c.cleanupTicker.C:
+			c.store.Cleanup(c.policy, onEvict)
+		case <-c.stop:
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/metrics.go b/vendor/github.com/outcaste-io/ristretto/metrics.go
new file mode 100644
index 0000000000..c2db77fad4
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/metrics.go
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2021 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ristretto
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"github.com/outcaste-io/ristretto/z"
+)
+
+type metricType int
+
+const (
+	// The following 2 keep track of hits and misses.
+	hit = iota
+	miss
+	// The following 3 keep track of number of keys added, updated and evicted.
+	keyAdd
+	keyUpdate
+	keyEvict
+	// The following 2 keep track of cost of keys added and evicted.
+	costAdd
+	costEvict
+	// The following keep track of how many sets were dropped or rejected later.
+	dropSets
+	rejectSets
+	// The following 2 keep track of how many gets were kept and dropped on the
+	// floor.
+	dropGets
+	keepGets
+	// This should be the final enum. Other enums should be set before this.
+	doNotUse
+)
+
+func stringFor(t metricType) string {
+	switch t {
+	case hit:
+		return "hit"
+	case miss:
+		return "miss"
+	case keyAdd:
+		return "keys-added"
+	case keyUpdate:
+		return "keys-updated"
+	case keyEvict:
+		return "keys-evicted"
+	case costAdd:
+		return "cost-added"
+	case costEvict:
+		return "cost-evicted"
+	case dropSets:
+		return "sets-dropped"
+	case rejectSets:
+		return "sets-rejected" // by policy.
+	case dropGets:
+		return "gets-dropped"
+	case keepGets:
+		return "gets-kept"
+	default:
+		return "unidentified"
+	}
+}
+
+// Metrics is a snapshot of performance statistics for the lifetime of a cache instance.
+type Metrics struct {
+	all [doNotUse][]*uint64
+
+	mu   sync.RWMutex
+	life *z.HistogramData // Tracks the life expectancy of a key.
+}
+
+// collectMetrics just creates a new *Metrics instance and adds the pointers
+// to the cache and policy instances.
+func (c *Cache) collectMetrics() {
+	c.Metrics = newMetrics()
+	c.policy.CollectMetrics(c.Metrics)
+}
+
+func newMetrics() *Metrics {
+	s := &Metrics{
+		life: z.NewHistogramData(z.HistogramBounds(1, 16)),
+	}
+	for i := 0; i < doNotUse; i++ {
+		s.all[i] = make([]*uint64, 256)
+		slice := s.all[i]
+		for j := range slice {
+			slice[j] = new(uint64)
+		}
+	}
+	return s
+}
+
+func (p *Metrics) add(t metricType, hash, delta uint64) {
+	if p == nil {
+		return
+	}
+	valp := p.all[t]
+	// Avoid false sharing by padding at least 64 bytes of space between two
+	// atomic counters which would be incremented.
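+	// (hash % 25) * 10 picks one of 25 slots spread across the 256-entry
+	// slice, keeping the chosen slots at least 80 bytes apart.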
+	idx := (hash % 25) * 10
+	atomic.AddUint64(valp[idx], delta)
+}
+
+func (p *Metrics) get(t metricType) uint64 {
+	if p == nil {
+		return 0
+	}
+	valp := p.all[t]
+	var total uint64
+	for i := range valp {
+		total += atomic.LoadUint64(valp[i])
+	}
+	return total
+}
+
+// Hits is the number of Get calls where a value was found for the corresponding key.
+func (p *Metrics) Hits() uint64 {
+	return p.get(hit)
+}
+
+// Misses is the number of Get calls where a value was not found for the corresponding key.
+func (p *Metrics) Misses() uint64 {
+	return p.get(miss)
+}
+
+// KeysAdded is the total number of Set calls where a new key-value item was added.
+func (p *Metrics) KeysAdded() uint64 {
+	return p.get(keyAdd)
+}
+
+// KeysUpdated is the total number of Set calls where the value was updated.
+func (p *Metrics) KeysUpdated() uint64 {
+	return p.get(keyUpdate)
+}
+
+// KeysEvicted is the total number of keys evicted.
+func (p *Metrics) KeysEvicted() uint64 {
+	return p.get(keyEvict)
+}
+
+// CostAdded is the sum of costs that have been added (successful Set calls).
+func (p *Metrics) CostAdded() uint64 {
+	return p.get(costAdd)
+}
+
+// CostEvicted is the sum of all costs that have been evicted.
+func (p *Metrics) CostEvicted() uint64 {
+	return p.get(costEvict)
+}
+
+// SetsDropped is the number of Set calls that don't make it into internal
+// buffers (due to contention or some other reason).
+func (p *Metrics) SetsDropped() uint64 {
+	return p.get(dropSets)
+}
+
+// SetsRejected is the number of Set calls rejected by the policy (TinyLFU).
+func (p *Metrics) SetsRejected() uint64 {
+	return p.get(rejectSets)
+}
+
+// GetsDropped is the number of Get counter increments that are dropped
+// internally.
+func (p *Metrics) GetsDropped() uint64 {
+	return p.get(dropGets)
+}
+
+// GetsKept is the number of Get counter increments that are kept.
+func (p *Metrics) GetsKept() uint64 {
+	return p.get(keepGets)
+}
+
+// Ratio is the number of Hits over all accesses (Hits + Misses). This is the
+// percentage of successful Get calls.
+func (p *Metrics) Ratio() float64 {
+	if p == nil {
+		return 0.0
+	}
+	hits, misses := p.get(hit), p.get(miss)
+	if hits == 0 && misses == 0 {
+		return 0.0
+	}
+	return float64(hits) / float64(hits+misses)
+}
+
+func (p *Metrics) trackEviction(numSeconds int64) {
+	if p == nil {
+		return
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.life.Update(numSeconds)
+}
+
+func (p *Metrics) LifeExpectancySeconds() *z.HistogramData {
+	if p == nil {
+		return nil
+	}
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return p.life.Copy()
+}
+
+// Clear resets all the metrics.
+func (p *Metrics) Clear() {
+	if p == nil {
+		return
+	}
+	for i := 0; i < doNotUse; i++ {
+		for j := range p.all[i] {
+			atomic.StoreUint64(p.all[i][j], 0)
+		}
+	}
+	p.mu.Lock()
+	p.life = z.NewHistogramData(z.HistogramBounds(1, 16))
+	p.mu.Unlock()
+}
+
+// String returns a string representation of the metrics.
+func (p *Metrics) String() string {
+	if p == nil {
+		return ""
+	}
+	var buf bytes.Buffer
+	for i := 0; i < doNotUse; i++ {
+		t := metricType(i)
+		fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t))
+	}
+	fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss))
+	fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio())
+	return buf.String()
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/policy.go b/vendor/github.com/outcaste-io/ristretto/policy.go
new file mode 100644
index 0000000000..58a65f9931
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/policy.go
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2020 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ristretto
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+
+	"github.com/outcaste-io/ristretto/z"
+)
+
+const (
+	// lfuSample is the number of items to sample when looking at eviction
+	// candidates. 5 seems to work well in practice [citation needed].
+	lfuSample = 5
+)
+
+// lfuPolicy encapsulates eviction/admission behavior.
+type lfuPolicy struct {
+	sync.Mutex
+	admit    *tinyLFU
+	costs    *keyCosts
+	itemsCh  chan []uint64
+	stop     chan struct{}
+	isClosed bool
+	metrics  *Metrics
+}
+
+func newPolicy(numCounters, maxCost int64) *lfuPolicy {
+	p := &lfuPolicy{
+		admit:   newTinyLFU(numCounters),
+		costs:   newSampledLFU(maxCost),
+		itemsCh: make(chan []uint64, 3),
+		stop:    make(chan struct{}),
+	}
+	go p.processItems()
+	return p
+}
+
+func (p *lfuPolicy) CollectMetrics(metrics *Metrics) {
+	p.metrics = metrics
+	p.costs.metrics = metrics
+}
+
+type policyPair struct {
+	key  uint64
+	cost int64
+}
+
+func (p *lfuPolicy) processItems() {
+	for {
+		select {
+		case items := <-p.itemsCh:
+			p.Lock()
+			p.admit.Push(items)
+			p.Unlock()
+		case <-p.stop:
+			return
+		}
+	}
+}
+
+func (p *lfuPolicy) Push(keys []uint64) bool {
+	if p.isClosed {
+		return false
+	}
+
+	if len(keys) == 0 {
+		return true
+	}
+
+	select {
+	case p.itemsCh <- keys:
+		p.metrics.add(keepGets, keys[0], uint64(len(keys)))
+		return true
+	default:
+		p.metrics.add(dropGets, keys[0], uint64(len(keys)))
+		return false
+	}
+}
+
+// Add decides whether the item with the given key and cost should be accepted by
+// the policy. It returns the list of victims that have been evicted and a boolean
+// indicating whether the incoming item should be accepted.
+func (p *lfuPolicy) Add(key uint64, cost int64) ([]*Item, bool) {
+	p.Lock()
+	defer p.Unlock()
+
+	// Cannot add an item bigger than entire cache.
+	if cost > p.costs.getMaxCost() {
+		return nil, false
+	}
+
+	// No need to go any further if the item is already in the cache.
+	if has := p.costs.updateIfHas(key, cost); has {
+		// An update does not count as an addition, so return false.
+		return nil, false
+	}
+
+	// If the execution reaches this point, the key doesn't exist in the cache.
+	// Calculate the remaining room in the cache (usually bytes).
+	room := p.costs.roomLeft(cost)
+	if room >= 0 {
+		// There's enough room in the cache to store the new item without
+		// overflowing. Do that now and stop here.
+		p.costs.add(key, cost)
+		p.metrics.add(costAdd, key, uint64(cost))
+		return nil, true
+	}
+
+	// incHits is the hit count for the incoming item.
+	incHits := p.admit.Estimate(key)
+	// sample is the eviction candidate pool to be filled via random sampling.
+	// TODO: perhaps we should use a min heap here. Right now our time
+	// complexity is N for finding the min. Min heap should bring it down to
+	// O(lg N).
+	sample := make([]*policyPair, 0, lfuSample)
+	// As items are evicted they will be appended to victims.
+	victims := make([]*Item, 0)
+
+	// Delete victims until there's enough space or a minKey is found that has
+	// more hits than incoming item.
+	for ; room < 0; room = p.costs.roomLeft(cost) {
+		// Fill up empty slots in sample.
+		sample = p.costs.fillSample(sample)
+
+		// Find minimally used item in sample.
+		minKey, minHits, minId, minCost := uint64(0), int64(math.MaxInt64), 0, int64(0)
+		for i, pair := range sample {
+			// Look up hit count for sample key.
+			if hits := p.admit.Estimate(pair.key); hits < minHits {
+				minKey, minHits, minId, minCost = pair.key, hits, i, pair.cost
+			}
+		}
+
+		// If the incoming item isn't worth keeping in the policy, reject.
+		if incHits < minHits {
+			p.metrics.add(rejectSets, key, 1)
+			return victims, false
+		}
+
+		// Delete the victim from metadata.
+		p.costs.del(minKey)
+
+		// Delete the victim from sample.
+		sample[minId] = sample[len(sample)-1]
+		sample = sample[:len(sample)-1]
+		// Store victim in evicted victims slice.
+		victims = append(victims, &Item{
+			Key:      minKey,
+			Conflict: 0,
+			Cost:     minCost,
+		})
+	}
+
+	p.costs.add(key, cost)
+	p.metrics.add(costAdd, key, uint64(cost))
+	return victims, true
+}
+
+func (p *lfuPolicy) Has(key uint64) bool {
+	p.Lock()
+	_, exists := p.costs.keyCosts[key]
+	p.Unlock()
+	return exists
+}
+
+func (p *lfuPolicy) Del(key uint64) {
+	p.Lock()
+	p.costs.del(key)
+	p.Unlock()
+}
+
+func (p *lfuPolicy) Cap() int64 {
+	p.Lock()
+	capacity := int64(p.costs.getMaxCost() - p.costs.used)
+	p.Unlock()
+	return capacity
+}
+
+func (p *lfuPolicy) Update(key uint64, cost int64) {
+	p.Lock()
+	p.costs.updateIfHas(key, cost)
+	p.Unlock()
+}
+
+func (p *lfuPolicy) Cost(key uint64) int64 {
+	p.Lock()
+	if cost, found := p.costs.keyCosts[key]; found {
+		p.Unlock()
+		return cost
+	}
+	p.Unlock()
+	return -1
+}
+
+func (p *lfuPolicy) Clear() {
+	p.Lock()
+	p.admit.clear()
+	p.costs.clear()
+	p.Unlock()
+}
+
+func (p *lfuPolicy) Close() {
+	if p.isClosed {
+		return
+	}
+
+	// Block until the p.processItems goroutine returns.
+	p.stop <- struct{}{}
+	close(p.stop)
+	close(p.itemsCh)
+	p.isClosed = true
+}
+
+func (p *lfuPolicy) MaxCost() int64 {
+	if p == nil || p.costs == nil {
+		return 0
+	}
+	return p.costs.getMaxCost()
+}
+
+func (p *lfuPolicy) UpdateMaxCost(maxCost int64) {
+	if p == nil || p.costs == nil {
+		return
+	}
+	p.costs.updateMaxCost(maxCost)
+}
+
+// keyCosts stores key-cost pairs.
+type keyCosts struct {
+	// NOTE: align maxCost to 64-bit boundary for use with atomic.
+	// As per https://golang.org/pkg/sync/atomic/: "On ARM, x86-32,
+	// and 32-bit MIPS, it is the caller’s responsibility to arrange
+	// for 64-bit alignment of 64-bit words accessed atomically.
+	// The first word in a variable or in an allocated struct, array,
+	// or slice can be relied upon to be 64-bit aligned."
+	maxCost  int64
+	used     int64
+	metrics  *Metrics
+	keyCosts map[uint64]int64
+}
+
+func newSampledLFU(maxCost int64) *keyCosts {
+	return &keyCosts{
+		keyCosts: make(map[uint64]int64),
+		maxCost:  maxCost,
+	}
+}
+
+func (p *keyCosts) getMaxCost() int64 {
+	return atomic.LoadInt64(&p.maxCost)
+}
+
+func (p *keyCosts) updateMaxCost(maxCost int64) {
+	atomic.StoreInt64(&p.maxCost, maxCost)
+}
+
+func (p *keyCosts) roomLeft(cost int64) int64 {
+	return p.getMaxCost() - (p.used + cost)
+}
+
+func (p *keyCosts) fillSample(in []*policyPair) []*policyPair {
+	if len(in) >= lfuSample {
+		return in
+	}
+	for key, cost := range p.keyCosts {
+		in = append(in, &policyPair{key, cost})
+		if len(in) >= lfuSample {
+			return in
+		}
+	}
+	return in
+}
+
+func (p *keyCosts) del(key uint64) {
+	cost, ok := p.keyCosts[key]
+	if !ok {
+		return
+	}
+	p.used -= cost
+	delete(p.keyCosts, key)
+	p.metrics.add(costEvict, key, uint64(cost))
+	p.metrics.add(keyEvict, key, 1)
+}
+
+func (p *keyCosts) add(key uint64, cost int64) {
+	p.keyCosts[key] = cost
+	p.used += cost
+}
+
+func (p *keyCosts) updateIfHas(key uint64, cost int64) bool {
+	if prev, found := p.keyCosts[key]; found {
+		// Update the cost of an existing key, but don't worry about evicting.
+		// Evictions will be handled the next time a new item is added.
+		p.metrics.add(keyUpdate, key, 1)
+		if prev > cost {
+			diff := prev - cost
+			p.metrics.add(costAdd, key, ^uint64(uint64(diff)-1))
+		} else if cost > prev {
+			diff := cost - prev
+			p.metrics.add(costAdd, key, uint64(diff))
+		}
+		p.used += cost - prev
+		p.keyCosts[key] = cost
+		return true
+	}
+	return false
+}
+
+func (p *keyCosts) clear() {
+	p.used = 0
+	p.keyCosts = make(map[uint64]int64)
+}
+
+// tinyLFU is an admission helper that keeps track of access frequency using
+// tiny (4-bit) counters in the form of a count-min sketch.
+// tinyLFU is NOT thread safe.
+type tinyLFU struct {
+	freq    *cmSketch
+	door    *z.Bloom
+	incrs   int64
+	resetAt int64
+}
+
+func newTinyLFU(numCounters int64) *tinyLFU {
+	return &tinyLFU{
+		freq:    newCmSketch(numCounters),
+		door:    z.NewBloomFilter(float64(numCounters), 0.01),
+		resetAt: numCounters,
+	}
+}
+
+func (p *tinyLFU) Push(keys []uint64) {
+	for _, key := range keys {
+		p.Increment(key)
+	}
+}
+
+func (p *tinyLFU) Estimate(key uint64) int64 {
+	hits := p.freq.Estimate(key)
+	if p.door.Has(key) {
+		hits++
+	}
+	return hits
+}
+
+func (p *tinyLFU) Increment(key uint64) {
+	// Flip doorkeeper bit if not already done.
+	if added := p.door.AddIfNotHas(key); !added {
+		// Increment count-min counter if doorkeeper bit is already set.
+		p.freq.Increment(key)
+	}
+	p.incrs++
+	if p.incrs >= p.resetAt {
+		p.reset()
+	}
+}
+
+func (p *tinyLFU) reset() {
+	// Zero out incrs.
+	p.incrs = 0
+	// clears doorkeeper bits
+	p.door.Clear()
+	// halves count-min counters
+	p.freq.Reset()
+}
+
+func (p *tinyLFU) clear() {
+	p.incrs = 0
+	p.door.Clear()
+	p.freq.Clear()
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/ring.go b/vendor/github.com/outcaste-io/ristretto/ring.go
new file mode 100644
index 0000000000..5dbed4cc59
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/ring.go
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ristretto
+
+import (
+	"sync"
+)
+
+// ringConsumer is the user-defined object responsible for receiving and
+// processing items in batches when buffers are drained.
+type ringConsumer interface {
+	Push([]uint64) bool
+}
+
+// ringStripe is a singular ring buffer that is not concurrent safe.
+type ringStripe struct {
+	cons ringConsumer
+	data []uint64
+	capa int
+}
+
+func newRingStripe(cons ringConsumer, capa int64) *ringStripe {
+	return &ringStripe{
+		cons: cons,
+		data: make([]uint64, 0, capa),
+		capa: int(capa),
+	}
+}
+
+// Push appends an item to the ring buffer and drains it (copies items and
+// sends them to the consumer) when full.
+func (s *ringStripe) Push(item uint64) {
+	s.data = append(s.data, item)
+	// Decide if the ring buffer should be drained.
+	if len(s.data) >= s.capa {
+		// Send elements to consumer and create a new ring stripe.
+		if s.cons.Push(s.data) {
+			s.data = make([]uint64, 0, s.capa)
+		} else {
+			s.data = s.data[:0]
+		}
+	}
+}
+
+// ringBuffer stores multiple buffers (stripes) and distributes Pushed items
+// between them to lower contention.
+//
+// This implements the "batching" process described in the BP-Wrapper paper
+// (section III part A).
+type ringBuffer struct {
+	pool *sync.Pool
+}
+
+// newRingBuffer returns a striped ring buffer. The ringConsumer will be called
+// when individual stripes are full and need to drain their elements.
+func newRingBuffer(cons ringConsumer, capa int64) *ringBuffer {
+	// LOSSY buffers use a very simple sync.Pool for concurrently reusing
+	// stripes. We do lose some stripes due to GC (unheld items in sync.Pool
+	// are cleared), but the performance gains generally outweigh the small
+	// percentage of elements lost. The performance primarily comes from
+	// low-level runtime functions used in the standard library that aren't
+	// available to us (such as runtime_procPin()).
+	return &ringBuffer{
+		pool: &sync.Pool{
+			New: func() interface{} { return newRingStripe(cons, capa) },
+		},
+	}
+}
+
+// Push adds an element to one of the internal stripes and possibly drains if
+// the stripe becomes full.
+func (b *ringBuffer) Push(item uint64) {
+	// Reuse or create a new stripe.
+	stripe := b.pool.Get().(*ringStripe)
+	stripe.Push(item)
+	b.pool.Put(stripe)
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/sketch.go b/vendor/github.com/outcaste-io/ristretto/sketch.go
new file mode 100644
index 0000000000..10f414689a
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/sketch.go
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This package includes multiple probabilistic data structures needed for
+// admission/eviction metadata. Most are Counting Bloom Filter variations, but
+// a caching-specific feature that is also required is a "freshness" mechanism,
+// which basically serves as a "lifetime" process. This freshness mechanism
+// was described in the original TinyLFU paper [1], but other mechanisms may
+// be better suited for certain data distributions.
+//
+// [1]: https://arxiv.org/abs/1512.00727
+package ristretto
+
+import (
+	"fmt"
+	"math/rand"
+	"time"
+)
+
+// cmSketch is a Count-Min sketch implementation with 4-bit counters, heavily
+// based on Damian Gryski's CM4 [1].
+//
+// [1]: https://github.com/dgryski/go-tinylfu/blob/master/cm4.go
+type cmSketch struct {
+	rows [cmDepth]cmRow
+	seed [cmDepth]uint64
+	mask uint64
+}
+
+const (
+	// cmDepth is the number of counter copies to store (think of it as rows).
+	cmDepth = 4
+)
+
+func newCmSketch(numCounters int64) *cmSketch {
+	if numCounters == 0 {
+		panic("cmSketch: bad numCounters")
+	}
+	// Get the next power of 2 for better cache performance.
+	numCounters = next2Power(numCounters)
+	sketch := &cmSketch{mask: uint64(numCounters - 1)}
+	// Initialize rows of counters and seeds.
+	source := rand.New(rand.NewSource(time.Now().UnixNano()))
+	for i := 0; i < cmDepth; i++ {
+		sketch.seed[i] = source.Uint64()
+		sketch.rows[i] = newCmRow(numCounters)
+	}
+	return sketch
+}
+
+// Increment increments the count(ers) for the specified key.
+func (s *cmSketch) Increment(hashed uint64) {
+	for i := range s.rows {
+		s.rows[i].increment((hashed ^ s.seed[i]) & s.mask)
+	}
+}
+
+// Estimate returns the value of the specified key.
+func (s *cmSketch) Estimate(hashed uint64) int64 {
+	min := byte(255)
+	for i := range s.rows {
+		val := s.rows[i].get((hashed ^ s.seed[i]) & s.mask)
+		if val < min {
+			min = val
+		}
+	}
+	return int64(min)
+}
+
+// Reset halves all counter values.
+func (s *cmSketch) Reset() {
+	for _, r := range s.rows {
+		r.reset()
+	}
+}
+
+// Clear zeroes all counters.
+func (s *cmSketch) Clear() {
+	for _, r := range s.rows {
+		r.clear()
+	}
+}
+
+// cmRow is a row of bytes, with each byte holding two counters.
+type cmRow []byte
+
+func newCmRow(numCounters int64) cmRow {
+	return make(cmRow, numCounters/2)
+}
+
+func (r cmRow) get(n uint64) byte {
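+	// Each byte packs two 4-bit counters: even n reads the low nibble, odd n the high nibble.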
+	return byte(r[n/2]>>((n&1)*4)) & 0x0f
+}
+
+func (r cmRow) increment(n uint64) {
+	// Index of the counter.
+	i := n / 2
+	// Shift distance (even 0, odd 4).
+	s := (n & 1) * 4
+	// Counter value.
+	v := (r[i] >> s) & 0x0f
+	// Only increment if not max value (overflow wrap is bad for LFU).
+	if v < 15 {
+		r[i] += 1 << s
+	}
+}
+
+func (r cmRow) reset() {
+	// Halve each counter.
+	for i := range r {
+		r[i] = (r[i] >> 1) & 0x77
+	}
+}
+
+func (r cmRow) clear() {
+	// Zero each counter.
+	for i := range r {
+		r[i] = 0
+	}
+}
+
+func (r cmRow) string() string {
+	s := ""
+	for i := uint64(0); i < uint64(len(r)*2); i++ {
+		s += fmt.Sprintf("%02d ", (r[(i/2)]>>((i&1)*4))&0x0f)
+	}
+	s = s[:len(s)-1]
+	return s
+}
+
+// next2Power rounds x up to the next power of 2, if it's not already one.
+func next2Power(x int64) int64 {
+	x--
+	x |= x >> 1
+	x |= x >> 2
+	x |= x >> 4
+	x |= x >> 8
+	x |= x >> 16
+	x |= x >> 32
+	x++
+	return x
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/store.go b/vendor/github.com/outcaste-io/ristretto/store.go
new file mode 100644
index 0000000000..5d5395c8d0
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/store.go
@@ -0,0 +1,225 @@
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ristretto
+
+import (
+	"sync"
+	"time"
+)
+
+// TODO: Do we need this to be a separate struct from Item?
+type storeItem struct {
+	key        uint64
+	conflict   uint64
+	value      interface{}
+	expiration time.Time
+}
+
+const numShards uint64 = 256
+
+type updateFn func(prev, cur interface{}) bool
+type shardedMap struct {
+	shards       []*lockedMap
+	expiryMap    *expirationMap
+	shouldUpdate func(prev, cur interface{}) bool
+}
+
+// newShardedMap is safe for concurrent usage.
+func newShardedMap(fn updateFn) *shardedMap {
+	sm := &shardedMap{
+		shards:    make([]*lockedMap, int(numShards)),
+		expiryMap: newExpirationMap(),
+	}
+	if fn == nil {
+		fn = func(prev, cur interface{}) bool {
+			return true
+		}
+	}
+	for i := range sm.shards {
+		sm.shards[i] = newLockedMap(fn, sm.expiryMap)
+	}
+	return sm
+}
+
+func (sm *shardedMap) Get(key, conflict uint64) (interface{}, bool) {
+	return sm.shards[key%numShards].get(key, conflict)
+}
+
+func (sm *shardedMap) Expiration(key uint64) time.Time {
+	return sm.shards[key%numShards].Expiration(key)
+}
+
+func (sm *shardedMap) Set(i *Item) {
+	if i == nil {
+		// If item is nil make this Set a no-op.
+		return
+	}
+
+	sm.shards[i.Key%numShards].Set(i)
+}
+
+func (sm *shardedMap) Del(key, conflict uint64) (uint64, interface{}) {
+	return sm.shards[key%numShards].Del(key, conflict)
+}
+
+func (sm *shardedMap) Update(newItem *Item) (interface{}, bool) {
+	return sm.shards[newItem.Key%numShards].Update(newItem)
+}
+
+func (sm *shardedMap) Cleanup(policy *lfuPolicy, onEvict itemCallback) {
+	sm.expiryMap.cleanup(sm, policy, onEvict)
+}
+
+func (sm *shardedMap) Clear(onEvict itemCallback) {
+	for i := uint64(0); i < numShards; i++ {
+		sm.shards[i].Clear(onEvict)
+	}
+}
+
+type lockedMap struct {
+	sync.RWMutex
+	data         map[uint64]storeItem
+	em           *expirationMap
+	shouldUpdate updateFn
+}
+
+func newLockedMap(fn updateFn, em *expirationMap) *lockedMap {
+	return &lockedMap{
+		data:         make(map[uint64]storeItem),
+		em:           em,
+		shouldUpdate: fn,
+	}
+}
+
+func (m *lockedMap) get(key, conflict uint64) (interface{}, bool) {
+	m.RLock()
+	item, ok := m.data[key]
+	m.RUnlock()
+	if !ok {
+		return nil, false
+	}
+	if conflict != 0 && (conflict != item.conflict) {
+		return nil, false
+	}
+
+	// Handle expired items.
+	if !item.expiration.IsZero() && time.Now().After(item.expiration) {
+		return nil, false
+	}
+	return item.value, true
+}
+
+func (m *lockedMap) Expiration(key uint64) time.Time {
+	m.RLock()
+	defer m.RUnlock()
+	return m.data[key].expiration
+}
+
+func (m *lockedMap) Set(i *Item) {
+	if i == nil {
+		// If the item is nil make this Set a no-op.
+		return
+	}
+
+	m.Lock()
+	defer m.Unlock()
+	item, ok := m.data[i.Key]
+
+	if ok {
+		// The item existed already. We need to check the conflict key and reject the
+		// update if they do not match. Only then is the expiration map updated.
+		if i.Conflict != 0 && (i.Conflict != item.conflict) {
+			return
+		}
+		if !m.shouldUpdate(item.value, i.Value) {
+			return
+		}
+		m.em.update(i.Key, i.Conflict, item.expiration, i.Expiration)
+	} else {
+		// The value is not yet in the map. There's no need to return anything.
+		// Simply add to the expiration map.
+		m.em.add(i.Key, i.Conflict, i.Expiration)
+	}
+
+	m.data[i.Key] = storeItem{
+		key:        i.Key,
+		conflict:   i.Conflict,
+		value:      i.Value,
+		expiration: i.Expiration,
+	}
+}
+
+func (m *lockedMap) Del(key, conflict uint64) (uint64, interface{}) {
+	m.Lock()
+	item, ok := m.data[key]
+	if !ok {
+		m.Unlock()
+		return 0, nil
+	}
+	if conflict != 0 && (conflict != item.conflict) {
+		m.Unlock()
+		return 0, nil
+	}
+
+	if !item.expiration.IsZero() {
+		m.em.del(key, item.expiration)
+	}
+
+	delete(m.data, key)
+	m.Unlock()
+	return item.conflict, item.value
+}
+
+func (m *lockedMap) Update(newItem *Item) (interface{}, bool) {
+	m.Lock()
+	defer m.Unlock()
+
+	item, ok := m.data[newItem.Key]
+	if !ok {
+		return nil, false
+	}
+	if newItem.Conflict != 0 && (newItem.Conflict != item.conflict) {
+		return nil, false
+	}
+	if !m.shouldUpdate(item.value, newItem.Value) {
+		return item.value, false
+	}
+
+	m.em.update(newItem.Key, newItem.Conflict, item.expiration, newItem.Expiration)
+	m.data[newItem.Key] = storeItem{
+		key:        newItem.Key,
+		conflict:   newItem.Conflict,
+		value:      newItem.Value,
+		expiration: newItem.Expiration,
+	}
+	return item.value, true
+}
+
+func (m *lockedMap) Clear(onEvict itemCallback) {
+	m.Lock()
+	i := &Item{}
+	if onEvict != nil {
+		for _, si := range m.data {
+			i.Key = si.key
+			i.Conflict = si.conflict
+			i.Value = si.value
+			onEvict(i)
+		}
+	}
+	m.data = make(map[uint64]storeItem)
+	m.Unlock()
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/test.sh b/vendor/github.com/outcaste-io/ristretto/test.sh
new file mode 100644
index 0000000000..99fdc99a38
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/test.sh
@@ -0,0 +1,20 @@
+#! /bin/sh
+
+starttest() {
+	set -e
+	GO111MODULE=on go test -race ./...
+}
+
+if [ -z "${TEAMCITY_VERSION}" ]; then
+	# Running locally, so start the test in a container.
+	# TEAMCITY_VERSION=local avoids recursive calls once the script is running inside the container.
+	docker run --rm --name ristretto-test -ti \
+  		-v `pwd`:/go/src/github.com/outcaste-io/ristretto \
+  		--workdir /go/src/github.com/outcaste-io/ristretto \
+		--env TEAMCITY_VERSION=local \
+  		golang:1.16 \
+  		sh test.sh
+else
+	# Running in TeamCity; since TeamCity itself runs this in a container, simply run the tests.
+	starttest
+fi
diff --git a/vendor/github.com/outcaste-io/ristretto/ttl.go b/vendor/github.com/outcaste-io/ristretto/ttl.go
new file mode 100644
index 0000000000..6e4bf38bfe
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/ttl.go
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2020 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ristretto
+
+import (
+	"sync"
+	"time"
+)
+
+var (
+	// TODO: find the optimal value or make it configurable.
+	bucketDurationSecs = int64(5)
+)
+
+func storageBucket(t time.Time) int64 {
+	return (t.Unix() / bucketDurationSecs) + 1
+}
+
+func cleanupBucket(t time.Time) int64 {
+	// The bucket to cleanup is always behind the storage bucket by one so that
+	// no elements in that bucket (which might not have expired yet) are deleted.
+	return storageBucket(t) - 1
+}
+
+// bucket type is a map of key to conflict.
+type bucket map[uint64]uint64
+
+// expirationMap is a map of bucket number to the corresponding bucket.
+type expirationMap struct {
+	sync.RWMutex
+	buckets map[int64]bucket
+}
+
+func newExpirationMap() *expirationMap {
+	return &expirationMap{
+		buckets: make(map[int64]bucket),
+	}
+}
+
+func (m *expirationMap) add(key, conflict uint64, expiration time.Time) {
+	if m == nil {
+		return
+	}
+
+	// Items that don't expire don't need to be in the expiration map.
+	if expiration.IsZero() {
+		return
+	}
+
+	bucketNum := storageBucket(expiration)
+	m.Lock()
+	defer m.Unlock()
+
+	b, ok := m.buckets[bucketNum]
+	if !ok {
+		b = make(bucket)
+		m.buckets[bucketNum] = b
+	}
+	b[key] = conflict
+}
+
+func (m *expirationMap) update(key, conflict uint64, oldExpTime, newExpTime time.Time) {
+	if m == nil {
+		return
+	}
+	if oldExpTime.IsZero() && newExpTime.IsZero() {
+		return
+	}
+
+	m.Lock()
+	defer m.Unlock()
+
+	oldBucketNum := storageBucket(oldExpTime)
+	newBucketNum := storageBucket(newExpTime)
+	if oldBucketNum == newBucketNum {
+		// No change.
+		return
+	}
+
+	oldBucket, ok := m.buckets[oldBucketNum]
+	if ok {
+		delete(oldBucket, key)
+	}
+
+	newBucket, ok := m.buckets[newBucketNum]
+	if !ok {
+		newBucket = make(bucket)
+		m.buckets[newBucketNum] = newBucket
+	}
+	newBucket[key] = conflict
+}
+
+func (m *expirationMap) del(key uint64, expiration time.Time) {
+	if m == nil {
+		return
+	}
+
+	bucketNum := storageBucket(expiration)
+	m.Lock()
+	defer m.Unlock()
+	_, ok := m.buckets[bucketNum]
+	if !ok {
+		return
+	}
+	delete(m.buckets[bucketNum], key)
+}
+
+// cleanup removes all the items in the bucket that was just completed. It deletes
+// those items from the store, and calls the onEvict function on those items.
+// This function is meant to be called periodically.
+func (m *expirationMap) cleanup(store *shardedMap, policy *lfuPolicy, onEvict itemCallback) {
+	if m == nil {
+		return
+	}
+
+	m.Lock()
+	now := time.Now()
+	bucketNum := cleanupBucket(now)
+	keys := m.buckets[bucketNum]
+	delete(m.buckets, bucketNum)
+	m.Unlock()
+
+	for key, conflict := range keys {
+		// Sanity check. Verify that the store agrees that this key is expired.
+		if store.Expiration(key).After(now) {
+			continue
+		}
+
+		cost := policy.Cost(key)
+		policy.Del(key)
+		_, value := store.Del(key, conflict)
+
+		if onEvict != nil {
+			onEvict(&Item{Key: key,
+				Conflict: conflict,
+				Value:    value,
+				Cost:     cost,
+			})
+		}
+	}
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/z/LICENSE b/vendor/github.com/outcaste-io/ristretto/z/LICENSE
new file mode 100644
index 0000000000..0860cbfe85
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/LICENSE
@@ -0,0 +1,64 @@
+bbloom.go
+
+// The MIT License (MIT)
+// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
+
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+rtutil.go
+
+// MIT License
+
+// Copyright (c) 2019 Ewan Chou
+
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+Modifications:
+
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
diff --git a/vendor/github.com/outcaste-io/ristretto/z/README.md b/vendor/github.com/outcaste-io/ristretto/z/README.md
new file mode 100644
index 0000000000..6d77e146eb
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/README.md
@@ -0,0 +1,129 @@
+## bbloom: a bitset Bloom filter for go/golang
+===
+
+This package implements a fast Bloom filter with a real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter.
+
+NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom
+
+===
+
+changelog 11/2015: new thread-safe methods AddTS(), HasTS(), AddIfNotHasTS(), following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache.
+
+This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html". 
+Nonetheless bbloom should work with any other form of entries. 
+
+~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm  http://www.cse.yorku.ca/~oz/hash.html~~
+
+Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein) to be about as fast. sipHash has been ported to Go by Dmitry Chestnykh (github.com/dchest/siphash).
+
+Minimum hashset size is: 512 ([4]uint64; will be set automatically). 
+
+### install
+
+```sh
+go get github.com/AndreasBriese/bbloom
+```
+
+### test
++ change to folder ../bbloom 
++ create wordlist in file "words.txt" (you might use `python permut.py`)
++ run 'go test -bench=.' within the folder
+
+```sh
+go test -bench=.
+```
+
+~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~
+
+The tests now use Go's testing framework (keep in mind that the op timing relates to 65536 operations of Add, Has, and AddIfNotHas respectively).
+
+### usage
+
+after installation add
+
+```go
+import (
+	...
+	"github.com/AndreasBriese/bbloom"
+	...
+	)
+```
+
+at your header. In the program use
+
+```go
+// create a bloom filter for 65536 items and a 1% false-positive ratio
+bf := bbloom.New(float64(1<<16), float64(0.01))
+
+// or 
+// create a bloom filter with a 650000-entry bitset (sized for 65536 items) and 7 locs per hash explicitly
+// bf = bbloom.New(float64(650000), float64(7))
+// or
+bf = bbloom.New(650000.0, 7.0)
+
+// add one item
+bf.Add([]byte("butter"))
+
+// Number of elements added is exposed now 
+// Note: ElemNum will not be included in the JSON export (for compatibility with older versions)
+nOfElementsInFilter := bf.ElemNum
+
+// check if item is in the filter
+isIn := bf.Has([]byte("butter"))    // should be true
+isNotIn := bf.Has([]byte("Butter")) // should be false
+
+// 'add only if item is new' to the bloomfilter
+added := bf.AddIfNotHas([]byte("butter"))    // should be false because 'butter' is already in the set
+added = bf.AddIfNotHas([]byte("buTTer"))    // should be true because 'buTTer' is new
+
+// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS
+// add one item
+bf.AddTS([]byte("peanutbutter"))
+// check if item is in the filter
+isIn = bf.HasTS([]byte("peanutbutter"))    // should be true
+isNotIn = bf.HasTS([]byte("peanutButter")) // should be false
+// 'add only if item is new' to the bloomfilter
+added = bf.AddIfNotHasTS([]byte("butter"))    // should be false because 'butter' is already in the set
+added = bf.AddIfNotHasTS([]byte("peanutbuTTer"))    // should be true because 'peanutbuTTer' is new
+
+// convert to JSON ([]byte) 
+Json := bf.JSONMarshal()
+
+// bloomfilters Mutex is exposed for external un-/locking
+// i.e. mutex lock while doing JSON conversion
+bf.Mtx.Lock()
+Json = bf.JSONMarshal()
+bf.Mtx.Unlock()
+
+// restore a bloom filter from storage 
+bfNew := bbloom.JSONUnmarshal(Json)
+
+isInNew := bfNew.Has([]byte("butter"))    // should be true
+isNotInNew := bfNew.Has([]byte("Butter")) // should be false
+
+```
+
+to work with the bloom filter.
+
+### why 'fast'? 
+
+It's about 3 times faster than William Fitzgerald's bitset bloom filter https://github.com/willf/bloom . And it is about as fast as my []bool set variant for Bloom filters (see https://github.com/AndreasBriese/bloom ), but it has an 8-times smaller memory footprint:
+
+	
+	Bloom filter (filter size 524288, 7 hashlocs)
+	github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op)
+    github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op)
+	github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op)
+	github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op)
+	
+	github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op)
+	github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op)
+	github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op)
+	github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op)
+	github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op)
+	github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op)
+
+(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz)
+
+
+With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one subtraction per byte. sdbm is about as fast as fnv64a but gives fewer collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions.
diff --git a/vendor/github.com/outcaste-io/ristretto/z/allocator.go b/vendor/github.com/outcaste-io/ristretto/z/allocator.go
new file mode 100644
index 0000000000..db00ff5eca
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/allocator.go
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2020 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"math/bits"
+	"math/rand"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+	"unsafe"
+
+	"github.com/dustin/go-humanize"
+)
+
+// Allocator amortizes the cost of small allocations by allocating memory in
+// bigger chunks.  Internally it uses z.Calloc to allocate memory. Once
+// allocated, the memory is not moved, so it is safe to unsafe-cast the
+// allocated bytes to Go struct pointers. Maintaining a freelist is slow.
+// Instead, Allocator only allocates memory, with the idea that finally we
+// would just release the entire Allocator.
+type Allocator struct {
+	sync.Mutex
+	compIdx uint64 // Stores bufIdx in 32 MSBs and posIdx in 32 LSBs.
+	buffers [][]byte
+	Ref     uint64
+	Tag     string
+}
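+
+// A minimal usage sketch (illustrative only, not upstream documentation): small
+// byte slices are carved out of one Allocator and everything is released in a
+// single call. The tag "example" is an arbitrary label used for jemalloc stats.
+//
+//	a := z.NewAllocator(1024, "example")
+//	buf := a.Allocate(64)          // 64 bytes carved from the current chunk
+//	copy(buf, []byte("hello"))
+//	fmt.Println(a.Allocated() > 0) // true
+//	a.Release()                    // frees all chunks at once; no per-object free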
+
+// allocs keeps references to all Allocators, so we can safely discard them later.
+var allocsMu *sync.Mutex
+var allocRef uint64
+var allocs map[uint64]*Allocator
+var calculatedLog2 []int
+
+func init() {
+	allocsMu = new(sync.Mutex)
+	allocs = make(map[uint64]*Allocator)
+
+	// Set up a unique Ref per process.
+	rand.Seed(time.Now().UnixNano())
+	allocRef = uint64(rand.Int63n(1<<16)) << 48
+
+	calculatedLog2 = make([]int, 1025)
+	for i := 1; i <= 1024; i++ {
+		calculatedLog2[i] = int(math.Log2(float64(i)))
+	}
+}
+
+// NewAllocator creates an allocator starting with the given size.
+func NewAllocator(sz int, tag string) *Allocator {
+	ref := atomic.AddUint64(&allocRef, 1)
+	// We should not allow a zero-sized page because addBufferAt
+	// will run into an infinite loop trying to double the pagesize.
+	if sz < 512 {
+		sz = 512
+	}
+	a := &Allocator{
+		Ref:     ref,
+		buffers: make([][]byte, 64),
+		Tag:     tag,
+	}
+	l2 := uint64(log2(sz))
+	if bits.OnesCount64(uint64(sz)) > 1 {
+		l2 += 1
+	}
+	a.buffers[0] = Calloc(1<<l2, a.Tag)
+
+	allocsMu.Lock()
+	allocs[ref] = a
+	allocsMu.Unlock()
+	return a
+}
+
+func (a *Allocator) Reset() {
+	atomic.StoreUint64(&a.compIdx, 0)
+}
+
+func Allocators() string {
+	allocsMu.Lock()
+	tags := make(map[string]uint64)
+	num := make(map[string]int)
+	for _, ac := range allocs {
+		tags[ac.Tag] += ac.Allocated()
+		num[ac.Tag] += 1
+	}
+
+	var buf bytes.Buffer
+	for tag, sz := range tags {
+		fmt.Fprintf(&buf, "Tag: %s Num: %d Size: %s . ", tag, num[tag], humanize.IBytes(sz))
+	}
+	allocsMu.Unlock()
+	return buf.String()
+}
+
+func (a *Allocator) String() string {
+	var s strings.Builder
+	s.WriteString(fmt.Sprintf("Allocator: %x\n", a.Ref))
+	var cum int
+	for i, b := range a.buffers {
+		cum += len(b)
+		if len(b) == 0 {
+			break
+		}
+		s.WriteString(fmt.Sprintf("idx: %d len: %d cum: %d\n", i, len(b), cum))
+	}
+	pos := atomic.LoadUint64(&a.compIdx)
+	bi, pi := parse(pos)
+	s.WriteString(fmt.Sprintf("bi: %d pi: %d\n", bi, pi))
+	s.WriteString(fmt.Sprintf("Size: %d\n", a.Size()))
+	return s.String()
+}
+
+// AllocatorFrom would return the allocator corresponding to the ref.
+func AllocatorFrom(ref uint64) *Allocator {
+	allocsMu.Lock()
+	a := allocs[ref]
+	allocsMu.Unlock()
+	return a
+}
+
+func parse(pos uint64) (bufIdx, posIdx int) {
+	return int(pos >> 32), int(pos & 0xFFFFFFFF)
+}
+
+// Size returns the size of the allocations so far.
+func (a *Allocator) Size() int {
+	pos := atomic.LoadUint64(&a.compIdx)
+	bi, pi := parse(pos)
+	var sz int
+	for i, b := range a.buffers {
+		if i < bi {
+			sz += len(b)
+			continue
+		}
+		sz += pi
+		return sz
+	}
+	panic("Size should not reach here")
+}
+
+func log2(sz int) int {
+	if sz < len(calculatedLog2) {
+		return calculatedLog2[sz]
+	}
+	pow := 10
+	sz >>= 10
+	for sz > 1 {
+		sz >>= 1
+		pow++
+	}
+	return pow
+}
+
+func (a *Allocator) Allocated() uint64 {
+	var alloc int
+	for _, b := range a.buffers {
+		alloc += cap(b)
+	}
+	return uint64(alloc)
+}
+
+func (a *Allocator) TrimTo(max int) {
+	var alloc int
+	for i, b := range a.buffers {
+		if len(b) == 0 {
+			break
+		}
+		alloc += len(b)
+		if alloc < max {
+			continue
+		}
+		Free(b)
+		a.buffers[i] = nil
+	}
+}
+
+// Release would release the memory back. Remember to make this call to avoid memory leaks.
+func (a *Allocator) Release() {
+	if a == nil {
+		return
+	}
+
+	var alloc int
+	for _, b := range a.buffers {
+		if len(b) == 0 {
+			break
+		}
+		alloc += len(b)
+		Free(b)
+	}
+
+	allocsMu.Lock()
+	delete(allocs, a.Ref)
+	allocsMu.Unlock()
+}
+
+const maxAlloc = 1 << 30
+
+func (a *Allocator) MaxAlloc() int {
+	return maxAlloc
+}
+
+const nodeAlign = unsafe.Sizeof(uint64(0)) - 1
+
+func (a *Allocator) AllocateAligned(sz int) []byte {
+	tsz := sz + int(nodeAlign)
+	out := a.Allocate(tsz)
+	// We are reusing allocators. In that case, it's important to zero out the memory allocated
+	// here. We don't always zero it out (in Allocate), because other functions would be immediately
+	// overwriting the allocated slices anyway (see Copy).
+	ZeroOut(out, 0, len(out))
+
+	addr := uintptr(unsafe.Pointer(&out[0]))
+	aligned := (addr + nodeAlign) & ^nodeAlign
+	start := int(aligned - addr)
+
+	return out[start : start+sz]
+}
+
+func (a *Allocator) Copy(buf []byte) []byte {
+	if a == nil {
+		return append([]byte{}, buf...)
+	}
+	out := a.Allocate(len(buf))
+	copy(out, buf)
+	return out
+}
+
+func (a *Allocator) addBufferAt(bufIdx, minSz int) {
+	for {
+		if bufIdx >= len(a.buffers) {
+			panic(fmt.Sprintf("Allocator can not allocate more than %d buffers", len(a.buffers)))
+		}
+		if len(a.buffers[bufIdx]) == 0 {
+			break
+		}
+		if minSz <= len(a.buffers[bufIdx]) {
+			// No need to do anything. We already have a buffer which can satisfy minSz.
+			return
+		}
+		bufIdx++
+	}
+	assert(bufIdx > 0)
+	// We need to allocate a new buffer.
+	// Make pageSize double of the last allocation.
+	pageSize := 2 * len(a.buffers[bufIdx-1])
+	// Ensure pageSize is bigger than sz.
+	for pageSize < minSz {
+		pageSize *= 2
+	}
+	// If bigger than maxAlloc, trim to maxAlloc.
+	if pageSize > maxAlloc {
+		pageSize = maxAlloc
+	}
+
+	buf := Calloc(pageSize, a.Tag)
+	assert(len(a.buffers[bufIdx]) == 0)
+	a.buffers[bufIdx] = buf
+}
+
+func (a *Allocator) Allocate(sz int) []byte {
+	if a == nil {
+		return make([]byte, sz)
+	}
+	if sz > maxAlloc {
+		panic(fmt.Sprintf("Unable to allocate more than %d\n", maxAlloc))
+	}
+	if sz == 0 {
+		return nil
+	}
+	for {
+		pos := atomic.AddUint64(&a.compIdx, uint64(sz))
+		bufIdx, posIdx := parse(pos)
+		buf := a.buffers[bufIdx]
+		if posIdx > len(buf) {
+			a.Lock()
+			newPos := atomic.LoadUint64(&a.compIdx)
+			newBufIdx, _ := parse(newPos)
+			if newBufIdx != bufIdx {
+				a.Unlock()
+				continue
+			}
+			a.addBufferAt(bufIdx+1, sz)
+			atomic.StoreUint64(&a.compIdx, uint64((bufIdx+1)<<32))
+			a.Unlock()
+			// We added a new buffer. Let's acquire the slice the right way by going back to the top.
+			continue
+		}
+		data := buf[posIdx-sz : posIdx]
+		return data
+	}
+}
+
+type AllocatorPool struct {
+	numGets int64
+	allocCh chan *Allocator
+	closer  *Closer
+}
+
+func NewAllocatorPool(sz int) *AllocatorPool {
+	a := &AllocatorPool{
+		allocCh: make(chan *Allocator, sz),
+		closer:  NewCloser(1),
+	}
+	go a.freeupAllocators()
+	return a
+}
+
+func (p *AllocatorPool) Get(sz int, tag string) *Allocator {
+	if p == nil {
+		return NewAllocator(sz, tag)
+	}
+	atomic.AddInt64(&p.numGets, 1)
+	select {
+	case alloc := <-p.allocCh:
+		alloc.Reset()
+		alloc.Tag = tag
+		return alloc
+	default:
+		return NewAllocator(sz, tag)
+	}
+}
+func (p *AllocatorPool) Return(a *Allocator) {
+	if a == nil {
+		return
+	}
+	if p == nil {
+		a.Release()
+		return
+	}
+	a.TrimTo(400 << 20)
+
+	select {
+	case p.allocCh <- a:
+		return
+	default:
+		a.Release()
+	}
+}
+
+func (p *AllocatorPool) Release() {
+	if p == nil {
+		return
+	}
+	p.closer.SignalAndWait()
+}
+
+func (p *AllocatorPool) freeupAllocators() {
+	defer p.closer.Done()
+
+	ticker := time.NewTicker(2 * time.Second)
+	defer ticker.Stop()
+
+	releaseOne := func() bool {
+		select {
+		case alloc := <-p.allocCh:
+			alloc.Release()
+			return true
+		default:
+			return false
+		}
+	}
+
+	var last int64
+	for {
+		select {
+		case <-p.closer.HasBeenClosed():
+			close(p.allocCh)
+			for alloc := range p.allocCh {
+				alloc.Release()
+			}
+			return
+
+		case <-ticker.C:
+			gets := atomic.LoadInt64(&p.numGets)
+			if gets != last {
+				// Some retrievals were made since the last time. So, let's avoid doing a release.
+				last = gets
+				continue
+			}
+			releaseOne()
+		}
+	}
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/z/bbloom.go b/vendor/github.com/outcaste-io/ristretto/z/bbloom.go
new file mode 100644
index 0000000000..4d657e4e1e
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/bbloom.go
@@ -0,0 +1,209 @@
+// The MIT License (MIT)
+// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
+
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package z
+
+import (
+	"bytes"
+	"encoding/json"
+	"math"
+	"unsafe"
+)
+
+// helper
+var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128}
+
+func getSize(ui64 uint64) (size uint64, exponent uint64) {
+	if ui64 < uint64(512) {
+		ui64 = uint64(512)
+	}
+	size = uint64(1)
+	for size < ui64 {
+		size <<= 1
+		exponent++
+	}
+	return size, exponent
+}
+
+func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) {
+	size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2)
+	locs := math.Ceil(float64(0.69314718056) * size / numEntries)
+	return uint64(size), uint64(locs)
+}
+
+// NewBloomFilter returns a new bloomfilter.
+func NewBloomFilter(params ...float64) (bloomfilter *Bloom) {
+	var entries, locs uint64
+	if len(params) == 2 {
+		if params[1] < 1 {
+			entries, locs = calcSizeByWrongPositives(params[0], params[1])
+		} else {
+			entries, locs = uint64(params[0]), uint64(params[1])
+		}
+	} else {
+		fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations))" +
+			" i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries)," +
+			" float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))")
+	}
+	size, exponent := getSize(entries)
+	bloomfilter = &Bloom{
+		sizeExp: exponent,
+		size:    size - 1,
+		setLocs: locs,
+		shift:   64 - exponent,
+	}
+	bloomfilter.Size(size)
+	return bloomfilter
+}
+
+// Bloom filter
+type Bloom struct {
+	bitset  []uint64
+	ElemNum uint64
+	sizeExp uint64
+	size    uint64
+	setLocs uint64
+	shift   uint64
+}
+
+// <--- http://www.cse.yorku.ca/~oz/hash.html
+// modified Berkeley DB Hash (32bit)
+// hash is casted to l, h = 16bit fragments
+// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) {
+// 	hash := uint64(len(*b))
+// 	for _, c := range *b {
+// 		hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash
+// 	}
+// 	h = hash >> bl.shift
+// 	l = hash << bl.shift >> bl.shift
+// 	return l, h
+// }
+
+// Add adds hash of a key to the bloomfilter.
+func (bl *Bloom) Add(hash uint64) {
+	h := hash >> bl.shift
+	l := hash << bl.shift >> bl.shift
+	for i := uint64(0); i < bl.setLocs; i++ {
+		bl.Set((h + i*l) & bl.size)
+		bl.ElemNum++
+	}
+}
+
+// Has checks if bit(s) for entry hash is/are set,
+// returns true if the hash was added to the Bloom Filter.
+func (bl Bloom) Has(hash uint64) bool {
+	h := hash >> bl.shift
+	l := hash << bl.shift >> bl.shift
+	for i := uint64(0); i < bl.setLocs; i++ {
+		if !bl.IsSet((h + i*l) & bl.size) {
+			return false
+		}
+	}
+	return true
+}
+
+// AddIfNotHas only adds the hash if it's not already present in the bloomfilter.
+// Returns true if hash was added.
+// Returns false if hash was already registered in the bloomfilter.
+func (bl *Bloom) AddIfNotHas(hash uint64) bool {
+	if bl.Has(hash) {
+		return false
+	}
+	bl.Add(hash)
+	return true
+}
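+
+// Illustrative sketch (an assumed usage pattern, not upstream documentation):
+// this variant operates on caller-supplied 64-bit hashes rather than raw keys,
+// for example hashes produced by z.MemHash.
+//
+//	bf := z.NewBloomFilter(float64(1<<16), 0.01)
+//	h := z.MemHash([]byte("butter"))
+//	if bf.AddIfNotHas(h) {
+//		// first time this hash was seen
+//	}
+//	_ = bf.Has(h) // true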
+
+// TotalSize returns the total size of the bloom filter.
+func (bl *Bloom) TotalSize() int {
+	// Besides the bitset, the bl struct has 5 uint64 members of 8 bytes each.
+	// The bitset itself is a []uint64 slice.
+	return len(bl.bitset)*8 + 5*8
+}
+
+// Size allocates the Bloom filter's bitset to hold sz bits.
+func (bl *Bloom) Size(sz uint64) {
+	bl.bitset = make([]uint64, sz>>6)
+}
+
+// Clear resets the Bloom filter.
+func (bl *Bloom) Clear() {
+	for i := range bl.bitset {
+		bl.bitset[i] = 0
+	}
+}
+
+// Set sets the bit[idx] of bitset.
+func (bl *Bloom) Set(idx uint64) {
+	ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
+	*(*uint8)(ptr) |= mask[idx%8]
+}
+
+// IsSet checks if bit[idx] of bitset is set, returns true/false.
+func (bl *Bloom) IsSet(idx uint64) bool {
+	ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
+	r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1
+	return r == 1
+}
+
+// bloomJSONImExport
+// Im/Export structure used by JSONMarshal / JSONUnmarshal
+type bloomJSONImExport struct {
+	FilterSet []byte
+	SetLocs   uint64
+}
+
+// newWithBoolset takes a []byte slice and the number of locs per entry, and
+// returns a bloomfilter with a bitset populated according to the input []byte.
+func newWithBoolset(bs *[]byte, locs uint64) *Bloom {
+	bloomfilter := NewBloomFilter(float64(len(*bs)<<3), float64(locs))
+	for i, b := range *bs {
+		*(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bloomfilter.bitset[0])) + uintptr(i))) = b
+	}
+	return bloomfilter
+}
+
+// JSONUnmarshal takes a JSON object (type bloomJSONImExport) as []byte and
+// returns the restored Bloom filter.
+func JSONUnmarshal(dbData []byte) (*Bloom, error) {
+	bloomImEx := bloomJSONImExport{}
+	if err := json.Unmarshal(dbData, &bloomImEx); err != nil {
+		return nil, err
+	}
+	buf := bytes.NewBuffer(bloomImEx.FilterSet)
+	bs := buf.Bytes()
+	bf := newWithBoolset(&bs, bloomImEx.SetLocs)
+	return bf, nil
+}
+
+// JSONMarshal returns JSON-object (type bloomJSONImExport) as []byte.
+func (bl Bloom) JSONMarshal() []byte {
+	bloomImEx := bloomJSONImExport{}
+	bloomImEx.SetLocs = bl.setLocs
+	bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3)
+	for i := range bloomImEx.FilterSet {
+		bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[0])) +
+			uintptr(i)))
+	}
+	data, err := json.Marshal(bloomImEx)
+	if err != nil {
+		fatal("json.Marshal failed: ", err)
+	}
+	return data
+}
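+
+// Illustrative round trip (a sketch under the assumption that the exported
+// bytes are persisted unmodified): JSONMarshal output can later be passed to
+// JSONUnmarshal to rebuild an equivalent filter.
+//
+//	data := bf.JSONMarshal()
+//	restored, err := z.JSONUnmarshal(data)
+//	if err == nil {
+//		// restored answers Has queries exactly like bf did.
+//	}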
diff --git a/vendor/github.com/outcaste-io/ristretto/z/btree.go b/vendor/github.com/outcaste-io/ristretto/z/btree.go
new file mode 100644
index 0000000000..0b28ae5b8d
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/btree.go
@@ -0,0 +1,710 @@
+/*
+ * Copyright 2020 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"fmt"
+	"math"
+	"os"
+	"reflect"
+	"strings"
+	"unsafe"
+
+	"github.com/outcaste-io/ristretto/z/simd"
+)
+
+var (
+	pageSize = os.Getpagesize()
+	maxKeys  = (pageSize / 16) - 1
+	oneThird = int(float64(maxKeys) / 3)
+)
+
+const (
+	absoluteMax = uint64(math.MaxUint64 - 1)
+	minSize     = 1 << 20
+)
+
+// Tree represents the structure for a custom mmapped B+ tree.
+// It supports keys in range [1, math.MaxUint64-1] and values in range [1, math.MaxUint64].
+type Tree struct {
+	buffer   *Buffer
+	data     []byte
+	nextPage uint64
+	freePage uint64
+	stats    TreeStats
+}
+
+func (t *Tree) initRootNode() {
+	// This is the root node.
+	t.newNode(0)
+	// This acts as the rightmost pointer (all the keys are <= this key).
+	t.Set(absoluteMax, 0)
+}
+
+// NewTree returns an in-memory B+ tree.
+func NewTree(tag string) *Tree {
+	const defaultTag = "tree"
+	if tag == "" {
+		tag = defaultTag
+	}
+	t := &Tree{buffer: NewBuffer(minSize, tag)}
+	t.Reset()
+	return t
+}
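+
+// A small illustrative sketch (assumes in-memory use; per the Tree docs, keys
+// must be in [1, math.MaxUint64-1] and values must be non-zero):
+//
+//	t := z.NewTree("example")
+//	defer t.Close()
+//	t.Set(10, 100)
+//	fmt.Println(t.Get(10)) // 100
+//	fmt.Println(t.Get(11)) // 0 (not found)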
+
+// NewTreePersistent returns a persistent on-disk B+ tree.
+func NewTreePersistent(path string) (*Tree, error) {
+	t := &Tree{}
+	var err error
+
+	// Open the buffer from disk and set it to the maximum allocated size.
+	t.buffer, err = NewBufferPersistent(path, minSize)
+	if err != nil {
+		return nil, err
+	}
+	t.buffer.offset = uint64(len(t.buffer.buf))
+	t.data = t.buffer.Bytes()
+
+	// pageID can never be 0 if the tree has been initialized.
+	root := t.node(1)
+	isInitialized := root.pageID() != 0
+
+	if !isInitialized {
+		t.nextPage = 1
+		t.freePage = 0
+		t.initRootNode()
+	} else {
+		t.reinit()
+	}
+
+	return t, nil
+}
+
+// reinit sets the internal variables of a Tree, which are normally stored
+// in-memory, but are lost when loading from disk.
+func (t *Tree) reinit() {
+	// Calculate t.nextPage by finding the first node whose pageID is not set.
+	t.nextPage = 1
+	for int(t.nextPage)*pageSize < len(t.data) {
+		n := t.node(t.nextPage)
+		if n.pageID() == 0 {
+			break
+		}
+		t.nextPage++
+	}
+	maxPageId := t.nextPage - 1
+
+	// Calculate t.freePage by finding the page to which no other page points.
+	// This would be the head of the page linked list.
+	// tailPages[i] is true if pageId i+1 is not the head of the list.
+	tailPages := make([]bool, maxPageId)
+	// Mark all pages containing nodes as tail pages.
+	t.Iterate(func(n node) {
+		i := n.pageID() - 1
+		tailPages[i] = true
+		// If this is a leaf node, increment the stats.
+		if n.isLeaf() {
+			t.stats.NumLeafKeys += n.numKeys()
+		}
+	})
+	// pointedPages is a list of page IDs that the tail pages point to.
+	pointedPages := make([]uint64, 0)
+	for i, isTail := range tailPages {
+		if !isTail {
+			pageId := uint64(i) + 1
+			// Skip if nextPageId = 0, as that is equivalent to null page.
+			if nextPageId := t.node(pageId).uint64(0); nextPageId != 0 {
+				pointedPages = append(pointedPages, nextPageId)
+			}
+			t.stats.NumPagesFree++
+		}
+	}
+
+	// Mark all pages being pointed to as tail pages.
+	for _, pageId := range pointedPages {
+		i := pageId - 1
+		tailPages[i] = true
+	}
+	// There should only be one head page left.
+	for i, isTail := range tailPages {
+		if !isTail {
+			pageId := uint64(i) + 1
+			t.freePage = pageId
+			break
+		}
+	}
+}
+
+// Reset resets the tree and truncates the underlying buffer back to minSize.
+func (t *Tree) Reset() {
+	// Tree relies on uninitialized data being zeroed out, so we need to Memclr
+	// the data before using it again.
+	Memclr(t.buffer.buf)
+	t.buffer.Reset()
+	t.buffer.AllocateOffset(minSize)
+	t.data = t.buffer.Bytes()
+	t.stats = TreeStats{}
+	t.nextPage = 1
+	t.freePage = 0
+	t.initRootNode()
+}
+
+// Close releases the memory used by the tree.
+func (t *Tree) Close() error {
+	if t == nil {
+		return nil
+	}
+	return t.buffer.Release()
+}
+
+type TreeStats struct {
+	Allocated    int     // Derived.
+	Bytes        int     // Derived.
+	NumLeafKeys  int     // Calculated.
+	NumPages     int     // Derived.
+	NumPagesFree int     // Calculated.
+	Occupancy    float64 // Derived.
+	PageSize     int     // Derived.
+}
+
+// Stats returns stats about the tree.
+func (t *Tree) Stats() TreeStats {
+	numPages := int(t.nextPage - 1)
+	out := TreeStats{
+		Bytes:        numPages * pageSize,
+		Allocated:    len(t.data),
+		NumLeafKeys:  t.stats.NumLeafKeys,
+		NumPages:     numPages,
+		NumPagesFree: t.stats.NumPagesFree,
+		PageSize:     pageSize,
+	}
+	out.Occupancy = 100.0 * float64(out.NumLeafKeys) / float64(maxKeys*numPages)
+	return out
+}
+
+// BytesToUint64Slice converts a byte slice to a uint64 slice.
+func BytesToUint64Slice(b []byte) []uint64 {
+	if len(b) == 0 {
+		return nil
+	}
+	var u64s []uint64
+	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&u64s))
+	hdr.Len = len(b) / 8
+	hdr.Cap = hdr.Len
+	hdr.Data = uintptr(unsafe.Pointer(&b[0]))
+	return u64s
+}
+
+func (t *Tree) newNode(bit uint64) node {
+	var pageId uint64
+	if t.freePage > 0 {
+		pageId = t.freePage
+		t.stats.NumPagesFree--
+	} else {
+		pageId = t.nextPage
+		t.nextPage++
+		offset := int(pageId) * pageSize
+		reqSize := offset + pageSize
+		if reqSize > len(t.data) {
+			t.buffer.AllocateOffset(reqSize - len(t.data))
+			t.data = t.buffer.Bytes()
+		}
+	}
+	n := t.node(pageId)
+	if t.freePage > 0 {
+		t.freePage = n.uint64(0)
+	}
+	zeroOut(n)
+	n.setBit(bit)
+	n.setAt(keyOffset(maxKeys), pageId)
+	return n
+}
+
+func getNode(data []byte) node {
+	return node(BytesToUint64Slice(data))
+}
+
+func zeroOut(data []uint64) {
+	for i := 0; i < len(data); i++ {
+		data[i] = 0
+	}
+}
+
+func (t *Tree) node(pid uint64) node {
+	// page does not exist
+	if pid == 0 {
+		return nil
+	}
+	start := pageSize * int(pid)
+	return getNode(t.data[start : start+pageSize])
+}
+
+// Set sets the key-value pair in the tree.
+func (t *Tree) Set(k, v uint64) {
+	if k == math.MaxUint64 || k == 0 {
+		panic("Error setting zero or MaxUint64")
+	}
+	root := t.set(1, k, v)
+	if root.isFull() {
+		right := t.split(1)
+		left := t.newNode(root.bits())
+		// Re-read the root as the underlying buffer for tree might have changed during split.
+		root = t.node(1)
+		copy(left[:keyOffset(maxKeys)], root)
+		left.setNumKeys(root.numKeys())
+
+		// reset the root node.
+		zeroOut(root[:keyOffset(maxKeys)])
+		root.setNumKeys(0)
+
+		// set the pointers for left and right child in the root node.
+		root.set(left.maxKey(), left.pageID())
+		root.set(right.maxKey(), right.pageID())
+	}
+}
+
+// Internal nodes contain <key, ptr> pairs,
+// where all entries <= key are stored under the corresponding ptr.
+func (t *Tree) set(pid, k, v uint64) node {
+	n := t.node(pid)
+	if n.isLeaf() {
+		t.stats.NumLeafKeys += n.set(k, v)
+		return n
+	}
+
+	// This is an internal node.
+	idx := n.search(k)
+	if idx >= maxKeys {
+		panic("search returned index >= maxKeys")
+	}
+	// If no key at idx.
+	if n.key(idx) == 0 {
+		n.setAt(keyOffset(idx), k)
+		n.setNumKeys(n.numKeys() + 1)
+	}
+	child := t.node(n.val(idx))
+	if child == nil {
+		child = t.newNode(bitLeaf)
+		n = t.node(pid)
+		n.setAt(valOffset(idx), child.pageID())
+	}
+	child = t.set(child.pageID(), k, v)
+	// Re-read n as the underlying buffer for tree might have changed during set.
+	n = t.node(pid)
+	if child.isFull() {
+		// Just consider the left sibling for simplicity.
+		// if t.shareWithSibling(n, idx) {
+		// 	return n
+		// }
+
+		nn := t.split(child.pageID())
+		// Re-read n and child as the underlying buffer for tree might have changed during split.
+		n = t.node(pid)
+		child = t.node(n.uint64(valOffset(idx)))
+		// Set child pointers in the node n.
+		// Note that key for right node (nn) already exist in node n, but the
+		// pointer is updated.
+		n.set(child.maxKey(), child.pageID())
+		n.set(nn.maxKey(), nn.pageID())
+	}
+	return n
+}
+
+// Get looks for key and returns the corresponding value.
+// If key is not found, 0 is returned.
+func (t *Tree) Get(k uint64) uint64 {
+	if k == math.MaxUint64 || k == 0 {
+		panic("Does not support getting MaxUint64/Zero")
+	}
+	root := t.node(1)
+	return t.get(root, k)
+}
+
+func (t *Tree) get(n node, k uint64) uint64 {
+	if n.isLeaf() {
+		return n.get(k)
+	}
+	// This is internal node
+	idx := n.search(k)
+	if idx == n.numKeys() || n.key(idx) == 0 {
+		return 0
+	}
+	child := t.node(n.uint64(valOffset(idx)))
+	assert(child != nil)
+	return t.get(child, k)
+}
+
+// DeleteBelow deletes all keys with value under ts.
+func (t *Tree) DeleteBelow(ts uint64) {
+	root := t.node(1)
+	t.stats.NumLeafKeys = 0
+	t.compact(root, ts)
+	assert(root.numKeys() >= 1)
+}
+
+func (t *Tree) compact(n node, ts uint64) int {
+	if n.isLeaf() {
+		numKeys := n.compact(ts)
+		t.stats.NumLeafKeys += n.numKeys()
+		return numKeys
+	}
+	// Not leaf.
+	N := n.numKeys()
+	for i := 0; i < N; i++ {
+		assert(n.key(i) > 0)
+		childID := n.uint64(valOffset(i))
+		child := t.node(childID)
+		if rem := t.compact(child, ts); rem == 0 && i < N-1 {
+			// If no valid key is remaining we can drop this child. However, don't do that if this
+			// is the max key.
+			t.stats.NumLeafKeys -= child.numKeys()
+			child.setAt(0, t.freePage)
+			t.freePage = childID
+			n.setAt(valOffset(i), 0)
+			t.stats.NumPagesFree++
+		}
+	}
+	// We use ts=1 here because we want to delete all the keys whose value is 0, which means they no
+	// longer have a valid page for that key.
+	return n.compact(1)
+}
+
+func (t *Tree) iterate(n node, fn func(node)) {
+	fn(n)
+	if n.isLeaf() {
+		return
+	}
+	// Explore children.
+	for i := 0; i < maxKeys; i++ {
+		if n.key(i) == 0 {
+			return
+		}
+		childID := n.uint64(valOffset(i))
+		assert(childID > 0)
+
+		child := t.node(childID)
+		t.iterate(child, fn)
+	}
+}
+
+// Iterate iterates over the tree and executes the fn on each node.
+func (t *Tree) Iterate(fn func(node)) {
+	root := t.node(1)
+	t.iterate(root, fn)
+}
+
+// IterateKV iterates through all keys and values in the tree.
+// If newVal is non-zero, it will be set in the tree.
+func (t *Tree) IterateKV(f func(key, val uint64) (newVal uint64)) {
+	t.Iterate(func(n node) {
+		// Only leaf nodes contain keys.
+		if !n.isLeaf() {
+			return
+		}
+
+		for i := 0; i < n.numKeys(); i++ {
+			key := n.key(i)
+			val := n.val(i)
+
+			// A zero value here means that this is a bogus entry.
+			if val == 0 {
+				continue
+			}
+
+			newVal := f(key, val)
+			if newVal != 0 {
+				n.setAt(valOffset(i), newVal)
+			}
+		}
+	})
+}
+
+func (t *Tree) print(n node, parentID uint64) {
+	n.print(parentID)
+	if n.isLeaf() {
+		return
+	}
+	pid := n.pageID()
+	for i := 0; i < maxKeys; i++ {
+		if n.key(i) == 0 {
+			return
+		}
+		childID := n.uint64(valOffset(i))
+		child := t.node(childID)
+		t.print(child, pid)
+	}
+}
+
+// Print iterates over the tree and prints all valid KVs.
+func (t *Tree) Print() {
+	root := t.node(1)
+	t.print(root, 0)
+}
+
+// Splits the node into two. It moves right half of the keys from the original node to a newly
+// created right node. It returns the right node.
+func (t *Tree) split(pid uint64) node {
+	n := t.node(pid)
+	if !n.isFull() {
+		panic("This should be called only when n is full")
+	}
+
+	// Create a new node nn, copy over half the keys from n, and set the parent to n's parent.
+	nn := t.newNode(n.bits())
+	// Re-read n as the underlying buffer for tree might have changed during newNode.
+	n = t.node(pid)
+	rightHalf := n[keyOffset(maxKeys/2):keyOffset(maxKeys)]
+	copy(nn, rightHalf)
+	nn.setNumKeys(maxKeys - maxKeys/2)
+
+	// Remove entries from node n.
+	zeroOut(rightHalf)
+	n.setNumKeys(maxKeys / 2)
+	return nn
+}
+
+// shareWithSiblingXXX is unused for now. The idea is to move some keys to
+// sibling when a node is full. But, I don't see any special benefits in our
+// access pattern. It doesn't result in better occupancy ratios.
+func (t *Tree) shareWithSiblingXXX(n node, idx int) bool {
+	if idx == 0 {
+		return false
+	}
+	left := t.node(n.val(idx - 1))
+	ns := left.numKeys()
+	if ns >= maxKeys/2 {
+		// Sibling is already getting full.
+		return false
+	}
+
+	right := t.node(n.val(idx))
+	// Copy over keys from right child to left child.
+	copied := copy(left[keyOffset(ns):], right[:keyOffset(oneThird)])
+	copied /= 2 // Considering that key-val constitute one key.
+	left.setNumKeys(ns + copied)
+
+	// Update the max key in parent node n for the left sibling.
+	n.setAt(keyOffset(idx-1), left.maxKey())
+
+	// Now move keys to left for the right sibling.
+	until := copy(right, right[keyOffset(oneThird):keyOffset(maxKeys)])
+	right.setNumKeys(until / 2)
+	zeroOut(right[until:keyOffset(maxKeys)])
+	return true
+}
+
+// Each node in the tree is of size pageSize. There are two kinds of nodes: leaf nodes and internal nodes.
+// Leaf nodes only contain the data. Internal nodes would contain the key and the offset to the
+// child node.
+// Internal node would have first entry as
+// <0 offset to child>, <1000 offset>, <5000 offset>, and so on...
+// Leaf nodes would just have: <key, value>, <key, value>, and so on...
+// Last 16 bytes of the node are off limits.
+// | pageID (8 bytes) | metaBits (1 byte) | 3 free bytes | numKeys (4 bytes) |
+type node []uint64
+
+func (n node) uint64(start int) uint64 { return n[start] }
+
+// func (n node) uint32(start int) uint32 { return *(*uint32)(unsafe.Pointer(&n[start])) }
+
+func keyOffset(i int) int          { return 2 * i }
+func valOffset(i int) int          { return 2*i + 1 }
+func (n node) numKeys() int        { return int(n.uint64(valOffset(maxKeys)) & 0xFFFFFFFF) }
+func (n node) pageID() uint64      { return n.uint64(keyOffset(maxKeys)) }
+func (n node) key(i int) uint64    { return n.uint64(keyOffset(i)) }
+func (n node) val(i int) uint64    { return n.uint64(valOffset(i)) }
+func (n node) data(i int) []uint64 { return n[keyOffset(i):keyOffset(i+1)] }
+
+func (n node) setAt(start int, k uint64) {
+	n[start] = k
+}
+
+func (n node) setNumKeys(num int) {
+	idx := valOffset(maxKeys)
+	val := n[idx]
+	val &= 0xFFFFFFFF00000000
+	val |= uint64(num)
+	n[idx] = val
+}
+
+func (n node) moveRight(lo int) {
+	hi := n.numKeys()
+	assert(hi != maxKeys)
+	// copy works despite overlap in src and dst.
+	// See https://golang.org/pkg/builtin/#copy
+	copy(n[keyOffset(lo+1):keyOffset(hi+1)], n[keyOffset(lo):keyOffset(hi)])
+}
+
+const (
+	bitLeaf = uint64(1 << 63)
+)
+
+func (n node) setBit(b uint64) {
+	vo := valOffset(maxKeys)
+	val := n[vo]
+	val &= 0xFFFFFFFF
+	val |= b
+	n[vo] = val
+}
+func (n node) bits() uint64 {
+	return n.val(maxKeys) & 0xFF00000000000000
+}
+func (n node) isLeaf() bool {
+	return n.bits()&bitLeaf > 0
+}
+
+// isFull checks that the node is already full.
+func (n node) isFull() bool {
+	return n.numKeys() == maxKeys
+}
+
+// search returns the index of the smallest key >= k in the node.
+func (n node) search(k uint64) int {
+	N := n.numKeys()
+	if N < 4 {
+		for i := 0; i < N; i++ {
+			if ki := n.key(i); ki >= k {
+				return i
+			}
+		}
+		return N
+	}
+	return int(simd.Search(n[:2*N], k))
+	// lo, hi := 0, N
+	// // Reduce the search space using binary seach and then do linear search.
+	// for hi-lo > 32 {
+	// 	mid := (hi + lo) / 2
+	// 	km := n.key(mid)
+	// 	if k == km {
+	// 		return mid
+	// 	}
+	// 	if k > km {
+	// 		// key is greater than the key at mid, so move right.
+	// 		lo = mid + 1
+	// 	} else {
+	// 		// else move left.
+	// 		hi = mid
+	// 	}
+	// }
+	// for i := lo; i <= hi; i++ {
+	// 	if ki := n.key(i); ki >= k {
+	// 		return i
+	// 	}
+	// }
+	// return N
+}
+func (n node) maxKey() uint64 {
+	idx := n.numKeys()
+	// idx points to the first key which is zero.
+	if idx > 0 {
+		idx--
+	}
+	return n.key(idx)
+}
+
+// compact compacts the node, i.e., removes all the kvs with value < lo. It returns the remaining
+// number of keys.
+func (n node) compact(lo uint64) int {
+	N := n.numKeys()
+	mk := n.maxKey()
+	var left, right int
+	for right = 0; right < N; right++ {
+		if n.val(right) < lo && n.key(right) < mk {
+			// Skip over this key. Don't copy it.
+			continue
+		}
+		// Valid data. Copy it from right to left. Advance left.
+		if left != right {
+			copy(n.data(left), n.data(right))
+		}
+		left++
+	}
+	// zero out rest of the kv pairs.
+	zeroOut(n[keyOffset(left):keyOffset(right)])
+	n.setNumKeys(left)
+
+	// If the only key we have is the max key, and its value is less than lo, then we can indicate
+	// to the caller by returning a zero that it's OK to drop the node.
+	if left == 1 && n.key(0) == mk && n.val(0) < lo {
+		return 0
+	}
+	return left
+}
+
+func (n node) get(k uint64) uint64 {
+	idx := n.search(k)
+	// key is not found
+	if idx == n.numKeys() {
+		return 0
+	}
+	if ki := n.key(idx); ki == k {
+		return n.val(idx)
+	}
+	return 0
+}
+
+// set returns true if it added a new key.
+func (n node) set(k, v uint64) (numAdded int) {
+	idx := n.search(k)
+	ki := n.key(idx)
+	if n.numKeys() == maxKeys {
+		// This happens during split of non-root node, when we are updating the child pointer of
+		// right node. Hence, the key should already exist.
+		assert(ki == k)
+	}
+	if ki > k {
+		// Found the first entry which is greater than k. So, we need to fit k
+		// just before it. For that, we should move the rest of the data in the
+		// node to the right to make space for k.
+		n.moveRight(idx)
+	}
+	// If the k does not exist already, increment the number of keys.
+	if ki != k {
+		n.setNumKeys(n.numKeys() + 1)
+		numAdded = 1
+	}
+	if ki == 0 || ki >= k {
+		n.setAt(keyOffset(idx), k)
+		n.setAt(valOffset(idx), v)
+		return
+	}
+	panic("shouldn't reach here")
+}
+
+func (n node) iterate(fn func(node, int)) {
+	for i := 0; i < maxKeys; i++ {
+		if k := n.key(i); k > 0 {
+			fn(n, i)
+		} else {
+			break
+		}
+	}
+}
+
+func (n node) print(parentID uint64) {
+	var keys []string
+	n.iterate(func(n node, i int) {
+		keys = append(keys, fmt.Sprintf("%d", n.key(i)))
+	})
+	if len(keys) > 8 {
+		copy(keys[4:], keys[len(keys)-4:])
+		keys[3] = "..."
+		keys = keys[:8]
+	}
+	fmt.Printf("%d Child of: %d num keys: %d keys: %s\n",
+		n.pageID(), parentID, n.numKeys(), strings.Join(keys, " "))
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/z/buffer.go b/vendor/github.com/outcaste-io/ristretto/z/buffer.go
new file mode 100644
index 0000000000..8f760c7d3c
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/buffer.go
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2020 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"sort"
+	"sync/atomic"
+
+	"github.com/pkg/errors"
+)
+
+const (
+	defaultCapacity = 64
+	defaultTag      = "buffer"
+)
+
+// Buffer is the equivalent of bytes.Buffer without the ability to read. It is NOT thread-safe.
+//
+// In UseCalloc mode, z.Calloc is used to allocate memory, which depending upon how the code is
+// compiled could use jemalloc for allocations.
+//
+// In UseMmap mode, Buffer  uses file mmap to allocate memory. This allows us to store big data
+// structures without using physical memory.
+//
+// MaxSize can be set to limit the memory usage.
+type Buffer struct {
+	padding       uint64     // number of starting bytes used for padding
+	offset        uint64     // used length of the buffer
+	buf           []byte     // backing slice for the buffer
+	bufType       BufferType // type of the underlying buffer
+	curSz         int        // capacity of the buffer
+	maxSz         int        // causes a panic if the buffer grows beyond this size
+	mmapFile      *MmapFile  // optional mmap backing for the buffer
+	autoMmapAfter int        // Calloc falls back to an mmaped tmpfile after crossing this size
+	autoMmapDir   string     // directory for autoMmap to create a tempfile in
+	persistent    bool       // when enabled, Release will not delete the underlying mmap file
+	tag           string     // used for jemalloc stats
+}
+
+func NewBuffer(capacity int, tag string) *Buffer {
+	if capacity < defaultCapacity {
+		capacity = defaultCapacity
+	}
+	if tag == "" {
+		tag = defaultTag
+	}
+	return &Buffer{
+		buf:     Calloc(capacity, tag),
+		bufType: UseCalloc,
+		curSz:   capacity,
+		offset:  8,
+		padding: 8,
+		tag:     tag,
+	}
+}
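+
+// Illustrative sketch (an interpretation of the API above, not upstream
+// documentation): a Buffer is written to and read back as raw bytes, and
+// Release must be called when done to return the memory.
+//
+//	b := z.NewBuffer(1024, "example")
+//	defer func() { _ = b.Release() }()
+//	_, _ = b.Write([]byte("hello"))
+//	fmt.Println(len(b.Bytes())) // 5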
+
+// It is the caller's responsibility to set offset after this, because Buffer
+// doesn't remember what it was.
+func NewBufferPersistent(path string, capacity int) (*Buffer, error) {
+	file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
+	if err != nil {
+		return nil, err
+	}
+	buffer, err := newBufferFile(file, capacity)
+	if err != nil {
+		return nil, err
+	}
+	buffer.persistent = true
+	return buffer, nil
+}
+
+func NewBufferTmp(dir string, capacity int) (*Buffer, error) {
+	if dir == "" {
+		dir = tmpDir
+	}
+	file, err := ioutil.TempFile(dir, "buffer")
+	if err != nil {
+		return nil, err
+	}
+	return newBufferFile(file, capacity)
+}
+
+func newBufferFile(file *os.File, capacity int) (*Buffer, error) {
+	if capacity < defaultCapacity {
+		capacity = defaultCapacity
+	}
+	mmapFile, err := OpenMmapFileUsing(file, capacity, true)
+	if err != nil && err != NewFile {
+		return nil, err
+	}
+	buf := &Buffer{
+		buf:      mmapFile.Data,
+		bufType:  UseMmap,
+		curSz:    len(mmapFile.Data),
+		mmapFile: mmapFile,
+		offset:   8,
+		padding:  8,
+	}
+	return buf, nil
+}
+
+func NewBufferSlice(slice []byte) *Buffer {
+	return &Buffer{
+		offset:  uint64(len(slice)),
+		buf:     slice,
+		bufType: UseInvalid,
+	}
+}
+
+func (b *Buffer) WithAutoMmap(threshold int, path string) *Buffer {
+	if b.bufType != UseCalloc {
+		panic("can only autoMmap with UseCalloc")
+	}
+	b.autoMmapAfter = threshold
+	if path == "" {
+		b.autoMmapDir = tmpDir
+	} else {
+		b.autoMmapDir = path
+	}
+	return b
+}
+
+func (b *Buffer) WithMaxSize(size int) *Buffer {
+	b.maxSz = size
+	return b
+}
+
+func (b *Buffer) IsEmpty() bool {
+	return int(b.offset) == b.StartOffset()
+}
+
+// LenWithPadding would return the number of bytes written to the buffer so far
+// plus the padding at the start of the buffer.
+func (b *Buffer) LenWithPadding() int {
+	return int(atomic.LoadUint64(&b.offset))
+}
+
+// LenNoPadding would return the number of bytes written to the buffer so far
+// (without the padding).
+func (b *Buffer) LenNoPadding() int {
+	return int(atomic.LoadUint64(&b.offset) - b.padding)
+}
+
+// Bytes would return all the written bytes as a slice.
+func (b *Buffer) Bytes() []byte {
+	off := atomic.LoadUint64(&b.offset)
+	return b.buf[b.padding:off]
+}
+
+// Grow would grow the buffer to have at least n more bytes. In case the buffer is at capacity, it
+// would reallocate twice the size of current capacity + n, to ensure n bytes can be written to the
+// buffer without further allocation. In UseMmap mode, this might result in underlying file
+// expansion.
+func (b *Buffer) Grow(n int) {
+	if b.buf == nil {
+		panic("z.Buffer needs to be initialized before using")
+	}
+	if b.maxSz > 0 && int(b.offset)+n > b.maxSz {
+		err := fmt.Errorf(
+			"z.Buffer max size exceeded: %d offset: %d grow: %d", b.maxSz, b.offset, n)
+		panic(err)
+	}
+	if int(b.offset)+n < b.curSz {
+		return
+	}
+
+	// Calculate new capacity.
+	growBy := b.curSz + n
+	// Don't allocate more than 1GB at a time.
+	if growBy > 1<<30 {
+		growBy = 1 << 30
+	}
+	// Allocate at least n, even if it exceeds the 1GB limit above.
+	if n > growBy {
+		growBy = n
+	}
+	b.curSz += growBy
+
+	switch b.bufType {
+	case UseCalloc:
+		// If autoMmap gets triggered, copy the slice over to an mmaped file.
+		if b.autoMmapAfter > 0 && b.curSz > b.autoMmapAfter {
+			b.bufType = UseMmap
+			file, err := ioutil.TempFile(b.autoMmapDir, "")
+			if err != nil {
+				panic(err)
+			}
+			mmapFile, err := OpenMmapFileUsing(file, b.curSz, true)
+			if err != nil && err != NewFile {
+				panic(err)
+			}
+			assert(int(b.offset) == copy(mmapFile.Data, b.buf[:b.offset]))
+			Free(b.buf)
+			b.mmapFile = mmapFile
+			b.buf = mmapFile.Data
+			break
+		}
+
+		// Else, reallocate the slice.
+		newBuf := Calloc(b.curSz, b.tag)
+		assert(int(b.offset) == copy(newBuf, b.buf[:b.offset]))
+		Free(b.buf)
+		b.buf = newBuf
+
+	case UseMmap:
+		// Truncate and remap the underlying file.
+		if err := b.mmapFile.Truncate(int64(b.curSz)); err != nil {
+			err = errors.Wrapf(err,
+				"while trying to truncate file: %s to size: %d", b.mmapFile.Fd.Name(), b.curSz)
+			panic(err)
+		}
+		b.buf = b.mmapFile.Data
+
+	default:
+		panic("can only use Grow on UseCalloc and UseMmap buffers")
+	}
+}
+
+// Allocate is a way to get a slice of size n back from the buffer. This slice can be directly
+// written to. Warning: Allocate is not thread-safe. The byte slice returned MUST be used before
+// further calls to Buffer.
+func (b *Buffer) Allocate(n int) []byte {
+	b.Grow(n)
+	off := b.offset
+	b.offset += uint64(n)
+	return b.buf[off:int(b.offset)]
+}
+
+// AllocateOffset works the same way as Allocate, but instead of returning a byte slice, it returns
+// the offset of the allocation.
+func (b *Buffer) AllocateOffset(n int) int {
+	b.Grow(n)
+	b.offset += uint64(n)
+	return int(b.offset) - n
+}
+
+func (b *Buffer) writeLen(sz int) {
+	buf := b.Allocate(4)
+	binary.BigEndian.PutUint32(buf, uint32(sz))
+}
+
+// SliceAllocate would encode the size provided into the buffer, followed by a call to Allocate,
+// hence returning the slice of size sz. This can be used to allocate a lot of small buffers into
+// this big buffer.
+// Note that SliceAllocate should NOT be mixed with normal calls to Write.
+func (b *Buffer) SliceAllocate(sz int) []byte {
+	b.Grow(4 + sz)
+	b.writeLen(sz)
+	return b.Allocate(sz)
+}
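+
+// Illustrative sketch (an assumed usage pattern): many small length-prefixed
+// slices can be packed into one Buffer via SliceAllocate and walked back in
+// insertion order with SliceIterate.
+//
+//	b := z.NewBuffer(1024, "example")
+//	defer func() { _ = b.Release() }()
+//	copy(b.SliceAllocate(3), "foo")
+//	copy(b.SliceAllocate(3), "bar")
+//	_ = b.SliceIterate(func(s []byte) error {
+//		fmt.Println(string(s)) // "foo", then "bar"
+//		return nil
+//	})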
+
+func (b *Buffer) StartOffset() int {
+	return int(b.padding)
+}
+
+func (b *Buffer) WriteSlice(slice []byte) {
+	dst := b.SliceAllocate(len(slice))
+	assert(len(slice) == copy(dst, slice))
+}
+
+func (b *Buffer) SliceIterate(f func(slice []byte) error) error {
+	if b.IsEmpty() {
+		return nil
+	}
+	slice, next := []byte{}, b.StartOffset()
+	for next >= 0 {
+		slice, next = b.Slice(next)
+		if len(slice) == 0 {
+			continue
+		}
+		if err := f(slice); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+const (
+	UseCalloc BufferType = iota
+	UseMmap
+	UseInvalid
+)
+
+type BufferType int
+
+func (t BufferType) String() string {
+	switch t {
+	case UseCalloc:
+		return "UseCalloc"
+	case UseMmap:
+		return "UseMmap"
+	default:
+		return "UseInvalid"
+	}
+}
+
+type LessFunc func(a, b []byte) bool
+type sortHelper struct {
+	offsets []int
+	b       *Buffer
+	tmp     *Buffer
+	less    LessFunc
+	small   []int
+}
+
+func (s *sortHelper) sortSmall(start, end int) {
+	s.tmp.Reset()
+	s.small = s.small[:0]
+	next := start
+	for next >= 0 && next < end {
+		s.small = append(s.small, next)
+		_, next = s.b.Slice(next)
+	}
+
+	// We are sorting the slices pointed to by s.small offsets, but only moving the offsets around.
+	sort.Slice(s.small, func(i, j int) bool {
+		left, _ := s.b.Slice(s.small[i])
+		right, _ := s.b.Slice(s.small[j])
+		return s.less(left, right)
+	})
+	// Now we iterate over the s.small offsets and copy over the slices. The result is now in order.
+	for _, off := range s.small {
+		s.tmp.Write(rawSlice(s.b.buf[off:]))
+	}
+	assert(end-start == copy(s.b.buf[start:end], s.tmp.Bytes()))
+}
+
+func assert(b bool) {
+	if !b {
+		fatalf("%+v", errors.Errorf("Assertion failure"))
+	}
+}
+func check(err error) {
+	if err != nil {
+		fatalf("%+v", err)
+	}
+}
+func check2(_ interface{}, err error) {
+	check(err)
+}
+
+func (s *sortHelper) merge(left, right []byte, start, end int) {
+	if len(left) == 0 || len(right) == 0 {
+		return
+	}
+	s.tmp.Reset()
+	check2(s.tmp.Write(left))
+	left = s.tmp.Bytes()
+
+	var ls, rs []byte
+
+	copyLeft := func() {
+		assert(len(ls) == copy(s.b.buf[start:], ls))
+		left = left[len(ls):]
+		start += len(ls)
+	}
+	copyRight := func() {
+		assert(len(rs) == copy(s.b.buf[start:], rs))
+		right = right[len(rs):]
+		start += len(rs)
+	}
+
+	for start < end {
+		if len(left) == 0 {
+			assert(len(right) == copy(s.b.buf[start:end], right))
+			return
+		}
+		if len(right) == 0 {
+			assert(len(left) == copy(s.b.buf[start:end], left))
+			return
+		}
+		ls = rawSlice(left)
+		rs = rawSlice(right)
+
+		// We skip the first 4 bytes in the rawSlice, because that stores the length.
+		if s.less(ls[4:], rs[4:]) {
+			copyLeft()
+		} else {
+			copyRight()
+		}
+	}
+}
+
+func (s *sortHelper) sort(lo, hi int) []byte {
+	assert(lo <= hi)
+
+	mid := lo + (hi-lo)/2
+	loff, hoff := s.offsets[lo], s.offsets[hi]
+	if lo == mid {
+		// No need to sort, just return the buffer.
+		return s.b.buf[loff:hoff]
+	}
+
+	// lo, mid would sort from [offset[lo], offset[mid]) .
+	left := s.sort(lo, mid)
+	// Typically we'd use mid+1, but here mid represents an offset in the buffer. Each offset
+	// contains a thousand entries. So, if we do mid+1, we'd skip over those entries.
+	right := s.sort(mid, hi)
+
+	s.merge(left, right, loff, hoff)
+	return s.b.buf[loff:hoff]
+}
+
+// SortSlice is like SortSliceBetween but sorting over the entire buffer.
+func (b *Buffer) SortSlice(less func(left, right []byte) bool) {
+	b.SortSliceBetween(b.StartOffset(), int(b.offset), less)
+}
+func (b *Buffer) SortSliceBetween(start, end int, less LessFunc) {
+	if start >= end {
+		return
+	}
+	if start == 0 {
+		panic("start can never be zero")
+	}
+
+	var offsets []int
+	next, count := start, 0
+	for next >= 0 && next < end {
+		if count%1024 == 0 {
+			offsets = append(offsets, next)
+		}
+		_, next = b.Slice(next)
+		count++
+	}
+	assert(len(offsets) > 0)
+	if offsets[len(offsets)-1] != end {
+		offsets = append(offsets, end)
+	}
+
+	szTmp := int(float64((end-start)/2) * 1.1)
+	s := &sortHelper{
+		offsets: offsets,
+		b:       b,
+		less:    less,
+		small:   make([]int, 0, 1024),
+		tmp:     NewBuffer(szTmp, b.tag),
+	}
+	defer s.tmp.Release()
+
+	left := offsets[0]
+	for _, off := range offsets[1:] {
+		s.sortSmall(left, off)
+		left = off
+	}
+	s.sort(0, len(offsets)-1)
+}
+
+func rawSlice(buf []byte) []byte {
+	sz := binary.BigEndian.Uint32(buf)
+	return buf[:4+int(sz)]
+}
+
+// Slice would return the slice written at offset.
+func (b *Buffer) Slice(offset int) ([]byte, int) {
+	if offset >= int(b.offset) {
+		return nil, -1
+	}
+
+	sz := binary.BigEndian.Uint32(b.buf[offset:])
+	start := offset + 4
+	next := start + int(sz)
+	res := b.buf[start:next]
+	if next >= int(b.offset) {
+		next = -1
+	}
+	return res, next
+}
+
+// SliceOffsets is an expensive function. Use sparingly.
+func (b *Buffer) SliceOffsets() []int {
+	next := b.StartOffset()
+	var offsets []int
+	for next >= 0 {
+		offsets = append(offsets, next)
+		_, next = b.Slice(next)
+	}
+	return offsets
+}
+
+func (b *Buffer) Data(offset int) []byte {
+	if offset > b.curSz {
+		panic("offset beyond current size")
+	}
+	return b.buf[offset:b.curSz]
+}
+
+// Write would write p bytes to the buffer.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+	n = len(p)
+	b.Grow(n)
+	assert(n == copy(b.buf[b.offset:], p))
+	b.offset += uint64(n)
+	return n, nil
+}
+
+// Reset would reset the buffer to be reused.
+func (b *Buffer) Reset() {
+	b.offset = uint64(b.StartOffset())
+}
+
+// Release would free up the memory allocated by the buffer. Once the usage of buffer is done, it is
+// important to call Release, otherwise a memory leak can happen.
+func (b *Buffer) Release() error {
+	if b == nil {
+		return nil
+	}
+	switch b.bufType {
+	case UseCalloc:
+		Free(b.buf)
+	case UseMmap:
+		if b.mmapFile == nil {
+			return nil
+		}
+		path := b.mmapFile.Fd.Name()
+		if err := b.mmapFile.Close(-1); err != nil {
+			return errors.Wrapf(err, "while closing file: %s", path)
+		}
+		if !b.persistent {
+			if err := os.Remove(path); err != nil {
+				return errors.Wrapf(err, "while deleting file %s", path)
+			}
+		}
+	}
+	return nil
+}
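+
+// Example (illustrative sketch): how the slice helpers above fit together. WriteSlice is assumed
+// to be the length-prefixed writer defined earlier in this file; the buffer size and tag are
+// arbitrary.
+//
+//	buf := z.NewBuffer(1<<20, "example")
+//	defer func() { _ = buf.Release() }()
+//	buf.WriteSlice([]byte("banana"))
+//	buf.WriteSlice([]byte("apple"))
+//	buf.SortSlice(func(l, r []byte) bool { return bytes.Compare(l, r) < 0 })
+//	for _, off := range buf.SliceOffsets() {
+//		s, _ := buf.Slice(off)
+//		fmt.Printf("%s\n", s) // prints "apple", then "banana"
+//	}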
diff --git a/vendor/github.com/outcaste-io/ristretto/z/calloc.go b/vendor/github.com/outcaste-io/ristretto/z/calloc.go
new file mode 100644
index 0000000000..2e5d613813
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/calloc.go
@@ -0,0 +1,42 @@
+package z
+
+import "sync/atomic"
+
+var numBytes int64
+
+// NumAllocBytes returns the number of bytes allocated using calls to z.Calloc. The allocations
+// could be happening via either Go or jemalloc, depending upon the build flags.
+func NumAllocBytes() int64 {
+	return atomic.LoadInt64(&numBytes)
+}
+
+// MemStats is used to fetch jemalloc stats. The stats are fetched from
+// the mallctl namespace http://jemalloc.net/jemalloc.3.html#mallctl_namespace.
+type MemStats struct {
+	// Total number of bytes allocated by the application.
+	// http://jemalloc.net/jemalloc.3.html#stats.allocated
+	Allocated uint64
+	// Total number of bytes in active pages allocated by the application. This
+	// is a multiple of the page size, and greater than or equal to
+	// Allocated.
+	// http://jemalloc.net/jemalloc.3.html#stats.active
+	Active uint64
+	// Maximum number of bytes in physically resident data pages mapped by the
+	// allocator, comprising all pages dedicated to allocator metadata, pages
+	// backing active allocations, and unused dirty pages. This is a maximum
+	// rather than precise because pages may not actually be physically
+	// resident if they correspond to demand-zeroed virtual memory that has not
+	// yet been touched. This is a multiple of the page size, and is larger
+	// than stats.active.
+	// http://jemalloc.net/jemalloc.3.html#stats.resident
+	Resident uint64
+	// Total number of bytes in virtual memory mappings that were retained
+	// rather than being returned to the operating system via e.g. munmap(2) or
+	// similar. Retained virtual memory is typically untouched, decommitted, or
+	// purged, so it has no strongly associated physical memory (see extent
+	// hooks http://jemalloc.net/jemalloc.3.html#arena.i.extent_hooks for
+	// details). Retained memory is excluded from mapped memory statistics,
+	// e.g. stats.mapped (http://jemalloc.net/jemalloc.3.html#stats.mapped).
+	// http://jemalloc.net/jemalloc.3.html#stats.retained
+	Retained uint64
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/z/calloc_32bit.go b/vendor/github.com/outcaste-io/ristretto/z/calloc_32bit.go
new file mode 100644
index 0000000000..3a0442614f
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/calloc_32bit.go
@@ -0,0 +1,14 @@
+// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
+// of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
+
+// +build 386 amd64p32 arm armbe  mips mipsle mips64p32 mips64p32le ppc sparc
+
+package z
+
+const (
+	// MaxArrayLen is a safe maximum length for slices on this architecture.
+	MaxArrayLen = 1<<31 - 1
+	// MaxBufferSize is the size of a virtually unlimited buffer on this architecture.
+	MaxBufferSize = 1 << 30
+)
diff --git a/vendor/github.com/outcaste-io/ristretto/z/calloc_64bit.go b/vendor/github.com/outcaste-io/ristretto/z/calloc_64bit.go
new file mode 100644
index 0000000000..b898248bba
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/calloc_64bit.go
@@ -0,0 +1,14 @@
+// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
+// of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
+
+// +build amd64 arm64 arm64be ppc64 ppc64le mips64 mips64le riscv64 s390x sparc64
+
+package z
+
+const (
+	// MaxArrayLen is a safe maximum length for slices on this architecture.
+	MaxArrayLen = 1<<50 - 1
+	// MaxBufferSize is the size of a virtually unlimited buffer on this architecture.
+	MaxBufferSize = 256 << 30
+)
diff --git a/vendor/github.com/outcaste-io/ristretto/z/calloc_jemalloc.go b/vendor/github.com/outcaste-io/ristretto/z/calloc_jemalloc.go
new file mode 100644
index 0000000000..88a5acedba
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/calloc_jemalloc.go
@@ -0,0 +1,173 @@
+// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
+// of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
+
+//go:build jemalloc
+// +build jemalloc
+
+package z
+
+/*
+#cgo LDFLAGS: /usr/local/lib/libjemalloc_outcaste.a -L/usr/local/lib -Wl,-rpath,/usr/local/lib -ljemalloc_outcaste -lm -lstdc++ -pthread -ldl
+#include <stdlib.h>
+#include <jemalloc/jemalloc_outcaste.h>
+*/
+import "C"
+import (
+	"bytes"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"unsafe"
+
+	"github.com/dustin/go-humanize"
+)
+
+// The go:linkname directive provides backdoor access to private functions in
+// the runtime. Below, we're accessing the throw function.
+
+//go:linkname throw runtime.throw
+func throw(s string)
+
+// New allocates a slice of size n. The returned slice is from manually managed
+// memory and MUST be released by calling Free. Failure to do so will result in
+// a memory leak.
+//
+// Compile jemalloc with ./configure --with-jemalloc-prefix="je_"
+// https://android.googlesource.com/platform/external/jemalloc_new/+/6840b22e8e11cb68b493297a5cd757d6eaa0b406/TUNING.md
+// These two config options seem useful for frequent allocations and deallocations in
+// multi-threaded programs (like ours).
+// JE_MALLOC_CONF="background_thread:true,metadata_thp:auto"
+//
+// Compile Go program with `go build -tags=jemalloc` to enable this.
+
+type dalloc struct {
+	t  string
+	sz int
+}
+
+var dallocsMu sync.Mutex
+var dallocs map[unsafe.Pointer]*dalloc
+
+func init() {
+	// By initializing dallocs, we can start tracking allocations and deallocations via z.Calloc.
+	dallocs = make(map[unsafe.Pointer]*dalloc)
+}
+
+func Calloc(n int, tag string) []byte {
+	if n == 0 {
+		return make([]byte, 0)
+	}
+	// We need to be conscious of the Cgo pointer passing rules:
+	//
+	//   https://golang.org/cmd/cgo/#hdr-Passing_pointers
+	//
+	//   ...
+	//   Note: the current implementation has a bug. While Go code is permitted
+	//   to write nil or a C pointer (but not a Go pointer) to C memory, the
+	//   current implementation may sometimes cause a runtime error if the
+	//   contents of the C memory appear to be a Go pointer. Therefore, avoid
+	//   passing uninitialized C memory to Go code if the Go code is going to
+	//   store pointer values in it. Zero out the memory in C before passing it
+	//   to Go.
+
+	ptr := C.je_calloc(C.size_t(n), 1)
+	if ptr == nil {
+		// NB: throw is like panic, except it guarantees the process will be
+		// terminated. The call below is exactly what the Go runtime invokes when
+		// it cannot allocate memory.
+		throw("out of memory")
+	}
+
+	uptr := unsafe.Pointer(ptr)
+	dallocsMu.Lock()
+	dallocs[uptr] = &dalloc{
+		t:  tag,
+		sz: n,
+	}
+	dallocsMu.Unlock()
+	atomic.AddInt64(&numBytes, int64(n))
+	// Interpret the C pointer as a pointer to a Go array, then slice.
+	return (*[MaxArrayLen]byte)(uptr)[:n:n]
+}
+
+// CallocNoRef does the exact same thing as Calloc with jemalloc enabled.
+func CallocNoRef(n int, tag string) []byte {
+	return Calloc(n, tag)
+}
+
+// Free frees the specified slice.
+func Free(b []byte) {
+	if sz := cap(b); sz != 0 {
+		b = b[:cap(b)]
+		ptr := unsafe.Pointer(&b[0])
+		C.je_free(ptr)
+		atomic.AddInt64(&numBytes, -int64(sz))
+		dallocsMu.Lock()
+		delete(dallocs, ptr)
+		dallocsMu.Unlock()
+	}
+}
+
+func Leaks() string {
+	if dallocs == nil {
+		return "Leak detection disabled. Enable with 'leak' build flag."
+	}
+	dallocsMu.Lock()
+	defer dallocsMu.Unlock()
+	if len(dallocs) == 0 {
+		return "NO leaks found."
+	}
+	m := make(map[string]int)
+	for _, da := range dallocs {
+		m[da.t] += da.sz
+	}
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "Allocations:\n")
+	for f, sz := range m {
+		fmt.Fprintf(&buf, "%s at file: %s\n", humanize.IBytes(uint64(sz)), f)
+	}
+	return buf.String()
+}
+
+// ReadMemStats populates stats with JE Malloc statistics.
+func ReadMemStats(stats *MemStats) {
+	if stats == nil {
+		return
+	}
+	// Call an epoch mallctl to refresh the stats data, as mentioned in the docs.
+	// http://jemalloc.net/jemalloc.3.html#epoch
+	// Note: This epoch mallctl is as expensive as a malloc call. It takes the
+	// malloc_mutex lock.
+	epoch := 1
+	sz := unsafe.Sizeof(&epoch)
+	C.je_mallctl(
+		(C.CString)("epoch"),
+		unsafe.Pointer(&epoch),
+		(*C.size_t)(unsafe.Pointer(&sz)),
+		unsafe.Pointer(&epoch),
+		(C.size_t)(unsafe.Sizeof(epoch)))
+	stats.Allocated = fetchStat("stats.allocated")
+	stats.Active = fetchStat("stats.active")
+	stats.Resident = fetchStat("stats.resident")
+	stats.Retained = fetchStat("stats.retained")
+}
+
+// fetchStat is used to read a specific attribute from jemalloc stats using mallctl.
+func fetchStat(s string) uint64 {
+	var out uint64
+	sz := unsafe.Sizeof(&out)
+	C.je_mallctl(
+		(C.CString)(s),                   // Query: eg: stats.allocated, stats.resident, etc.
+		unsafe.Pointer(&out),             // Variable to store the output.
+		(*C.size_t)(unsafe.Pointer(&sz)), // Size of the output variable.
+		nil,                              // Input variable used to set a value.
+		0)                                // Size of the input variable.
+	return out
+}
+
+func StatsPrint() {
+	opts := C.CString("mdablxe")
+	C.je_malloc_stats_print(nil, nil, opts)
+	C.free(unsafe.Pointer(opts))
+}
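+
+// Example (illustrative sketch): manual allocation when built with `go build -tags=jemalloc`.
+// The tag string and size are arbitrary.
+//
+//	b := z.Calloc(4096, "example-tag")
+//	defer z.Free(b) // required; otherwise the bytes keep showing up in z.Leaks()
+//	copy(b, "hello")
+//	fmt.Println(z.NumAllocBytes()) // 4096 while b is live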
diff --git a/vendor/github.com/outcaste-io/ristretto/z/calloc_nojemalloc.go b/vendor/github.com/outcaste-io/ristretto/z/calloc_nojemalloc.go
new file mode 100644
index 0000000000..93ceedf906
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/calloc_nojemalloc.go
@@ -0,0 +1,37 @@
+// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
+// of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
+
+// +build !jemalloc !cgo
+
+package z
+
+import (
+	"fmt"
+)
+
+// Provides versions of Calloc, CallocNoRef, etc. when jemalloc is not available
+// (e.g. a build without the jemalloc tag).
+
+// Calloc allocates a slice of size n.
+func Calloc(n int, tag string) []byte {
+	return make([]byte, n)
+}
+
+// CallocNoRef will not give you memory back without jemalloc.
+func CallocNoRef(n int, tag string) []byte {
+	// We do the add here just to stay compatible with a corresponding Free call.
+	return nil
+}
+
+// Free does not do anything in this mode.
+func Free(b []byte) {}
+
+func Leaks() string { return "Leaks: Using Go memory" }
+func StatsPrint() {
+	fmt.Println("Using Go memory")
+}
+
+// ReadMemStats doesn't do anything since all the memory is being managed
+// by the Go runtime.
+func ReadMemStats(_ *MemStats) { return }
diff --git a/vendor/github.com/outcaste-io/ristretto/z/file.go b/vendor/github.com/outcaste-io/ristretto/z/file.go
new file mode 100644
index 0000000000..880caf0ad9
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/file.go
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2020 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+// MmapFile represents an mmapped file and includes both the buffer holding the data
+// and the file descriptor.
+type MmapFile struct {
+	Data []byte
+	Fd   *os.File
+}
+
+var NewFile = errors.New("Create a new file")
+
+func OpenMmapFileUsing(fd *os.File, sz int, writable bool) (*MmapFile, error) {
+	filename := fd.Name()
+	fi, err := fd.Stat()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot stat file: %s", filename)
+	}
+
+	var rerr error
+	fileSize := fi.Size()
+	if sz > 0 && fileSize == 0 {
+		// If file is empty, truncate it to sz.
+		if err := fd.Truncate(int64(sz)); err != nil {
+			return nil, errors.Wrapf(err, "error while truncation")
+		}
+		fileSize = int64(sz)
+		rerr = NewFile
+	}
+
+	// fmt.Printf("Mmaping file: %s with writable: %v filesize: %d\n", fd.Name(), writable, fileSize)
+	buf, err := Mmap(fd, writable, fileSize) // Mmap up to file size.
+	if err != nil {
+		return nil, errors.Wrapf(err, "while mmapping %s with size: %d", fd.Name(), fileSize)
+	}
+
+	if fileSize == 0 {
+		dir, _ := filepath.Split(filename)
+		go SyncDir(dir)
+	}
+	return &MmapFile{
+		Data: buf,
+		Fd:   fd,
+	}, rerr
+}
+
+// OpenMmapFile opens an existing file or creates a new one. If the file is
+// created, it truncates the file to maxSz. In both cases, it mmaps the file to
+// maxSz and returns the mapping. If the file was created, z.NewFile is
+// returned as the error.
+func OpenMmapFile(filename string, flag int, maxSz int) (*MmapFile, error) {
+	// fmt.Printf("opening file %s with flag: %v\n", filename, flag)
+	fd, err := os.OpenFile(filename, flag, 0666)
+	if err != nil {
+		return nil, errors.Wrapf(err, "unable to open: %s", filename)
+	}
+	writable := true
+	if flag == os.O_RDONLY {
+		writable = false
+	}
+	return OpenMmapFileUsing(fd, maxSz, writable)
+}
+
+type mmapReader struct {
+	Data   []byte
+	offset int
+}
+
+func (mr *mmapReader) Read(buf []byte) (int, error) {
+	if mr.offset > len(mr.Data) {
+		return 0, io.EOF
+	}
+	n := copy(buf, mr.Data[mr.offset:])
+	mr.offset += n
+	if n < len(buf) {
+		return n, io.EOF
+	}
+	return n, nil
+}
+
+func (m *MmapFile) NewReader(offset int) io.Reader {
+	return &mmapReader{
+		Data:   m.Data,
+		offset: offset,
+	}
+}
+
+// Bytes returns sz bytes of data starting at offset off. If there's not enough data, it returns
+// a nil slice and io.EOF.
+func (m *MmapFile) Bytes(off, sz int) ([]byte, error) {
+	if len(m.Data[off:]) < sz {
+		return nil, io.EOF
+	}
+	return m.Data[off : off+sz], nil
+}
+
+// Slice returns the slice at the given offset.
+func (m *MmapFile) Slice(offset int) []byte {
+	sz := binary.BigEndian.Uint32(m.Data[offset:])
+	start := offset + 4
+	next := start + int(sz)
+	if next > len(m.Data) {
+		return []byte{}
+	}
+	res := m.Data[start:next]
+	return res
+}
+
+// AllocateSlice allocates a slice of the given size at the given offset.
+func (m *MmapFile) AllocateSlice(sz, offset int) ([]byte, int, error) {
+	start := offset + 4
+
+	// If the file is too small, double its size or increase it by 1GB, whichever is smaller.
+	if start+sz > len(m.Data) {
+		const oneGB = 1 << 30
+		growBy := len(m.Data)
+		if growBy > oneGB {
+			growBy = oneGB
+		}
+		if growBy < sz+4 {
+			growBy = sz + 4
+		}
+		if err := m.Truncate(int64(len(m.Data) + growBy)); err != nil {
+			return nil, 0, err
+		}
+	}
+
+	binary.BigEndian.PutUint32(m.Data[offset:], uint32(sz))
+	return m.Data[start : start+sz], start + sz, nil
+}
+
+func (m *MmapFile) Sync() error {
+	if m == nil {
+		return nil
+	}
+	return Msync(m.Data)
+}
+
+func (m *MmapFile) Delete() error {
+	// Badger can set the m.Data directly, without setting any Fd. In that case, this should be a
+	// NOOP.
+	if m.Fd == nil {
+		return nil
+	}
+
+	if err := Munmap(m.Data); err != nil {
+		return fmt.Errorf("while munmap file: %s, error: %v\n", m.Fd.Name(), err)
+	}
+	m.Data = nil
+	if err := m.Fd.Truncate(0); err != nil {
+		return fmt.Errorf("while truncate file: %s, error: %v\n", m.Fd.Name(), err)
+	}
+	if err := m.Fd.Close(); err != nil {
+		return fmt.Errorf("while close file: %s, error: %v\n", m.Fd.Name(), err)
+	}
+	return os.Remove(m.Fd.Name())
+}
+
+// Close closes the file. It also truncates the file to maxSz if maxSz >= 0.
+func (m *MmapFile) Close(maxSz int64) error {
+	// Badger can set the m.Data directly, without setting any Fd. In that case, this should be a
+	// NOOP.
+	if m.Fd == nil {
+		return nil
+	}
+	if err := m.Sync(); err != nil {
+		return fmt.Errorf("while sync file: %s, error: %v\n", m.Fd.Name(), err)
+	}
+	if err := Munmap(m.Data); err != nil {
+		return fmt.Errorf("while munmap file: %s, error: %v\n", m.Fd.Name(), err)
+	}
+	if maxSz >= 0 {
+		if err := m.Fd.Truncate(maxSz); err != nil {
+			return fmt.Errorf("while truncate file: %s, error: %v\n", m.Fd.Name(), err)
+		}
+	}
+	return m.Fd.Close()
+}
+
+func SyncDir(dir string) error {
+	df, err := os.Open(dir)
+	if err != nil {
+		return errors.Wrapf(err, "while opening %s", dir)
+	}
+	if err := df.Sync(); err != nil {
+		return errors.Wrapf(err, "while syncing %s", dir)
+	}
+	if err := df.Close(); err != nil {
+		return errors.Wrapf(err, "while closing %s", dir)
+	}
+	return nil
+}
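+
+// Example (illustrative sketch): opening a memory-mapped file and handling the z.NewFile sentinel
+// returned when the file had to be created. The path and size are arbitrary.
+//
+//	mf, err := z.OpenMmapFile("/tmp/example.buf", os.O_RDWR|os.O_CREATE, 1<<20)
+//	if err == z.NewFile {
+//		// Freshly created; the 1 MiB mapping is zero-filled.
+//	} else if err != nil {
+//		log.Fatal(err)
+//	}
+//	copy(mf.Data, "hello")
+//	_ = mf.Sync()
+//	_ = mf.Close(-1) // -1 skips the final truncate and keeps the current file size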
diff --git a/vendor/github.com/outcaste-io/ristretto/z/file_default.go b/vendor/github.com/outcaste-io/ristretto/z/file_default.go
new file mode 100644
index 0000000000..d9c0db43e7
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/file_default.go
@@ -0,0 +1,39 @@
+// +build !linux
+
+/*
+ * Copyright 2020 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import "fmt"
+
+// Truncate would truncate the mmapped file to the given size. On Linux, we truncate
+// the underlying file and then call mremap, but on other systems, we unmap first,
+// then truncate, then re-map.
+func (m *MmapFile) Truncate(maxSz int64) error {
+	if err := m.Sync(); err != nil {
+		return fmt.Errorf("while sync file: %s, error: %v\n", m.Fd.Name(), err)
+	}
+	if err := Munmap(m.Data); err != nil {
+		return fmt.Errorf("while munmap file: %s, error: %v\n", m.Fd.Name(), err)
+	}
+	if err := m.Fd.Truncate(maxSz); err != nil {
+		return fmt.Errorf("while truncate file: %s, error: %v\n", m.Fd.Name(), err)
+	}
+	var err error
+	m.Data, err = Mmap(m.Fd, true, maxSz) // Mmap up to max size.
+	return err
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/z/file_linux.go b/vendor/github.com/outcaste-io/ristretto/z/file_linux.go
new file mode 100644
index 0000000000..7f670bd2cc
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/file_linux.go
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2020 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"fmt"
+)
+
+// Truncate would truncate the mmapped file to the given size. On Linux, we truncate
+// the underlying file and then call mremap, but on other systems, we unmap first,
+// then truncate, then re-map.
+func (m *MmapFile) Truncate(maxSz int64) error {
+	if err := m.Sync(); err != nil {
+		return fmt.Errorf("while sync file: %s, error: %v\n", m.Fd.Name(), err)
+	}
+	if err := m.Fd.Truncate(maxSz); err != nil {
+		return fmt.Errorf("while truncate file: %s, error: %v\n", m.Fd.Name(), err)
+	}
+
+	var err error
+	m.Data, err = mremap(m.Data, int(maxSz)) // Mmap up to max size.
+	return err
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/z/flags.go b/vendor/github.com/outcaste-io/ristretto/z/flags.go
new file mode 100644
index 0000000000..9e0e79804b
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/flags.go
@@ -0,0 +1,310 @@
+package z
+
+import (
+	"fmt"
+	"os"
+	"os/user"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// SuperFlagHelp makes it really easy to generate command line `--help` output for a SuperFlag. For
+// example:
+//
+//	const flagDefaults = `enabled=true; path=some/path;`
+//
+//	var help string = z.NewSuperFlagHelp(flagDefaults).
+//		Flag("enabled", "Turns on <something>.").
+//		Flag("path", "The path to <something>.").
+//		Flag("another", "Not present in defaults, but still included.").
+//		String()
+//
+// The `help` string would then contain:
+//
+//	enabled=true; Turns on <something>.
+//	path=some/path; The path to <something>.
+//	another=; Not present in defaults, but still included.
+//
+// All flags are sorted alphabetically for consistent `--help` output. Flags with default values are
+// placed at the top, and everything else goes below.
+type SuperFlagHelp struct {
+	head     string
+	defaults *SuperFlag
+	flags    map[string]string
+}
+
+func NewSuperFlagHelp(defaults string) *SuperFlagHelp {
+	return &SuperFlagHelp{
+		defaults: NewSuperFlag(defaults),
+		flags:    make(map[string]string, 0),
+	}
+}
+
+func (h *SuperFlagHelp) Head(head string) *SuperFlagHelp {
+	h.head = head
+	return h
+}
+
+func (h *SuperFlagHelp) Flag(name, description string) *SuperFlagHelp {
+	h.flags[name] = description
+	return h
+}
+
+func (h *SuperFlagHelp) String() string {
+	defaultLines := make([]string, 0)
+	otherLines := make([]string, 0)
+	for name, help := range h.flags {
+		val, found := h.defaults.m[name]
+		line := fmt.Sprintf("    %s=%s; %s\n", name, val, help)
+		if found {
+			defaultLines = append(defaultLines, line)
+		} else {
+			otherLines = append(otherLines, line)
+		}
+	}
+	sort.Strings(defaultLines)
+	sort.Strings(otherLines)
+	dls := strings.Join(defaultLines, "")
+	ols := strings.Join(otherLines, "")
+	if len(h.defaults.m) == 0 && len(ols) == 0 {
+		// remove last newline
+		dls = dls[:len(dls)-1]
+	}
+	// remove last newline
+	if len(h.defaults.m) == 0 && len(ols) > 1 {
+		ols = ols[:len(ols)-1]
+	}
+	return h.head + "\n" + dls + ols
+}
+
+func parseFlag(flag string) (map[string]string, error) {
+	kvm := make(map[string]string)
+	for _, kv := range strings.Split(flag, ";") {
+		if strings.TrimSpace(kv) == "" {
+			continue
+		}
+		// For a non-empty separator, 0 < len(splits) ≤ 2.
+		splits := strings.SplitN(kv, "=", 2)
+		k := strings.TrimSpace(splits[0])
+		if len(splits) < 2 {
+			return nil, fmt.Errorf("superflag: missing value for '%s' in flag: %s", k, flag)
+		}
+		k = strings.ToLower(k)
+		k = strings.ReplaceAll(k, "_", "-")
+		kvm[k] = strings.TrimSpace(splits[1])
+	}
+	return kvm, nil
+}
+
+type SuperFlag struct {
+	m map[string]string
+}
+
+func NewSuperFlag(flag string) *SuperFlag {
+	sf, err := newSuperFlagImpl(flag)
+	if err != nil {
+		fatal(err)
+	}
+	return sf
+}
+
+func newSuperFlagImpl(flag string) (*SuperFlag, error) {
+	m, err := parseFlag(flag)
+	if err != nil {
+		return nil, err
+	}
+	return &SuperFlag{m}, nil
+}
+
+func (sf *SuperFlag) String() string {
+	if sf == nil {
+		return ""
+	}
+	kvs := make([]string, 0, len(sf.m))
+	for k, v := range sf.m {
+		kvs = append(kvs, fmt.Sprintf("%s=%s", k, v))
+	}
+	return strings.Join(kvs, "; ")
+}
+
+func (sf *SuperFlag) MergeAndCheckDefault(flag string) *SuperFlag {
+	sf, err := sf.MergeWithDefault(flag)
+	if err != nil {
+		fatal(err)
+	}
+	return sf
+}
+
+func (sf *SuperFlag) MergeWithDefault(flag string) (*SuperFlag, error) {
+	if sf == nil {
+		m, err := parseFlag(flag)
+		if err != nil {
+			return nil, err
+		}
+		return &SuperFlag{m}, nil
+	}
+
+	src, err := parseFlag(flag)
+	if err != nil {
+		return nil, err
+	}
+
+	numKeys := len(sf.m)
+	for k := range src {
+		if _, ok := sf.m[k]; ok {
+			numKeys--
+		}
+	}
+	if numKeys != 0 {
+		return nil, fmt.Errorf("superflag: found invalid options in flag: %s.\nvalid options: %v", sf, flag)
+	}
+	for k, v := range src {
+		if _, ok := sf.m[k]; !ok {
+			sf.m[k] = v
+		}
+	}
+	return sf, nil
+}
+
+func (sf *SuperFlag) Has(opt string) bool {
+	val := sf.GetString(opt)
+	return val != ""
+}
+
+func (sf *SuperFlag) GetDuration(opt string) time.Duration {
+	val := sf.GetString(opt)
+	if val == "" {
+		return time.Duration(0)
+	}
+	if strings.Contains(val, "d") {
+		val = strings.Replace(val, "d", "", 1)
+		days, err := strconv.ParseUint(val, 0, 64)
+		if err != nil {
+			return time.Duration(0)
+		}
+		return time.Hour * 24 * time.Duration(days)
+	}
+	d, err := time.ParseDuration(val)
+	if err != nil {
+		return time.Duration(0)
+	}
+	return d
+}
+
+func (sf *SuperFlag) GetBool(opt string) bool {
+	val := sf.GetString(opt)
+	if val == "" {
+		return false
+	}
+	b, err := strconv.ParseBool(val)
+	if err != nil {
+		err = errors.Wrapf(err,
+			"Unable to parse %s as bool for key: %s. Options: %s\n",
+			val, opt, sf)
+		fatalf("%+v", err)
+	}
+	return b
+}
+
+func (sf *SuperFlag) GetFloat64(opt string) float64 {
+	val := sf.GetString(opt)
+	if val == "" {
+		return 0
+	}
+	f, err := strconv.ParseFloat(val, 64)
+	if err != nil {
+		err = errors.Wrapf(err,
+			"Unable to parse %s as float64 for key: %s. Options: %s\n",
+			val, opt, sf)
+		fatalf("%+v", err)
+	}
+	return f
+}
+
+func (sf *SuperFlag) GetInt64(opt string) int64 {
+	val := sf.GetString(opt)
+	if val == "" {
+		return 0
+	}
+	i, err := strconv.ParseInt(val, 0, 64)
+	if err != nil {
+		err = errors.Wrapf(err,
+			"Unable to parse %s as int64 for key: %s. Options: %s\n",
+			val, opt, sf)
+		fatalf("%+v", err)
+	}
+	return i
+}
+
+func (sf *SuperFlag) GetUint64(opt string) uint64 {
+	val := sf.GetString(opt)
+	if val == "" {
+		return 0
+	}
+	u, err := strconv.ParseUint(val, 0, 64)
+	if err != nil {
+		err = errors.Wrapf(err,
+			"Unable to parse %s as uint64 for key: %s. Options: %s\n",
+			val, opt, sf)
+		fatalf("%+v", err)
+	}
+	return u
+}
+
+func (sf *SuperFlag) GetUint32(opt string) uint32 {
+	val := sf.GetString(opt)
+	if val == "" {
+		return 0
+	}
+	u, err := strconv.ParseUint(val, 0, 32)
+	if err != nil {
+		err = errors.Wrapf(err,
+			"Unable to parse %s as uint32 for key: %s. Options: %s\n",
+			val, opt, sf)
+		fatalf("%+v", err)
+	}
+	return uint32(u)
+}
+
+func (sf *SuperFlag) GetString(opt string) string {
+	if sf == nil {
+		return ""
+	}
+	return sf.m[opt]
+}
+
+func (sf *SuperFlag) GetPath(opt string) string {
+	p := sf.GetString(opt)
+	path, err := expandPath(p)
+	if err != nil {
+		fatalf("Failed to get path: %+v", err)
+	}
+	return path
+}
+
+// expandPath expands the paths containing ~ to /home/user. It also computes the absolute path
+// from the relative paths. For example: ~/abc/../cef will be transformed to /home/user/cef.
+func expandPath(path string) (string, error) {
+	if len(path) == 0 {
+		return "", nil
+	}
+	if path[0] == '~' && (len(path) == 1 || os.IsPathSeparator(path[1])) {
+		usr, err := user.Current()
+		if err != nil {
+			return "", errors.Wrap(err, "Failed to get the home directory of the user")
+		}
+		path = filepath.Join(usr.HomeDir, path[1:])
+	}
+
+	var err error
+	path, err = filepath.Abs(path)
+	if err != nil {
+		return "", errors.Wrap(err, "Failed to generate absolute path")
+	}
+	return path, nil
+}
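+
+// Example (illustrative sketch): parsing a user-supplied flag string against defaults. The option
+// names and values are arbitrary.
+//
+//	const defaults = `size=1024; compress=true; ttl=24h;`
+//	sf := z.NewSuperFlag("size=2048; ttl=7d").MergeAndCheckDefault(defaults)
+//	sf.GetUint64("size")   // 2048, from the user-supplied string
+//	sf.GetBool("compress") // true, filled in from the defaults
+//	sf.GetDuration("ttl")  // 168h; the "d" (days) suffix is handled specially above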
diff --git a/vendor/github.com/outcaste-io/ristretto/z/histogram.go b/vendor/github.com/outcaste-io/ristretto/z/histogram.go
new file mode 100644
index 0000000000..4eb0c4f6c9
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/histogram.go
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2020 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"fmt"
+	"math"
+	"strings"
+
+	"github.com/dustin/go-humanize"
+)
+
+// HistogramBounds creates bounds for a histogram. The bounds are powers of two of the form
+// [2^minExponent, ..., 2^maxExponent].
+func HistogramBounds(minExponent, maxExponent uint32) []float64 {
+	var bounds []float64
+	for i := minExponent; i <= maxExponent; i++ {
+		bounds = append(bounds, float64(int(1)<<i))
+	}
+	return bounds
+}
+
+func Fibonacci(num int) []float64 {
+	assert(num > 4)
+	bounds := make([]float64, num)
+	bounds[0] = 1
+	bounds[1] = 2
+	for i := 2; i < num; i++ {
+		bounds[i] = bounds[i-1] + bounds[i-2]
+	}
+	return bounds
+}
+
+// HistogramData stores the information needed to represent the sizes of the keys and values
+// as a histogram.
+type HistogramData struct {
+	Bounds         []float64
+	Count          int64
+	CountPerBucket []int64
+	Min            int64
+	Max            int64
+	Sum            int64
+}
+
+// NewHistogramData returns a new instance of HistogramData with properly initialized fields.
+func NewHistogramData(bounds []float64) *HistogramData {
+	return &HistogramData{
+		Bounds:         bounds,
+		CountPerBucket: make([]int64, len(bounds)+1),
+		Max:            0,
+		Min:            math.MaxInt64,
+	}
+}
+
+func (histogram *HistogramData) Copy() *HistogramData {
+	if histogram == nil {
+		return nil
+	}
+	return &HistogramData{
+		Bounds:         append([]float64{}, histogram.Bounds...),
+		CountPerBucket: append([]int64{}, histogram.CountPerBucket...),
+		Count:          histogram.Count,
+		Min:            histogram.Min,
+		Max:            histogram.Max,
+		Sum:            histogram.Sum,
+	}
+}
+
+// Update adds value to the histogram, updating Min, Max, Sum, Count, and the per-bucket counts.
+func (histogram *HistogramData) Update(value int64) {
+	if histogram == nil {
+		return
+	}
+	if value > histogram.Max {
+		histogram.Max = value
+	}
+	if value < histogram.Min {
+		histogram.Min = value
+	}
+
+	histogram.Sum += value
+	histogram.Count++
+
+	for index := 0; index <= len(histogram.Bounds); index++ {
+		// Count the value in the last bucket if we reached the end of the Bounds array.
+		if index == len(histogram.Bounds) {
+			histogram.CountPerBucket[index]++
+			break
+		}
+
+		if value < int64(histogram.Bounds[index]) {
+			histogram.CountPerBucket[index]++
+			break
+		}
+	}
+}
+
+// Mean returns the mean value for the histogram.
+func (histogram *HistogramData) Mean() float64 {
+	if histogram.Count == 0 {
+		return 0
+	}
+	return float64(histogram.Sum) / float64(histogram.Count)
+}
+
+// String converts the histogram data into a human-readable string.
+func (histogram *HistogramData) String() string {
+	if histogram == nil {
+		return ""
+	}
+	var b strings.Builder
+
+	b.WriteString("\n -- Histogram: \n")
+	b.WriteString(fmt.Sprintf("Min value: %d \n", histogram.Min))
+	b.WriteString(fmt.Sprintf("Max value: %d \n", histogram.Max))
+	b.WriteString(fmt.Sprintf("Count: %d \n", histogram.Count))
+	b.WriteString(fmt.Sprintf("50p: %.2f \n", histogram.Percentile(0.5)))
+	b.WriteString(fmt.Sprintf("75p: %.2f \n", histogram.Percentile(0.75)))
+	b.WriteString(fmt.Sprintf("90p: %.2f \n", histogram.Percentile(0.90)))
+
+	numBounds := len(histogram.Bounds)
+	var cum float64
+	for index, count := range histogram.CountPerBucket {
+		if count == 0 {
+			continue
+		}
+
+		// The last bucket contains the range from the last bound up to
+		// infinity, so it's processed differently from the other buckets.
+		if index == len(histogram.CountPerBucket)-1 {
+			lowerBound := uint64(histogram.Bounds[numBounds-1])
+			page := float64(count*100) / float64(histogram.Count)
+			cum += page
+			b.WriteString(fmt.Sprintf("[%s, %s) %d %.2f%% %.2f%%\n",
+				humanize.IBytes(lowerBound), "infinity", count, page, cum))
+			continue
+		}
+
+		upperBound := uint64(histogram.Bounds[index])
+		lowerBound := uint64(0)
+		if index > 0 {
+			lowerBound = uint64(histogram.Bounds[index-1])
+		}
+
+		page := float64(count*100) / float64(histogram.Count)
+		cum += page
+		b.WriteString(fmt.Sprintf("[%d, %d) %d %.2f%% %.2f%%\n",
+			lowerBound, upperBound, count, page, cum))
+	}
+	b.WriteString(" --\n")
+	return b.String()
+}
+
+// Percentile returns the percentile value for the histogram.
+// The value of p should be in the range [0.0, 1.0].
+func (histogram *HistogramData) Percentile(p float64) float64 {
+	if histogram == nil {
+		return 0
+	}
+
+	if histogram.Count == 0 {
+		// if no data return the minimum range
+		return histogram.Bounds[0]
+	}
+	pval := int64(float64(histogram.Count) * p)
+	for i, v := range histogram.CountPerBucket {
+		pval = pval - v
+		if pval <= 0 {
+			if i == len(histogram.Bounds) {
+				break
+			}
+			return histogram.Bounds[i]
+		}
+	}
+	// default return should be the max range
+	return histogram.Bounds[len(histogram.Bounds)-1]
+}
+
+// Clear resets the histogram. Helpful in situations where we need to reset the metrics.
+func (histogram *HistogramData) Clear() {
+	if histogram == nil {
+		return
+	}
+
+	histogram.Count = 0
+	histogram.CountPerBucket = make([]int64, len(histogram.Bounds)+1)
+	histogram.Sum = 0
+	histogram.Max = 0
+	histogram.Min = math.MaxInt64
+}
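+
+// Example (illustrative sketch): tracking value sizes with power-of-two buckets. The exponents
+// and sample values are arbitrary.
+//
+//	h := z.NewHistogramData(z.HistogramBounds(5, 16)) // buckets from 32 B up to 64 KiB
+//	for _, sz := range []int64{100, 4096, 70000} {
+//		h.Update(sz)
+//	}
+//	fmt.Println(h.Mean()) // 24732
+//	fmt.Print(h.String()) // per-bucket breakdown plus 50/75/90 percentiles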
diff --git a/vendor/github.com/outcaste-io/ristretto/z/mmap.go b/vendor/github.com/outcaste-io/ristretto/z/mmap.go
new file mode 100644
index 0000000000..9b02510003
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/mmap.go
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"os"
+)
+
+// Mmap uses the mmap system call to memory-map a file. If writable is true,
+// memory protection of the pages is set so that they may be written to as well.
+func Mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
+	return mmap(fd, writable, size)
+}
+
+// Munmap unmaps a previously mapped slice.
+func Munmap(b []byte) error {
+	return munmap(b)
+}
+
+// Madvise uses the madvise system call to give advice about the use of memory
+// when using a slice that is memory-mapped to a file. Set the readahead flag to
+// false if page references are expected in random order.
+func Madvise(b []byte, readahead bool) error {
+	return madvise(b, readahead)
+}
+
+// Msync would call sync on the mmapped data.
+func Msync(b []byte) error {
+	return msync(b)
+}
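+
+// Example (illustrative sketch): mapping an existing file read-only and hinting random access.
+// The path is arbitrary; error handling is elided.
+//
+//	fd, _ := os.Open("/tmp/data.bin")
+//	fi, _ := fd.Stat()
+//	data, err := z.Mmap(fd, false, fi.Size())
+//	if err == nil {
+//		_ = z.Madvise(data, false) // page references will be in random order
+//		// ... read from data ...
+//		_ = z.Munmap(data)
+//	}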
diff --git a/vendor/github.com/outcaste-io/ristretto/z/mmap_darwin.go b/vendor/github.com/outcaste-io/ristretto/z/mmap_darwin.go
new file mode 100644
index 0000000000..4d6d74f193
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/mmap_darwin.go
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"os"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// Mmap uses the mmap system call to memory-map a file. If writable is true,
+// memory protection of the pages is set so that they may be written to as well.
+func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
+	mtype := unix.PROT_READ
+	if writable {
+		mtype |= unix.PROT_WRITE
+	}
+	return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
+}
+
+// Munmap unmaps a previously mapped slice.
+func munmap(b []byte) error {
+	return unix.Munmap(b)
+}
+
+// This is required because the unix package does not support the madvise system call on OS X.
+func madvise(b []byte, readahead bool) error {
+	advice := unix.MADV_NORMAL
+	if !readahead {
+		advice = unix.MADV_RANDOM
+	}
+
+	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])),
+		uintptr(len(b)), uintptr(advice))
+	if e1 != 0 {
+		return e1
+	}
+	return nil
+}
+
+func msync(b []byte) error {
+	return unix.Msync(b, unix.MS_SYNC)
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/z/mmap_linux.go b/vendor/github.com/outcaste-io/ristretto/z/mmap_linux.go
new file mode 100644
index 0000000000..8843e4243b
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/mmap_linux.go
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2020 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"os"
+	"reflect"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// mmap uses the mmap system call to memory-map a file. If writable is true,
+// memory protection of the pages is set so that they may be written to as well.
+func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
+	mtype := unix.PROT_READ
+	if writable {
+		mtype |= unix.PROT_WRITE
+	}
+	return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
+}
+
+// mremap is a Linux-specific system call to remap pages in memory. This can be used in place of munmap + mmap.
+func mremap(data []byte, size int) ([]byte, error) {
+	// taken from <https://github.com/torvalds/linux/blob/f8394f232b1eab649ce2df5c5f15b0e528c92091/include/uapi/linux/mman.h#L8>
+	const MREMAP_MAYMOVE = 0x1
+
+	header := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+	mmapAddr, _, errno := unix.Syscall6(
+		unix.SYS_MREMAP,
+		header.Data,
+		uintptr(header.Len),
+		uintptr(size),
+		uintptr(MREMAP_MAYMOVE),
+		0,
+		0,
+	)
+	if errno != 0 {
+		return nil, errno
+	}
+
+	header.Data = mmapAddr
+	header.Cap = size
+	header.Len = size
+	return data, nil
+}
+
+// munmap unmaps a previously mapped slice.
+//
+// unix.Munmap maintains an internal list of mmapped addresses, and only calls munmap
+// if the address is present in that list. If we use mremap, this list is not updated.
+// To bypass this, we call munmap ourselves.
+func munmap(data []byte) error {
+	if len(data) == 0 || len(data) != cap(data) {
+		return unix.EINVAL
+	}
+	_, _, errno := unix.Syscall(
+		unix.SYS_MUNMAP,
+		uintptr(unsafe.Pointer(&data[0])),
+		uintptr(len(data)),
+		0,
+	)
+	if errno != 0 {
+		return errno
+	}
+	return nil
+}
+
+// madvise uses the madvise system call to give advice about the use of memory
+// when using a slice that is memory-mapped to a file. Set the readahead flag to
+// false if page references are expected in random order.
+func madvise(b []byte, readahead bool) error {
+	flags := unix.MADV_NORMAL
+	if !readahead {
+		flags = unix.MADV_RANDOM
+	}
+	return unix.Madvise(b, flags)
+}
+
+// msync writes any modified data to persistent storage.
+func msync(b []byte) error {
+	return unix.Msync(b, unix.MS_SYNC)
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/z/mmap_plan9.go b/vendor/github.com/outcaste-io/ristretto/z/mmap_plan9.go
new file mode 100644
index 0000000000..f30729654f
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/mmap_plan9.go
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2020 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"os"
+	"syscall"
+)
+
+// Mmap uses the mmap system call to memory-map a file. If writable is true,
+// memory protection of the pages is set so that they may be written to as well.
+func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
+	return nil, syscall.EPLAN9
+}
+
+// Munmap unmaps a previously mapped slice.
+func munmap(b []byte) error {
+	return syscall.EPLAN9
+}
+
+// Madvise uses the madvise system call to give advice about the use of memory
+// when using a slice that is memory-mapped to a file. Set the readahead flag to
+// false if page references are expected in random order.
+func madvise(b []byte, readahead bool) error {
+	return syscall.EPLAN9
+}
+
+func msync(b []byte) error {
+	return syscall.EPLAN9
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/z/mmap_unix.go b/vendor/github.com/outcaste-io/ristretto/z/mmap_unix.go
new file mode 100644
index 0000000000..e8b2699cf9
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/mmap_unix.go
@@ -0,0 +1,55 @@
+// +build !windows,!darwin,!plan9,!linux
+
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// Mmap uses the mmap system call to memory-map a file. If writable is true,
+// memory protection of the pages is set so that they may be written to as well.
+func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
+	mtype := unix.PROT_READ
+	if writable {
+		mtype |= unix.PROT_WRITE
+	}
+	return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
+}
+
+// Munmap unmaps a previously mapped slice.
+func munmap(b []byte) error {
+	return unix.Munmap(b)
+}
+
+// Madvise uses the madvise system call to give advice about the use of memory
+// when using a slice that is memory-mapped to a file. Set the readahead flag to
+// false if page references are expected in random order.
+func madvise(b []byte, readahead bool) error {
+	flags := unix.MADV_NORMAL
+	if !readahead {
+		flags = unix.MADV_RANDOM
+	}
+	return unix.Madvise(b, flags)
+}
+
+func msync(b []byte) error {
+	return unix.Msync(b, unix.MS_SYNC)
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/z/mmap_windows.go b/vendor/github.com/outcaste-io/ristretto/z/mmap_windows.go
new file mode 100644
index 0000000000..0ea6e9448e
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/mmap_windows.go
@@ -0,0 +1,95 @@
+// +build windows
+
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+func mmap(fd *os.File, write bool, size int64) ([]byte, error) {
+	protect := syscall.PAGE_READONLY
+	access := syscall.FILE_MAP_READ
+
+	if write {
+		protect = syscall.PAGE_READWRITE
+		access = syscall.FILE_MAP_WRITE
+	}
+	fi, err := fd.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	// On Windows, we cannot mmap a file beyond its actual size.
+	// So truncate the file to the size of the mmap.
+	if fi.Size() < size {
+		if err := fd.Truncate(size); err != nil {
+			return nil, fmt.Errorf("truncate: %s", err)
+		}
+	}
+
+	// Open a file mapping handle.
+	sizelo := uint32(size >> 32)
+	sizehi := uint32(size) & 0xffffffff
+
+	handler, err := syscall.CreateFileMapping(syscall.Handle(fd.Fd()), nil,
+		uint32(protect), sizelo, sizehi, nil)
+	if err != nil {
+		return nil, os.NewSyscallError("CreateFileMapping", err)
+	}
+
+	// Create the memory map.
+	addr, err := syscall.MapViewOfFile(handler, uint32(access), 0, 0, uintptr(size))
+	if addr == 0 {
+		return nil, os.NewSyscallError("MapViewOfFile", err)
+	}
+
+	// Close mapping handle.
+	if err := syscall.CloseHandle(syscall.Handle(handler)); err != nil {
+		return nil, os.NewSyscallError("CloseHandle", err)
+	}
+
+	// Slice memory layout
+	// Copied this snippet from golang/sys package
+	var sl = struct {
+		addr uintptr
+		len  int
+		cap  int
+	}{addr, int(size), int(size)}
+
+	// Use unsafe to turn sl into a []byte.
+	data := *(*[]byte)(unsafe.Pointer(&sl))
+
+	return data, nil
+}
+
+func munmap(b []byte) error {
+	return syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&b[0])))
+}
+
+func madvise(b []byte, readahead bool) error {
+	// Do Nothing. We don’t care about this setting on Windows
+	return nil
+}
+
+func msync(b []byte) error {
+	return syscall.FlushViewOfFile(uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/z/rtutil.go b/vendor/github.com/outcaste-io/ristretto/z/rtutil.go
new file mode 100644
index 0000000000..8f317c80d3
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/rtutil.go
@@ -0,0 +1,75 @@
+// MIT License
+
+// Copyright (c) 2019 Ewan Chou
+
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package z
+
+import (
+	"unsafe"
+)
+
+// NanoTime returns the current time in nanoseconds from a monotonic clock.
+//go:linkname NanoTime runtime.nanotime
+func NanoTime() int64
+
+// CPUTicks is a faster alternative to NanoTime to measure time duration.
+//go:linkname CPUTicks runtime.cputicks
+func CPUTicks() int64
+
+type stringStruct struct {
+	str unsafe.Pointer
+	len int
+}
+
+//go:noescape
+//go:linkname memhash runtime.memhash
+func memhash(p unsafe.Pointer, h, s uintptr) uintptr
+
+// MemHash is the hash function used by the Go map; it utilizes available hardware instructions
+// (behaves as aeshash if the AES instruction is available).
+// NOTE: The hash seed changes for every process. So, this cannot be used as a persistent hash.
+func MemHash(data []byte) uint64 {
+	ss := (*stringStruct)(unsafe.Pointer(&data))
+	return uint64(memhash(ss.str, 0, uintptr(ss.len)))
+}
+
+// MemHashString is the hash function used by the Go map; it utilizes available hardware
+// instructions (behaves as aeshash if the AES instruction is available).
+// NOTE: The hash seed changes for every process. So, this cannot be used as a persistent hash.
+func MemHashString(str string) uint64 {
+	ss := (*stringStruct)(unsafe.Pointer(&str))
+	return uint64(memhash(ss.str, 0, uintptr(ss.len)))
+}
+
+// FastRand is a fast thread local random function.
+//go:linkname FastRand runtime.fastrand
+func FastRand() uint32
+
+//go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers
+func memclrNoHeapPointers(p unsafe.Pointer, n uintptr)
+
+func Memclr(b []byte) {
+	if len(b) == 0 {
+		return
+	}
+	p := unsafe.Pointer(&b[0])
+	memclrNoHeapPointers(p, uintptr(len(b)))
+}
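+
+// Example (illustrative sketch): MemHash and MemHashString agree on identical bytes, but the
+// seed changes per process, so never persist these hashes.
+//
+//	h1 := z.MemHashString("some-key")
+//	h2 := z.MemHash([]byte("some-key"))
+//	// h1 == h2 within this process; both values differ on the next run.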
diff --git a/vendor/github.com/outcaste-io/ristretto/z/rtutil.s b/vendor/github.com/outcaste-io/ristretto/z/rtutil.s
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/vendor/github.com/outcaste-io/ristretto/z/simd/baseline.go b/vendor/github.com/outcaste-io/ristretto/z/simd/baseline.go
new file mode 100644
index 0000000000..967e3a307e
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/simd/baseline.go
@@ -0,0 +1,127 @@
+package simd
+
+import (
+	"fmt"
+	"runtime"
+	"sort"
+	"sync"
+)
+
+// Naive finds the index of the first pair whose key is >= k by scanning linearly.
+func Naive(xs []uint64, k uint64) int16 {
+	var i int
+	for i = 0; i < len(xs); i += 2 {
+		x := xs[i]
+		if x >= k {
+			return int16(i / 2)
+		}
+	}
+	return int16(i / 2)
+}
+
+func Clever(xs []uint64, k uint64) int16 {
+	if len(xs) < 8 {
+		return Naive(xs, k)
+	}
+	var twos, pk [4]uint64
+	pk[0] = k
+	pk[1] = k
+	pk[2] = k
+	pk[3] = k
+	for i := 0; i < len(xs); i += 8 {
+		twos[0] = xs[i]
+		twos[1] = xs[i+2]
+		twos[2] = xs[i+4]
+		twos[3] = xs[i+6]
+		if twos[0] >= pk[0] {
+			return int16(i / 2)
+		}
+		if twos[1] >= pk[1] {
+			return int16((i + 2) / 2)
+		}
+		if twos[2] >= pk[2] {
+			return int16((i + 4) / 2)
+		}
+		if twos[3] >= pk[3] {
+			return int16((i + 6) / 2)
+		}
+
+	}
+	return int16(len(xs) / 2)
+}
+
+func Parallel(xs []uint64, k uint64) int16 {
+	cpus := runtime.NumCPU()
+	if cpus%2 != 0 {
+		panic(fmt.Sprintf("odd number of CPUs %v", cpus))
+	}
+	sz := len(xs)/cpus + 1
+	var wg sync.WaitGroup
+	retChan := make(chan int16, cpus)
+	for i := 0; i < len(xs); i += sz {
+		end := i + sz
+		if end >= len(xs) {
+			end = len(xs)
+		}
+		chunk := xs[i:end]
+		wg.Add(1)
+		go func(hd int16, xs []uint64, k uint64, wg *sync.WaitGroup, ch chan int16) {
+			for i := 0; i < len(xs); i += 2 {
+				if xs[i] >= k {
+					ch <- (int16(i) + hd) / 2
+					break
+				}
+			}
+			wg.Done()
+		}(int16(i), chunk, k, &wg, retChan)
+	}
+	wg.Wait()
+	close(retChan)
+	var min int16 = (1 << 15) - 1
+	for i := range retChan {
+		if i < min {
+			min = i
+		}
+	}
+	if min == (1<<15)-1 {
+		return int16(len(xs) / 2)
+	}
+	return min
+}
+
+func Binary(keys []uint64, key uint64) int16 {
+	return int16(sort.Search(len(keys), func(i int) bool {
+		if i*2 >= len(keys) {
+			return true
+		}
+		return keys[i*2] >= key
+	}))
+}
+
+func cmp2_native(twos, pk [2]uint64) int16 {
+	if twos[0] == pk[0] {
+		return 0
+	}
+	if twos[1] == pk[1] {
+		return 1
+	}
+	return 2
+}
+
+func cmp4_native(fours, pk [4]uint64) int16 {
+	for i := range fours {
+		if fours[i] >= pk[i] {
+			return int16(i)
+		}
+	}
+	return 4
+}
+
+func cmp8_native(a [8]uint64, pk [4]uint64) int16 {
+	for i := range a {
+		if a[i] >= pk[0] {
+			return int16(i)
+		}
+	}
+	return 8
+}
diff --git a/vendor/github.com/outcaste-io/ristretto/z/simd/search.go b/vendor/github.com/outcaste-io/ristretto/z/simd/search.go
new file mode 100644
index 0000000000..b1e639225a
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/simd/search.go
@@ -0,0 +1,51 @@
+// +build !amd64
+
+/*
+ * Copyright 2020 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package simd
+
+// Search uses the Clever search to find the correct key.
+func Search(xs []uint64, k uint64) int16 {
+	if len(xs) < 8 || (len(xs) % 8 != 0) {
+		return Naive(xs, k)
+	}
+	var twos, pk [4]uint64
+	pk[0] = k
+	pk[1] = k
+	pk[2] = k
+	pk[3] = k
+	for i := 0; i < len(xs); i += 8 {
+		twos[0] = xs[i]
+		twos[1] = xs[i+2]
+		twos[2] = xs[i+4]
+		twos[3] = xs[i+6]
+		if twos[0] >= pk[0] {
+			return int16(i / 2)
+		}
+		if twos[1] >= pk[1] {
+			return int16((i + 2) / 2)
+		}
+		if twos[2] >= pk[2] {
+			return int16((i + 4) / 2)
+		}
+		if twos[3] >= pk[3] {
+			return int16((i + 6) / 2)
+		}
+
+	}
+	return int16(len(xs) / 2)
+}
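+
+// Example (illustrative sketch): judging by the stride-2 scan and the i/2 return value, xs is
+// laid out as (key, value) uint64 pairs sorted by key, and Search returns the index of the first
+// pair whose key is >= k. The sample values are arbitrary.
+//
+//	pairs := []uint64{10, 0xa, 20, 0xb, 30, 0xc, 40, 0xd} // four (key, value) pairs
+//	simd.Search(pairs, 25) // == 2, the pair (30, 0xc)
+//	simd.Search(pairs, 50) // == 4, i.e. past the last pair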
diff --git a/vendor/github.com/outcaste-io/ristretto/z/simd/search_amd64.s b/vendor/github.com/outcaste-io/ristretto/z/simd/search_amd64.s
new file mode 100644
index 0000000000..150c846647
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/simd/search_amd64.s
@@ -0,0 +1,60 @@
+// Code generated by command: go run asm2.go -out search_amd64.s -stubs stub_search_amd64.go. DO NOT EDIT.
+
+#include "textflag.h"
+
+// func Search(xs []uint64, k uint64) int16
+TEXT ·Search(SB), NOSPLIT, $0-34
+	MOVQ xs_base+0(FP), AX
+	MOVQ xs_len+8(FP), CX
+	MOVQ k+24(FP), DX
+
+	// Save n
+	MOVQ CX, BX
+
+	// Initialize idx register to zero.
+	XORL BP, BP
+
+loop:
+	// Unroll1
+	CMPQ (AX)(BP*8), DX
+	JAE  Found
+
+	// Unroll2
+	CMPQ 16(AX)(BP*8), DX
+	JAE  Found2
+
+	// Unroll3
+	CMPQ 32(AX)(BP*8), DX
+	JAE  Found3
+
+	// Unroll4
+	CMPQ 48(AX)(BP*8), DX
+	JAE  Found4
+
+	// plus8
+	ADDQ $0x08, BP
+	CMPQ BP, CX
+	JB   loop
+	JMP  NotFound
+
+Found2:
+	ADDL $0x02, BP
+	JMP  Found
+
+Found3:
+	ADDL $0x04, BP
+	JMP  Found
+
+Found4:
+	ADDL $0x06, BP
+
+Found:
+	MOVL BP, BX
+
+NotFound:
+	MOVL BX, BP
+	SHRL $0x1f, BP
+	ADDL BX, BP
+	SHRL $0x01, BP
+	MOVL BP, ret+32(FP)
+	RET
diff --git a/vendor/github.com/outcaste-io/ristretto/z/simd/stub_search_amd64.go b/vendor/github.com/outcaste-io/ristretto/z/simd/stub_search_amd64.go
new file mode 100644
index 0000000000..0821d38a77
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/simd/stub_search_amd64.go
@@ -0,0 +1,6 @@
+// Code generated by command: go run asm2.go -out search_amd64.s -stubs stub_search_amd64.go. DO NOT EDIT.
+
+package simd
+
+// Search finds the first idx for which xs[idx] >= k in xs.
+func Search(xs []uint64, k uint64) int16
diff --git a/vendor/github.com/outcaste-io/ristretto/z/z.go b/vendor/github.com/outcaste-io/ristretto/z/z.go
new file mode 100644
index 0000000000..45c15cdcd8
--- /dev/null
+++ b/vendor/github.com/outcaste-io/ristretto/z/z.go
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package z
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"sync"
+
+	"github.com/cespare/xxhash/v2"
+)
+
+// TODO: Figure out a way to re-use memhash for the second uint64 hash, we
+//       already know that appending bytes isn't reliable for generating a
+//       second hash (see Ristretto PR #88).
+//
+//       We also know that while the Go runtime has a runtime memhash128
+//       function, it's not possible to use it to generate [2]uint64 or
+//       anything resembling a 128bit hash, even though that's exactly what
+//       we need in this situation.
+func KeyToHash(key interface{}) (uint64, uint64) {
+	if key == nil {
+		return 0, 0
+	}
+	switch k := key.(type) {
+	case uint64:
+		return k, 0
+	case string:
+		return MemHashString(k), xxhash.Sum64String(k)
+	case []byte:
+		return MemHash(k), xxhash.Sum64(k)
+	case byte:
+		return uint64(k), 0
+	case int:
+		return uint64(k), 0
+	case int32:
+		return uint64(k), 0
+	case uint32:
+		return uint64(k), 0
+	case int64:
+		return uint64(k), 0
+	default:
+		panic("Key type not supported")
+	}
+}
+
+var (
+	dummyCloserChan <-chan struct{}
+	tmpDir          string
+)
+
+// Closer holds the two things we need to close a goroutine and wait for it to
+// finish: a chan to tell the goroutine to shut down, and a WaitGroup with
+// which to wait for it to finish shutting down.
+type Closer struct {
+	waiting sync.WaitGroup
+
+	ctx    context.Context
+	cancel context.CancelFunc
+}
+
+// SetTmpDir sets the temporary directory for the temporary buffers.
+func SetTmpDir(dir string) {
+	tmpDir = dir
+}
+
+// NewCloser constructs a new Closer, with an initial count on the WaitGroup.
+func NewCloser(initial int) *Closer {
+	ret := &Closer{}
+	ret.ctx, ret.cancel = context.WithCancel(context.Background())
+	ret.waiting.Add(initial)
+	return ret
+}
+
+// AddRunning adds delta to the WaitGroup (i.e. calls Add(delta) on it).
+func (lc *Closer) AddRunning(delta int) {
+	lc.waiting.Add(delta)
+}
+
+// Ctx can be used to get a context, which would automatically get cancelled when Signal is called.
+func (lc *Closer) Ctx() context.Context {
+	if lc == nil {
+		return context.Background()
+	}
+	return lc.ctx
+}
+
+// Signal signals the HasBeenClosed signal.
+func (lc *Closer) Signal() {
+	// Todo(ibrahim): Change Signal to return error on next badger breaking change.
+	lc.cancel()
+}
+
+// HasBeenClosed gets signaled when Signal() is called.
+func (lc *Closer) HasBeenClosed() <-chan struct{} {
+	if lc == nil {
+		return dummyCloserChan
+	}
+	return lc.ctx.Done()
+}
+
+// Done calls Done() on the WaitGroup.
+func (lc *Closer) Done() {
+	if lc == nil {
+		return
+	}
+	lc.waiting.Done()
+}
+
+// Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done
+// calls to balance out.)
+func (lc *Closer) Wait() {
+	lc.waiting.Wait()
+}
+
+// SignalAndWait calls Signal(), then Wait().
+func (lc *Closer) SignalAndWait() {
+	lc.Signal()
+	lc.Wait()
+}
+
+// ZeroOut zeroes out all the bytes in the range [start, end).
+func ZeroOut(dst []byte, start, end int) {
+	if start < 0 || start >= len(dst) {
+		return // BAD
+	}
+	if end >= len(dst) {
+		end = len(dst)
+	}
+	if end-start <= 0 {
+		return
+	}
+	Memclr(dst[start:end])
+	// b := dst[start:end]
+	// for i := range b {
+	// 	b[i] = 0x0
+	// }
+}
+
+func fatal(args ...interface{}) {
+	defer os.Exit(1)
+	panic(fmt.Sprint(args...))
+}
+
+func fatalf(format string, args ...interface{}) {
+	defer os.Exit(1)
+	panic(fmt.Sprintf(format, args...))
+}
diff --git a/vendor/github.com/philhofer/fwd/LICENSE.md b/vendor/github.com/philhofer/fwd/LICENSE.md
new file mode 100644
index 0000000000..1ac6a81f6a
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/LICENSE.md
@@ -0,0 +1,7 @@
+Copyright (c) 2014-2015, Philip Hofer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/philhofer/fwd/README.md b/vendor/github.com/philhofer/fwd/README.md
new file mode 100644
index 0000000000..62bd5c6d0d
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/README.md
@@ -0,0 +1,359 @@
+
+# fwd
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/philhofer/fwd.svg)](https://pkg.go.dev/github.com/philhofer/fwd)
+
+
+`import "github.com/philhofer/fwd"`
+
+* [Overview](#pkg-overview)
+* [Index](#pkg-index)
+
+## <a name="pkg-overview">Overview</a>
+Package fwd provides a buffered reader
+and writer. Each has methods that help improve
+the encoding/decoding performance of some binary
+protocols.
+
+The `Writer` and `Reader` types provide similar
+functionality to their counterparts in `bufio`, plus
+a few extra utility methods that simplify read-ahead
+and write-ahead. I wrote this package to improve serialization
+performance for [github.com/tinylib/msgp](https://github.com/tinylib/msgp),
+where it provided about a 2x speedup over `bufio` for certain
+workloads. However, care must be taken to understand the semantics of the
+extra methods provided by this package, as they allow
+the user to access and manipulate the buffer memory
+directly.
+
+The extra methods for `fwd.Reader` are `Peek`, `Skip`
+and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`,
+will re-allocate the read buffer in order to accommodate arbitrarily
+large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes
+in the stream, and uses the `io.Seeker` interface if the underlying
+stream implements it. `(*fwd.Reader).Next` returns a slice pointing
+to the next `n` bytes in the read buffer (like `Peek`), but also
+increments the read position. This allows users to process streams
+in arbitrary block sizes without having to manage appropriately-sized
+slices. Additionally, obviating the need to copy the data from the
+buffer to another location in memory can improve performance dramatically
+in CPU-bound applications.
+
+`fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which
+returns a slice pointing to the next `n` bytes of the writer, and increments
+the write position by the length of the returned slice. This allows users
+to write directly to the end of the buffer.
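+
+As a quick illustration, here is a minimal, hypothetical sketch of the read-ahead
+API described above (it assumes `io` and `github.com/philhofer/fwd` are imported;
+the frame sizes and the `src` reader are placeholders, not part of this package):
+
+``` go
+func readFrames(src io.Reader) error {
+	r := fwd.NewReader(src)
+
+	// Peek at a 4-byte header without consuming it.
+	hdr, err := r.Peek(4)
+	if err != nil {
+		return err
+	}
+	_ = hdr
+
+	// Next returns the following 16 bytes and advances the reader;
+	// the slice is only valid until the next reader method call.
+	frame, err := r.Next(16)
+	if err != nil {
+		return err
+	}
+	_ = frame
+
+	// Skip 8 bytes of padding; uses io.Seeker when src supports it.
+	_, err = r.Skip(8)
+	return err
+}
+```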
+
+
+
+
+## <a name="pkg-index">Index</a>
+* [Constants](#pkg-constants)
+* [type Reader](#Reader)
+  * [func NewReader(r io.Reader) *Reader](#NewReader)
+  * [func NewReaderBuf(r io.Reader, buf []byte) *Reader](#NewReaderBuf)
+  * [func NewReaderSize(r io.Reader, n int) *Reader](#NewReaderSize)
+  * [func (r *Reader) BufferSize() int](#Reader.BufferSize)
+  * [func (r *Reader) Buffered() int](#Reader.Buffered)
+  * [func (r *Reader) Next(n int) ([]byte, error)](#Reader.Next)
+  * [func (r *Reader) Peek(n int) ([]byte, error)](#Reader.Peek)
+  * [func (r *Reader) Read(b []byte) (int, error)](#Reader.Read)
+  * [func (r *Reader) ReadByte() (byte, error)](#Reader.ReadByte)
+  * [func (r *Reader) ReadFull(b []byte) (int, error)](#Reader.ReadFull)
+  * [func (r *Reader) Reset(rd io.Reader)](#Reader.Reset)
+  * [func (r *Reader) Skip(n int) (int, error)](#Reader.Skip)
+  * [func (r *Reader) WriteTo(w io.Writer) (int64, error)](#Reader.WriteTo)
+* [type Writer](#Writer)
+  * [func NewWriter(w io.Writer) *Writer](#NewWriter)
+  * [func NewWriterBuf(w io.Writer, buf []byte) *Writer](#NewWriterBuf)
+  * [func NewWriterSize(w io.Writer, n int) *Writer](#NewWriterSize)
+  * [func (w *Writer) BufferSize() int](#Writer.BufferSize)
+  * [func (w *Writer) Buffered() int](#Writer.Buffered)
+  * [func (w *Writer) Flush() error](#Writer.Flush)
+  * [func (w *Writer) Next(n int) ([]byte, error)](#Writer.Next)
+  * [func (w *Writer) ReadFrom(r io.Reader) (int64, error)](#Writer.ReadFrom)
+  * [func (w *Writer) Write(p []byte) (int, error)](#Writer.Write)
+  * [func (w *Writer) WriteByte(b byte) error](#Writer.WriteByte)
+  * [func (w *Writer) WriteString(s string) (int, error)](#Writer.WriteString)
+
+
+## <a name="pkg-constants">Constants</a>
+``` go
+const (
+    // DefaultReaderSize is the default size of the read buffer
+    DefaultReaderSize = 2048
+)
+```
+``` go
+const (
+    // DefaultWriterSize is the
+    // default write buffer size.
+    DefaultWriterSize = 2048
+)
+```
+
+
+
+## type Reader
+``` go
+type Reader struct {
+    // contains filtered or unexported fields
+}
+```
+Reader is a buffered look-ahead reader
+
+
+
+
+
+
+
+
+
+### func NewReader
+``` go
+func NewReader(r io.Reader) *Reader
+```
+NewReader returns a new *Reader that reads from 'r'
+
+
+### func NewReaderSize
+``` go
+func NewReaderSize(r io.Reader, n int) *Reader
+```
+NewReaderSize returns a new *Reader that
+reads from 'r' and has a buffer size 'n'
+
+
+
+
+### func (\*Reader) BufferSize
+``` go
+func (r *Reader) BufferSize() int
+```
+BufferSize returns the total size of the buffer
+
+
+
+### func (\*Reader) Buffered
+``` go
+func (r *Reader) Buffered() int
+```
+Buffered returns the number of bytes currently in the buffer
+
+
+
+### func (\*Reader) Next
+``` go
+func (r *Reader) Next(n int) ([]byte, error)
+```
+Next returns the next 'n' bytes in the stream.
+Unlike Peek, Next advances the reader position.
+The returned bytes point to the same
+data as the buffer, so the slice is
+only valid until the next reader method call.
+An EOF is considered an unexpected error.
+If the returned slice is shorter than the
+length asked for, an error will be returned,
+and the reader position will not be incremented.
+
+
+
+### <a name="Reader.Peek">func</a> (\*Reader) Peek
+``` go
+func (r *Reader) Peek(n int) ([]byte, error)
+```
+Peek returns the next 'n' buffered bytes,
+reading from the underlying reader if necessary.
+It will only return a slice shorter than 'n' bytes
+if it also returns an error. Peek does not advance
+the reader. EOF errors are *not* returned as
+io.ErrUnexpectedEOF.
+
+
+
+### <a name="Reader.Read">func</a> (\*Reader) Read
+``` go
+func (r *Reader) Read(b []byte) (int, error)
+```
+Read implements `io.Reader`.
+
+
+
+### <a name="Reader.ReadByte">func</a> (\*Reader) ReadByte
+``` go
+func (r *Reader) ReadByte() (byte, error)
+```
+ReadByte implements `io.ByteReader`.
+
+
+
+### <a name="Reader.ReadFull">func</a> (\*Reader) ReadFull
+``` go
+func (r *Reader) ReadFull(b []byte) (int, error)
+```
+ReadFull attempts to read len(b) bytes into
+'b'. It returns the number of bytes read into
+'b', and an error if it does not return len(b).
+EOF is considered an unexpected error.
+
+
+
+### <a name="Reader.Reset">func</a> (\*Reader) Reset
+``` go
+func (r *Reader) Reset(rd io.Reader)
+```
+Reset resets the underlying reader
+and the read buffer.
+
+
+
+### <a name="Reader.Skip">func</a> (\*Reader) Skip
+``` go
+func (r *Reader) Skip(n int) (int, error)
+```
+Skip moves the reader forward 'n' bytes.
+Returns the number of bytes skipped and any
+errors encountered. It is analogous to Seek(n, 1).
+If the underlying reader implements io.Seeker, then
+that method will be used to skip forward.
+
+If the reader encounters
+an EOF before skipping 'n' bytes, it
+returns `io.ErrUnexpectedEOF`. If the
+underlying reader implements `io.Seeker`, then
+those rules apply instead. (Many implementations
+will not return `io.EOF` until the next call
+to Read).
+
+
+
+
+### <a name="Reader.WriteTo">func</a> (\*Reader) WriteTo
+``` go
+func (r *Reader) WriteTo(w io.Writer) (int64, error)
+```
+WriteTo implements `io.WriterTo`.
+
+
+
+
+## <a name="Writer">type</a> Writer
+``` go
+type Writer struct {
+    // contains filtered or unexported fields
+}
+
+```
+Writer is a buffered writer
+
+
+
+
+
+
+
+### <a name="NewWriter">func</a> NewWriter
+``` go
+func NewWriter(w io.Writer) *Writer
+```
+NewWriter returns a new writer
+that writes to 'w' and has a buffer
+that is `DefaultWriterSize` bytes.
+
+
+### <a name="NewWriterBuf">func</a> NewWriterBuf
+``` go
+func NewWriterBuf(w io.Writer, buf []byte) *Writer
+```
+NewWriterBuf returns a new writer
+that writes to 'w' and has 'buf' as a buffer.
+'buf' is not used when it has a smaller capacity than 16;
+a custom buffer is allocated instead.
+
+
+### <a name="NewWriterSize">func</a> NewWriterSize
+``` go
+func NewWriterSize(w io.Writer, n int) *Writer
+```
+NewWriterSize returns a new writer that
+writes to 'w' and has a buffer size 'n'.
+
+### <a name="Writer.BufferSize">func</a> (\*Writer) BufferSize
+``` go
+func (w *Writer) BufferSize() int
+```
+BufferSize returns the maximum size of the buffer.
+
+
+
+### <a name="Writer.Buffered">func</a> (\*Writer) Buffered
+``` go
+func (w *Writer) Buffered() int
+```
+Buffered returns the number of buffered bytes
+in the writer.
+
+
+
+### <a name="Writer.Flush">func</a> (\*Writer) Flush
+``` go
+func (w *Writer) Flush() error
+```
+Flush flushes any buffered bytes
+to the underlying writer.
+
+
+
+### <a name="Writer.Next">func</a> (\*Writer) Next
+``` go
+func (w *Writer) Next(n int) ([]byte, error)
+```
+Next returns the next 'n' free bytes
+in the write buffer, flushing the writer
+as necessary. Next will return `io.ErrShortBuffer`
+if 'n' is greater than the size of the write buffer.
+Calls to 'next' increment the write position by
+the size of the returned buffer.
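+
+A minimal, hypothetical sketch of this pattern (assuming `encoding/binary` and
+`github.com/philhofer/fwd` are imported; the 8-byte header value is only a placeholder):
+
+``` go
+func writeHeader(w *fwd.Writer) error {
+	// Reserve 8 bytes directly in the write buffer and fill them
+	// in place, avoiding an intermediate copy.
+	buf, err := w.Next(8)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint64(buf, 0xCAFE)
+	return w.Flush()
+}
+```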
+
+
+
+### <a name="Writer.ReadFrom">func</a> (\*Writer) ReadFrom
+``` go
+func (w *Writer) ReadFrom(r io.Reader) (int64, error)
+```
+ReadFrom implements `io.ReaderFrom`
+
+
+
+### <a name="Writer.Write">func</a> (\*Writer) Write
+``` go
+func (w *Writer) Write(p []byte) (int, error)
+```
+Write implements `io.Writer`
+
+
+
+### <a name="Writer.WriteByte">func</a> (\*Writer) WriteByte
+``` go
+func (w *Writer) WriteByte(b byte) error
+```
+WriteByte implements `io.ByteWriter`
+
+
+
+### <a name="Writer.WriteString">func</a> (\*Writer) WriteString
+``` go
+func (w *Writer) WriteString(s string) (int, error)
+```
+WriteString is analogous to Write, but it takes a string.
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](https://github.com/davecheney/godoc2md)
diff --git a/vendor/github.com/philhofer/fwd/reader.go b/vendor/github.com/philhofer/fwd/reader.go
new file mode 100644
index 0000000000..7c21f8fb44
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/reader.go
@@ -0,0 +1,383 @@
+// Package fwd provides a buffered reader
+// and writer. Each has methods that help improve
+// the encoding/decoding performance of some binary
+// protocols.
+//
+// The [Writer] and [Reader] types provide similar
+// functionality to their counterparts in [bufio], plus
+// a few extra utility methods that simplify read-ahead
+// and write-ahead. I wrote this package to improve serialization
+// performance for http://github.com/tinylib/msgp,
+// where it provided about a 2x speedup over `bufio` for certain
+// workloads. However, care must be taken to understand the semantics of the
+// extra methods provided by this package, as they allow
+// the user to access and manipulate the buffer memory
+// directly.
+//
+// The extra methods for [Reader] are [Reader.Peek], [Reader.Skip]
+// and [Reader.Next]. (*fwd.Reader).Peek, unlike (*bufio.Reader).Peek,
+// will re-allocate the read buffer in order to accommodate arbitrarily
+// large read-ahead. (*fwd.Reader).Skip skips the next 'n' bytes
+// in the stream, and uses the [io.Seeker] interface if the underlying
+// stream implements it. (*fwd.Reader).Next returns a slice pointing
+// to the next 'n' bytes in the read buffer (like Reader.Peek), but also
+// increments the read position. This allows users to process streams
+// in arbitrary block sizes without having to manage appropriately-sized
+// slices. Additionally, obviating the need to copy the data from the
+// buffer to another location in memory can improve performance dramatically
+// in CPU-bound applications.
+//
+// [Writer] only has one extra method, which is (*fwd.Writer).Next, which
+// returns a slice pointing to the next 'n' bytes of the writer, and increments
+// the write position by the length of the returned slice. This allows users
+// to write directly to the end of the buffer.
+package fwd
+
+import (
+	"io"
+	"os"
+)
+
+const (
+	// DefaultReaderSize is the default size of the read buffer
+	DefaultReaderSize = 2048
+
+	// minimum read buffer; straight from bufio
+	minReaderSize = 16
+)
+
+// NewReader returns a new *Reader that reads from 'r'
+func NewReader(r io.Reader) *Reader {
+	return NewReaderSize(r, DefaultReaderSize)
+}
+
+// NewReaderSize returns a new *Reader that
+// reads from 'r' and has a buffer size 'n'.
+func NewReaderSize(r io.Reader, n int) *Reader {
+	buf := make([]byte, 0, max(n, minReaderSize))
+	return NewReaderBuf(r, buf)
+}
+
+// NewReaderBuf returns a new *Reader that
+// reads from 'r' and uses 'buf' as a buffer.
+// 'buf' is not used when it has a smaller capacity than 16;
+// a custom buffer is allocated instead.
+func NewReaderBuf(r io.Reader, buf []byte) *Reader {
+	if cap(buf) < minReaderSize {
+		buf = make([]byte, 0, minReaderSize)
+	}
+	buf = buf[:0]
+	rd := &Reader{
+		r:    r,
+		data: buf,
+	}
+	if s, ok := r.(io.Seeker); ok {
+		rd.rs = s
+	}
+	return rd
+}
+
+// Reader is a buffered look-ahead reader
+type Reader struct {
+	r io.Reader // underlying reader
+
+	// data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space
+	data  []byte // data
+	n     int    // read offset
+	state error  // last read error
+
+	// if the reader passed to NewReader was
+	// also an io.Seeker, this is non-nil
+	rs io.Seeker
+}
+
+// Reset resets the underlying reader
+// and the read buffer.
+func (r *Reader) Reset(rd io.Reader) {
+	r.r = rd
+	r.data = r.data[0:0]
+	r.n = 0
+	r.state = nil
+	if s, ok := rd.(io.Seeker); ok {
+		r.rs = s
+	} else {
+		r.rs = nil
+	}
+}
+
+// more() does one read on the underlying reader
+func (r *Reader) more() {
+	// move data backwards so that
+	// the read offset is 0; this way
+	// we can supply the maximum number of
+	// bytes to the reader
+	if r.n != 0 {
+		if r.n < len(r.data) {
+			r.data = r.data[:copy(r.data[0:], r.data[r.n:])]
+		} else {
+			r.data = r.data[:0]
+		}
+		r.n = 0
+	}
+	var a int
+	a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)])
+	if a == 0 && r.state == nil {
+		r.state = io.ErrNoProgress
+		return
+	} else if a > 0 && r.state == io.EOF {
+		// discard the io.EOF if we read more than 0 bytes.
+		// the next call to Read should return io.EOF again.
+		r.state = nil
+	} else if r.state != nil {
+		return
+	}
+	r.data = r.data[:len(r.data)+a]
+}
+
+// pop error
+func (r *Reader) err() (e error) {
+	e, r.state = r.state, nil
+	return
+}
+
+// pop error; EOF -> io.ErrUnexpectedEOF
+func (r *Reader) noEOF() (e error) {
+	e, r.state = r.state, nil
+	if e == io.EOF {
+		e = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+// buffered bytes
+func (r *Reader) buffered() int { return len(r.data) - r.n }
+
+// Buffered returns the number of bytes currently in the buffer
+func (r *Reader) Buffered() int { return len(r.data) - r.n }
+
+// BufferSize returns the total size of the buffer
+func (r *Reader) BufferSize() int { return cap(r.data) }
+
+// Peek returns the next 'n' buffered bytes,
+// reading from the underlying reader if necessary.
+// It will only return a slice shorter than 'n' bytes
+// if it also returns an error. Peek does not advance
+// the reader. EOF errors are *not* returned as
+// io.ErrUnexpectedEOF.
+func (r *Reader) Peek(n int) ([]byte, error) {
+	// in the degenerate case,
+	// we may need to realloc
+	// (the caller asked for more
+	// bytes than the size of the buffer)
+	if cap(r.data) < n {
+		old := r.data[r.n:]
+		r.data = make([]byte, n+r.buffered())
+		r.data = r.data[:copy(r.data, old)]
+		r.n = 0
+	}
+
+	// keep filling until
+	// we hit an error or
+	// read enough bytes
+	for r.buffered() < n && r.state == nil {
+		r.more()
+	}
+
+	// we must have hit an error
+	if r.buffered() < n {
+		return r.data[r.n:], r.err()
+	}
+
+	return r.data[r.n : r.n+n], nil
+}
+
+// discard(n) discards up to 'n' buffered bytes and
+// returns the number of bytes discarded
+func (r *Reader) discard(n int) int {
+	inbuf := r.buffered()
+	if inbuf <= n {
+		r.n = 0
+		r.data = r.data[:0]
+		return inbuf
+	}
+	r.n += n
+	return n
+}
+
+// Skip moves the reader forward 'n' bytes.
+// Returns the number of bytes skipped and any
+// errors encountered. It is analogous to Seek(n, 1).
+// If the underlying reader implements io.Seeker, then
+// that method will be used to skip forward.
+//
+// If the reader encounters
+// an EOF before skipping 'n' bytes, it
+// returns [io.ErrUnexpectedEOF]. If the
+// underlying reader implements [io.Seeker], then
+// those rules apply instead. (Many implementations
+// will not return [io.EOF] until the next call
+// to Read).
+func (r *Reader) Skip(n int) (int, error) {
+	if n < 0 {
+		return 0, os.ErrInvalid
+	}
+
+	// discard some or all of the current buffer
+	skipped := r.discard(n)
+
+	// if we can Seek() through the remaining bytes, do that
+	if n > skipped && r.rs != nil {
+		nn, err := r.rs.Seek(int64(n-skipped), 1)
+		return int(nn) + skipped, err
+	}
+	// otherwise, keep filling the buffer
+	// and discarding it up to 'n'
+	for skipped < n && r.state == nil {
+		r.more()
+		skipped += r.discard(n - skipped)
+	}
+	return skipped, r.noEOF()
+}
+
+// Next returns the next 'n' bytes in the stream.
+// Unlike Peek, Next advances the reader position.
+// The returned bytes point to the same
+// data as the buffer, so the slice is
+// only valid until the next reader method call.
+// An EOF is considered an unexpected error.
+// If the returned slice is shorter than the
+// length asked for, an error will be returned,
+// and the reader position will not be incremented.
+func (r *Reader) Next(n int) ([]byte, error) {
+	// in case the buffer is too small
+	if cap(r.data) < n {
+		old := r.data[r.n:]
+		r.data = make([]byte, n+r.buffered())
+		r.data = r.data[:copy(r.data, old)]
+		r.n = 0
+	}
+
+	// fill at least 'n' bytes
+	for r.buffered() < n && r.state == nil {
+		r.more()
+	}
+
+	if r.buffered() < n {
+		return r.data[r.n:], r.noEOF()
+	}
+	out := r.data[r.n : r.n+n]
+	r.n += n
+	return out, nil
+}
+
+// Read implements [io.Reader].
+func (r *Reader) Read(b []byte) (int, error) {
+	// if we have data in the buffer, just
+	// return that.
+	if r.buffered() != 0 {
+		x := copy(b, r.data[r.n:])
+		r.n += x
+		return x, nil
+	}
+	var n int
+	// we have no buffered data; determine
+	// whether or not to buffer or call
+	// the underlying reader directly
+	if len(b) >= cap(r.data) {
+		n, r.state = r.r.Read(b)
+	} else {
+		r.more()
+		n = copy(b, r.data)
+		r.n = n
+	}
+	if n == 0 {
+		return 0, r.err()
+	}
+	return n, nil
+}
+
+// ReadFull attempts to read len(b) bytes into
+// 'b'. It returns the number of bytes read into
+// 'b', and an error if it does not return len(b).
+// EOF is considered an unexpected error.
+func (r *Reader) ReadFull(b []byte) (int, error) {
+	var n int  // read into b
+	var nn int // scratch
+	l := len(b)
+	// either read buffered data,
+	// or read directly for the underlying
+	// buffer, or fetch more buffered data.
+	for n < l && r.state == nil {
+		if r.buffered() != 0 {
+			nn = copy(b[n:], r.data[r.n:])
+			n += nn
+			r.n += nn
+		} else if l-n > cap(r.data) {
+			nn, r.state = r.r.Read(b[n:])
+			n += nn
+		} else {
+			r.more()
+		}
+	}
+	if n < l {
+		return n, r.noEOF()
+	}
+	return n, nil
+}
+
+// ReadByte implements [io.ByteReader].
+func (r *Reader) ReadByte() (byte, error) {
+	for r.buffered() < 1 && r.state == nil {
+		r.more()
+	}
+	if r.buffered() < 1 {
+		return 0, r.err()
+	}
+	b := r.data[r.n]
+	r.n++
+	return b, nil
+}
+
+// WriteTo implements [io.WriterTo].
+func (r *Reader) WriteTo(w io.Writer) (int64, error) {
+	var (
+		i   int64
+		ii  int
+		err error
+	)
+	// first, clear buffer
+	if r.buffered() > 0 {
+		ii, err = w.Write(r.data[r.n:])
+		i += int64(ii)
+		if err != nil {
+			return i, err
+		}
+		r.data = r.data[0:0]
+		r.n = 0
+	}
+	for r.state == nil {
+		// here we just do
+		// 1:1 reads and writes
+		r.more()
+		if r.buffered() > 0 {
+			ii, err = w.Write(r.data)
+			i += int64(ii)
+			if err != nil {
+				return i, err
+			}
+			r.data = r.data[0:0]
+			r.n = 0
+		}
+	}
+	if r.state != io.EOF {
+		return i, r.err()
+	}
+	return i, nil
+}
+
+func max(a int, b int) int {
+	if a < b {
+		return b
+	}
+	return a
+}
diff --git a/vendor/github.com/philhofer/fwd/writer.go b/vendor/github.com/philhofer/fwd/writer.go
new file mode 100644
index 0000000000..4d6ea15b33
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/writer.go
@@ -0,0 +1,236 @@
+package fwd
+
+import "io"
+
+const (
+	// DefaultWriterSize is the
+	// default write buffer size.
+	DefaultWriterSize = 2048
+
+	minWriterSize = minReaderSize
+)
+
+// Writer is a buffered writer
+type Writer struct {
+	w   io.Writer // writer
+	buf []byte    // 0:len(buf) is buffered data
+}
+
+// NewWriter returns a new writer
+// that writes to 'w' and has a buffer
+// that is `DefaultWriterSize` bytes.
+func NewWriter(w io.Writer) *Writer {
+	if wr, ok := w.(*Writer); ok {
+		return wr
+	}
+	return &Writer{
+		w:   w,
+		buf: make([]byte, 0, DefaultWriterSize),
+	}
+}
+
+// NewWriterSize returns a new writer that
+// writes to 'w' and has a buffer size 'n'.
+func NewWriterSize(w io.Writer, n int) *Writer {
+	if wr, ok := w.(*Writer); ok && cap(wr.buf) >= n {
+		return wr
+	}
+	buf := make([]byte, 0, max(n, minWriterSize))
+	return NewWriterBuf(w, buf)
+}
+
+// NewWriterBuf returns a new writer
+// that writes to 'w' and has 'buf' as a buffer.
+// 'buf' is not used when it has a smaller capacity than 16;
+// a custom buffer is allocated instead.
+func NewWriterBuf(w io.Writer, buf []byte) *Writer {
+	if cap(buf) < minWriterSize {
+		buf = make([]byte, 0, minWriterSize)
+	}
+	buf = buf[:0]
+	return &Writer{
+		w:   w,
+		buf: buf,
+	}
+}
+
+// Buffered returns the number of buffered bytes
+// in the writer.
+func (w *Writer) Buffered() int { return len(w.buf) }
+
+// BufferSize returns the maximum size of the buffer.
+func (w *Writer) BufferSize() int { return cap(w.buf) }
+
+// Flush flushes any buffered bytes
+// to the underlying writer.
+func (w *Writer) Flush() error {
+	l := len(w.buf)
+	if l > 0 {
+		n, err := w.w.Write(w.buf)
+
+		// if we didn't write the whole
+		// thing, copy the unwritten
+		// bytes to the beginning of the
+		// buffer.
+		if n < l && n > 0 {
+			w.pushback(n)
+			if err == nil {
+				err = io.ErrShortWrite
+			}
+		}
+		if err != nil {
+			return err
+		}
+		w.buf = w.buf[:0]
+		return nil
+	}
+	return nil
+}
+
+// Write implements `io.Writer`
+func (w *Writer) Write(p []byte) (int, error) {
+	c, l, ln := cap(w.buf), len(w.buf), len(p)
+	avail := c - l
+
+	// requires flush
+	if avail < ln {
+		if err := w.Flush(); err != nil {
+			return 0, err
+		}
+		l = len(w.buf)
+	}
+	// too big to fit in buffer;
+	// write directly to w.w
+	if c < ln {
+		return w.w.Write(p)
+	}
+
+	// grow buf slice; copy; return
+	w.buf = w.buf[:l+ln]
+	return copy(w.buf[l:], p), nil
+}
+
+// WriteString is analogous to Write, but it takes a string.
+func (w *Writer) WriteString(s string) (int, error) {
+	c, l, ln := cap(w.buf), len(w.buf), len(s)
+	avail := c - l
+
+	// requires flush
+	if avail < ln {
+		if err := w.Flush(); err != nil {
+			return 0, err
+		}
+		l = len(w.buf)
+	}
+	// too big to fit in buffer;
+	// write directly to w.w
+	//
+	// yes, this is unsafe. *but*
+	// io.Writer is not allowed
+	// to mutate its input or
+	// maintain a reference to it,
+	// per the spec in package io.
+	//
+	// plus, if the string is really
+	// too big to fit in the buffer, then
+	// creating a copy to write it is
+	// expensive (and, strictly speaking,
+	// unnecessary)
+	if c < ln {
+		return w.w.Write(unsafestr(s))
+	}
+
+	// grow buf slice; copy; return
+	w.buf = w.buf[:l+ln]
+	return copy(w.buf[l:], s), nil
+}
+
+// WriteByte implements `io.ByteWriter`
+func (w *Writer) WriteByte(b byte) error {
+	if len(w.buf) == cap(w.buf) {
+		if err := w.Flush(); err != nil {
+			return err
+		}
+	}
+	w.buf = append(w.buf, b)
+	return nil
+}
+
+// Next returns the next 'n' free bytes
+// in the write buffer, flushing the writer
+// as necessary. Next will return `io.ErrShortBuffer`
+// if 'n' is greater than the size of the write buffer.
+// Calls to 'next' increment the write position by
+// the size of the returned buffer.
+func (w *Writer) Next(n int) ([]byte, error) {
+	c, l := cap(w.buf), len(w.buf)
+	if n > c {
+		return nil, io.ErrShortBuffer
+	}
+	avail := c - l
+	if avail < n {
+		if err := w.Flush(); err != nil {
+			return nil, err
+		}
+		l = len(w.buf)
+	}
+	w.buf = w.buf[:l+n]
+	return w.buf[l:], nil
+}
+
+// take the bytes from w.buf[n:len(w.buf)]
+// and put them at the beginning of w.buf,
+// and resize to the length of the copied segment.
+func (w *Writer) pushback(n int) {
+	w.buf = w.buf[:copy(w.buf, w.buf[n:])]
+}
+
+// ReadFrom implements `io.ReaderFrom`
+func (w *Writer) ReadFrom(r io.Reader) (int64, error) {
+	// anticipatory flush
+	if err := w.Flush(); err != nil {
+		return 0, err
+	}
+
+	w.buf = w.buf[0:cap(w.buf)] // expand buffer
+
+	var nn int64  // written
+	var err error // error
+	var x int     // read
+
+	// 1:1 reads and writes
+	for err == nil {
+		x, err = r.Read(w.buf)
+		if x > 0 {
+			n, werr := w.w.Write(w.buf[:x])
+			nn += int64(n)
+
+			if err != nil {
+				if n < x && n > 0 {
+					w.pushback(n - x)
+				}
+				return nn, werr
+			}
+			if n < x {
+				w.pushback(n - x)
+				return nn, io.ErrShortWrite
+			}
+		} else if err == nil {
+			err = io.ErrNoProgress
+			break
+		}
+	}
+	if err != io.EOF {
+		return nn, err
+	}
+
+	// we only clear here
+	// because we are sure
+	// the writes have
+	// succeeded. otherwise,
+	// we retain the data in case
+	// future writes succeed.
+	w.buf = w.buf[0:0]
+
+	return nn, nil
+}
diff --git a/vendor/github.com/philhofer/fwd/writer_appengine.go b/vendor/github.com/philhofer/fwd/writer_appengine.go
new file mode 100644
index 0000000000..a978e3b6a0
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/writer_appengine.go
@@ -0,0 +1,6 @@
+//go:build appengine
+// +build appengine
+
+package fwd
+
+func unsafestr(s string) []byte { return []byte(s) }
diff --git a/vendor/github.com/philhofer/fwd/writer_tinygo.go b/vendor/github.com/philhofer/fwd/writer_tinygo.go
new file mode 100644
index 0000000000..b060faf7a0
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/writer_tinygo.go
@@ -0,0 +1,19 @@
+//go:build tinygo
+// +build tinygo
+
+package fwd
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// unsafe cast string as []byte
+func unsafestr(b string) []byte {
+	l := uintptr(len(b))
+	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+		Len:  l,
+		Cap:  l,
+		Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data,
+	}))
+}
diff --git a/vendor/github.com/philhofer/fwd/writer_unsafe.go b/vendor/github.com/philhofer/fwd/writer_unsafe.go
new file mode 100644
index 0000000000..e4cb4a830d
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/writer_unsafe.go
@@ -0,0 +1,20 @@
+//go:build !appengine && !tinygo
+// +build !appengine,!tinygo
+
+package fwd
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// unsafe cast string as []byte
+func unsafestr(s string) []byte {
+	var b []byte
+	sHdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
+	bHdr := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+	bHdr.Data = sHdr.Data
+	bHdr.Len = sHdr.Len
+	bHdr.Cap = sHdr.Len
+	return b
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index f4fc884552..9063978151 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -132,7 +132,10 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
 	}
 	// Pick off one MetricFamily per Decode until there's nothing left.
 	for key, fam := range d.fams {
-		*v = *fam
+		v.Name = fam.Name
+		v.Help = fam.Help
+		v.Type = fam.Type
+		v.Metric = fam.Metric
 		delete(d.fams, key)
 		return nil
 	}
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index 64dc0eb40c..7f611ffaad 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,9 +18,9 @@ import (
 	"io"
 	"net/http"
 
-	"github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/matttproud/golang_protobuf_extensions/pbutil"
 	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+	"google.golang.org/protobuf/encoding/prototext"
 
 	dto "github.com/prometheus/client_model/go"
 )
@@ -99,8 +99,11 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
 			return FmtText
 		}
-		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
-			return FmtOpenMetrics
+		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
+			if ver == OpenMetricsVersion_1_0_0 {
+				return FmtOpenMetrics_1_0_0
+			}
+			return FmtOpenMetrics_0_0_1
 		}
 	}
 	return FmtText
@@ -133,7 +136,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
 	case FmtProtoText:
 		return encoderCloser{
 			encode: func(v *dto.MetricFamily) error {
-				_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+				_, err := fmt.Fprintln(w, prototext.Format(v))
 				return err
 			},
 			close: func() error { return nil },
@@ -146,7 +149,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
 			},
 			close: func() error { return nil },
 		}
-	case FmtOpenMetrics:
+	case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0:
 		return encoderCloser{
 			encode: func(v *dto.MetricFamily) error {
 				_, err := MetricFamilyToOpenMetrics(w, v)
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index 0f176fa64f..c4cb20f0d3 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -19,20 +19,22 @@ type Format string
 
 // Constants to assemble the Content-Type values for the different wire protocols.
 const (
-	TextVersion        = "0.0.4"
-	ProtoType          = `application/vnd.google.protobuf`
-	ProtoProtocol      = `io.prometheus.client.MetricFamily`
-	ProtoFmt           = ProtoType + "; proto=" + ProtoProtocol + ";"
-	OpenMetricsType    = `application/openmetrics-text`
-	OpenMetricsVersion = "0.0.1"
+	TextVersion              = "0.0.4"
+	ProtoType                = `application/vnd.google.protobuf`
+	ProtoProtocol            = `io.prometheus.client.MetricFamily`
+	ProtoFmt                 = ProtoType + "; proto=" + ProtoProtocol + ";"
+	OpenMetricsType          = `application/openmetrics-text`
+	OpenMetricsVersion_0_0_1 = "0.0.1"
+	OpenMetricsVersion_1_0_0 = "1.0.0"
 
 	// The Content-Type values for the different wire protocols.
-	FmtUnknown      Format = `<unknown>`
-	FmtText         Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
-	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
-	FmtProtoText    Format = ProtoFmt + ` encoding=text`
-	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
-	FmtOpenMetrics  Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
+	FmtUnknown           Format = `<unknown>`
+	FmtText              Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+	FmtProtoDelim        Format = ProtoFmt + ` encoding=delimited`
+	FmtProtoText         Format = ProtoFmt + ` encoding=text`
+	FmtProtoCompact      Format = ProtoFmt + ` encoding=compact-text`
+	FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+	FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
 )
 
 const (
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index ac2482782c..35db1cc9d7 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -24,8 +24,8 @@ import (
 
 	dto "github.com/prometheus/client_model/go"
 
-	"github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/prometheus/common/model"
+	"google.golang.org/protobuf/proto"
 )
 
 // A stateFn is a function that represents a state in a state machine. By
diff --git a/vendor/github.com/puzpuzpuz/xsync/v2/.gitignore b/vendor/github.com/puzpuzpuz/xsync/v2/.gitignore
new file mode 100644
index 0000000000..66fd13c903
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v2/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
diff --git a/vendor/github.com/puzpuzpuz/xsync/v2/BENCHMARKS.md b/vendor/github.com/puzpuzpuz/xsync/v2/BENCHMARKS.md
new file mode 100644
index 0000000000..1e3169c4f2
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v2/BENCHMARKS.md
@@ -0,0 +1,131 @@
+# xsync benchmarks
+
+If you're interested in `MapOf` comparison with some of the popular concurrent hash maps written in Go, check [this](https://github.com/cornelk/hashmap/pull/70) and [this](https://github.com/alphadose/haxmap/pull/22) PRs.
+
+The below results were obtained for xsync v2.3.1 on a c6g.metal EC2 instance (64 CPU, 128GB RAM) running Linux and Go 1.19.3. I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks.
+
+The following commands were used to run the benchmarks:
+```bash
+$ go test -run='^$' -cpu=1,2,4,8,16,32,64 -bench . -count=30 -timeout=0 | tee bench.txt
+$ benchstat bench.txt | tee benchstat.txt
+```
+
+The below sections contain some of the results. Refer to [this gist](https://gist.github.com/puzpuzpuz/e62e38e06feadecfdc823c0f941ece0b) for the complete output.
+
+### Counter vs. atomic int64
+
+```
+name                                            time/op
+Counter                                         27.3ns ± 1%
+Counter-2                                       27.2ns ±11%
+Counter-4                                       15.3ns ± 8%
+Counter-8                                       7.43ns ± 7%
+Counter-16                                      3.70ns ±10%
+Counter-32                                      1.77ns ± 3%
+Counter-64                                      0.96ns ±10%
+AtomicInt64                                     7.60ns ± 0%
+AtomicInt64-2                                   12.6ns ±13%
+AtomicInt64-4                                   13.5ns ±14%
+AtomicInt64-8                                   12.7ns ± 9%
+AtomicInt64-16                                  12.8ns ± 8%
+AtomicInt64-32                                  13.0ns ± 6%
+AtomicInt64-64                                  12.9ns ± 7%
+```
+
+Here `time/op` stands for the average time spent per operation. Dividing `10^9` by the result in nanoseconds per operation gives the throughput in operations per second; for example, 27.3 ns/op corresponds to roughly 36.6 million operations per second. Thus, ideal theoretical scalability of a concurrent data structure implies that the reported `time/op` decreases proportionally with the increased number of CPU cores. Conversely, if the measured time per operation increases when run on more cores, it means performance degradation.
+
+### MapOf vs. sync.Map
+
+1,000 `[int, int]` entries with warm-up, 100% Loads:
+```
+IntegerMapOf_WarmUp/reads=100%                  24.0ns ± 0%
+IntegerMapOf_WarmUp/reads=100%-2                12.0ns ± 0%
+IntegerMapOf_WarmUp/reads=100%-4                6.02ns ± 0%
+IntegerMapOf_WarmUp/reads=100%-8                3.01ns ± 0%
+IntegerMapOf_WarmUp/reads=100%-16               1.50ns ± 0%
+IntegerMapOf_WarmUp/reads=100%-32               0.75ns ± 0%
+IntegerMapOf_WarmUp/reads=100%-64               0.38ns ± 0%
+IntegerMapStandard_WarmUp/reads=100%            55.3ns ± 0%
+IntegerMapStandard_WarmUp/reads=100%-2          27.6ns ± 0%
+IntegerMapStandard_WarmUp/reads=100%-4          16.1ns ± 3%
+IntegerMapStandard_WarmUp/reads=100%-8          8.35ns ± 7%
+IntegerMapStandard_WarmUp/reads=100%-16         4.24ns ± 7%
+IntegerMapStandard_WarmUp/reads=100%-32         2.18ns ± 6%
+IntegerMapStandard_WarmUp/reads=100%-64         1.11ns ± 3%
+```
+
+1,000 `[int, int]` entries with warm-up, 99% Loads, 0.5% Stores, 0.5% Deletes:
+```
+IntegerMapOf_WarmUp/reads=99%                   31.0ns ± 0%
+IntegerMapOf_WarmUp/reads=99%-2                 16.4ns ± 1%
+IntegerMapOf_WarmUp/reads=99%-4                 8.42ns ± 0%
+IntegerMapOf_WarmUp/reads=99%-8                 4.41ns ± 0%
+IntegerMapOf_WarmUp/reads=99%-16                2.38ns ± 2%
+IntegerMapOf_WarmUp/reads=99%-32                1.37ns ± 4%
+IntegerMapOf_WarmUp/reads=99%-64                0.85ns ± 2%
+IntegerMapStandard_WarmUp/reads=99%              121ns ± 1%
+IntegerMapStandard_WarmUp/reads=99%-2            109ns ± 3%
+IntegerMapStandard_WarmUp/reads=99%-4            115ns ± 4%
+IntegerMapStandard_WarmUp/reads=99%-8            114ns ± 2%
+IntegerMapStandard_WarmUp/reads=99%-16           105ns ± 2%
+IntegerMapStandard_WarmUp/reads=99%-32          97.0ns ± 3%
+IntegerMapStandard_WarmUp/reads=99%-64          98.0ns ± 2%
+```
+
+1,000 `[int, int]` entries with warm-up, 75% Loads, 12.5% Stores, 12.5% Deletes:
+```
+IntegerMapOf_WarmUp/reads=75%-reads             46.2ns ± 1%
+IntegerMapOf_WarmUp/reads=75%-reads-2           36.7ns ± 2%
+IntegerMapOf_WarmUp/reads=75%-reads-4           22.0ns ± 1%
+IntegerMapOf_WarmUp/reads=75%-reads-8           12.8ns ± 2%
+IntegerMapOf_WarmUp/reads=75%-reads-16          7.69ns ± 1%
+IntegerMapOf_WarmUp/reads=75%-reads-32          5.16ns ± 1%
+IntegerMapOf_WarmUp/reads=75%-reads-64          4.91ns ± 1%
+IntegerMapStandard_WarmUp/reads=75%-reads        156ns ± 0%
+IntegerMapStandard_WarmUp/reads=75%-reads-2      177ns ± 1%
+IntegerMapStandard_WarmUp/reads=75%-reads-4      197ns ± 1%
+IntegerMapStandard_WarmUp/reads=75%-reads-8      221ns ± 2%
+IntegerMapStandard_WarmUp/reads=75%-reads-16     242ns ± 1%
+IntegerMapStandard_WarmUp/reads=75%-reads-32     258ns ± 1%
+IntegerMapStandard_WarmUp/reads=75%-reads-64     264ns ± 1%
+```
+
+### MPMCQueue vs. Go channels
+
+Concurrent producers and consumers (1:1), queue/channel size 1,000, some work:
+```
+QueueProdConsWork100                             252ns ± 0%
+QueueProdConsWork100-2                           206ns ± 5%
+QueueProdConsWork100-4                           136ns ±12%
+QueueProdConsWork100-8                           110ns ± 6%
+QueueProdConsWork100-16                          108ns ± 2%
+QueueProdConsWork100-32                          102ns ± 2%
+QueueProdConsWork100-64                          101ns ± 0%
+ChanProdConsWork100                              283ns ± 0%
+ChanProdConsWork100-2                            406ns ±21%
+ChanProdConsWork100-4                            549ns ± 7%
+ChanProdConsWork100-8                            754ns ± 7%
+ChanProdConsWork100-16                           828ns ± 7%
+ChanProdConsWork100-32                           810ns ± 8%
+ChanProdConsWork100-64                           832ns ± 4%
+```
+
+### RBMutex vs. sync.RWMutex
+
+Writer locks on each 100,000 iteration, both no work and some work in the critical section:
+```
+RBMutexWorkWrite100000                           146ns ± 0%
+RBMutexWorkWrite100000-2                        73.3ns ± 0%
+RBMutexWorkWrite100000-4                        36.7ns ± 0%
+RBMutexWorkWrite100000-8                        18.6ns ± 0%
+RBMutexWorkWrite100000-16                       9.83ns ± 3%
+RBMutexWorkWrite100000-32                       5.53ns ± 0%
+RBMutexWorkWrite100000-64                       4.04ns ± 3%
+RWMutexWorkWrite100000                           121ns ± 0%
+RWMutexWorkWrite100000-2                         128ns ± 1%
+RWMutexWorkWrite100000-4                         124ns ± 2%
+RWMutexWorkWrite100000-8                         101ns ± 1%
+RWMutexWorkWrite100000-16                       92.9ns ± 1%
+RWMutexWorkWrite100000-32                       89.9ns ± 1%
+RWMutexWorkWrite100000-64                       88.4ns ± 1%
+```
diff --git a/vendor/github.com/puzpuzpuz/xsync/v2/LICENSE b/vendor/github.com/puzpuzpuz/xsync/v2/LICENSE
new file mode 100644
index 0000000000..8376971940
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v2/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Andrey Pechkurov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/puzpuzpuz/xsync/v2/README.md b/vendor/github.com/puzpuzpuz/xsync/v2/README.md
new file mode 100644
index 0000000000..a941d26d22
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v2/README.md
@@ -0,0 +1,138 @@
+[![GoDoc reference](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/puzpuzpuz/xsync/v2)
+[![GoReport](https://goreportcard.com/badge/github.com/puzpuzpuz/xsync/v2)](https://goreportcard.com/report/github.com/puzpuzpuz/xsync/v2)
+[![codecov](https://codecov.io/gh/puzpuzpuz/xsync/branch/main/graph/badge.svg)](https://codecov.io/gh/puzpuzpuz/xsync)
+
+# xsync
+
+Concurrent data structures for Go. Aims to provide more scalable alternatives for some of the data structures from the standard `sync` package, and more.
+
+Covered with tests following the approach described [here](https://puzpuzpuz.dev/testing-concurrent-code-for-fun-and-profit).
+
+## Benchmarks
+
+Benchmark results may be found [here](BENCHMARKS.md). I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks on a beefy multicore machine.
+
+Also, a non-scientific, unfair benchmark comparing Java's [j.u.c.ConcurrentHashMap](https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/concurrent/ConcurrentHashMap.html) and `xsync.MapOf` is available [here](https://puzpuzpuz.dev/concurrent-map-in-go-vs-java-yet-another-meaningless-benchmark).
+
+## Usage
+
+The latest xsync major version is v2, so the `/v2` suffix should be used when importing the library:
+
+```go
+import (
+	"github.com/puzpuzpuz/xsync/v2"
+)
+```
+
+### Counter
+
+A `Counter` is a striped `int64` counter inspired by the `j.u.c.a.LongAdder` class from the Java standard library.
+
+```go
+c := xsync.NewCounter()
+// increment and decrement the counter
+c.Inc()
+c.Dec()
+// read the current value 
+v := c.Value()
+```
+
+Works better in comparison with a single atomically updated `int64` counter in high contention scenarios.
+
+### Map
+
+A `Map` is like a concurrent hash table based map. It follows the interface of `sync.Map` with a number of valuable extensions like `Compute` or `Size`.
+
+```go
+m := xsync.NewMap()
+m.Store("foo", "bar")
+v, ok := m.Load("foo")
+s := m.Size()
+```
+
+`Map` uses a modified version of Cache-Line Hash Table (CLHT) data structure: https://github.com/LPD-EPFL/CLHT
+
+CLHT is built around the idea of organizing the hash table into cache-line-sized buckets, so that on all modern CPUs update operations complete with minimal cache-line transfer. Also, `Get` operations are obstruction-free and involve no writes to shared memory, hence no mutexes or any other sort of locks. Due to this design, in all considered scenarios `Map` outperforms `sync.Map`.
+
+One important difference with `sync.Map` is that only string keys are supported. That's because the Go standard library does not expose the built-in hash functions for `interface{}` values.
+
+`MapOf[K, V]` is an implementation with parametrized value type. It is available for Go 1.18 or later. While it's still a CLHT-inspired hash map, `MapOf`'s design is quite different from `Map`. As a result, it incurs less GC pressure and performs fewer atomic operations on reads.
+
+```go
+m := xsync.NewMapOf[string]()
+m.Store("foo", "bar")
+v, ok := m.Load("foo")
+```
+
+One important difference with `Map` is that `MapOf` supports arbitrary `comparable` key types:
+
+```go
+type Point struct {
+	x int32
+	y int32
+}
+m := xsync.NewTypedMapOf[Point, int](func(seed maphash.Seed, p Point) uint64 {
+	// provide a hash function when creating the MapOf;
+	// we recommend using the hash/maphash package for the function
+	var h maphash.Hash
+	h.SetSeed(seed)
+	binary.Write(&h, binary.LittleEndian, p.x)
+	hash := h.Sum64()
+	h.Reset()
+	binary.Write(&h, binary.LittleEndian, p.y)
+	return 31*hash + h.Sum64()
+})
+m.Store(Point{42, 42}, 42)
+v, ok := m.Load(Point{42, 42})
+```
+
+### MPMCQueue
+
+A `MPMCQueue` is a bounded multi-producer multi-consumer concurrent queue.
+
+```go
+q := xsync.NewMPMCQueue(1024)
+// producer inserts an item into the queue
+q.Enqueue("foo")
+// optimistic insertion attempt; doesn't block
+inserted := q.TryEnqueue("bar")
+// consumer obtains an item from the queue
+item := q.Dequeue()
+// optimistic obtain attempt; doesn't block
+item, ok := q.TryDequeue()
+```
+
+Based on the algorithm from the [MPMCQueue](https://github.com/rigtorp/MPMCQueue) C++ library which in turn references D.Vyukov's [MPMC queue](https://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue). According to the following [classification](https://www.1024cores.net/home/lock-free-algorithms/queues), the queue is array-based, fails on overflow, provides causal FIFO, and has blocking producers and consumers.
+
+The idea of the algorithm is to allow parallelism for concurrent producers and consumers by introducing the notion of tickets, i.e. values of two counters, one for producers and one for consumers. An atomic increment of one of those counters is the only noticeable contention point in queue operations. The rest of the operation avoids contention on writes thanks to the turn-based read/write access for each of the queue items.
+
+In essence, `MPMCQueue` is a specialized queue for scenarios where there are multiple concurrent producers and consumers of a single queue running on a large multicore machine.
+
+To get the optimal performance, you may want to set the queue size to be large enough, say, an order of magnitude greater than the number of producers/consumers, to allow producers and consumers to progress with their queue operations in parallel most of the time.
+
+### RBMutex
+
+A `RBMutex` is a reader-biased reader/writer mutual exclusion lock. The lock can be held by any number of readers or a single writer.
+
+```go
+mu := xsync.NewRBMutex()
+// reader lock calls return a token
+t := mu.RLock()
+// the token must be later used to unlock the mutex
+mu.RUnlock(t)
+// writer locks are the same as in sync.RWMutex
+mu.Lock()
+mu.Unlock()
+```
+
+`RBMutex` is based on a modified version of BRAVO (Biased Locking for Reader-Writer Locks) algorithm: https://arxiv.org/pdf/1810.01553.pdf
+
+The idea of the algorithm is to build on top of an existing reader-writer mutex and introduce a fast path for readers. On the fast path, reader lock attempts are sharded over an internal array based on the reader identity (a token in the case of Go). This means that readers do not contend over a single atomic counter the way they do in, say, `sync.RWMutex`, allowing for better scalability across cores.
+
+Hence, by design `RBMutex` is a specialized mutex for scenarios, such as caches, where the vast majority of locks are acquired by readers and write lock attempts are infrequent. In such scenarios, `RBMutex` should perform better than `sync.RWMutex` on large multicore machines.
+
+`RBMutex` extends `sync.RWMutex` internally and uses it as the "reader bias disabled" fallback, so the same semantics apply. The only noticeable difference is in the reader tokens returned from the `RLock`/`RUnlock` methods.
+
+## License
+
+Licensed under MIT.
diff --git a/vendor/github.com/puzpuzpuz/xsync/v2/counter.go b/vendor/github.com/puzpuzpuz/xsync/v2/counter.go
new file mode 100644
index 0000000000..4bf2c91d8d
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v2/counter.go
@@ -0,0 +1,99 @@
+package xsync
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// pool for P tokens
+var ptokenPool sync.Pool
+
+// a P token is used to point at the current OS thread (P)
+// on which the goroutine is run; exact identity of the thread,
+// as well as P migration tolerance, is not important since
+// it's used as a best-effort mechanism for assigning
+// concurrent operations (goroutines) to different stripes of
+// the counter
+type ptoken struct {
+	idx uint32
+	//lint:ignore U1000 prevents false sharing
+	pad [cacheLineSize - 4]byte
+}
+
+// A Counter is a striped int64 counter.
+//
+// Should be preferred over a single atomically updated int64
+// counter in high contention scenarios.
+//
+// A Counter must not be copied after first use.
+type Counter struct {
+	stripes []cstripe
+	mask    uint32
+}
+
+type cstripe struct {
+	c int64
+	//lint:ignore U1000 prevents false sharing
+	pad [cacheLineSize - 8]byte
+}
+
+// NewCounter creates a new Counter instance.
+func NewCounter() *Counter {
+	nstripes := nextPowOf2(parallelism())
+	c := Counter{
+		stripes: make([]cstripe, nstripes),
+		mask:    nstripes - 1,
+	}
+	return &c
+}
+
+// Inc increments the counter by 1.
+func (c *Counter) Inc() {
+	c.Add(1)
+}
+
+// Dec decrements the counter by 1.
+func (c *Counter) Dec() {
+	c.Add(-1)
+}
+
+// Add adds the delta to the counter.
+func (c *Counter) Add(delta int64) {
+	t, ok := ptokenPool.Get().(*ptoken)
+	if !ok {
+		t = new(ptoken)
+		t.idx = fastrand()
+	}
+	for {
+		stripe := &c.stripes[t.idx&c.mask]
+		cnt := atomic.LoadInt64(&stripe.c)
+		if atomic.CompareAndSwapInt64(&stripe.c, cnt, cnt+delta) {
+			break
+		}
+		// Give a try with another randomly selected stripe.
+		t.idx = fastrand()
+	}
+	ptokenPool.Put(t)
+}
+
+// Value returns the current counter value.
+// The returned value may not include all of the latest operations in
+// presence of concurrent modifications of the counter.
+func (c *Counter) Value() int64 {
+	v := int64(0)
+	for i := 0; i < len(c.stripes); i++ {
+		stripe := &c.stripes[i]
+		v += atomic.LoadInt64(&stripe.c)
+	}
+	return v
+}
+
+// Reset resets the counter to zero.
+// This method should only be used when it is known that there are
+// no concurrent modifications of the counter.
+func (c *Counter) Reset() {
+	for i := 0; i < len(c.stripes); i++ {
+		stripe := &c.stripes[i]
+		atomic.StoreInt64(&stripe.c, 0)
+	}
+}
diff --git a/vendor/github.com/puzpuzpuz/xsync/v2/map.go b/vendor/github.com/puzpuzpuz/xsync/v2/map.go
new file mode 100644
index 0000000000..749293cbf2
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v2/map.go
@@ -0,0 +1,785 @@
+package xsync
+
+import (
+	"fmt"
+	"hash/maphash"
+	"math"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unsafe"
+)
+
+type mapResizeHint int
+
+const (
+	mapGrowHint   mapResizeHint = 0
+	mapShrinkHint mapResizeHint = 1
+	mapClearHint  mapResizeHint = 2
+)
+
+const (
+	// number of entries per bucket; 3 entries lead to a size of 64B
+	// (one cache line) on 64-bit machines
+	entriesPerMapBucket = 3
+	// threshold fraction of table occupation to start a table shrinking
+	// when deleting the last entry in a bucket chain
+	mapShrinkFraction = 128
+	// map load factor to trigger a table resize during insertion;
+	// a map holds up to mapLoadFactor*entriesPerMapBucket*mapTableLen
+	// key-value pairs (this is a soft limit)
+	mapLoadFactor = 0.75
+	// minimal table size, i.e. number of buckets; thus, minimal map
+	// capacity can be calculated as entriesPerMapBucket*minMapTableLen
+	minMapTableLen = 32
+	// minimal table capacity
+	minMapTableCap = minMapTableLen * entriesPerMapBucket
+	// minimum counter stripes to use
+	minMapCounterLen = 8
+	// maximum counter stripes to use; amounts to around 4KB of memory
+	maxMapCounterLen = 32
+)
+
+var (
+	topHashMask       = uint64((1<<20)-1) << 44
+	topHashEntryMasks = [3]uint64{
+		topHashMask,
+		topHashMask >> 20,
+		topHashMask >> 40,
+	}
+)
+
+// Map is like a Go map[string]interface{} but is safe for concurrent
+// use by multiple goroutines without additional locking or
+// coordination. It follows the interface of sync.Map with
+// a number of valuable extensions like Compute or Size.
+//
+// A Map must not be copied after first use.
+//
+// Map uses a modified version of Cache-Line Hash Table (CLHT)
+// data structure: https://github.com/LPD-EPFL/CLHT
+//
+// CLHT is built around the idea of organizing the hash table in
+// cache-line-sized buckets, so that on all modern CPUs update
+// operations complete with at most one cache-line transfer.
+// Also, Get operations involve no writes to memory, as well as no
+// mutexes or any other sort of locks. Due to this design, in all
+// considered scenarios Map outperforms sync.Map.
+//
+// One important difference from sync.Map is that only string keys
+// are supported. That's because the Go standard library does not
+// expose the built-in hash functions for interface{} values.
+type Map struct {
+	totalGrowths int64
+	totalShrinks int64
+	resizing     int64          // resize in progress flag; updated atomically
+	resizeMu     sync.Mutex     // only used along with resizeCond
+	resizeCond   sync.Cond      // used to wake up resize waiters (concurrent modifications)
+	table        unsafe.Pointer // *mapTable
+}
+
+type mapTable struct {
+	buckets []bucketPadded
+	// striped counter for number of table entries;
+	// used to determine if a table shrinking is needed
+	// occupies min(buckets_memory/1024, 64KB) of memory
+	size []counterStripe
+	seed maphash.Seed
+}
+
+type counterStripe struct {
+	c int64
+	//lint:ignore U1000 prevents false sharing
+	pad [cacheLineSize - 8]byte
+}
+
+type bucketPadded struct {
+	//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
+	pad [cacheLineSize - unsafe.Sizeof(bucket{})]byte
+	bucket
+}
+
+type bucket struct {
+	next   unsafe.Pointer // *bucketPadded
+	keys   [entriesPerMapBucket]unsafe.Pointer
+	values [entriesPerMapBucket]unsafe.Pointer
+	// topHashMutex is a 2-in-1 value.
+	//
+	// It contains packed top 20 bits (20 MSBs) of hash codes for keys
+	// stored in the bucket:
+	// | key 0's top hash | key 1's top hash | key 2's top hash | bitmap for keys | mutex |
+	// |      20 bits     |      20 bits     |      20 bits     |     3 bits      | 1 bit |
+	//
+	// The least significant bit is used for the mutex (TTAS spinlock).
+	topHashMutex uint64
+}
+
+type rangeEntry struct {
+	key   unsafe.Pointer
+	value unsafe.Pointer
+}
+
+// NewMap creates a new Map instance.
+func NewMap() *Map {
+	return NewMapPresized(minMapTableCap)
+}
+
+// NewMapPresized creates a new Map instance with capacity enough to hold
+// sizeHint entries. If sizeHint is zero or negative, the value is ignored.
+func NewMapPresized(sizeHint int) *Map {
+	m := &Map{}
+	m.resizeCond = *sync.NewCond(&m.resizeMu)
+	var table *mapTable
+	if sizeHint <= minMapTableCap {
+		table = newMapTable(minMapTableLen)
+	} else {
+		tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
+		table = newMapTable(int(tableLen))
+	}
+	atomic.StorePointer(&m.table, unsafe.Pointer(table))
+	return m
+}
+
+func newMapTable(tableLen int) *mapTable {
+	buckets := make([]bucketPadded, tableLen)
+	counterLen := tableLen >> 10
+	if counterLen < minMapCounterLen {
+		counterLen = minMapCounterLen
+	} else if counterLen > maxMapCounterLen {
+		counterLen = maxMapCounterLen
+	}
+	counter := make([]counterStripe, counterLen)
+	t := &mapTable{
+		buckets: buckets,
+		size:    counter,
+		seed:    maphash.MakeSeed(),
+	}
+	return t
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (m *Map) Load(key string) (value interface{}, ok bool) {
+	table := (*mapTable)(atomic.LoadPointer(&m.table))
+	hash := hashString(table.seed, key)
+	bidx := uint64(len(table.buckets)-1) & hash
+	b := &table.buckets[bidx]
+	for {
+		topHashes := atomic.LoadUint64(&b.topHashMutex)
+		for i := 0; i < entriesPerMapBucket; i++ {
+			if !topHashMatch(hash, topHashes, i) {
+				continue
+			}
+		atomic_snapshot:
+			// Start atomic snapshot.
+			vp := atomic.LoadPointer(&b.values[i])
+			kp := atomic.LoadPointer(&b.keys[i])
+			if kp != nil && vp != nil {
+				if key == derefKey(kp) {
+					if uintptr(vp) == uintptr(atomic.LoadPointer(&b.values[i])) {
+						// Atomic snapshot succeeded.
+						return derefValue(vp), true
+					}
+					// Concurrent update/remove. Go for another spin.
+					goto atomic_snapshot
+				}
+			}
+		}
+		bptr := atomic.LoadPointer(&b.next)
+		if bptr == nil {
+			return
+		}
+		b = (*bucketPadded)(bptr)
+	}
+}
+
+// Store sets the value for a key.
+func (m *Map) Store(key string, value interface{}) {
+	m.doCompute(
+		key,
+		func(interface{}, bool) (interface{}, bool) {
+			return value, false
+		},
+		false,
+		false,
+	)
+}
+
+// LoadOrStore returns the existing value for the key if present.
+// Otherwise, it stores and returns the given value.
+// The loaded result is true if the value was loaded, false if stored.
+func (m *Map) LoadOrStore(key string, value interface{}) (actual interface{}, loaded bool) {
+	return m.doCompute(
+		key,
+		func(interface{}, bool) (interface{}, bool) {
+			return value, false
+		},
+		true,
+		false,
+	)
+}
+
+// LoadAndStore returns the existing value for the key if present,
+// while setting the new value for the key.
+// It stores the new value and returns the existing one, if present.
+// The loaded result is true if the existing value was loaded,
+// false otherwise.
+func (m *Map) LoadAndStore(key string, value interface{}) (actual interface{}, loaded bool) {
+	return m.doCompute(
+		key,
+		func(interface{}, bool) (interface{}, bool) {
+			return value, false
+		},
+		false,
+		false,
+	)
+}
+
+// LoadOrCompute returns the existing value for the key if present.
+// Otherwise, it computes the value using the provided function and
+// returns the computed value. The loaded result is true if the value
+// was loaded, false if stored.
+func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual interface{}, loaded bool) {
+	return m.doCompute(
+		key,
+		func(interface{}, bool) (interface{}, bool) {
+			return valueFn(), false
+		},
+		true,
+		false,
+	)
+}
+
+// Compute either sets the computed new value for the key or deletes
+// the value for the key. When the delete result of the valueFn function
+// is set to true, the value will be deleted, if it exists. When delete
+// is set to false, the value is updated to the newValue.
+// The ok result indicates whether value was computed and stored, thus, is
+// present in the map. The actual result contains the new value in cases where
+// the value was computed and stored. See the example for a few use cases.
+func (m *Map) Compute(
+	key string,
+	valueFn func(oldValue interface{}, loaded bool) (newValue interface{}, delete bool),
+) (actual interface{}, ok bool) {
+	return m.doCompute(key, valueFn, false, true)
+}
+
+// LoadAndDelete deletes the value for a key, returning the previous
+// value if any. The loaded result reports whether the key was
+// present.
+func (m *Map) LoadAndDelete(key string) (value interface{}, loaded bool) {
+	return m.doCompute(
+		key,
+		func(value interface{}, loaded bool) (interface{}, bool) {
+			return value, true
+		},
+		false,
+		false,
+	)
+}
+
+// Delete deletes the value for a key.
+func (m *Map) Delete(key string) {
+	m.doCompute(
+		key,
+		func(value interface{}, loaded bool) (interface{}, bool) {
+			return value, true
+		},
+		false,
+		false,
+	)
+}
+
+func (m *Map) doCompute(
+	key string,
+	valueFn func(oldValue interface{}, loaded bool) (interface{}, bool),
+	loadIfExists, computeOnly bool,
+) (interface{}, bool) {
+	// Read-only path.
+	if loadIfExists {
+		if v, ok := m.Load(key); ok {
+			return v, !computeOnly
+		}
+	}
+	// Write path.
+	for {
+	compute_attempt:
+		var (
+			emptyb       *bucketPadded
+			emptyidx     int
+			hintNonEmpty int
+		)
+		table := (*mapTable)(atomic.LoadPointer(&m.table))
+		tableLen := len(table.buckets)
+		hash := hashString(table.seed, key)
+		bidx := uint64(len(table.buckets)-1) & hash
+		rootb := &table.buckets[bidx]
+		lockBucket(&rootb.topHashMutex)
+		if m.newerTableExists(table) {
+			// Someone resized the table. Go for another attempt.
+			unlockBucket(&rootb.topHashMutex)
+			goto compute_attempt
+		}
+		if m.resizeInProgress() {
+			// Resize is in progress. Wait, then go for another attempt.
+			unlockBucket(&rootb.topHashMutex)
+			m.waitForResize()
+			goto compute_attempt
+		}
+		b := rootb
+		for {
+			topHashes := atomic.LoadUint64(&b.topHashMutex)
+			for i := 0; i < entriesPerMapBucket; i++ {
+				if b.keys[i] == nil {
+					if emptyb == nil {
+						emptyb = b
+						emptyidx = i
+					}
+					continue
+				}
+				if !topHashMatch(hash, topHashes, i) {
+					hintNonEmpty++
+					continue
+				}
+				if key == derefKey(b.keys[i]) {
+					vp := b.values[i]
+					if loadIfExists {
+						unlockBucket(&rootb.topHashMutex)
+						return derefValue(vp), !computeOnly
+					}
+					// In-place update/delete.
+					// We get a copy of the value via an interface{} on each call,
+					// thus the live value pointers are unique. Otherwise atomic
+					// snapshot won't be correct in case of multiple Store calls
+					// using the same value.
+					oldValue := derefValue(vp)
+					newValue, del := valueFn(oldValue, true)
+					if del {
+						// Deletion.
+						// First we update the value, then the key.
+						// This is important for atomic snapshot states.
+						atomic.StoreUint64(&b.topHashMutex, eraseTopHash(topHashes, i))
+						atomic.StorePointer(&b.values[i], nil)
+						atomic.StorePointer(&b.keys[i], nil)
+						leftEmpty := false
+						if hintNonEmpty == 0 {
+							leftEmpty = isEmptyBucket(b)
+						}
+						unlockBucket(&rootb.topHashMutex)
+						table.addSize(bidx, -1)
+						// Might need to shrink the table.
+						if leftEmpty {
+							m.resize(table, mapShrinkHint)
+						}
+						return oldValue, !computeOnly
+					}
+					nvp := unsafe.Pointer(&newValue)
+					if assertionsEnabled && vp == nvp {
+						panic("non-unique value pointer")
+					}
+					atomic.StorePointer(&b.values[i], nvp)
+					unlockBucket(&rootb.topHashMutex)
+					if computeOnly {
+						// Compute expects the new value to be returned.
+						return newValue, true
+					}
+					// LoadAndStore expects the old value to be returned.
+					return oldValue, true
+				}
+				hintNonEmpty++
+			}
+			if b.next == nil {
+				if emptyb != nil {
+					// Insertion into an existing bucket.
+					var zeroedV interface{}
+					newValue, del := valueFn(zeroedV, false)
+					if del {
+						unlockBucket(&rootb.topHashMutex)
+						return zeroedV, false
+					}
+					// First we update the value, then the key.
+					// This is important for atomic snapshot states.
+					topHashes = atomic.LoadUint64(&emptyb.topHashMutex)
+					atomic.StoreUint64(&emptyb.topHashMutex, storeTopHash(hash, topHashes, emptyidx))
+					atomic.StorePointer(&emptyb.values[emptyidx], unsafe.Pointer(&newValue))
+					atomic.StorePointer(&emptyb.keys[emptyidx], unsafe.Pointer(&key))
+					unlockBucket(&rootb.topHashMutex)
+					table.addSize(bidx, 1)
+					return newValue, computeOnly
+				}
+				growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
+				if table.sumSize() > int64(growThreshold) {
+					// Need to grow the table. Then go for another attempt.
+					unlockBucket(&rootb.topHashMutex)
+					m.resize(table, mapGrowHint)
+					goto compute_attempt
+				}
+				// Insertion into a new bucket.
+				var zeroedV interface{}
+				newValue, del := valueFn(zeroedV, false)
+				if del {
+					unlockBucket(&rootb.topHashMutex)
+					return newValue, false
+				}
+				// Create and append the bucket.
+				newb := new(bucketPadded)
+				newb.keys[0] = unsafe.Pointer(&key)
+				newb.values[0] = unsafe.Pointer(&newValue)
+				newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
+				atomic.StorePointer(&b.next, unsafe.Pointer(newb))
+				unlockBucket(&rootb.topHashMutex)
+				table.addSize(bidx, 1)
+				return newValue, computeOnly
+			}
+			b = (*bucketPadded)(b.next)
+		}
+	}
+}
+
+func (m *Map) newerTableExists(table *mapTable) bool {
+	curTablePtr := atomic.LoadPointer(&m.table)
+	return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
+}
+
+func (m *Map) resizeInProgress() bool {
+	return atomic.LoadInt64(&m.resizing) == 1
+}
+
+func (m *Map) waitForResize() {
+	m.resizeMu.Lock()
+	for m.resizeInProgress() {
+		m.resizeCond.Wait()
+	}
+	m.resizeMu.Unlock()
+}
+
+func (m *Map) resize(table *mapTable, hint mapResizeHint) {
+	var shrinkThreshold int64
+	tableLen := len(table.buckets)
+	// Fast path for shrink attempts.
+	if hint == mapShrinkHint {
+		shrinkThreshold = int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
+		if tableLen == minMapTableLen || table.sumSize() > shrinkThreshold {
+			return
+		}
+	}
+	// Slow path.
+	if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
+		// Someone else started resize. Wait for it to finish.
+		m.waitForResize()
+		return
+	}
+	var newTable *mapTable
+	switch hint {
+	case mapGrowHint:
+		// Grow the table with factor of 2.
+		atomic.AddInt64(&m.totalGrowths, 1)
+		newTable = newMapTable(tableLen << 1)
+	case mapShrinkHint:
+		if table.sumSize() <= shrinkThreshold {
+			// Shrink the table with factor of 2.
+			atomic.AddInt64(&m.totalShrinks, 1)
+			newTable = newMapTable(tableLen >> 1)
+		} else {
+			// No need to shrink. Wake up all waiters and give up.
+			m.resizeMu.Lock()
+			atomic.StoreInt64(&m.resizing, 0)
+			m.resizeCond.Broadcast()
+			m.resizeMu.Unlock()
+			return
+		}
+	case mapClearHint:
+		newTable = newMapTable(minMapTableLen)
+	default:
+		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
+	}
+	// Copy the data only if we're not clearing the map.
+	if hint != mapClearHint {
+		for i := 0; i < tableLen; i++ {
+			copied := copyBucket(&table.buckets[i], newTable)
+			newTable.addSizePlain(uint64(i), copied)
+		}
+	}
+	// Publish the new table and wake up all waiters.
+	atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
+	m.resizeMu.Lock()
+	atomic.StoreInt64(&m.resizing, 0)
+	m.resizeCond.Broadcast()
+	m.resizeMu.Unlock()
+}
+
+func copyBucket(b *bucketPadded, destTable *mapTable) (copied int) {
+	rootb := b
+	lockBucket(&rootb.topHashMutex)
+	for {
+		for i := 0; i < entriesPerMapBucket; i++ {
+			if b.keys[i] != nil {
+				k := derefKey(b.keys[i])
+				hash := hashString(destTable.seed, k)
+				bidx := uint64(len(destTable.buckets)-1) & hash
+				destb := &destTable.buckets[bidx]
+				appendToBucket(hash, b.keys[i], b.values[i], destb)
+				copied++
+			}
+		}
+		if b.next == nil {
+			unlockBucket(&rootb.topHashMutex)
+			return
+		}
+		b = (*bucketPadded)(b.next)
+	}
+}
+
+func appendToBucket(hash uint64, keyPtr, valPtr unsafe.Pointer, b *bucketPadded) {
+	for {
+		for i := 0; i < entriesPerMapBucket; i++ {
+			if b.keys[i] == nil {
+				b.keys[i] = keyPtr
+				b.values[i] = valPtr
+				b.topHashMutex = storeTopHash(hash, b.topHashMutex, i)
+				return
+			}
+		}
+		if b.next == nil {
+			newb := new(bucketPadded)
+			newb.keys[0] = keyPtr
+			newb.values[0] = valPtr
+			newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
+			b.next = unsafe.Pointer(newb)
+			return
+		}
+		b = (*bucketPadded)(b.next)
+	}
+}
+
+func isEmptyBucket(rootb *bucketPadded) bool {
+	b := rootb
+	for {
+		for i := 0; i < entriesPerMapBucket; i++ {
+			if b.keys[i] != nil {
+				return false
+			}
+		}
+		if b.next == nil {
+			return true
+		}
+		b = (*bucketPadded)(b.next)
+	}
+}
+
+// Range calls f sequentially for each key and value present in the
+// map. If f returns false, range stops the iteration.
+//
+// Range does not necessarily correspond to any consistent snapshot
+// of the Map's contents: no key will be visited more than once, but
+// if the value for any key is stored or deleted concurrently, Range
+// may reflect any mapping for that key from any point during the
+// Range call.
+//
+// It is safe to modify the map while iterating it. However, the
+// concurrent modification rules apply, i.e. the changes may not be
+// reflected in the subsequently iterated entries.
+func (m *Map) Range(f func(key string, value interface{}) bool) {
+	var zeroEntry rangeEntry
+	// Pre-allocate an array big enough to fit entries for most hash tables.
+	bentries := make([]rangeEntry, 0, 16*entriesPerMapBucket)
+	tablep := atomic.LoadPointer(&m.table)
+	table := *(*mapTable)(tablep)
+	for i := range table.buckets {
+		rootb := &table.buckets[i]
+		b := rootb
+		// Prevent concurrent modifications and copy all entries into
+		// the intermediate slice.
+		lockBucket(&rootb.topHashMutex)
+		for {
+			for i := 0; i < entriesPerMapBucket; i++ {
+				if b.keys[i] != nil {
+					bentries = append(bentries, rangeEntry{
+						key:   b.keys[i],
+						value: b.values[i],
+					})
+				}
+			}
+			if b.next == nil {
+				unlockBucket(&rootb.topHashMutex)
+				break
+			}
+			b = (*bucketPadded)(b.next)
+		}
+		// Call the function for all copied entries.
+		for j := range bentries {
+			k := derefKey(bentries[j].key)
+			v := derefValue(bentries[j].value)
+			if !f(k, v) {
+				return
+			}
+			// Remove the reference to avoid preventing the copied
+			// entries from being GCed until this method finishes.
+			bentries[j] = zeroEntry
+		}
+		bentries = bentries[:0]
+	}
+}
+
+// Clear deletes all keys and values currently stored in the map.
+func (m *Map) Clear() {
+	table := (*mapTable)(atomic.LoadPointer(&m.table))
+	m.resize(table, mapClearHint)
+}
+
+// Size returns the current size of the map.
+func (m *Map) Size() int {
+	table := (*mapTable)(atomic.LoadPointer(&m.table))
+	return int(table.sumSize())
+}
+
+func derefKey(keyPtr unsafe.Pointer) string {
+	return *(*string)(keyPtr)
+}
+
+func derefValue(valuePtr unsafe.Pointer) interface{} {
+	return *(*interface{})(valuePtr)
+}
+
+func lockBucket(mu *uint64) {
+	for {
+		var v uint64
+		for {
+			v = atomic.LoadUint64(mu)
+			if v&1 != 1 {
+				break
+			}
+			runtime.Gosched()
+		}
+		if atomic.CompareAndSwapUint64(mu, v, v|1) {
+			return
+		}
+		runtime.Gosched()
+	}
+}
+
+func unlockBucket(mu *uint64) {
+	v := atomic.LoadUint64(mu)
+	atomic.StoreUint64(mu, v&^1)
+}
+
+func topHashMatch(hash, topHashes uint64, idx int) bool {
+	if topHashes&(1<<(idx+1)) == 0 {
+		// Entry is not present.
+		return false
+	}
+	hash = hash & topHashMask
+	topHashes = (topHashes & topHashEntryMasks[idx]) << (20 * idx)
+	return hash == topHashes
+}
+
+func storeTopHash(hash, topHashes uint64, idx int) uint64 {
+	// Zero out top hash at idx.
+	topHashes = topHashes &^ topHashEntryMasks[idx]
+	// Chop top 20 MSBs of the given hash and position them at idx.
+	hash = (hash & topHashMask) >> (20 * idx)
+	// Store the MSBs.
+	topHashes = topHashes | hash
+	// Mark the entry as present.
+	return topHashes | (1 << (idx + 1))
+}
+
+func eraseTopHash(topHashes uint64, idx int) uint64 {
+	return topHashes &^ (1 << (idx + 1))
+}
+
+func (table *mapTable) addSize(bucketIdx uint64, delta int) {
+	cidx := uint64(len(table.size)-1) & bucketIdx
+	atomic.AddInt64(&table.size[cidx].c, int64(delta))
+}
+
+func (table *mapTable) addSizePlain(bucketIdx uint64, delta int) {
+	cidx := uint64(len(table.size)-1) & bucketIdx
+	table.size[cidx].c += int64(delta)
+}
+
+func (table *mapTable) sumSize() int64 {
+	sum := int64(0)
+	for i := range table.size {
+		sum += atomic.LoadInt64(&table.size[i].c)
+	}
+	return sum
+}
+
+type mapStats struct {
+	RootBuckets  int
+	TotalBuckets int
+	EmptyBuckets int
+	Capacity     int
+	Size         int // calculated number of entries
+	Counter      int // number of entries according to table counter
+	CounterLen   int // number of counter stripes
+	MinEntries   int // min entries per chain of buckets
+	MaxEntries   int // max entries per chain of buckets
+	TotalGrowths int64
+	TotalShrinks int64
+}
+
+func (s *mapStats) ToString() string {
+	var sb strings.Builder
+	sb.WriteString("\n---\n")
+	sb.WriteString(fmt.Sprintf("RootBuckets:  %d\n", s.RootBuckets))
+	sb.WriteString(fmt.Sprintf("TotalBuckets: %d\n", s.TotalBuckets))
+	sb.WriteString(fmt.Sprintf("EmptyBuckets: %d\n", s.EmptyBuckets))
+	sb.WriteString(fmt.Sprintf("Capacity:     %d\n", s.Capacity))
+	sb.WriteString(fmt.Sprintf("Size:         %d\n", s.Size))
+	sb.WriteString(fmt.Sprintf("Counter:      %d\n", s.Counter))
+	sb.WriteString(fmt.Sprintf("CounterLen:   %d\n", s.CounterLen))
+	sb.WriteString(fmt.Sprintf("MinEntries:   %d\n", s.MinEntries))
+	sb.WriteString(fmt.Sprintf("MaxEntries:   %d\n", s.MaxEntries))
+	sb.WriteString(fmt.Sprintf("TotalGrowths: %d\n", s.TotalGrowths))
+	sb.WriteString(fmt.Sprintf("TotalShrinks: %d\n", s.TotalShrinks))
+	sb.WriteString("---\n")
+	return sb.String()
+}
+
+// O(N) operation; use for debug purposes only
+func (m *Map) stats() mapStats {
+	stats := mapStats{
+		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
+		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
+		MinEntries:   math.MaxInt32,
+	}
+	table := (*mapTable)(atomic.LoadPointer(&m.table))
+	stats.RootBuckets = len(table.buckets)
+	stats.Counter = int(table.sumSize())
+	stats.CounterLen = len(table.size)
+	for i := range table.buckets {
+		nentries := 0
+		b := &table.buckets[i]
+		stats.TotalBuckets++
+		for {
+			nentriesLocal := 0
+			stats.Capacity += entriesPerMapBucket
+			for i := 0; i < entriesPerMapBucket; i++ {
+				if atomic.LoadPointer(&b.keys[i]) != nil {
+					stats.Size++
+					nentriesLocal++
+				}
+			}
+			nentries += nentriesLocal
+			if nentriesLocal == 0 {
+				stats.EmptyBuckets++
+			}
+			if b.next == nil {
+				break
+			}
+			b = (*bucketPadded)(b.next)
+			stats.TotalBuckets++
+		}
+		if nentries < stats.MinEntries {
+			stats.MinEntries = nentries
+		}
+		if nentries > stats.MaxEntries {
+			stats.MaxEntries = nentries
+		}
+	}
+	return stats
+}
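A minimal usage sketch of the Map API above; the key names and values are illustrative:

// Store/Load with interface{} values, plus a Compute call that
// updates the stored value under the bucket lock.
package main

import (
	"fmt"

	"github.com/puzpuzpuz/xsync/v2"
)

func main() {
	m := xsync.NewMap()
	m.Store("hits", 1)

	// Increment the value for "hits"; the second return value of the
	// callback controls deletion (false keeps the entry).
	m.Compute("hits", func(old interface{}, loaded bool) (interface{}, bool) {
		if !loaded {
			return 1, false
		}
		return old.(int) + 1, false
	})

	if v, ok := m.Load("hits"); ok {
		fmt.Println(v) // 2
	}
	fmt.Println(m.Size()) // 1
}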
diff --git a/vendor/github.com/puzpuzpuz/xsync/v2/mapof.go b/vendor/github.com/puzpuzpuz/xsync/v2/mapof.go
new file mode 100644
index 0000000000..cadaeb438c
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v2/mapof.go
@@ -0,0 +1,688 @@
+//go:build go1.18
+// +build go1.18
+
+package xsync
+
+import (
+	"fmt"
+	"hash/maphash"
+	"math"
+	"sync"
+	"sync/atomic"
+	"unsafe"
+)
+
+// MapOf is like a Go map[string]V but is safe for concurrent
+// use by multiple goroutines without additional locking or
+// coordination. It follows the interface of sync.Map with
+// a number of valuable extensions like Compute or Size.
+//
+// A MapOf must not be copied after first use.
+//
+// MapOf uses a modified version of Cache-Line Hash Table (CLHT)
+// data structure: https://github.com/LPD-EPFL/CLHT
+//
+// CLHT is built around the idea of organizing the hash table in
+// cache-line-sized buckets, so that on all modern CPUs update
+// operations complete with at most one cache-line transfer.
+// Also, Get operations involve no writes to memory, as well as no
+// mutexes or any other sort of locks. Due to this design, in all
+// considered scenarios MapOf outperforms sync.Map.
+type MapOf[K comparable, V any] struct {
+	totalGrowths int64
+	totalShrinks int64
+	resizing     int64          // resize in progress flag; updated atomically
+	resizeMu     sync.Mutex     // only used along with resizeCond
+	resizeCond   sync.Cond      // used to wake up resize waiters (concurrent modifications)
+	table        unsafe.Pointer // *mapOfTable
+	hasher       func(maphash.Seed, K) uint64
+}
+
+type mapOfTable[K comparable, V any] struct {
+	buckets []bucketOfPadded
+	// striped counter for number of table entries;
+	// used to determine if a table shrinking is needed
+	// occupies min(buckets_memory/1024, 64KB) of memory
+	size []counterStripe
+	seed maphash.Seed
+}
+
+// bucketOfPadded is a CL-sized map bucket holding up to
+// entriesPerMapBucket entries.
+type bucketOfPadded struct {
+	//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
+	pad [cacheLineSize - unsafe.Sizeof(bucketOf{})]byte
+	bucketOf
+}
+
+type bucketOf struct {
+	hashes  [entriesPerMapBucket]uint64
+	entries [entriesPerMapBucket]unsafe.Pointer // *entryOf
+	next    unsafe.Pointer                      // *bucketOfPadded
+	mu      sync.Mutex
+}
+
+// entryOf is an immutable map entry.
+type entryOf[K comparable, V any] struct {
+	key   K
+	value V
+}
+
+// NewMapOf creates a new MapOf instance with string keys.
+func NewMapOf[V any]() *MapOf[string, V] {
+	return NewTypedMapOfPresized[string, V](hashString, minMapTableCap)
+}
+
+// NewMapOfPresized creates a new MapOf instance with string keys and capacity
+// enough to hold sizeHint entries. If sizeHint is zero or negative, the value
+// is ignored.
+func NewMapOfPresized[V any](sizeHint int) *MapOf[string, V] {
+	return NewTypedMapOfPresized[string, V](hashString, sizeHint)
+}
+
+// IntegerConstraint represents any integer type.
+type IntegerConstraint interface {
+	// Recreation of golang.org/x/exp/constraints.Integer to avoid taking a dependency on an
+	// experimental package.
+	~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// NewIntegerMapOf creates a new MapOf instance with integer typed keys.
+func NewIntegerMapOf[K IntegerConstraint, V any]() *MapOf[K, V] {
+	return NewTypedMapOfPresized[K, V](hashUint64[K], minMapTableCap)
+}
+
+// NewIntegerMapOfPresized creates a new MapOf instance with integer typed keys
+// and capacity enough to hold sizeHint entries. If sizeHint is zero or
+// negative, the value is ignored.
+func NewIntegerMapOfPresized[K IntegerConstraint, V any](sizeHint int) *MapOf[K, V] {
+	return NewTypedMapOfPresized[K, V](hashUint64[K], sizeHint)
+}
+
+// NewTypedMapOf creates a new MapOf instance with arbitrarily typed keys.
+//
+// Keys are hashed to uint64 using the hasher function. It is strongly
+// recommended to use the hash/maphash package to implement hasher. See the
+// example for how to do that.
+func NewTypedMapOf[K comparable, V any](hasher func(maphash.Seed, K) uint64) *MapOf[K, V] {
+	return NewTypedMapOfPresized[K, V](hasher, minMapTableCap)
+}
+
+// NewTypedMapOfPresized creates a new MapOf instance with arbitrarily typed
+// keys and capacity enough to hold sizeHint entries. If sizeHint is zero or
+// negative, the value is ignored.
+//
+// Keys are hashed to uint64 using the hasher function. It is strongly
+// recommended to use the hash/maphash package to implement hasher. See the
+// example for how to do that.
+func NewTypedMapOfPresized[K comparable, V any](hasher func(maphash.Seed, K) uint64, sizeHint int) *MapOf[K, V] {
+	m := &MapOf[K, V]{}
+	m.resizeCond = *sync.NewCond(&m.resizeMu)
+	m.hasher = hasher
+	var table *mapOfTable[K, V]
+	if sizeHint <= minMapTableCap {
+		table = newMapOfTable[K, V](minMapTableLen)
+	} else {
+		tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
+		table = newMapOfTable[K, V](int(tableLen))
+	}
+	atomic.StorePointer(&m.table, unsafe.Pointer(table))
+	return m
+}
+
+func newMapOfTable[K comparable, V any](tableLen int) *mapOfTable[K, V] {
+	buckets := make([]bucketOfPadded, tableLen)
+	counterLen := tableLen >> 10
+	if counterLen < minMapCounterLen {
+		counterLen = minMapCounterLen
+	} else if counterLen > maxMapCounterLen {
+		counterLen = maxMapCounterLen
+	}
+	counter := make([]counterStripe, counterLen)
+	t := &mapOfTable[K, V]{
+		buckets: buckets,
+		size:    counter,
+		seed:    maphash.MakeSeed(),
+	}
+	return t
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
+	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
+	hash := shiftHash(m.hasher(table.seed, key))
+	bidx := uint64(len(table.buckets)-1) & hash
+	b := &table.buckets[bidx]
+	for {
+		for i := 0; i < entriesPerMapBucket; i++ {
+			// We treat the hash code only as a hint, so there is no
+			// need to get an atomic snapshot.
+			h := atomic.LoadUint64(&b.hashes[i])
+			if h == uint64(0) || h != hash {
+				continue
+			}
+			eptr := atomic.LoadPointer(&b.entries[i])
+			if eptr == nil {
+				continue
+			}
+			e := (*entryOf[K, V])(eptr)
+			if e.key == key {
+				return e.value, true
+			}
+		}
+		bptr := atomic.LoadPointer(&b.next)
+		if bptr == nil {
+			return
+		}
+		b = (*bucketOfPadded)(bptr)
+	}
+}
+
+// Store sets the value for a key.
+func (m *MapOf[K, V]) Store(key K, value V) {
+	m.doCompute(
+		key,
+		func(V, bool) (V, bool) {
+			return value, false
+		},
+		false,
+		false,
+	)
+}
+
+// LoadOrStore returns the existing value for the key if present.
+// Otherwise, it stores and returns the given value.
+// The loaded result is true if the value was loaded, false if stored.
+func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
+	return m.doCompute(
+		key,
+		func(V, bool) (V, bool) {
+			return value, false
+		},
+		true,
+		false,
+	)
+}
+
+// LoadAndStore returns the existing value for the key if present,
+// while setting the new value for the key.
+// It stores the new value and returns the existing one, if present.
+// The loaded result is true if the existing value was loaded,
+// false otherwise.
+func (m *MapOf[K, V]) LoadAndStore(key K, value V) (actual V, loaded bool) {
+	return m.doCompute(
+		key,
+		func(V, bool) (V, bool) {
+			return value, false
+		},
+		false,
+		false,
+	)
+}
+
+// LoadOrCompute returns the existing value for the key if present.
+// Otherwise, it computes the value using the provided function and
+// returns the computed value. The loaded result is true if the value
+// was loaded, false if stored.
+func (m *MapOf[K, V]) LoadOrCompute(key K, valueFn func() V) (actual V, loaded bool) {
+	return m.doCompute(
+		key,
+		func(V, bool) (V, bool) {
+			return valueFn(), false
+		},
+		true,
+		false,
+	)
+}
+
+// Compute either sets the computed new value for the key or deletes
+// the value for the key. When the delete result of the valueFn function
+// is set to true, the value will be deleted, if it exists. When delete
+// is set to false, the value is updated to the newValue.
+// The ok result indicates whether value was computed and stored, thus, is
+// present in the map. The actual result contains the new value in cases where
+// the value was computed and stored. See the example for a few use cases.
+func (m *MapOf[K, V]) Compute(
+	key K,
+	valueFn func(oldValue V, loaded bool) (newValue V, delete bool),
+) (actual V, ok bool) {
+	return m.doCompute(key, valueFn, false, true)
+}
+
+// LoadAndDelete deletes the value for a key, returning the previous
+// value if any. The loaded result reports whether the key was
+// present.
+func (m *MapOf[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
+	return m.doCompute(
+		key,
+		func(value V, loaded bool) (V, bool) {
+			return value, true
+		},
+		false,
+		false,
+	)
+}
+
+// Delete deletes the value for a key.
+func (m *MapOf[K, V]) Delete(key K) {
+	m.doCompute(
+		key,
+		func(value V, loaded bool) (V, bool) {
+			return value, true
+		},
+		false,
+		false,
+	)
+}
+
+func (m *MapOf[K, V]) doCompute(
+	key K,
+	valueFn func(oldValue V, loaded bool) (V, bool),
+	loadIfExists, computeOnly bool,
+) (V, bool) {
+	// Read-only path.
+	if loadIfExists {
+		if v, ok := m.Load(key); ok {
+			return v, !computeOnly
+		}
+	}
+	// Write path.
+	for {
+	compute_attempt:
+		var (
+			emptyb       *bucketOfPadded
+			emptyidx     int
+			hintNonEmpty int
+		)
+		table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
+		tableLen := len(table.buckets)
+		hash := shiftHash(m.hasher(table.seed, key))
+		bidx := uint64(len(table.buckets)-1) & hash
+		rootb := &table.buckets[bidx]
+		rootb.mu.Lock()
+		if m.newerTableExists(table) {
+			// Someone resized the table. Go for another attempt.
+			rootb.mu.Unlock()
+			goto compute_attempt
+		}
+		if m.resizeInProgress() {
+			// Resize is in progress. Wait, then go for another attempt.
+			rootb.mu.Unlock()
+			m.waitForResize()
+			goto compute_attempt
+		}
+		b := rootb
+		for {
+			for i := 0; i < entriesPerMapBucket; i++ {
+				h := atomic.LoadUint64(&b.hashes[i])
+				if h == uint64(0) {
+					if emptyb == nil {
+						emptyb = b
+						emptyidx = i
+					}
+					continue
+				}
+				if h != hash {
+					hintNonEmpty++
+					continue
+				}
+				e := (*entryOf[K, V])(b.entries[i])
+				if e.key == key {
+					if loadIfExists {
+						rootb.mu.Unlock()
+						return e.value, !computeOnly
+					}
+					// In-place update/delete.
+					// Each update allocates a fresh immutable entry, so readers
+					// that already loaded the old entry pointer keep observing
+					// a consistent key/value pair.
+					oldv := e.value
+					newv, del := valueFn(oldv, true)
+					if del {
+						// Deletion.
+						// First we update the hash, then the entry.
+						atomic.StoreUint64(&b.hashes[i], uint64(0))
+						atomic.StorePointer(&b.entries[i], nil)
+						leftEmpty := false
+						if hintNonEmpty == 0 {
+							leftEmpty = isEmptyBucketOf(b)
+						}
+						rootb.mu.Unlock()
+						table.addSize(bidx, -1)
+						// Might need to shrink the table.
+						if leftEmpty {
+							m.resize(table, mapShrinkHint)
+						}
+						return oldv, !computeOnly
+					}
+					newe := new(entryOf[K, V])
+					newe.key = key
+					newe.value = newv
+					atomic.StorePointer(&b.entries[i], unsafe.Pointer(newe))
+					rootb.mu.Unlock()
+					if computeOnly {
+						// Compute expects the new value to be returned.
+						return newv, true
+					}
+					// LoadAndStore expects the old value to be returned.
+					return oldv, true
+				}
+				hintNonEmpty++
+			}
+			if b.next == nil {
+				if emptyb != nil {
+					// Insertion into an existing bucket.
+					var zeroedV V
+					newValue, del := valueFn(zeroedV, false)
+					if del {
+						rootb.mu.Unlock()
+						return zeroedV, false
+					}
+					newe := new(entryOf[K, V])
+					newe.key = key
+					newe.value = newValue
+					// First we update the hash, then the entry.
+					atomic.StoreUint64(&emptyb.hashes[emptyidx], hash)
+					atomic.StorePointer(&emptyb.entries[emptyidx], unsafe.Pointer(newe))
+					rootb.mu.Unlock()
+					table.addSize(bidx, 1)
+					return newValue, computeOnly
+				}
+				growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
+				if table.sumSize() > int64(growThreshold) {
+					// Need to grow the table. Then go for another attempt.
+					rootb.mu.Unlock()
+					m.resize(table, mapGrowHint)
+					goto compute_attempt
+				}
+				// Insertion into a new bucket.
+				var zeroedV V
+				newValue, del := valueFn(zeroedV, false)
+				if del {
+					rootb.mu.Unlock()
+					return newValue, false
+				}
+				// Create and append the bucket.
+				newb := new(bucketOfPadded)
+				newb.hashes[0] = hash
+				newe := new(entryOf[K, V])
+				newe.key = key
+				newe.value = newValue
+				newb.entries[0] = unsafe.Pointer(newe)
+				atomic.StorePointer(&b.next, unsafe.Pointer(newb))
+				rootb.mu.Unlock()
+				table.addSize(bidx, 1)
+				return newValue, computeOnly
+			}
+			b = (*bucketOfPadded)(b.next)
+		}
+	}
+}
+
+func (m *MapOf[K, V]) newerTableExists(table *mapOfTable[K, V]) bool {
+	curTablePtr := atomic.LoadPointer(&m.table)
+	return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
+}
+
+func (m *MapOf[K, V]) resizeInProgress() bool {
+	return atomic.LoadInt64(&m.resizing) == 1
+}
+
+func (m *MapOf[K, V]) waitForResize() {
+	m.resizeMu.Lock()
+	for m.resizeInProgress() {
+		m.resizeCond.Wait()
+	}
+	m.resizeMu.Unlock()
+}
+
+func (m *MapOf[K, V]) resize(table *mapOfTable[K, V], hint mapResizeHint) {
+	var shrinkThreshold int64
+	tableLen := len(table.buckets)
+	// Fast path for shrink attempts.
+	if hint == mapShrinkHint {
+		shrinkThreshold = int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
+		if tableLen == minMapTableLen || table.sumSize() > shrinkThreshold {
+			return
+		}
+	}
+	// Slow path.
+	if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
+		// Someone else started resize. Wait for it to finish.
+		m.waitForResize()
+		return
+	}
+	var newTable *mapOfTable[K, V]
+	switch hint {
+	case mapGrowHint:
+		// Grow the table with factor of 2.
+		atomic.AddInt64(&m.totalGrowths, 1)
+		newTable = newMapOfTable[K, V](tableLen << 1)
+	case mapShrinkHint:
+		if table.sumSize() <= shrinkThreshold {
+			// Shrink the table with factor of 2.
+			atomic.AddInt64(&m.totalShrinks, 1)
+			newTable = newMapOfTable[K, V](tableLen >> 1)
+		} else {
+			// No need to shrink. Wake up all waiters and give up.
+			m.resizeMu.Lock()
+			atomic.StoreInt64(&m.resizing, 0)
+			m.resizeCond.Broadcast()
+			m.resizeMu.Unlock()
+			return
+		}
+	case mapClearHint:
+		newTable = newMapOfTable[K, V](minMapTableLen)
+	default:
+		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
+	}
+	// Copy the data only if we're not clearing the map.
+	if hint != mapClearHint {
+		for i := 0; i < tableLen; i++ {
+			copied := copyBucketOf(&table.buckets[i], newTable, m.hasher)
+			newTable.addSizePlain(uint64(i), copied)
+		}
+	}
+	// Publish the new table and wake up all waiters.
+	atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
+	m.resizeMu.Lock()
+	atomic.StoreInt64(&m.resizing, 0)
+	m.resizeCond.Broadcast()
+	m.resizeMu.Unlock()
+}
+
+func copyBucketOf[K comparable, V any](
+	b *bucketOfPadded,
+	destTable *mapOfTable[K, V],
+	hasher func(maphash.Seed, K) uint64,
+) (copied int) {
+	rootb := b
+	rootb.mu.Lock()
+	for {
+		for i := 0; i < entriesPerMapBucket; i++ {
+			if b.entries[i] != nil {
+				e := (*entryOf[K, V])(b.entries[i])
+				hash := shiftHash(hasher(destTable.seed, e.key))
+				bidx := uint64(len(destTable.buckets)-1) & hash
+				destb := &destTable.buckets[bidx]
+				appendToBucketOf(hash, b.entries[i], destb)
+				copied++
+			}
+		}
+		if b.next == nil {
+			rootb.mu.Unlock()
+			return
+		}
+		b = (*bucketOfPadded)(b.next)
+	}
+}
+
+// Range calls f sequentially for each key and value present in the
+// map. If f returns false, range stops the iteration.
+//
+// Range does not necessarily correspond to any consistent snapshot
+// of the Map's contents: no key will be visited more than once, but
+// if the value for any key is stored or deleted concurrently, Range
+// may reflect any mapping for that key from any point during the
+// Range call.
+//
+// It is safe to modify the map while iterating it. However, the
+// concurrent modification rules apply, i.e. the changes may not be
+// reflected in the subsequently iterated entries.
+func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
+	var zeroPtr unsafe.Pointer
+	// Pre-allocate an array big enough to fit entries for most hash tables.
+	bentries := make([]unsafe.Pointer, 0, 16*entriesPerMapBucket)
+	tablep := atomic.LoadPointer(&m.table)
+	table := *(*mapOfTable[K, V])(tablep)
+	for i := range table.buckets {
+		rootb := &table.buckets[i]
+		b := rootb
+		// Prevent concurrent modifications and copy all entries into
+		// the intermediate slice.
+		rootb.mu.Lock()
+		for {
+			for i := 0; i < entriesPerMapBucket; i++ {
+				if b.entries[i] != nil {
+					bentries = append(bentries, b.entries[i])
+				}
+			}
+			if b.next == nil {
+				rootb.mu.Unlock()
+				break
+			}
+			b = (*bucketOfPadded)(b.next)
+		}
+		// Call the function for all copied entries.
+		for j := range bentries {
+			entry := (*entryOf[K, V])(bentries[j])
+			if !f(entry.key, entry.value) {
+				return
+			}
+			// Remove the reference to avoid preventing the copied
+			// entries from being GCed until this method finishes.
+			bentries[j] = zeroPtr
+		}
+		bentries = bentries[:0]
+	}
+}
+
+// Clear deletes all keys and values currently stored in the map.
+func (m *MapOf[K, V]) Clear() {
+	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
+	m.resize(table, mapClearHint)
+}
+
+// Size returns the current size of the map.
+func (m *MapOf[K, V]) Size() int {
+	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
+	return int(table.sumSize())
+}
+
+func appendToBucketOf(hash uint64, entryPtr unsafe.Pointer, b *bucketOfPadded) {
+	for {
+		for i := 0; i < entriesPerMapBucket; i++ {
+			if b.entries[i] == nil {
+				b.hashes[i] = hash
+				b.entries[i] = entryPtr
+				return
+			}
+		}
+		if b.next == nil {
+			newb := new(bucketOfPadded)
+			newb.hashes[0] = hash
+			newb.entries[0] = entryPtr
+			b.next = unsafe.Pointer(newb)
+			return
+		}
+		b = (*bucketOfPadded)(b.next)
+	}
+}
+
+func isEmptyBucketOf(rootb *bucketOfPadded) bool {
+	b := rootb
+	for {
+		for i := 0; i < entriesPerMapBucket; i++ {
+			if b.entries[i] != nil {
+				return false
+			}
+		}
+		if b.next == nil {
+			return true
+		}
+		b = (*bucketOfPadded)(b.next)
+	}
+}
+
+func (table *mapOfTable[K, V]) addSize(bucketIdx uint64, delta int) {
+	cidx := uint64(len(table.size)-1) & bucketIdx
+	atomic.AddInt64(&table.size[cidx].c, int64(delta))
+}
+
+func (table *mapOfTable[K, V]) addSizePlain(bucketIdx uint64, delta int) {
+	cidx := uint64(len(table.size)-1) & bucketIdx
+	table.size[cidx].c += int64(delta)
+}
+
+func (table *mapOfTable[K, V]) sumSize() int64 {
+	sum := int64(0)
+	for i := range table.size {
+		sum += atomic.LoadInt64(&table.size[i].c)
+	}
+	return sum
+}
+
+func shiftHash(h uint64) uint64 {
+	// uint64(0) is a reserved value which stands for an empty slot.
+	if h == uint64(0) {
+		return uint64(1)
+	}
+	return h
+}
+
+// O(N) operation; use for debug purposes only
+func (m *MapOf[K, V]) stats() mapStats {
+	stats := mapStats{
+		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
+		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
+		MinEntries:   math.MaxInt32,
+	}
+	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
+	stats.RootBuckets = len(table.buckets)
+	stats.Counter = int(table.sumSize())
+	stats.CounterLen = len(table.size)
+	for i := range table.buckets {
+		nentries := 0
+		b := &table.buckets[i]
+		stats.TotalBuckets++
+		for {
+			nentriesLocal := 0
+			stats.Capacity += entriesPerMapBucket
+			for i := 0; i < entriesPerMapBucket; i++ {
+				if atomic.LoadPointer(&b.entries[i]) != nil {
+					stats.Size++
+					nentriesLocal++
+				}
+			}
+			nentries += nentriesLocal
+			if nentriesLocal == 0 {
+				stats.EmptyBuckets++
+			}
+			if b.next == nil {
+				break
+			}
+			b = (*bucketOfPadded)(b.next)
+			stats.TotalBuckets++
+		}
+		if nentries < stats.MinEntries {
+			stats.MinEntries = nentries
+		}
+		if nentries > stats.MaxEntries {
+			stats.MaxEntries = nentries
+		}
+	}
+	return stats
+}
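A minimal usage sketch of the generic MapOf constructors above; the keys and values are illustrative:

// A string-keyed MapOf and an integer-keyed MapOf created via the
// typed constructors; Compute atomically updates a stored value.
package main

import (
	"fmt"

	"github.com/puzpuzpuz/xsync/v2"
)

func main() {
	users := xsync.NewMapOf[string]() // MapOf[string, string]
	users.Store("id-1", "alice")

	counts := xsync.NewIntegerMapOf[int64, int]()
	counts.Store(42, 1)
	counts.Compute(42, func(old int, loaded bool) (int, bool) {
		return old + 1, false // increment, keep the entry
	})

	if v, ok := counts.Load(42); ok {
		fmt.Println(v) // 2
	}
	if name, ok := users.Load("id-1"); ok {
		fmt.Println(name) // alice
	}
}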
diff --git a/vendor/github.com/puzpuzpuz/xsync/v2/mpmcqueue.go b/vendor/github.com/puzpuzpuz/xsync/v2/mpmcqueue.go
new file mode 100644
index 0000000000..96584e698a
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v2/mpmcqueue.go
@@ -0,0 +1,137 @@
+package xsync
+
+import (
+	"runtime"
+	"sync/atomic"
+	"unsafe"
+)
+
+// A MPMCQueue is a bounded multi-producer multi-consumer concurrent
+// queue.
+//
+// MPMCQueue instances must be created with the NewMPMCQueue function.
+// A MPMCQueue must not be copied after first use.
+//
+// Based on the data structure from the following C++ library:
+// https://github.com/rigtorp/MPMCQueue
+type MPMCQueue struct {
+	cap  uint64
+	head uint64
+	//lint:ignore U1000 prevents false sharing
+	hpad [cacheLineSize - 8]byte
+	tail uint64
+	//lint:ignore U1000 prevents false sharing
+	tpad  [cacheLineSize - 8]byte
+	slots []slotPadded
+}
+
+type slotPadded struct {
+	slot
+	//lint:ignore U1000 prevents false sharing
+	pad [cacheLineSize - unsafe.Sizeof(slot{})]byte
+}
+
+type slot struct {
+	turn uint64
+	item interface{}
+}
+
+// NewMPMCQueue creates a new MPMCQueue instance with the given
+// capacity.
+func NewMPMCQueue(capacity int) *MPMCQueue {
+	if capacity < 1 {
+		panic("capacity must be positive number")
+	}
+	return &MPMCQueue{
+		cap:   uint64(capacity),
+		slots: make([]slotPadded, capacity),
+	}
+}
+
+// Enqueue inserts the given item into the queue.
+// Blocks if the queue is full.
+func (q *MPMCQueue) Enqueue(item interface{}) {
+	head := atomic.AddUint64(&q.head, 1) - 1
+	slot := &q.slots[q.idx(head)]
+	turn := q.turn(head) * 2
+	for atomic.LoadUint64(&slot.turn) != turn {
+		runtime.Gosched()
+	}
+	slot.item = item
+	atomic.StoreUint64(&slot.turn, turn+1)
+}
+
+// Dequeue retrieves and removes the item from the head of the queue.
+// Blocks if the queue is empty.
+func (q *MPMCQueue) Dequeue() interface{} {
+	tail := atomic.AddUint64(&q.tail, 1) - 1
+	slot := &q.slots[q.idx(tail)]
+	turn := q.turn(tail)*2 + 1
+	for atomic.LoadUint64(&slot.turn) != turn {
+		runtime.Gosched()
+	}
+	item := slot.item
+	slot.item = nil
+	atomic.StoreUint64(&slot.turn, turn+1)
+	return item
+}
+
+// TryEnqueue inserts the given item into the queue. Does not block
+// and returns immediately. The result indicates whether the item
+// was inserted, i.e. whether the queue wasn't full.
+func (q *MPMCQueue) TryEnqueue(item interface{}) bool {
+	head := atomic.LoadUint64(&q.head)
+	for {
+		slot := &q.slots[q.idx(head)]
+		turn := q.turn(head) * 2
+		if atomic.LoadUint64(&slot.turn) == turn {
+			if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
+				slot.item = item
+				atomic.StoreUint64(&slot.turn, turn+1)
+				return true
+			}
+		} else {
+			prevHead := head
+			head = atomic.LoadUint64(&q.head)
+			if head == prevHead {
+				return false
+			}
+		}
+		runtime.Gosched()
+	}
+}
+
+// TryDequeue retrieves and removes the item from the head of the
+// queue. Does not block and returns immediately. The ok result
+// indicates whether an item was retrieved, i.e. whether the queue
+// wasn't empty.
+func (q *MPMCQueue) TryDequeue() (item interface{}, ok bool) {
+	tail := atomic.LoadUint64(&q.tail)
+	for {
+		slot := &q.slots[q.idx(tail)]
+		turn := q.turn(tail)*2 + 1
+		if atomic.LoadUint64(&slot.turn) == turn {
+			if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
+				item = slot.item
+				ok = true
+				slot.item = nil
+				atomic.StoreUint64(&slot.turn, turn+1)
+				return
+			}
+		} else {
+			prevTail := tail
+			tail = atomic.LoadUint64(&q.tail)
+			if tail == prevTail {
+				return
+			}
+		}
+		runtime.Gosched()
+	}
+}
+
+func (q *MPMCQueue) idx(i uint64) uint64 {
+	return i % q.cap
+}
+
+func (q *MPMCQueue) turn(i uint64) uint64 {
+	return i / q.cap
+}
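A minimal usage sketch of the MPMCQueue API above; the capacity and item counts are arbitrary:

// One producer and one consumer exchange items through a bounded
// queue; Enqueue/Dequeue block, while the Try variants return
// immediately.
package main

import (
	"fmt"

	"github.com/puzpuzpuz/xsync/v2"
)

func main() {
	q := xsync.NewMPMCQueue(4)
	done := make(chan struct{})

	go func() {
		for i := 0; i < 8; i++ {
			q.Enqueue(i) // blocks while the queue is full
		}
	}()

	go func() {
		for i := 0; i < 8; i++ {
			fmt.Println(q.Dequeue()) // blocks while the queue is empty
		}
		close(done)
	}()
	<-done

	if q.TryEnqueue("extra") {
		if v, ok := q.TryDequeue(); ok {
			fmt.Println(v) // extra
		}
	}
}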
diff --git a/vendor/github.com/puzpuzpuz/xsync/v2/rbmutex.go b/vendor/github.com/puzpuzpuz/xsync/v2/rbmutex.go
new file mode 100644
index 0000000000..c4a503ff01
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v2/rbmutex.go
@@ -0,0 +1,145 @@
+package xsync
+
+import (
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// slow-down guard
+const nslowdown = 7
+
+// pool for reader tokens
+var rtokenPool sync.Pool
+
+// RToken is a reader lock token.
+type RToken struct {
+	slot uint32
+	//lint:ignore U1000 prevents false sharing
+	pad [cacheLineSize - 4]byte
+}
+
+// A RBMutex is a reader biased reader/writer mutual exclusion lock.
+// The lock can be held by many readers or a single writer.
+// The zero value for a RBMutex is an unlocked mutex.
+//
+// A RBMutex must not be copied after first use.
+//
+// RBMutex is based on a modified version of BRAVO
+// (Biased Locking for Reader-Writer Locks) algorithm:
+// https://arxiv.org/pdf/1810.01553.pdf
+//
+// RBMutex is a specialized mutex for scenarios, such as caches,
+// where the vast majority of locks are acquired by readers and write
+// lock acquire attempts are infrequent. In such scenarios, RBMutex
+// performs better than sync.RWMutex on large multicore machines.
+//
+// RBMutex extends sync.RWMutex internally and uses it as the "reader
+// bias disabled" fallback, so the same semantics apply. The only
+// noticeable difference is in reader tokens returned from the
+// RLock/RUnlock methods.
+type RBMutex struct {
+	rslots       []rslot
+	rmask        uint32
+	rbias        int32
+	inhibitUntil time.Time
+	rw           sync.RWMutex
+}
+
+type rslot struct {
+	mu int32
+	//lint:ignore U1000 prevents false sharing
+	pad [cacheLineSize - 4]byte
+}
+
+// NewRBMutex creates a new RBMutex instance.
+func NewRBMutex() *RBMutex {
+	nslots := nextPowOf2(parallelism())
+	mu := RBMutex{
+		rslots: make([]rslot, nslots),
+		rmask:  nslots - 1,
+		rbias:  1,
+	}
+	return &mu
+}
+
+// RLock locks m for reading and returns a reader token. The
+// token must be used in the later RUnlock call.
+//
+// Should not be used for recursive read locking; a blocked Lock
+// call excludes new readers from acquiring the lock.
+func (mu *RBMutex) RLock() *RToken {
+	if atomic.LoadInt32(&mu.rbias) == 1 {
+		t, ok := rtokenPool.Get().(*RToken)
+		if !ok {
+			t = new(RToken)
+			t.slot = fastrand()
+		}
+		// Try all available slots to distribute reader threads to slots.
+		for i := 0; i < len(mu.rslots); i++ {
+			slot := t.slot + uint32(i)
+			rslot := &mu.rslots[slot&mu.rmask]
+			rslotmu := atomic.LoadInt32(&rslot.mu)
+			if atomic.CompareAndSwapInt32(&rslot.mu, rslotmu, rslotmu+1) {
+				if atomic.LoadInt32(&mu.rbias) == 1 {
+					// Hot path succeeded.
+					t.slot = slot
+					return t
+				}
+				// The mutex is no longer reader biased. Go to the slow path.
+				atomic.AddInt32(&rslot.mu, -1)
+				rtokenPool.Put(t)
+				break
+			}
+			// Contention detected. Give a try with the next slot.
+		}
+	}
+	// Slow path.
+	mu.rw.RLock()
+	if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) {
+		atomic.StoreInt32(&mu.rbias, 1)
+	}
+	return nil
+}
+
+// RUnlock undoes a single RLock call. A reader token obtained from
+// the RLock call must be provided. RUnlock does not affect other
+// simultaneous readers. A panic is raised if m is not locked for
+// reading on entry to RUnlock.
+func (mu *RBMutex) RUnlock(t *RToken) {
+	if t == nil {
+		mu.rw.RUnlock()
+		return
+	}
+	if atomic.AddInt32(&mu.rslots[t.slot&mu.rmask].mu, -1) < 0 {
+		panic("invalid reader state detected")
+	}
+	rtokenPool.Put(t)
+}
+
+// Lock locks m for writing. If the lock is already locked for
+// reading or writing, Lock blocks until the lock is available.
+func (mu *RBMutex) Lock() {
+	mu.rw.Lock()
+	if atomic.LoadInt32(&mu.rbias) == 1 {
+		atomic.StoreInt32(&mu.rbias, 0)
+		start := time.Now()
+		for i := 0; i < len(mu.rslots); i++ {
+			for atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
+				runtime.Gosched()
+			}
+		}
+		mu.inhibitUntil = time.Now().Add(time.Since(start) * nslowdown)
+	}
+}
+
+// Unlock unlocks m for writing. A panic is raised if m is not locked
+// for writing on entry to Unlock.
+//
+// As with RWMutex, a locked RBMutex is not associated with a
+// particular goroutine. One goroutine may RLock (Lock) a RBMutex and
+// then arrange for another goroutine to RUnlock (Unlock) it.
+func (mu *RBMutex) Unlock() {
+	mu.rw.Unlock()
+}
diff --git a/vendor/github.com/puzpuzpuz/xsync/v2/util.go b/vendor/github.com/puzpuzpuz/xsync/v2/util.go
new file mode 100644
index 0000000000..81a596a1d1
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v2/util.go
@@ -0,0 +1,55 @@
+package xsync
+
+import (
+	"hash/maphash"
+	"runtime"
+	_ "unsafe"
+)
+
+// test-only assert()-like flag
+var assertionsEnabled = false
+
+const (
+	// cacheLineSize is used in paddings to prevent false sharing;
+	// 64B are used instead of 128B as a compromise between
+	// memory footprint and performance; 128B usage may give ~30%
+	// improvement on NUMA machines.
+	cacheLineSize = 64
+)
+
+// nextPowOf2 computes the next highest power of 2 of 32-bit v.
+// Source: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+func nextPowOf2(v uint32) uint32 {
+	if v == 0 {
+		return 1
+	}
+	v--
+	v |= v >> 1
+	v |= v >> 2
+	v |= v >> 4
+	v |= v >> 8
+	v |= v >> 16
+	v++
+	return v
+}
+
+func parallelism() uint32 {
+	maxProcs := uint32(runtime.GOMAXPROCS(0))
+	numCores := uint32(runtime.NumCPU())
+	if maxProcs < numCores {
+		return maxProcs
+	}
+	return numCores
+}
+
+// hashString calculates a hash of s with the given seed.
+func hashString(seed maphash.Seed, s string) uint64 {
+	var h maphash.Hash
+	h.SetSeed(seed)
+	h.WriteString(s)
+	return h.Sum64()
+}
+
+//go:noescape
+//go:linkname fastrand runtime.fastrand
+func fastrand() uint32
diff --git a/vendor/github.com/puzpuzpuz/xsync/v2/util_mapof.go b/vendor/github.com/puzpuzpuz/xsync/v2/util_mapof.go
new file mode 100644
index 0000000000..fbb00c4380
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v2/util_mapof.go
@@ -0,0 +1,22 @@
+//go:build go1.18
+// +build go1.18
+
+package xsync
+
+import (
+	"hash/maphash"
+	"unsafe"
+)
+
+// hashUint64 calculates a hash of k with the given seed.
+//
+//lint:ignore U1000 used in MapOf
+func hashUint64[K IntegerConstraint](seed maphash.Seed, k K) uint64 {
+	n := uint64(k)
+	// Java's Long standard hash function.
+	n = n ^ (n >> 32)
+	nseed := *(*uint64)(unsafe.Pointer(&seed))
+	// 64-bit variation of boost's hash_combine.
+	nseed ^= n + 0x9e3779b97f4a7c15 + (nseed << 12) + (nseed >> 4)
+	return nseed
+}
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go
new file mode 100644
index 0000000000..037a718abe
--- /dev/null
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go
@@ -0,0 +1,290 @@
+// Package encrypted provides a simple, secure system for encrypting data
+// symmetrically with a passphrase.
+//
+// It uses scrypt to derive a key from the passphrase and the NaCl secret box
+// cipher for authenticated encryption.
+package encrypted
+
+import (
+	"crypto/rand"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+
+	"golang.org/x/crypto/nacl/secretbox"
+	"golang.org/x/crypto/scrypt"
+)
+
+const saltSize = 32
+
+const (
+	boxKeySize   = 32
+	boxNonceSize = 24
+)
+
+// KDFParameterStrength defines the KDF parameter strength level to be used for
+// encryption key derivation.
+type KDFParameterStrength uint8
+
+const (
+	// Legacy defines legacy scrypt parameters (N:2^15, r:8, p:1)
+	Legacy KDFParameterStrength = iota + 1
+	// Standard defines standard scrypt parameters which target roughly 100ms of computation (N:2^16, r:8, p:1)
+	Standard
+	// OWASP defines OWASP recommended scrypt parameters (N:2^17, r:8, p:1)
+	OWASP
+)
+
+var (
+	// legacyParams represents old scrypt derivation parameters for backward
+	// compatibility.
+	legacyParams = scryptParams{
+		N: 32768, // 2^15
+		R: 8,
+		P: 1,
+	}
+
+	// standardParams defines scrypt parameters based on the scrypt creator's
+	// recommendation to keep key derivation time to roughly 100ms.
+	standardParams = scryptParams{
+		N: 65536, // 2^16
+		R: 8,
+		P: 1,
+	}
+
+	// owaspParams defines scrypt parameters recommended by OWASP
+	owaspParams = scryptParams{
+		N: 131072, // 2^17
+		R: 8,
+		P: 1,
+	}
+
+	// defaultParams defines scrypt parameters which will be used to generate a
+	// new key.
+	defaultParams = standardParams
+)
+
+const (
+	nameScrypt    = "scrypt"
+	nameSecretBox = "nacl/secretbox"
+)
+
+type data struct {
+	KDF        scryptKDF       `json:"kdf"`
+	Cipher     secretBoxCipher `json:"cipher"`
+	Ciphertext []byte          `json:"ciphertext"`
+}
+
+type scryptParams struct {
+	N int `json:"N"`
+	R int `json:"r"`
+	P int `json:"p"`
+}
+
+func (sp *scryptParams) Equal(in *scryptParams) bool {
+	return in != nil && sp.N == in.N && sp.P == in.P && sp.R == in.R
+}
+
+func newScryptKDF(level KDFParameterStrength) (scryptKDF, error) {
+	salt := make([]byte, saltSize)
+	if err := fillRandom(salt); err != nil {
+		return scryptKDF{}, fmt.Errorf("unable to generate a random salt: %w", err)
+	}
+
+	var params scryptParams
+	switch level {
+	case Legacy:
+		params = legacyParams
+	case Standard:
+		params = standardParams
+	case OWASP:
+		params = owaspParams
+	default:
+		// Fallback to default parameters
+		params = defaultParams
+	}
+
+	return scryptKDF{
+		Name:   nameScrypt,
+		Params: params,
+		Salt:   salt,
+	}, nil
+}
+
+type scryptKDF struct {
+	Name   string       `json:"name"`
+	Params scryptParams `json:"params"`
+	Salt   []byte       `json:"salt"`
+}
+
+func (s *scryptKDF) Key(passphrase []byte) ([]byte, error) {
+	return scrypt.Key(passphrase, s.Salt, s.Params.N, s.Params.R, s.Params.P, boxKeySize)
+}
+
+// CheckParams checks that the encoded KDF parameters are what we expect them to
+// be. If we do not do this, an attacker could cause a DoS by tampering with
+// them.
+func (s *scryptKDF) CheckParams() error {
+	switch {
+	case legacyParams.Equal(&s.Params):
+	case standardParams.Equal(&s.Params):
+	case owaspParams.Equal(&s.Params):
+	default:
+		return errors.New("unsupported scrypt parameters")
+	}
+
+	return nil
+}
+
+func newSecretBoxCipher() (secretBoxCipher, error) {
+	nonce := make([]byte, boxNonceSize)
+	if err := fillRandom(nonce); err != nil {
+		return secretBoxCipher{}, err
+	}
+	return secretBoxCipher{
+		Name:  nameSecretBox,
+		Nonce: nonce,
+	}, nil
+}
+
+type secretBoxCipher struct {
+	Name  string `json:"name"`
+	Nonce []byte `json:"nonce"`
+
+	encrypted bool
+}
+
+func (s *secretBoxCipher) Encrypt(plaintext, key []byte) []byte {
+	var keyBytes [boxKeySize]byte
+	var nonceBytes [boxNonceSize]byte
+
+	if len(key) != len(keyBytes) {
+		panic("incorrect key size")
+	}
+	if len(s.Nonce) != len(nonceBytes) {
+		panic("incorrect nonce size")
+	}
+
+	copy(keyBytes[:], key)
+	copy(nonceBytes[:], s.Nonce)
+
+	// ensure that we don't re-use nonces
+	if s.encrypted {
+		panic("Encrypt must only be called once for each cipher instance")
+	}
+	s.encrypted = true
+
+	return secretbox.Seal(nil, plaintext, &nonceBytes, &keyBytes)
+}
+
+func (s *secretBoxCipher) Decrypt(ciphertext, key []byte) ([]byte, error) {
+	var keyBytes [boxKeySize]byte
+	var nonceBytes [boxNonceSize]byte
+
+	if len(key) != len(keyBytes) {
+		panic("incorrect key size")
+	}
+	if len(s.Nonce) != len(nonceBytes) {
+		// return an error instead of panicking since the nonce is user input
+		return nil, errors.New("encrypted: incorrect nonce size")
+	}
+
+	copy(keyBytes[:], key)
+	copy(nonceBytes[:], s.Nonce)
+
+	res, ok := secretbox.Open(nil, ciphertext, &nonceBytes, &keyBytes)
+	if !ok {
+		return nil, errors.New("encrypted: decryption failed")
+	}
+	return res, nil
+}
+
+// Encrypt takes a passphrase and plaintext, and returns a JSON object
+// containing ciphertext and the details necessary to decrypt it.
+func Encrypt(plaintext, passphrase []byte) ([]byte, error) {
+	return EncryptWithCustomKDFParameters(plaintext, passphrase, Standard)
+}
+
+// EncryptWithCustomKDFParameters takes a passphrase, the plaintext and a KDF
+// parameter level (Legacy, Standard, or OWASP), and returns a JSON object
+// containing ciphertext and the details necessary to decrypt it.
+func EncryptWithCustomKDFParameters(plaintext, passphrase []byte, kdfLevel KDFParameterStrength) ([]byte, error) {
+	k, err := newScryptKDF(kdfLevel)
+	if err != nil {
+		return nil, err
+	}
+	key, err := k.Key(passphrase)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := newSecretBoxCipher()
+	if err != nil {
+		return nil, err
+	}
+
+	data := &data{
+		KDF:    k,
+		Cipher: c,
+	}
+	data.Ciphertext = c.Encrypt(plaintext, key)
+
+	return json.Marshal(data)
+}
+
+// Marshal encrypts the JSON encoding of v using passphrase.
+func Marshal(v interface{}, passphrase []byte) ([]byte, error) {
+	return MarshalWithCustomKDFParameters(v, passphrase, Standard)
+}
+
+// MarshalWithCustomKDFParameters encrypts the JSON encoding of v using passphrase.
+func MarshalWithCustomKDFParameters(v interface{}, passphrase []byte, kdfLevel KDFParameterStrength) ([]byte, error) {
+	data, err := json.MarshalIndent(v, "", "\t")
+	if err != nil {
+		return nil, err
+	}
+	return EncryptWithCustomKDFParameters(data, passphrase, kdfLevel)
+}
+
+// Decrypt takes a JSON-encoded ciphertext object encrypted using Encrypt and
+// tries to decrypt it using passphrase. If successful, it returns the
+// plaintext.
+func Decrypt(ciphertext, passphrase []byte) ([]byte, error) {
+	data := &data{}
+	if err := json.Unmarshal(ciphertext, data); err != nil {
+		return nil, err
+	}
+
+	if data.KDF.Name != nameScrypt {
+		return nil, fmt.Errorf("encrypted: unknown kdf name %q", data.KDF.Name)
+	}
+	if data.Cipher.Name != nameSecretBox {
+		return nil, fmt.Errorf("encrypted: unknown cipher name %q", data.Cipher.Name)
+	}
+	if err := data.KDF.CheckParams(); err != nil {
+		return nil, err
+	}
+
+	key, err := data.KDF.Key(passphrase)
+	if err != nil {
+		return nil, err
+	}
+
+	return data.Cipher.Decrypt(data.Ciphertext, key)
+}
+
+// Unmarshal decrypts the data using passphrase and unmarshals the resulting
+// plaintext into the value pointed to by v.
+func Unmarshal(data []byte, v interface{}, passphrase []byte) error {
+	decrypted, err := Decrypt(data, passphrase)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(decrypted, v)
+}
+
+func fillRandom(b []byte) error {
+	_, err := io.ReadFull(rand.Reader, b)
+	return err
+}
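
A minimal usage sketch of the vendored package above, assuming only the exported API shown in this file (the passphrase and message values are illustrative): Encrypt derives a key from the passphrase with scrypt and seals the plaintext with NaCl secretbox; Decrypt re-derives the key from the embedded KDF parameters and opens the box.

	package main

	import (
		"fmt"

		"github.com/secure-systems-lab/go-securesystemslib/encrypted"
	)

	func main() {
		passphrase := []byte("correct horse battery staple") // illustrative only

		// Encrypt returns a JSON blob carrying the scrypt parameters, salt, nonce and ciphertext.
		blob, err := encrypted.Encrypt([]byte("hello, world"), passphrase)
		if err != nil {
			panic(err)
		}

		// Decrypt validates the KDF parameters (see CheckParams) before deriving the key.
		plaintext, err := encrypted.Decrypt(blob, passphrase)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(plaintext)) // hello, world
	}
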
diff --git a/vendor/github.com/segmentio/asm/LICENSE b/vendor/github.com/segmentio/asm/LICENSE
new file mode 100644
index 0000000000..29e1ab6b05
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Segment
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/segmentio/asm/base64/base64.go b/vendor/github.com/segmentio/asm/base64/base64.go
new file mode 100644
index 0000000000..dd2128d4a9
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/base64.go
@@ -0,0 +1,67 @@
+package base64
+
+import (
+	"encoding/base64"
+)
+
+const (
+	StdPadding rune = base64.StdPadding
+	NoPadding  rune = base64.NoPadding
+
+	encodeStd  = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+	encodeURL  = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
+	encodeIMAP = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,"
+
+	letterRange = int8('Z' - 'A' + 1)
+)
+
+// StdEncoding is the standard base64 encoding, as defined in RFC 4648.
+var StdEncoding = NewEncoding(encodeStd)
+
+// URLEncoding is the alternate base64 encoding defined in RFC 4648.
+// It is typically used in URLs and file names.
+var URLEncoding = NewEncoding(encodeURL)
+
+// RawStdEncoding is the standard unpadded base64 encoding defined in RFC 4648 section 3.2.
+// This is the same as StdEncoding but omits padding characters.
+var RawStdEncoding = StdEncoding.WithPadding(NoPadding)
+
+// RawURLEncoding is the unpadded alternate base64 encoding defined in RFC 4648.
+// This is the same as URLEncoding but omits padding characters.
+var RawURLEncoding = URLEncoding.WithPadding(NoPadding)
+
+// NewEncoding returns a new padded Encoding defined by the given alphabet,
+// which must be a 64-byte string that does not contain the padding character
+// or CR / LF ('\r', '\n'). Unlike the standard library, the encoding alphabet
+// cannot be arbitrary, and it must follow one of the known standard encoding
+// variants.
+//
+// Required alphabet values:
+//     * [0,26):  characters 'A'..'Z'
+//     * [26,52): characters 'a'..'z'
+//     * [52,62): characters '0'..'9'
+// Flexible alphabet value options:
+//     * RFC 4648, RFC 1421, RFC 2045, RFC 2152, RFC 4880: '+' and '/'
+//     * RFC 4648 URI: '-' and '_'
+//     * RFC 3501: '+' and ','
+//
+// The resulting Encoding uses the default padding character ('='), which may
+// be changed or disabled via WithPadding. The padding character is unrestricted,
+// but it must be a character outside of the encoder alphabet.
+func NewEncoding(encoder string) *Encoding {
+	if len(encoder) != 64 {
+		panic("encoding alphabet is not 64-bytes long")
+	}
+
+	if _, ok := allowedEncoding[encoder]; !ok {
+		panic("non-standard encoding alphabets are not supported")
+	}
+
+	return newEncoding(encoder)
+}
+
+var allowedEncoding = map[string]struct{}{
+	encodeStd:  {},
+	encodeURL:  {},
+	encodeIMAP: {},
+}
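
A short sketch of how this API is used, assuming only the exported identifiers defined above (the input bytes are illustrative): the package mirrors encoding/base64, so the predeclared encodings act as drop-in replacements, while NewEncoding only accepts the three standard alphabets listed in allowedEncoding.

	package main

	import (
		"fmt"

		"github.com/segmentio/asm/base64"
	)

	func main() {
		msg := []byte("any + old & data") // illustrative only

		// RawURLEncoding uses the URL-safe alphabet with padding disabled.
		s := base64.RawURLEncoding.EncodeToString(msg)

		decoded, err := base64.RawURLEncoding.DecodeString(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(s, string(decoded))
	}
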
diff --git a/vendor/github.com/segmentio/asm/base64/base64_amd64.go b/vendor/github.com/segmentio/asm/base64/base64_amd64.go
new file mode 100644
index 0000000000..4136098eaa
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/base64_amd64.go
@@ -0,0 +1,78 @@
+//go:build amd64 && !purego
+// +build amd64,!purego
+
+package base64
+
+import (
+	"encoding/base64"
+
+	"github.com/segmentio/asm/cpu"
+	"github.com/segmentio/asm/cpu/x86"
+)
+
+const (
+	encLutSize   = 32
+	decLutSize   = 48
+	minEncodeLen = 28
+	minDecodeLen = 45
+)
+
+func newEncoding(encoder string) *Encoding {
+	e := &Encoding{base: base64.NewEncoding(encoder)}
+	if cpu.X86.Has(x86.AVX2) {
+		e.enableEncodeAVX2(encoder)
+		e.enableDecodeAVX2(encoder)
+	}
+	return e
+}
+
+func (e *Encoding) enableEncodeAVX2(encoder string) {
+	// Translate values 0..63 to the Base64 alphabet. There are five sets:
+	//
+	// From      To         Add    Index  Example
+	// [0..25]   [65..90]   +65        0  ABCDEFGHIJKLMNOPQRSTUVWXYZ
+	// [26..51]  [97..122]  +71        1  abcdefghijklmnopqrstuvwxyz
+	// [52..61]  [48..57]    -4  [2..11]  0123456789
+	// [62]      [43]       -19       12  +
+	// [63]      [47]       -16       13  /
+	tab := [encLutSize]int8{int8(encoder[0]), int8(encoder[letterRange]) - letterRange}
+	for i, ch := range encoder[2*letterRange:] {
+		tab[2+i] = int8(ch) - 2*letterRange - int8(i)
+	}
+
+	e.enc = encodeAVX2
+	e.enclut = tab
+}
+
+func (e *Encoding) enableDecodeAVX2(encoder string) {
+	c62, c63 := int8(encoder[62]), int8(encoder[63])
+	url := c63 == '_'
+	if url {
+		c63 = '/'
+	}
+
+	// Translate values from the Base64 alphabet using five sets. Values outside
+	// of these ranges are considered invalid:
+	//
+	// From       To        Add    Index  Example
+	// [47]       [63]      +16        1  /
+	// [43]       [62]      +19        2  +
+	// [48..57]   [52..61]   +4        3  0123456789
+	// [65..90]   [0..25]   -65      4,5  ABCDEFGHIJKLMNOPQRSTUVWXYZ
+	// [97..122]  [26..51]  -71      6,7  abcdefghijklmnopqrstuvwxyz
+	tab := [decLutSize]int8{
+		0, 63 - c63, 62 - c62, 4, -65, -65, -71, -71,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+		0x11, 0x11, 0x13, 0x1B, 0x1B, 0x1B, 0x1B, 0x1B,
+	}
+	tab[(c62&15)+16] = 0x1A
+	tab[(c63&15)+16] = 0x1A
+
+	if url {
+		e.dec = decodeAVX2URI
+	} else {
+		e.dec = decodeAVX2
+	}
+	e.declut = tab
+}
diff --git a/vendor/github.com/segmentio/asm/base64/base64_arm64.go b/vendor/github.com/segmentio/asm/base64/base64_arm64.go
new file mode 100644
index 0000000000..276f300287
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/base64_arm64.go
@@ -0,0 +1,42 @@
+//go:build arm64 && !purego
+// +build arm64,!purego
+
+package base64
+
+import (
+	"encoding/base64"
+)
+
+const (
+	encLutSize   = 16
+	decLutSize   = 2
+	minEncodeLen = 16 * 3
+	minDecodeLen = 8 * 4
+)
+
+func newEncoding(encoder string) *Encoding {
+	e := &Encoding{base: base64.NewEncoding(encoder)}
+	e.enableEncodeARM64(encoder)
+	e.enableDecodeARM64(encoder)
+	return e
+}
+
+func (e *Encoding) enableEncodeARM64(encoder string) {
+	c62, c63 := int8(encoder[62]), int8(encoder[63])
+	tab := [encLutSize]int8{
+		'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52,
+		'0' - 52, '0' - 52, '0' - 52, c62 - 62, c63 - 63, 'A', 0, 0,
+	}
+
+	e.enc = encodeARM64
+	e.enclut = tab
+}
+
+func (e *Encoding) enableDecodeARM64(encoder string) {
+	if encoder == encodeStd {
+		e.dec = decodeStdARM64
+	} else {
+		e.dec = decodeARM64
+	}
+	e.declut = [decLutSize]int8{int8(encoder[62]), int8(encoder[63])}
+}
diff --git a/vendor/github.com/segmentio/asm/base64/base64_asm.go b/vendor/github.com/segmentio/asm/base64/base64_asm.go
new file mode 100644
index 0000000000..f9afadd7f2
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/base64_asm.go
@@ -0,0 +1,94 @@
+//go:build (amd64 || arm64) && !purego
+// +build amd64 arm64
+// +build !purego
+
+package base64
+
+import (
+	"encoding/base64"
+
+	"github.com/segmentio/asm/internal/unsafebytes"
+)
+
+// An Encoding is a radix 64 encoding/decoding scheme, defined by a
+// 64-character alphabet.
+type Encoding struct {
+	enc    func(dst []byte, src []byte, lut *int8) (int, int)
+	enclut [encLutSize]int8
+
+	dec    func(dst []byte, src []byte, lut *int8) (int, int)
+	declut [decLutSize]int8
+
+	base *base64.Encoding
+}
+
+// WithPadding creates a duplicate Encoding updated with a specified padding
+// character, or NoPadding to disable padding. The padding character must not
+// be contained in the encoding alphabet, must not be '\r' or '\n', and must
+// be no greater than '\xFF'.
+func (enc Encoding) WithPadding(padding rune) *Encoding {
+	enc.base = enc.base.WithPadding(padding)
+	return &enc
+}
+
+// Strict creates a duplicate encoding updated with strict decoding enabled.
+// This requires that trailing padding bits are zero.
+func (enc Encoding) Strict() *Encoding {
+	enc.base = enc.base.Strict()
+	return &enc
+}
+
+// Encode encodes src using the defined encoding alphabet.
+// This will write EncodedLen(len(src)) bytes to dst.
+func (enc *Encoding) Encode(dst, src []byte) {
+	if len(src) >= minEncodeLen && enc.enc != nil {
+		d, s := enc.enc(dst, src, &enc.enclut[0])
+		dst = dst[d:]
+		src = src[s:]
+	}
+	enc.base.Encode(dst, src)
+}
+
+// EncodeToString encodes src using the encoding enc and
+// returns the encoded string.
+func (enc *Encoding) EncodeToString(src []byte) string {
+	buf := make([]byte, enc.base.EncodedLen(len(src)))
+	enc.Encode(buf, src)
+	return string(buf)
+}
+
+// EncodedLen calculates the base64-encoded byte length for a message
+// of length n.
+func (enc *Encoding) EncodedLen(n int) int {
+	return enc.base.EncodedLen(n)
+}
+
+// Decode decodes src using the defined encoding alphabet.
+// This will write at most DecodedLen(len(src)) bytes to dst and return the number of
+// bytes written.
+func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
+	var d, s int
+	if len(src) >= minDecodeLen && enc.dec != nil {
+		d, s = enc.dec(dst, src, &enc.declut[0])
+		dst = dst[d:]
+		src = src[s:]
+	}
+	n, err = enc.base.Decode(dst, src)
+	n += d
+	return
+}
+
+// DecodeString decodes the base64-encoded string s and returns
+// the decoded bytes.
+func (enc *Encoding) DecodeString(s string) ([]byte, error) {
+	src := unsafebytes.BytesOf(s)
+	dst := make([]byte, enc.base.DecodedLen(len(s)))
+	n, err := enc.Decode(dst, src)
+	return dst[:n], err
+}
+
+// DecodedLen calculates the decoded byte length for a base64-encoded message
+// of length n.
+func (enc *Encoding) DecodedLen(n int) int {
+	return enc.base.DecodedLen(n)
+}
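
A sketch of the buffer-based path defined above, assuming only the methods in this file: the accelerated kernel consumes the bulk of the input and the embedded encoding/base64 instance finishes the tail, so callers size buffers with EncodedLen and DecodedLen exactly as with the standard library.

	package main

	import (
		"fmt"

		"github.com/segmentio/asm/base64"
	)

	func main() {
		src := []byte("buffer-based round trip through the accelerated encoder")

		dst := make([]byte, base64.StdEncoding.EncodedLen(len(src)))
		base64.StdEncoding.Encode(dst, src)

		out := make([]byte, base64.StdEncoding.DecodedLen(len(dst)))
		n, err := base64.StdEncoding.Decode(out, dst)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(out[:n])) // original input
	}
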
diff --git a/vendor/github.com/segmentio/asm/base64/base64_default.go b/vendor/github.com/segmentio/asm/base64/base64_default.go
new file mode 100644
index 0000000000..1720da5ca7
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/base64_default.go
@@ -0,0 +1,14 @@
+//go:build purego || !(amd64 || arm64)
+// +build purego !amd64,!arm64
+
+package base64
+
+import "encoding/base64"
+
+// An Encoding is a radix 64 encoding/decoding scheme, defined by a
+// 64-character alphabet.
+type Encoding = base64.Encoding
+
+func newEncoding(encoder string) *Encoding {
+	return base64.NewEncoding(encoder)
+}
diff --git a/vendor/github.com/segmentio/asm/base64/decode_amd64.go b/vendor/github.com/segmentio/asm/base64/decode_amd64.go
new file mode 100644
index 0000000000..e85bf6a925
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/decode_amd64.go
@@ -0,0 +1,9 @@
+// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT.
+
+//go:build !purego
+
+package base64
+
+func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
+
+func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int)
diff --git a/vendor/github.com/segmentio/asm/base64/decode_amd64.s b/vendor/github.com/segmentio/asm/base64/decode_amd64.s
new file mode 100644
index 0000000000..ade5442c3b
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/decode_amd64.s
@@ -0,0 +1,143 @@
+// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT.
+
+//go:build !purego
+
+#include "textflag.h"
+
+DATA b64_dec_lut_hi<>+0(SB)/8, $0x0804080402011010
+DATA b64_dec_lut_hi<>+8(SB)/8, $0x1010101010101010
+DATA b64_dec_lut_hi<>+16(SB)/8, $0x0804080402011010
+DATA b64_dec_lut_hi<>+24(SB)/8, $0x1010101010101010
+GLOBL b64_dec_lut_hi<>(SB), RODATA|NOPTR, $32
+
+DATA b64_dec_madd1<>+0(SB)/8, $0x0140014001400140
+DATA b64_dec_madd1<>+8(SB)/8, $0x0140014001400140
+DATA b64_dec_madd1<>+16(SB)/8, $0x0140014001400140
+DATA b64_dec_madd1<>+24(SB)/8, $0x0140014001400140
+GLOBL b64_dec_madd1<>(SB), RODATA|NOPTR, $32
+
+DATA b64_dec_madd2<>+0(SB)/8, $0x0001100000011000
+DATA b64_dec_madd2<>+8(SB)/8, $0x0001100000011000
+DATA b64_dec_madd2<>+16(SB)/8, $0x0001100000011000
+DATA b64_dec_madd2<>+24(SB)/8, $0x0001100000011000
+GLOBL b64_dec_madd2<>(SB), RODATA|NOPTR, $32
+
+DATA b64_dec_shuf_lo<>+0(SB)/8, $0x0000000000000000
+DATA b64_dec_shuf_lo<>+8(SB)/8, $0x0600010200000000
+GLOBL b64_dec_shuf_lo<>(SB), RODATA|NOPTR, $16
+
+DATA b64_dec_shuf<>+0(SB)/8, $0x090a040506000102
+DATA b64_dec_shuf<>+8(SB)/8, $0x000000000c0d0e08
+DATA b64_dec_shuf<>+16(SB)/8, $0x0c0d0e08090a0405
+DATA b64_dec_shuf<>+24(SB)/8, $0x0000000000000000
+GLOBL b64_dec_shuf<>(SB), RODATA|NOPTR, $32
+
+// func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
+// Requires: AVX, AVX2, SSE4.1
+TEXT ·decodeAVX2(SB), NOSPLIT, $0-72
+	MOVQ         dst_base+0(FP), AX
+	MOVQ         src_base+24(FP), DX
+	MOVQ         lut+48(FP), SI
+	MOVQ         src_len+32(FP), DI
+	MOVB         $0x2f, CL
+	PINSRB       $0x00, CX, X8
+	VPBROADCASTB X8, Y8
+	XORQ         CX, CX
+	XORQ         BX, BX
+	VPXOR        Y7, Y7, Y7
+	VPERMQ       $0x44, (SI), Y6
+	VPERMQ       $0x44, 16(SI), Y4
+	VMOVDQA      b64_dec_lut_hi<>+0(SB), Y5
+
+loop:
+	VMOVDQU      (DX)(BX*1), Y0
+	VPSRLD       $0x04, Y0, Y2
+	VPAND        Y8, Y0, Y3
+	VPSHUFB      Y3, Y4, Y3
+	VPAND        Y8, Y2, Y2
+	VPSHUFB      Y2, Y5, Y9
+	VPTEST       Y9, Y3
+	JNE          done
+	VPCMPEQB     Y8, Y0, Y3
+	VPADDB       Y3, Y2, Y2
+	VPSHUFB      Y2, Y6, Y2
+	VPADDB       Y0, Y2, Y0
+	VPMADDUBSW   b64_dec_madd1<>+0(SB), Y0, Y0
+	VPMADDWD     b64_dec_madd2<>+0(SB), Y0, Y0
+	VEXTRACTI128 $0x01, Y0, X1
+	VPSHUFB      b64_dec_shuf_lo<>+0(SB), X1, X1
+	VPSHUFB      b64_dec_shuf<>+0(SB), Y0, Y0
+	VPBLENDD     $0x08, Y1, Y0, Y1
+	VPBLENDD     $0xc0, Y7, Y1, Y1
+	VMOVDQU      Y1, (AX)(CX*1)
+	ADDQ         $0x18, CX
+	ADDQ         $0x20, BX
+	SUBQ         $0x20, DI
+	CMPQ         DI, $0x2d
+	JB           done
+	JMP          loop
+
+done:
+	MOVQ CX, ret+56(FP)
+	MOVQ BX, ret1+64(FP)
+	VZEROUPPER
+	RET
+
+// func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int)
+// Requires: AVX, AVX2, SSE4.1
+TEXT ·decodeAVX2URI(SB), NOSPLIT, $0-72
+	MOVB         $0x2f, AL
+	PINSRB       $0x00, AX, X0
+	VPBROADCASTB X0, Y0
+	MOVB         $0x5f, AL
+	PINSRB       $0x00, AX, X1
+	VPBROADCASTB X1, Y1
+	MOVQ         dst_base+0(FP), AX
+	MOVQ         src_base+24(FP), DX
+	MOVQ         lut+48(FP), SI
+	MOVQ         src_len+32(FP), DI
+	MOVB         $0x2f, CL
+	PINSRB       $0x00, CX, X10
+	VPBROADCASTB X10, Y10
+	XORQ         CX, CX
+	XORQ         BX, BX
+	VPXOR        Y9, Y9, Y9
+	VPERMQ       $0x44, (SI), Y8
+	VPERMQ       $0x44, 16(SI), Y6
+	VMOVDQA      b64_dec_lut_hi<>+0(SB), Y7
+
+loop:
+	VMOVDQU      (DX)(BX*1), Y2
+	VPCMPEQB     Y2, Y1, Y4
+	VPBLENDVB    Y4, Y0, Y2, Y2
+	VPSRLD       $0x04, Y2, Y4
+	VPAND        Y10, Y2, Y5
+	VPSHUFB      Y5, Y6, Y5
+	VPAND        Y10, Y4, Y4
+	VPSHUFB      Y4, Y7, Y11
+	VPTEST       Y11, Y5
+	JNE          done
+	VPCMPEQB     Y10, Y2, Y5
+	VPADDB       Y5, Y4, Y4
+	VPSHUFB      Y4, Y8, Y4
+	VPADDB       Y2, Y4, Y2
+	VPMADDUBSW   b64_dec_madd1<>+0(SB), Y2, Y2
+	VPMADDWD     b64_dec_madd2<>+0(SB), Y2, Y2
+	VEXTRACTI128 $0x01, Y2, X3
+	VPSHUFB      b64_dec_shuf_lo<>+0(SB), X3, X3
+	VPSHUFB      b64_dec_shuf<>+0(SB), Y2, Y2
+	VPBLENDD     $0x08, Y3, Y2, Y3
+	VPBLENDD     $0xc0, Y9, Y3, Y3
+	VMOVDQU      Y3, (AX)(CX*1)
+	ADDQ         $0x18, CX
+	ADDQ         $0x20, BX
+	SUBQ         $0x20, DI
+	CMPQ         DI, $0x2d
+	JB           done
+	JMP          loop
+
+done:
+	MOVQ CX, ret+56(FP)
+	MOVQ BX, ret1+64(FP)
+	VZEROUPPER
+	RET
diff --git a/vendor/github.com/segmentio/asm/base64/decode_arm64.go b/vendor/github.com/segmentio/asm/base64/decode_arm64.go
new file mode 100644
index 0000000000..d44baa1dc5
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/decode_arm64.go
@@ -0,0 +1,7 @@
+//go:build !purego
+// +build !purego
+
+package base64
+
+func decodeARM64(dst []byte, src []byte, lut *int8) (int, int)
+func decodeStdARM64(dst []byte, src []byte, lut *int8) (int, int)
diff --git a/vendor/github.com/segmentio/asm/base64/decode_arm64.s b/vendor/github.com/segmentio/asm/base64/decode_arm64.s
new file mode 100644
index 0000000000..4374d5ce17
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/decode_arm64.s
@@ -0,0 +1,203 @@
+#include "textflag.h"
+
+#define LOAD_ARGS()                                            \
+	MOVD    dst_base+0(FP), R0;                                  \
+	MOVD    R0, R3;                                              \
+	MOVD    src_base+24(FP), R1;                                 \
+	MOVD    R1, R4;                                              \
+	MOVD    src_len+32(FP), R2;                                  \
+	BIC     $31, R2, R2;                                         \
+	ADD     R1, R2, R2
+
+#define LOAD_ARG_LUT()                                         \
+	MOVD    lut+48(FP), R5;                                      \
+	VLD2R   (R5), [V0.B16, V1.B16]
+
+#define LOAD_CONST_LUT()                                       \
+	MOVD    $·mask_lut(SB), R6;                                  \
+	MOVD    $·bpos_lut(SB), R7;                                  \
+	MOVD    $·shft_lut(SB), R8;                                  \
+	VLD1    (R6), [V2.B16];                                      \
+	VLD1    (R7), [V3.B16];                                      \
+	VLD1    (R8), [V4.B16];                                      \
+	VMOVI   $43, V5.B8;                                          \
+	VMOVI   $47, V6.B8;                                          \
+	VMOVI   $15, V7.B8;                                          \
+	VMOVI   $16, V8.B8;                                          \
+
+#define LOAD_INPUT()                                           \
+	VLD4    (R4), [V10.B8, V11.B8, V12.B8, V13.B8]
+
+#define COMPARE_INPUT(v)                                       \
+	VCMEQ   V10.B8, v.B8, V14.B8;                                \
+	VCMEQ   V11.B8, v.B8, V15.B8;                                \
+	VCMEQ   V12.B8, v.B8, V16.B8;                                \
+	VCMEQ   V13.B8, v.B8, V17.B8
+
+#define UPDATE_INPUT(v)                                        \
+	VBIT    V14.B8, v.B8, V10.B8;                                \
+	VBIT    V15.B8, v.B8, V11.B8;                                \
+	VBIT    V16.B8, v.B8, V12.B8;                                \
+	VBIT    V17.B8, v.B8, V13.B8
+
+#define DECODE_INPUT(goto_err)                                 \
+	/* Create hi/lo nibbles */                                   \
+	VUSHR   $4, V10.B8, V18.B8;                                  \
+	VUSHR   $4, V11.B8, V19.B8;                                  \
+	VUSHR   $4, V12.B8, V20.B8;                                  \
+	VUSHR   $4, V13.B8, V21.B8;                                  \
+	VAND    V7.B8, V10.B8, V22.B8;                               \
+	VAND    V7.B8, V11.B8, V23.B8;                               \
+	VAND    V7.B8, V12.B8, V24.B8;                               \
+	VAND    V7.B8, V13.B8, V25.B8;                               \
+	/* Detect invalid input characters */                        \
+	VTBL    V22.B8, [V2.B8], V22.B8;                             \
+	VTBL    V23.B8, [V2.B8], V23.B8;                             \
+	VTBL    V24.B8, [V2.B8], V24.B8;                             \
+	VTBL    V25.B8, [V2.B8], V25.B8;                             \
+	VTBL    V18.B8, [V3.B8], V26.B8;                             \
+	VTBL    V19.B8, [V3.B8], V27.B8;                             \
+	VTBL    V20.B8, [V3.B8], V28.B8;                             \
+	VTBL    V21.B8, [V3.B8], V29.B8;                             \
+	VAND    V22.B8, V26.B8, V26.B8;                              \
+	VAND    V23.B8, V27.B8, V27.B8;                              \
+	VAND    V24.B8, V28.B8, V28.B8;                              \
+	VAND    V25.B8, V29.B8, V29.B8;                              \
+	WORD    $0x0e209b5a /* VCMEQ   $0, V26.B8, V26.B8 */;        \
+	WORD    $0x0e209b7b /* VCMEQ   $0, V27.B8, V27.B8 */;        \
+	WORD    $0x0e209b9c /* VCMEQ   $0, V28.B8, V28.B8 */;        \
+	WORD    $0x0e209bbd /* VCMEQ   $0, V29.B8, V29.B8 */;        \
+	VORR    V26.B8, V27.B8, V26.B8;                              \
+	VORR    V28.B8, V29.B8, V28.B8;                              \
+	VORR    V26.B8, V28.B8, V26.B8;                              \
+	VMOV    V26.D[0], R5;                                        \
+	VMOV    V26.D[1], R6;                                        \
+	ORR     R6, R5;                                              \
+	CBNZ    R5, goto_err;                                        \
+	/* Shift hi nibbles */                                       \
+	VTBL    V18.B8, [V4.B8], V18.B8;                             \
+	VTBL    V19.B8, [V4.B8], V19.B8;                             \
+	VTBL    V20.B8, [V4.B8], V20.B8;                             \
+	VTBL    V21.B8, [V4.B8], V21.B8;                             \
+	VBIT    V14.B8, V8.B8, V18.B8;                               \
+	VBIT    V15.B8, V8.B8, V19.B8;                               \
+	VBIT    V16.B8, V8.B8, V20.B8;                               \
+	VBIT    V17.B8, V8.B8, V21.B8;                               \
+	/* Combine results */                                        \
+	VADD    V18.B8, V10.B8, V10.B8;                              \
+	VADD    V19.B8, V11.B8, V11.B8;                              \
+	VADD    V20.B8, V12.B8, V12.B8;                              \
+	VADD    V21.B8, V13.B8, V13.B8;                              \
+	VUSHR   $4, V11.B8, V14.B8;                                  \
+	VUSHR   $2, V12.B8, V15.B8;                                  \
+	VSHL    $2, V10.B8, V10.B8;                                  \
+	VSHL    $4, V11.B8, V11.B8;                                  \
+	VSHL    $6, V12.B8, V12.B8;                                  \
+	VORR    V10.B8, V14.B8, V16.B8;                              \
+	VORR    V11.B8, V15.B8, V17.B8;                              \
+	VORR    V12.B8, V13.B8, V18.B8
+
+#define ADVANCE_LOOP(goto_loop)                                \
+	VST3.P  [V16.B8, V17.B8, V18.B8], 24(R3);                    \
+	ADD     $32, R4;                                             \
+	CMP     R4, R2;                                              \
+	BGT     goto_loop
+
+#define RETURN()                                               \
+	SUB     R0, R3;                                              \
+	SUB     R1, R4;                                              \
+	MOVD    R3, ret+56(FP);                                      \
+	MOVD    R4, ret1+64(FP);                                     \
+	RET
+
+
+// func decodeARM64(dst []byte, src []byte, lut *int8) (int, int)
+TEXT ·decodeARM64(SB),NOSPLIT,$0-72
+	LOAD_ARGS()
+	LOAD_ARG_LUT()
+	LOAD_CONST_LUT()
+
+loop:
+	LOAD_INPUT()
+
+	// Compare and normalize the 63rd and 64th characters
+	COMPARE_INPUT(V0)
+	UPDATE_INPUT(V5)
+	COMPARE_INPUT(V1)
+	UPDATE_INPUT(V6)
+
+	DECODE_INPUT(done) // Detect invalid input characters
+	ADVANCE_LOOP(loop) // Store results and continue
+
+done:
+	RETURN()
+
+
+// func decodeStdARM64(dst []byte, src []byte, lut *int8) (int, int)
+TEXT ·decodeStdARM64(SB),NOSPLIT,$0-72
+	LOAD_ARGS()
+	LOAD_CONST_LUT()
+
+loop:
+	LOAD_INPUT()
+	COMPARE_INPUT(V6)  // Compare to '+'
+	DECODE_INPUT(done) // Detect invalid input characters
+	ADVANCE_LOOP(loop) // Store results and continue
+
+done:
+	RETURN()
+
+
+DATA  ·mask_lut+0x00(SB)/1, $0xa8
+DATA  ·mask_lut+0x01(SB)/1, $0xf8
+DATA  ·mask_lut+0x02(SB)/1, $0xf8
+DATA  ·mask_lut+0x03(SB)/1, $0xf8
+DATA  ·mask_lut+0x04(SB)/1, $0xf8
+DATA  ·mask_lut+0x05(SB)/1, $0xf8
+DATA  ·mask_lut+0x06(SB)/1, $0xf8
+DATA  ·mask_lut+0x07(SB)/1, $0xf8
+DATA  ·mask_lut+0x08(SB)/1, $0xf8
+DATA  ·mask_lut+0x09(SB)/1, $0xf8
+DATA  ·mask_lut+0x0a(SB)/1, $0xf0
+DATA  ·mask_lut+0x0b(SB)/1, $0x54
+DATA  ·mask_lut+0x0c(SB)/1, $0x50
+DATA  ·mask_lut+0x0d(SB)/1, $0x50
+DATA  ·mask_lut+0x0e(SB)/1, $0x50
+DATA  ·mask_lut+0x0f(SB)/1, $0x54
+GLOBL ·mask_lut(SB), NOPTR|RODATA, $16
+
+DATA  ·bpos_lut+0x00(SB)/1, $0x01
+DATA  ·bpos_lut+0x01(SB)/1, $0x02
+DATA  ·bpos_lut+0x02(SB)/1, $0x04
+DATA  ·bpos_lut+0x03(SB)/1, $0x08
+DATA  ·bpos_lut+0x04(SB)/1, $0x10
+DATA  ·bpos_lut+0x05(SB)/1, $0x20
+DATA  ·bpos_lut+0x06(SB)/1, $0x40
+DATA  ·bpos_lut+0x07(SB)/1, $0x80
+DATA  ·bpos_lut+0x08(SB)/1, $0x00
+DATA  ·bpos_lut+0x09(SB)/1, $0x00
+DATA  ·bpos_lut+0x0a(SB)/1, $0x00
+DATA  ·bpos_lut+0x0b(SB)/1, $0x00
+DATA  ·bpos_lut+0x0c(SB)/1, $0x00
+DATA  ·bpos_lut+0x0d(SB)/1, $0x00
+DATA  ·bpos_lut+0x0e(SB)/1, $0x00
+DATA  ·bpos_lut+0x0f(SB)/1, $0x00
+GLOBL ·bpos_lut(SB), NOPTR|RODATA, $16
+
+DATA  ·shft_lut+0x00(SB)/1, $0x00
+DATA  ·shft_lut+0x01(SB)/1, $0x00
+DATA  ·shft_lut+0x02(SB)/1, $0x13
+DATA  ·shft_lut+0x03(SB)/1, $0x04
+DATA  ·shft_lut+0x04(SB)/1, $0xbf
+DATA  ·shft_lut+0x05(SB)/1, $0xbf
+DATA  ·shft_lut+0x06(SB)/1, $0xb9
+DATA  ·shft_lut+0x07(SB)/1, $0xb9
+DATA  ·shft_lut+0x08(SB)/1, $0x00
+DATA  ·shft_lut+0x09(SB)/1, $0x00
+DATA  ·shft_lut+0x0a(SB)/1, $0x00
+DATA  ·shft_lut+0x0b(SB)/1, $0x00
+DATA  ·shft_lut+0x0c(SB)/1, $0x00
+DATA  ·shft_lut+0x0d(SB)/1, $0x00
+DATA  ·shft_lut+0x0e(SB)/1, $0x00
+DATA  ·shft_lut+0x0f(SB)/1, $0x00
+GLOBL ·shft_lut(SB), NOPTR|RODATA, $16
diff --git a/vendor/github.com/segmentio/asm/base64/encode_amd64.go b/vendor/github.com/segmentio/asm/base64/encode_amd64.go
new file mode 100644
index 0000000000..a83c81f157
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/encode_amd64.go
@@ -0,0 +1,7 @@
+// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT.
+
+//go:build !purego
+
+package base64
+
+func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
diff --git a/vendor/github.com/segmentio/asm/base64/encode_amd64.s b/vendor/github.com/segmentio/asm/base64/encode_amd64.s
new file mode 100644
index 0000000000..6797c977e8
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/encode_amd64.s
@@ -0,0 +1,87 @@
+// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT.
+
+//go:build !purego
+
+#include "textflag.h"
+
+// func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
+// Requires: AVX, AVX2, SSE4.1
+TEXT ·encodeAVX2(SB), NOSPLIT, $0-72
+	MOVQ         dst_base+0(FP), AX
+	MOVQ         src_base+24(FP), DX
+	MOVQ         lut+48(FP), SI
+	MOVQ         src_len+32(FP), DI
+	MOVB         $0x33, CL
+	PINSRB       $0x00, CX, X4
+	VPBROADCASTB X4, Y4
+	MOVB         $0x19, CL
+	PINSRB       $0x00, CX, X5
+	VPBROADCASTB X5, Y5
+	XORQ         CX, CX
+	XORQ         BX, BX
+
+	// Load the 16-byte LUT into both lanes of the register
+	VPERMQ $0x44, (SI), Y3
+
+	// Load the first block using a mask to avoid potential fault
+	VMOVDQU    b64_enc_load<>+0(SB), Y0
+	VPMASKMOVD -4(DX)(BX*1), Y0, Y0
+
+loop:
+	VPSHUFB  b64_enc_shuf<>+0(SB), Y0, Y0
+	VPAND    b64_enc_mask1<>+0(SB), Y0, Y1
+	VPSLLW   $0x08, Y1, Y2
+	VPSLLW   $0x04, Y1, Y1
+	VPBLENDW $0xaa, Y2, Y1, Y2
+	VPAND    b64_enc_mask2<>+0(SB), Y0, Y1
+	VPMULHUW b64_enc_mult<>+0(SB), Y1, Y0
+	VPOR     Y0, Y2, Y0
+	VPSUBUSB Y4, Y0, Y1
+	VPCMPGTB Y5, Y0, Y2
+	VPSUBB   Y2, Y1, Y1
+	VPSHUFB  Y1, Y3, Y1
+	VPADDB   Y0, Y1, Y0
+	VMOVDQU  Y0, (AX)(CX*1)
+	ADDQ     $0x20, CX
+	ADDQ     $0x18, BX
+	SUBQ     $0x18, DI
+	CMPQ     DI, $0x20
+	JB       done
+	VMOVDQU  -4(DX)(BX*1), Y0
+	JMP      loop
+
+done:
+	MOVQ CX, ret+56(FP)
+	MOVQ BX, ret1+64(FP)
+	VZEROUPPER
+	RET
+
+DATA b64_enc_load<>+0(SB)/8, $0x8000000000000000
+DATA b64_enc_load<>+8(SB)/8, $0x8000000080000000
+DATA b64_enc_load<>+16(SB)/8, $0x8000000080000000
+DATA b64_enc_load<>+24(SB)/8, $0x8000000080000000
+GLOBL b64_enc_load<>(SB), RODATA|NOPTR, $32
+
+DATA b64_enc_shuf<>+0(SB)/8, $0x0809070805060405
+DATA b64_enc_shuf<>+8(SB)/8, $0x0e0f0d0e0b0c0a0b
+DATA b64_enc_shuf<>+16(SB)/8, $0x0405030401020001
+DATA b64_enc_shuf<>+24(SB)/8, $0x0a0b090a07080607
+GLOBL b64_enc_shuf<>(SB), RODATA|NOPTR, $32
+
+DATA b64_enc_mask1<>+0(SB)/8, $0x003f03f0003f03f0
+DATA b64_enc_mask1<>+8(SB)/8, $0x003f03f0003f03f0
+DATA b64_enc_mask1<>+16(SB)/8, $0x003f03f0003f03f0
+DATA b64_enc_mask1<>+24(SB)/8, $0x003f03f0003f03f0
+GLOBL b64_enc_mask1<>(SB), RODATA|NOPTR, $32
+
+DATA b64_enc_mask2<>+0(SB)/8, $0x0fc0fc000fc0fc00
+DATA b64_enc_mask2<>+8(SB)/8, $0x0fc0fc000fc0fc00
+DATA b64_enc_mask2<>+16(SB)/8, $0x0fc0fc000fc0fc00
+DATA b64_enc_mask2<>+24(SB)/8, $0x0fc0fc000fc0fc00
+GLOBL b64_enc_mask2<>(SB), RODATA|NOPTR, $32
+
+DATA b64_enc_mult<>+0(SB)/8, $0x0400004004000040
+DATA b64_enc_mult<>+8(SB)/8, $0x0400004004000040
+DATA b64_enc_mult<>+16(SB)/8, $0x0400004004000040
+DATA b64_enc_mult<>+24(SB)/8, $0x0400004004000040
+GLOBL b64_enc_mult<>(SB), RODATA|NOPTR, $32
diff --git a/vendor/github.com/segmentio/asm/base64/encode_arm64.go b/vendor/github.com/segmentio/asm/base64/encode_arm64.go
new file mode 100644
index 0000000000..b6a3814928
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/encode_arm64.go
@@ -0,0 +1,6 @@
+//go:build !purego
+// +build !purego
+
+package base64
+
+func encodeARM64(dst []byte, src []byte, lut *int8) (int, int)
diff --git a/vendor/github.com/segmentio/asm/base64/encode_arm64.s b/vendor/github.com/segmentio/asm/base64/encode_arm64.s
new file mode 100644
index 0000000000..4654313bbd
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/encode_arm64.s
@@ -0,0 +1,97 @@
+#include "textflag.h"
+
+#define Rdst  R0
+#define Rsrc  R1
+#define Rlen  R2
+#define Rwr   R3
+#define Rrem  R4
+#define Rtmp  R5
+
+#define Vlut V0
+#define Vfld0 V6
+#define Vfld1 V7
+#define Vfld2 V8
+#define Vfld3 V9
+#define Vsrc0 V10
+#define Vsrc1 V11
+#define Vsrc2 V12
+#define Vr0a V13
+#define Vr1a V14
+#define Vr2a V15
+#define Vr3a V16
+#define Vr0b V17
+#define Vr1b V18
+#define Vr2b V19
+#define Vr3b V20
+
+// func encodeARM64(dst []byte, src []byte, lut *int8) (int, int)
+TEXT ·encodeARM64(SB),NOSPLIT,$0-72
+	// Load dst/src info
+	MOVD    dst_base+0(FP), Rdst
+	MOVD    src_base+24(FP), Rsrc
+	MOVD    src_len+32(FP), Rlen
+	MOVD    lut+48(FP), Rtmp
+	VLD1    (Rtmp), [Vlut.B16]
+
+	MOVD    Rlen, Rrem
+	MOVD    Rdst, Rwr
+
+	VMOVI   $51, V1.B16
+	VMOVI   $26, V2.B16
+	VMOVI   $63, V3.B16
+	VMOVI   $13, V4.B16
+
+loop:
+	VLD3.P  48(Rsrc), [Vsrc0.B16, Vsrc1.B16, Vsrc2.B16]
+
+	// Split 3 source blocks into 4 lookup inputs
+	VUSHR   $2, Vsrc0.B16, Vfld0.B16
+	VUSHR   $4, Vsrc1.B16, Vfld1.B16
+	VUSHR   $6, Vsrc2.B16, Vfld2.B16
+	VSHL    $4, Vsrc0.B16, Vsrc0.B16
+	VSHL    $2, Vsrc1.B16, Vsrc1.B16
+	VORR    Vsrc0.B16, Vfld1.B16, Vfld1.B16
+	VORR    Vsrc1.B16, Vfld2.B16, Vfld2.B16
+	VAND    V3.B16, Vfld1.B16, Vfld1.B16
+	VAND    V3.B16, Vfld2.B16, Vfld2.B16
+	VAND    V3.B16, Vsrc2.B16, Vfld3.B16
+
+	WORD    $0x6e212ccd // VUQSUB  V1.B16, Vfld0.B16, Vr0a.B16
+	WORD    $0x4e263451 // VCMGT   V2.B16, Vfld0.B16, Vr0b.B16
+	VAND    V4.B16, Vr0b.B16, Vr0b.B16
+	VORR    Vr0b.B16, Vr0a.B16, Vr0a.B16
+	WORD    $0x6e212cee // VUQSUB  V1.B16, Vfld1.B16, Vr1a.B16
+	WORD    $0x4e273452 // VCMGT   V2.B16, Vfld1.B16, Vr1b.B16
+	VAND    V4.B16, Vr1b.B16, Vr1b.B16
+	VORR    Vr1b.B16, Vr1a.B16, Vr1a.B16
+	WORD    $0x6e212d0f // VUQSUB  V1.B16, Vfld2.B16, Vr2a.B16
+	WORD    $0x4e283453 // VCMGT   V2.B16, Vfld2.B16, Vr2b.B16
+	VAND    V4.B16, Vr2b.B16, Vr2b.B16
+	VORR    Vr2b.B16, Vr2a.B16, Vr2a.B16
+	WORD    $0x6e212d30 // VUQSUB  V1.B16, Vfld3.B16, Vr3a.B16
+	WORD    $0x4e293454 // VCMGT   V2.B16, Vfld3.B16, Vr3b.B16
+	VAND    V4.B16, Vr3b.B16, Vr3b.B16
+	VORR    Vr3b.B16, Vr3a.B16, Vr3a.B16
+
+	// Add result of lookup table to each field
+	VTBL    Vr0a.B16, [Vlut.B16], Vr0a.B16
+	VADD    Vr0a.B16, Vfld0.B16, Vfld0.B16
+	VTBL    Vr1a.B16, [Vlut.B16], Vr1a.B16
+	VADD    Vr1a.B16, Vfld1.B16, Vfld1.B16
+	VTBL    Vr2a.B16, [Vlut.B16], Vr2a.B16
+	VADD    Vr2a.B16, Vfld2.B16, Vfld2.B16
+	VTBL    Vr3a.B16, [Vlut.B16], Vr3a.B16
+	VADD    Vr3a.B16, Vfld3.B16, Vfld3.B16
+
+	VST4.P  [Vfld0.B16, Vfld1.B16, Vfld2.B16, Vfld3.B16], 64(Rwr)
+	SUB     $48, Rrem
+	CMP     $48, Rrem
+	BGE     loop
+
+done:
+	SUB     Rdst, Rwr
+	SUB     Rrem, Rlen
+	MOVD    Rwr, ret+56(FP)
+	MOVD    Rlen, ret1+64(FP)
+	RET
+
diff --git a/vendor/github.com/segmentio/asm/cpu/arm/arm.go b/vendor/github.com/segmentio/asm/cpu/arm/arm.go
new file mode 100644
index 0000000000..47c695a075
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/arm/arm.go
@@ -0,0 +1,80 @@
+package arm
+
+import (
+	"github.com/segmentio/asm/cpu/cpuid"
+	. "golang.org/x/sys/cpu"
+)
+
+type CPU cpuid.CPU
+
+func (cpu CPU) Has(feature Feature) bool {
+	return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
+}
+
+func (cpu *CPU) set(feature Feature, enable bool) {
+	(*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
+}
+
+type Feature cpuid.Feature
+
+const (
+	SWP      Feature = 1 << iota // SWP instruction support
+	HALF                         // Half-word load and store support
+	THUMB                        // ARM Thumb instruction set
+	BIT26                        // Address space limited to 26-bits
+	FASTMUL                      // 32-bit operand, 64-bit result multiplication support
+	FPA                          // Floating point arithmetic support
+	VFP                          // Vector floating point support
+	EDSP                         // DSP Extensions support
+	JAVA                         // Java instruction set
+	IWMMXT                       // Intel Wireless MMX technology support
+	CRUNCH                       // MaverickCrunch context switching and handling
+	THUMBEE                      // Thumb EE instruction set
+	NEON                         // NEON instruction set
+	VFPv3                        // Vector floating point version 3 support
+	VFPv3D16                     // Vector floating point version 3 D8-D15
+	TLS                          // Thread local storage support
+	VFPv4                        // Vector floating point version 4 support
+	IDIVA                        // Integer divide instruction support in ARM mode
+	IDIVT                        // Integer divide instruction support in Thumb mode
+	VFPD32                       // Vector floating point version 3 D15-D31
+	LPAE                         // Large Physical Address Extensions
+	EVTSTRM                      // Event stream support
+	AES                          // AES hardware implementation
+	PMULL                        // Polynomial multiplication instruction set
+	SHA1                         // SHA1 hardware implementation
+	SHA2                         // SHA2 hardware implementation
+	CRC32                        // CRC32 hardware implementation
+)
+
+func ABI() CPU {
+	cpu := CPU(0)
+	cpu.set(SWP, ARM.HasSWP)
+	cpu.set(HALF, ARM.HasHALF)
+	cpu.set(THUMB, ARM.HasTHUMB)
+	cpu.set(BIT26, ARM.Has26BIT)
+	cpu.set(FASTMUL, ARM.HasFASTMUL)
+	cpu.set(FPA, ARM.HasFPA)
+	cpu.set(VFP, ARM.HasVFP)
+	cpu.set(EDSP, ARM.HasEDSP)
+	cpu.set(JAVA, ARM.HasJAVA)
+	cpu.set(IWMMXT, ARM.HasIWMMXT)
+	cpu.set(CRUNCH, ARM.HasCRUNCH)
+	cpu.set(THUMBEE, ARM.HasTHUMBEE)
+	cpu.set(NEON, ARM.HasNEON)
+	cpu.set(VFPv3, ARM.HasVFPv3)
+	cpu.set(VFPv3D16, ARM.HasVFPv3D16)
+	cpu.set(TLS, ARM.HasTLS)
+	cpu.set(VFPv4, ARM.HasVFPv4)
+	cpu.set(IDIVA, ARM.HasIDIVA)
+	cpu.set(IDIVT, ARM.HasIDIVT)
+	cpu.set(VFPD32, ARM.HasVFPD32)
+	cpu.set(LPAE, ARM.HasLPAE)
+	cpu.set(EVTSTRM, ARM.HasEVTSTRM)
+	cpu.set(AES, ARM.HasAES)
+	cpu.set(PMULL, ARM.HasPMULL)
+	cpu.set(SHA1, ARM.HasSHA1)
+	cpu.set(SHA2, ARM.HasSHA2)
+	cpu.set(CRC32, ARM.HasCRC32)
+	return cpu
+}
diff --git a/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go
new file mode 100644
index 0000000000..0c5134c76e
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go
@@ -0,0 +1,74 @@
+package arm64
+
+import (
+	"github.com/segmentio/asm/cpu/cpuid"
+	. "golang.org/x/sys/cpu"
+)
+
+type CPU cpuid.CPU
+
+func (cpu CPU) Has(feature Feature) bool {
+	return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
+}
+
+func (cpu *CPU) set(feature Feature, enable bool) {
+	(*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
+}
+
+type Feature cpuid.Feature
+
+const (
+	FP       Feature = 1 << iota // Floating-point instruction set (always available)
+	ASIMD                        // Advanced SIMD (always available)
+	EVTSTRM                      // Event stream support
+	AES                          // AES hardware implementation
+	PMULL                        // Polynomial multiplication instruction set
+	SHA1                         // SHA1 hardware implementation
+	SHA2                         // SHA2 hardware implementation
+	CRC32                        // CRC32 hardware implementation
+	ATOMICS                      // Atomic memory operation instruction set
+	FPHP                         // Half precision floating-point instruction set
+	ASIMDHP                      // Advanced SIMD half precision instruction set
+	CPUID                        // CPUID identification scheme registers
+	ASIMDRDM                     // Rounding double multiply add/subtract instruction set
+	JSCVT                        // Javascript conversion from floating-point to integer
+	FCMA                         // Floating-point multiplication and addition of complex numbers
+	LRCPC                        // Release Consistent processor consistent support
+	DCPOP                        // Persistent memory support
+	SHA3                         // SHA3 hardware implementation
+	SM3                          // SM3 hardware implementation
+	SM4                          // SM4 hardware implementation
+	ASIMDDP                      // Advanced SIMD double precision instruction set
+	SHA512                       // SHA512 hardware implementation
+	SVE                          // Scalable Vector Extensions
+	ASIMDFHM                     // Advanced SIMD multiplication FP16 to FP32
+)
+
+func ABI() CPU {
+	cpu := CPU(0)
+	cpu.set(FP, ARM64.HasFP)
+	cpu.set(ASIMD, ARM64.HasASIMD)
+	cpu.set(EVTSTRM, ARM64.HasEVTSTRM)
+	cpu.set(AES, ARM64.HasAES)
+	cpu.set(PMULL, ARM64.HasPMULL)
+	cpu.set(SHA1, ARM64.HasSHA1)
+	cpu.set(SHA2, ARM64.HasSHA2)
+	cpu.set(CRC32, ARM64.HasCRC32)
+	cpu.set(ATOMICS, ARM64.HasATOMICS)
+	cpu.set(FPHP, ARM64.HasFPHP)
+	cpu.set(ASIMDHP, ARM64.HasASIMDHP)
+	cpu.set(CPUID, ARM64.HasCPUID)
+	cpu.set(ASIMDRDM, ARM64.HasASIMDRDM)
+	cpu.set(JSCVT, ARM64.HasJSCVT)
+	cpu.set(FCMA, ARM64.HasFCMA)
+	cpu.set(LRCPC, ARM64.HasLRCPC)
+	cpu.set(DCPOP, ARM64.HasDCPOP)
+	cpu.set(SHA3, ARM64.HasSHA3)
+	cpu.set(SM3, ARM64.HasSM3)
+	cpu.set(SM4, ARM64.HasSM4)
+	cpu.set(ASIMDDP, ARM64.HasASIMDDP)
+	cpu.set(SHA512, ARM64.HasSHA512)
+	cpu.set(SVE, ARM64.HasSVE)
+	cpu.set(ASIMDFHM, ARM64.HasASIMDFHM)
+	return cpu
+}
diff --git a/vendor/github.com/segmentio/asm/cpu/cpu.go b/vendor/github.com/segmentio/asm/cpu/cpu.go
new file mode 100644
index 0000000000..6ddf4973f5
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/cpu.go
@@ -0,0 +1,22 @@
+// Package cpu provides APIs to detect CPU features available at runtime.
+package cpu
+
+import (
+	"github.com/segmentio/asm/cpu/arm"
+	"github.com/segmentio/asm/cpu/arm64"
+	"github.com/segmentio/asm/cpu/x86"
+)
+
+var (
+	// X86 is the bitset representing which parts of the x86 instruction sets
+	// are supported by the CPU.
+	X86 = x86.ABI()
+
+	// ARM is the bitset representing which parts of the arm instruction sets
+	// are supported by the CPU.
+	ARM = arm.ABI()
+
+	// ARM64 is the bitset representing which parts of the arm64 instruction
+	// sets are supported by the CPU.
+	ARM64 = arm64.ABI()
+)
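
A minimal sketch of how callers branch on these bitsets, mirroring the AVX2 check in base64_amd64.go above (the printed strings are illustrative):

	package main

	import (
		"fmt"

		"github.com/segmentio/asm/cpu"
		"github.com/segmentio/asm/cpu/x86"
	)

	func main() {
		if cpu.X86.Has(x86.AVX2) {
			fmt.Println("AVX2 base64 kernels will be used")
		} else {
			fmt.Println("falling back to the portable encoding/base64 path")
		}
	}
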
diff --git a/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go
new file mode 100644
index 0000000000..0949d3d584
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go
@@ -0,0 +1,32 @@
+// Package cpuid provides generic types used to represent CPU features supported
+// by the architecture.
+package cpuid
+
+// CPU is a bitset of feature flags representing the capabilities of various CPU
+// architectures that this package provides optimized assembly routines for.
+//
+// The intent is to provide a stable ABI between the Go code that generates the
+// assembly and the programs that use the library functions.
+type CPU uint64
+
+// Feature represents a single CPU feature.
+type Feature uint64
+
+const (
+	// None is a Feature value that has no CPU features enabled.
+	None Feature = 0
+	// All is a Feature value that has all CPU features enabled.
+	All Feature = 0xFFFFFFFFFFFFFFFF
+)
+
+func (cpu CPU) Has(feature Feature) bool {
+	return (Feature(cpu) & feature) == feature
+}
+
+func (cpu *CPU) Set(feature Feature, enabled bool) {
+	if enabled {
+		*cpu |= CPU(feature)
+	} else {
+		*cpu &= ^CPU(feature)
+	}
+}
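
A small sketch of the bitset semantics above, assuming only this package (the feature constants are hypothetical placeholders, not real CPU features): Set toggles the bits of a feature, and Has reports true only when every queried bit is present.

	package main

	import (
		"fmt"

		"github.com/segmentio/asm/cpu/cpuid"
	)

	func main() {
		var c cpuid.CPU
		const featA, featB cpuid.Feature = 1 << 0, 1 << 1 // hypothetical features

		c.Set(featA, true)
		fmt.Println(c.Has(featA))         // true
		fmt.Println(c.Has(featA | featB)) // false: every bit must be set
		c.Set(featA, false)
		fmt.Println(c.Has(featA)) // false
	}
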
diff --git a/vendor/github.com/segmentio/asm/cpu/x86/x86.go b/vendor/github.com/segmentio/asm/cpu/x86/x86.go
new file mode 100644
index 0000000000..9e93537583
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/x86/x86.go
@@ -0,0 +1,76 @@
+package x86
+
+import (
+	"github.com/segmentio/asm/cpu/cpuid"
+	. "golang.org/x/sys/cpu"
+)
+
+type CPU cpuid.CPU
+
+func (cpu CPU) Has(feature Feature) bool {
+	return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
+}
+
+func (cpu *CPU) set(feature Feature, enable bool) {
+	(*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
+}
+
+type Feature cpuid.Feature
+
+const (
+	SSE                Feature = 1 << iota // SSE functions
+	SSE2                                   // P4 SSE functions
+	SSE3                                   // Prescott SSE3 functions
+	SSE41                                  // Penryn SSE4.1 functions
+	SSE42                                  // Nehalem SSE4.2 functions
+	SSE4A                                  // AMD Barcelona microarchitecture SSE4a instructions
+	SSSE3                                  // Conroe SSSE3 functions
+	AVX                                    // AVX functions
+	AVX2                                   // AVX2 functions
+	AVX512BF16                             // AVX-512 BFLOAT16 Instructions
+	AVX512BITALG                           // AVX-512 Bit Algorithms
+	AVX512BW                               // AVX-512 Byte and Word Instructions
+	AVX512CD                               // AVX-512 Conflict Detection Instructions
+	AVX512DQ                               // AVX-512 Doubleword and Quadword Instructions
+	AVX512ER                               // AVX-512 Exponential and Reciprocal Instructions
+	AVX512F                                // AVX-512 Foundation
+	AVX512IFMA                             // AVX-512 Integer Fused Multiply-Add Instructions
+	AVX512PF                               // AVX-512 Prefetch Instructions
+	AVX512VBMI                             // AVX-512 Vector Bit Manipulation Instructions
+	AVX512VBMI2                            // AVX-512 Vector Bit Manipulation Instructions, Version 2
+	AVX512VL                               // AVX-512 Vector Length Extensions
+	AVX512VNNI                             // AVX-512 Vector Neural Network Instructions
+	AVX512VP2INTERSECT                     // AVX-512 Intersect for D/Q
+	AVX512VPOPCNTDQ                        // AVX-512 Vector Population Count Doubleword and Quadword
+	CMOV                                   // Conditional move
+)
+
+func ABI() CPU {
+	cpu := CPU(0)
+	cpu.set(SSE, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have SSE?
+	cpu.set(SSE2, X86.HasSSE2)
+	cpu.set(SSE3, X86.HasSSE3)
+	cpu.set(SSE41, X86.HasSSE41)
+	cpu.set(SSE42, X86.HasSSE42)
+	cpu.set(SSE4A, false) // TODO: add upstream support in golang.org/x/sys/cpu?
+	cpu.set(SSSE3, X86.HasSSSE3)
+	cpu.set(AVX, X86.HasAVX)
+	cpu.set(AVX2, X86.HasAVX2)
+	cpu.set(AVX512BF16, X86.HasAVX512BF16)
+	cpu.set(AVX512BITALG, X86.HasAVX512BITALG)
+	cpu.set(AVX512BW, X86.HasAVX512BW)
+	cpu.set(AVX512CD, X86.HasAVX512CD)
+	cpu.set(AVX512DQ, X86.HasAVX512DQ)
+	cpu.set(AVX512ER, X86.HasAVX512ER)
+	cpu.set(AVX512F, X86.HasAVX512F)
+	cpu.set(AVX512IFMA, X86.HasAVX512IFMA)
+	cpu.set(AVX512PF, X86.HasAVX512PF)
+	cpu.set(AVX512VBMI, X86.HasAVX512VBMI)
+	cpu.set(AVX512VBMI2, X86.HasAVX512VBMI2)
+	cpu.set(AVX512VL, X86.HasAVX512VL)
+	cpu.set(AVX512VNNI, X86.HasAVX512VNNI)
+	cpu.set(AVX512VP2INTERSECT, false) // TODO: add upstream support in golang.org/x/sys/cpu?
+	cpu.set(AVX512VPOPCNTDQ, X86.HasAVX512VPOPCNTDQ)
+	cpu.set(CMOV, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have CMOV?
+	return cpu
+}
diff --git a/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go b/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go
new file mode 100644
index 0000000000..913c9cc68b
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go
@@ -0,0 +1,20 @@
+package unsafebytes
+
+import "unsafe"
+
+func Pointer(b []byte) *byte {
+	return *(**byte)(unsafe.Pointer(&b))
+}
+
+func String(b []byte) string {
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+func BytesOf(s string) []byte {
+	return *(*[]byte)(unsafe.Pointer(&sliceHeader{str: s, cap: len(s)}))
+}
+
+type sliceHeader struct {
+	str string
+	cap int
+}
diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/key.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/key.go
index 1100ca083d..6af69afda3 100644
--- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/key.go
+++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/key.go
@@ -33,6 +33,10 @@ type KeyOpts struct {
 	OIDCProvider         string // Specify which OIDC credential provider to use for keyless signer
 	BundlePath           string
 	SkipConfirmation     bool
+	TSAClientCACert      string
+	TSAClientCert        string
+	TSAClientKey         string
+	TSAServerName        string // expected SAN field in the TSA server's certificate - https://pkg.go.dev/crypto/tls#Config.ServerName
 	TSAServerURL         string
 	RFC3161TimestampPath string
 	TSACertChainPath     string
diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/sign.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/sign.go
index 0359fa8e6e..aeac9cad6e 100644
--- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/sign.go
+++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/sign.go
@@ -34,6 +34,10 @@ type SignOptions struct {
 	Attachment            string
 	SkipConfirmation      bool
 	TlogUpload            bool
+	TSAClientCACert       string
+	TSAClientCert         string
+	TSAClientKey          string
+	TSAServerName         string
 	TSAServerURL          string
 	IssueCertificate      bool
 	SignContainerIdentity string
@@ -104,9 +108,23 @@ func (o *SignOptions) AddFlags(cmd *cobra.Command) {
 	cmd.Flags().BoolVar(&o.TlogUpload, "tlog-upload", true,
 		"whether or not to upload to the tlog")
 
+	cmd.Flags().StringVar(&o.TSAClientCACert, "timestamp-client-cacert", "",
+		"path to the X.509 CA certificate file in PEM format to be used for the connection to the TSA Server")
+
+	cmd.Flags().StringVar(&o.TSAClientCert, "timestamp-client-cert", "",
+		"path to the X.509 certificate file in PEM format to be used for the connection to the TSA Server")
+
+	cmd.Flags().StringVar(&o.TSAClientKey, "timestamp-client-key", "",
+		"path to the X.509 private key file in PEM format to be used, together with the 'timestamp-client-cert' value, for the connection to the TSA Server")
+
+	cmd.Flags().StringVar(&o.TSAServerName, "timestamp-server-name", "",
+		"SAN name to use as the 'ServerName' tls.Config field to verify the mTLS connection to the TSA Server")
+
 	cmd.Flags().StringVar(&o.TSAServerURL, "timestamp-server-url", "",
 		"url to the Timestamp RFC3161 server, default none. Must be the path to the API to request timestamp responses, e.g. https://freetsa.org/tsr")
 
+	_ = cmd.Flags().SetAnnotation("certificate", cobra.BashCompFilenameExt, []string{"cert"})
+
 	cmd.Flags().BoolVar(&o.IssueCertificate, "issue-certificate", false,
 		"issue a code signing certificate from Fulcio, even if a key is provided")
 
diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/signblob.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/signblob.go
index 99c1a473a5..7cddde63df 100644
--- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/signblob.go
+++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/signblob.go
@@ -35,6 +35,10 @@ type SignBlobOptions struct {
 	BundlePath           string
 	SkipConfirmation     bool
 	TlogUpload           bool
+	TSAClientCACert      string
+	TSAClientCert        string
+	TSAClientKey         string
+	TSAServerName        string
 	TSAServerURL         string
 	RFC3161TimestampPath string
 	IssueCertificate     bool
@@ -77,6 +81,18 @@ func (o *SignBlobOptions) AddFlags(cmd *cobra.Command) {
 	cmd.Flags().BoolVar(&o.TlogUpload, "tlog-upload", true,
 		"whether or not to upload to the tlog")
 
+	cmd.Flags().StringVar(&o.TSAClientCACert, "timestamp-client-cacert", "",
+		"path to the X.509 CA certificate file in PEM format to be used for the connection to the TSA Server")
+
+	cmd.Flags().StringVar(&o.TSAClientCert, "timestamp-client-cert", "",
+		"path to the X.509 certificate file in PEM format to be used for the connection to the TSA Server")
+
+	cmd.Flags().StringVar(&o.TSAClientKey, "timestamp-client-key", "",
+		"path to the X.509 private key file in PEM format to be used, together with the 'timestamp-client-cert' value, for the connection to the TSA Server")
+
+	cmd.Flags().StringVar(&o.TSAServerName, "timestamp-server-name", "",
+		"SAN name to use as the 'ServerName' tls.Config field to verify the mTLS connection to the TSA Server")
+
 	cmd.Flags().StringVar(&o.TSAServerURL, "timestamp-server-url", "",
 		"url to the Timestamp RFC3161 server, default none. Must be the path to the API to request timestamp responses, e.g. https://freetsa.org/tsr")
 
diff --git a/vendor/github.com/sigstore/cosign/v2/internal/pkg/now/now.go b/vendor/github.com/sigstore/cosign/v2/internal/pkg/now/now.go
deleted file mode 100644
index 253a41f3f2..0000000000
--- a/vendor/github.com/sigstore/cosign/v2/internal/pkg/now/now.go
+++ /dev/null
@@ -1,38 +0,0 @@
-//
-// Copyright 2023 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package now
-
-import (
-	"fmt"
-	"os"
-	"strconv"
-	"time"
-)
-
-// Now returns SOURCE_DATE_EPOCH or time.Now().
-func Now() (time.Time, error) {
-	// nolint
-	epoch := os.Getenv("SOURCE_DATE_EPOCH")
-	if epoch == "" {
-		return time.Now(), nil
-	}
-
-	seconds, err := strconv.ParseInt(epoch, 10, 64)
-	if err != nil {
-		return time.Now(), fmt.Errorf("SOURCE_DATE_EPOCH should be the number of seconds since January 1st 1970, 00:00 UTC, got: %w", err)
-	}
-	return time.Unix(seconds, 0), nil
-}
diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/github/github.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/github/github.go
index 8f7a0b4ea2..14c7bb2f4e 100644
--- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/github/github.go
+++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/github/github.go
@@ -25,7 +25,7 @@ import (
 	"os"
 	"strings"
 
-	"github.com/google/go-github/v50/github"
+	"github.com/google/go-github/v53/github"
 	"golang.org/x/crypto/nacl/box"
 	"golang.org/x/oauth2"
 
diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/keys.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/keys.go
index 3ab43cae47..9adc22525f 100644
--- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/keys.go
+++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/keys.go
@@ -30,8 +30,7 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/theupdateframework/go-tuf/encrypted"
-
+	"github.com/secure-systems-lab/go-securesystemslib/encrypted"
 	"github.com/sigstore/cosign/v2/pkg/oci/static"
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
 	"github.com/sigstore/sigstore/pkg/signature"
diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tlog.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tlog.go
index 87579dacf6..827311993c 100644
--- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tlog.go
+++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tlog.go
@@ -370,7 +370,7 @@ func GetTlogEntry(ctx context.Context, rekorClient *client.Rekor, entryUUID stri
 	return nil, errors.New("empty response")
 }
 
-func proposedEntry(b64Sig string, payload, pubKey []byte) ([]models.ProposedEntry, error) {
+func proposedEntries(b64Sig string, payload, pubKey []byte) ([]models.ProposedEntry, error) {
 	var proposedEntry []models.ProposedEntry
 	signature, err := base64.StdEncoding.DecodeString(b64Sig)
 	if err != nil {
@@ -380,11 +380,15 @@ func proposedEntry(b64Sig string, payload, pubKey []byte) ([]models.ProposedEntr
 	// The fact that there's no signature (or empty rather), implies
 	// that this is an Attestation that we're verifying.
 	if len(signature) == 0 {
-		e, err := intotoEntry(context.Background(), payload, pubKey)
+		intotoEntry, err := intotoEntry(context.Background(), payload, pubKey)
 		if err != nil {
 			return nil, err
 		}
-		proposedEntry = []models.ProposedEntry{e}
+		dsseEntry, err := dsseEntry(context.Background(), payload, pubKey)
+		if err != nil {
+			return nil, err
+		}
+		proposedEntry = []models.ProposedEntry{dsseEntry, intotoEntry}
 	} else {
 		sha256CheckSum := sha256.New()
 		if _, err := sha256CheckSum.Write(payload); err != nil {
@@ -404,12 +408,12 @@ func FindTlogEntry(ctx context.Context, rekorClient *client.Rekor,
 	b64Sig string, payload, pubKey []byte) ([]models.LogEntryAnon, error) {
 	searchParams := entries.NewSearchLogQueryParamsWithContext(ctx)
 	searchLogQuery := models.SearchLogQuery{}
-	proposedEntry, err := proposedEntry(b64Sig, payload, pubKey)
+	proposedEntries, err := proposedEntries(b64Sig, payload, pubKey)
 	if err != nil {
 		return nil, err
 	}
 
-	searchLogQuery.SetEntries(proposedEntry)
+	searchLogQuery.SetEntries(proposedEntries)
 
 	searchParams.SetEntry(&searchLogQuery)
 	resp, err := rekorClient.Entries.SearchLogQuery(searchParams)
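// An illustrative sketch of the search change above, using stand-in types rather than the
// real rekor models: when the signature is empty (i.e. an attestation is being verified),
// the query now proposes both a DSSE entry and an in-toto entry so that log entries
// uploaded under either type are found.
package example

type proposedEntry struct{ kind string } // stand-in for models.ProposedEntry

func candidateEntries(signature []byte) []proposedEntry {
	if len(signature) == 0 {
		// Attestation: search under both entry kinds.
		return []proposedEntry{{kind: "dsse"}, {kind: "intoto"}}
	}
	// Plain signature: a single hashedrekord-style entry keyed by the payload digest.
	return []proposedEntry{{kind: "hashedrekord"}}
}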
diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go
index 2ab846fe59..dffd8691e1 100644
--- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go
+++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go
@@ -296,7 +296,7 @@ func CheckCertificatePolicy(cert *x509.Certificate, co *CheckOpts) error {
 		return err
 	}
 	oidcIssuer := ce.GetIssuer()
-	sans := getSubjectAlternateNames(cert)
+	sans := cryptoutils.GetSubjectAlternateNames(cert)
 	// If there are identities given, go through them and if one of them
 	// matches, call that good, otherwise, return an error.
 	if len(co.Identities) > 0 {
@@ -399,29 +399,6 @@ func validateCertExtensions(ce CertExtensions, co *CheckOpts) error {
 	return nil
 }
 
-// getSubjectAlternateNames returns all of the following for a Certificate.
-// DNSNames
-// EmailAddresses
-// IPAddresses
-// URIs
-func getSubjectAlternateNames(cert *x509.Certificate) []string {
-	sans := []string{}
-	sans = append(sans, cert.DNSNames...)
-	sans = append(sans, cert.EmailAddresses...)
-	for _, ip := range cert.IPAddresses {
-		sans = append(sans, ip.String())
-	}
-	for _, uri := range cert.URIs {
-		sans = append(sans, uri.String())
-	}
-	// ignore error if there's no OtherName SAN
-	otherName, _ := cryptoutils.UnmarshalOtherNameSAN(cert.Extensions)
-	if len(otherName) > 0 {
-		sans = append(sans, otherName)
-	}
-	return sans
-}
-
 // ValidateAndUnpackCertWithChain creates a Verifier from a certificate. Verifies that the certificate
 // chains up to the provided root. Chain should start with the parent of the certificate and end with the root.
 // Optionally verifies the subject and issuer of the certificate.
@@ -618,6 +595,7 @@ func verifySignatures(ctx context.Context, sigs oci.Signatures, h v1.Hash, co *C
 				t.Done(err)
 				return
 			}
+
 			verified, err := VerifyImageSignature(ctx, sig, h, co)
 			bundlesVerified[index] = verified
 			if err != nil {
@@ -907,7 +885,7 @@ func VerifyImageAttestations(ctx context.Context, signedImgRef name.Reference, c
 		return nil, false, err
 	}
 
-	return verifyImageAttestations(ctx, atts, h, co)
+	return VerifyImageAttestation(ctx, atts, h, co)
 }
 
 // VerifyLocalImageAttestations verifies attestations from a saved, local image, without any network calls,
@@ -953,7 +931,7 @@ func VerifyLocalImageAttestations(ctx context.Context, path string, co *CheckOpt
 	if err != nil {
 		return nil, false, err
 	}
-	return verifyImageAttestations(ctx, atts, h, co)
+	return VerifyImageAttestation(ctx, atts, h, co)
 }
 
 func VerifyBlobAttestation(ctx context.Context, att oci.Signature, h v1.Hash, co *CheckOpts) (
@@ -961,7 +939,7 @@ func VerifyBlobAttestation(ctx context.Context, att oci.Signature, h v1.Hash, co
 	return verifyInternal(ctx, att, h, verifyOCIAttestation, co)
 }
 
-func verifyImageAttestations(ctx context.Context, atts oci.Signatures, h v1.Hash, co *CheckOpts) (checkedAttestations []oci.Signature, bundleVerified bool, err error) {
+func VerifyImageAttestation(ctx context.Context, atts oci.Signatures, h v1.Hash, co *CheckOpts) (checkedAttestations []oci.Signature, bundleVerified bool, err error) {
 	sl, err := atts.Get()
 	if err != nil {
 		return nil, false, err
@@ -1105,11 +1083,16 @@ func VerifyBundle(sig oci.Signature, co *CheckOpts) (bool, error) {
 	}
 
 	alg, bundlehash, err := bundleHash(bundle.Payload.Body.(string), signature)
+	if err != nil {
+		return false, fmt.Errorf("computing bundle hash: %w", err)
+	}
 	h := sha256.Sum256(payload)
 	payloadHash := hex.EncodeToString(h[:])
 
-	if alg != "sha256" || bundlehash != payloadHash {
-		return false, fmt.Errorf("matching bundle to payload: %w", err)
+	if alg != "sha256" {
+		return false, fmt.Errorf("unexpected algorithm: %q", alg)
+	} else if bundlehash != payloadHash {
+		return false, fmt.Errorf("matching bundle to payload: bundle=%q, payload=%q", bundlehash, payloadHash)
 	}
 	return true, nil
 }
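// A small stand-alone sketch of the corrected bundle check above: the bundleHash error is
// now surfaced on its own, and the algorithm and digest mismatches each produce a
// descriptive error instead of wrapping a nil err. Not the cosign source.
package example

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func matchBundleToPayload(alg, bundleHash string, payload []byte) error {
	h := sha256.Sum256(payload)
	payloadHash := hex.EncodeToString(h[:])

	if alg != "sha256" {
		return fmt.Errorf("unexpected algorithm: %q", alg)
	}
	if bundleHash != payloadHash {
		return fmt.Errorf("matching bundle to payload: bundle=%q, payload=%q", bundleHash, payloadHash)
	}
	return nil
}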
diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signatures.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signatures.go
index dfbe1737ac..4ac356fe7a 100644
--- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signatures.go
+++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signatures.go
@@ -19,7 +19,6 @@ import (
 	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/empty"
 	"github.com/google/go-containerregistry/pkg/v1/mutate"
-	"github.com/sigstore/cosign/v2/internal/pkg/now"
 	"github.com/sigstore/cosign/v2/pkg/oci"
 )
 
@@ -43,17 +42,6 @@ func AppendSignatures(base oci.Signatures, sigs ...oci.Signature) (oci.Signature
 		return nil, err
 	}
 
-	t, err := now.Now()
-	if err != nil {
-		return nil, err
-	}
-
-	// Set the Created date to time of execution
-	img, err = mutate.CreatedAt(img, v1.Time{Time: t})
-	if err != nil {
-		return nil, err
-	}
-
 	return &sigAppender{
 		Image: img,
 		base:  base,
diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/file.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/file.go
index 8e5cdc9c12..6fc55d8311 100644
--- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/file.go
+++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/file.go
@@ -22,7 +22,6 @@ import (
 	"github.com/google/go-containerregistry/pkg/v1/empty"
 	"github.com/google/go-containerregistry/pkg/v1/mutate"
 	"github.com/google/go-containerregistry/pkg/v1/types"
-	"github.com/sigstore/cosign/v2/internal/pkg/now"
 	"github.com/sigstore/cosign/v2/pkg/oci"
 	"github.com/sigstore/cosign/v2/pkg/oci/signed"
 )
@@ -49,16 +48,6 @@ func NewFile(payload []byte, opts ...Option) (oci.File, error) {
 	// Add annotations from options
 	img = mutate.Annotations(img, o.Annotations).(v1.Image)
 
-	t, err := now.Now()
-	if err != nil {
-		return nil, err
-	}
-
-	// Set the Created date to time of execution
-	img, err = mutate.CreatedAt(img, v1.Time{Time: t})
-	if err != nil {
-		return nil, err
-	}
 	return &file{
 		SignedImage: signed.Image(img),
 		layer:       layer,
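// With the now.Now helper and the mutate.CreatedAt calls removed above, signature and file
// images no longer get a Created date derived from SOURCE_DATE_EPOCH. A caller that still
// wants a deterministic timestamp can apply one itself; a minimal sketch using the same
// go-containerregistry API that the deleted code used:
package example

import (
	"os"
	"strconv"
	"time"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
)

// withSourceDateEpoch stamps img with SOURCE_DATE_EPOCH when the variable is set.
func withSourceDateEpoch(img v1.Image) (v1.Image, error) {
	epoch := os.Getenv("SOURCE_DATE_EPOCH")
	if epoch == "" {
		return img, nil
	}
	seconds, err := strconv.ParseInt(epoch, 10, 64)
	if err != nil {
		return nil, err
	}
	return mutate.CreatedAt(img, v1.Time{Time: time.Unix(seconds, 0)})
}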
diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/providers/github/github.go b/vendor/github.com/sigstore/cosign/v2/pkg/providers/github/github.go
index 129c7a0bc3..3329890aeb 100644
--- a/vendor/github.com/sigstore/cosign/v2/pkg/providers/github/github.go
+++ b/vendor/github.com/sigstore/cosign/v2/pkg/providers/github/github.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"net/http"
 	"os"
+	"strings"
 	"time"
 
 	"github.com/sigstore/cosign/v2/pkg/cosign/env"
@@ -54,22 +55,38 @@ func (ga *githubActions) Enabled(_ context.Context) bool {
 }
 
 // Provide implements providers.Interface
-func (ga *githubActions) Provide(_ context.Context, audience string) (string, error) {
+func (ga *githubActions) Provide(ctx context.Context, audience string) (string, error) {
 	url := env.Getenv(env.VariableGitHubRequestURL) + "&audience=" + audience
 
-	req, err := http.NewRequest("GET", url, nil)
+	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
 	if err != nil {
 		return "", err
 	}
 
+	// May be replaced by a different client if we hit HTTP_1_1_REQUIRED.
+	client := http.DefaultClient
+
 	// Retry up to 3 times.
 	for i := 0; ; i++ {
 		req.Header.Add("Authorization", "bearer "+env.Getenv(env.VariableGitHubRequestToken))
-		resp, err := http.DefaultClient.Do(req)
+		resp, err := client.Do(req)
 		if err != nil {
 			if i == 2 {
 				return "", err
 			}
+
+			// This error isn't exposed by net/http, and retrying this with the
+			// DefaultClient will fail because it will just use HTTP2 again.
+			// I don't know why go doesn't do this for us.
+			if strings.Contains(err.Error(), "HTTP_1_1_REQUIRED") {
+				http1transport := http.DefaultTransport.(*http.Transport).Clone()
+				http1transport.ForceAttemptHTTP2 = false
+
+				client = &http.Client{
+					Transport: http1transport,
+				}
+			}
+
 			fmt.Fprintf(os.Stderr, "error fetching GitHub OIDC token (will retry): %v\n", err)
 			time.Sleep(time.Second)
 			continue
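// A stand-alone sketch of the retry fallback added above: when a proxy rejects HTTP/2 with
// HTTP_1_1_REQUIRED, the default transport is cloned with ForceAttemptHTTP2 disabled so the
// next attempt negotiates HTTP/1.1. Helper name is illustrative.
package example

import (
	"net/http"
	"strings"
)

// clientFor returns an HTTP/1.1-only client if err indicates HTTP_1_1_REQUIRED,
// otherwise the client that was already in use.
func clientFor(current *http.Client, err error) *http.Client {
	if err == nil || !strings.Contains(err.Error(), "HTTP_1_1_REQUIRED") {
		return current
	}
	t := http.DefaultTransport.(*http.Transport).Clone()
	t.ForceAttemptHTTP2 = false
	return &http.Client{Transport: t}
}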
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go
index 8e0054daf4..e3f43cc2dc 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go
@@ -73,7 +73,7 @@ const (
 	// EnforceNonfalsifiabilityWithSpire is the value used for  "enable-nonfalsifiability" when SPIRE is used to enable non-falsifiability.
 	EnforceNonfalsifiabilityWithSpire = "spire"
 	// EnforceNonfalsifiabilityNone is the value used for  "enable-nonfalsifiability" when non-falsifiability is not enabled.
-	EnforceNonfalsifiabilityNone = ""
+	EnforceNonfalsifiabilityNone = "none"
 	// DefaultEnforceNonfalsifiability is the default value for "enforce-nonfalsifiability".
 	DefaultEnforceNonfalsifiability = EnforceNonfalsifiabilityNone
 	// DefaultNoMatchPolicyConfig is the default value for "trusted-resources-verification-no-match-policy".
@@ -195,7 +195,7 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) {
 	if err := setMaxResultSize(cfgMap, DefaultMaxResultSize, &tc.MaxResultSize); err != nil {
 		return nil, err
 	}
-	if err := setEnforceNonFalsifiability(cfgMap, tc.EnableAPIFields, &tc.EnforceNonfalsifiability); err != nil {
+	if err := setEnforceNonFalsifiability(cfgMap, &tc.EnforceNonfalsifiability); err != nil {
 		return nil, err
 	}
 	if err := setFeature(setSecurityContextKey, DefaultSetSecurityContext, &tc.SetSecurityContext); err != nil {
@@ -262,7 +262,7 @@ func setCoschedule(cfgMap map[string]string, defaultValue string, disabledAffini
 
 // setEnforceNonFalsifiability sets the "enforce-nonfalsifiability" flag based on the content of a given map.
 // If the feature gate is invalid, then an error is returned.
-func setEnforceNonFalsifiability(cfgMap map[string]string, enableAPIFields string, feature *string) error {
+func setEnforceNonFalsifiability(cfgMap map[string]string, feature *string) error {
 	var value = DefaultEnforceNonfalsifiability
 	if cfg, ok := cfgMap[enforceNonfalsifiability]; ok {
 		value = strings.ToLower(cfg)
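// An illustrative sketch (not the Tekton implementation) of the behaviour change above:
// "enforce-nonfalsifiability" now defaults to the explicit value "none" instead of the
// empty string, and is validated on its own rather than being gated on enable-api-fields.
package example

import (
	"fmt"
	"strings"
)

const (
	enforceNonfalsifiabilityNone  = "none"
	enforceNonfalsifiabilitySpire = "spire"
)

func resolveEnforceNonfalsifiability(cfgMap map[string]string) (string, error) {
	value := enforceNonfalsifiabilityNone // new default
	if cfg, ok := cfgMap["enforce-nonfalsifiability"]; ok {
		value = strings.ToLower(cfg)
	}
	switch value {
	case enforceNonfalsifiabilityNone, enforceNonfalsifiabilitySpire:
		return value, nil
	default:
		return "", fmt.Errorf("invalid value for enforce-nonfalsifiability: %q", value)
	}
}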
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum/checksum.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum/checksum.go
new file mode 100644
index 0000000000..29cca04f77
--- /dev/null
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum/checksum.go
@@ -0,0 +1,57 @@
+package checksum
+
+import (
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	// SignatureAnnotation is the key of signature in annotation map
+	SignatureAnnotation = "tekton.dev/signature"
+)
+
+// PrepareObjectMeta strips annotations that are not set by the user -- "kubectl-client-side-apply" and
+// "kubectl.kubernetes.io/last-applied-configuration" (both added when an object is created with `kubectl apply`) --
+// along with the signature annotation itself, so that they do not affect signature verification.
+// It returns a copy of the input object metadata with those annotations removed.
+func PrepareObjectMeta(in metav1.Object) metav1.ObjectMeta {
+	outMeta := metav1.ObjectMeta{}
+
+	// exclude the fields populated by system.
+	outMeta.Name = in.GetName()
+	outMeta.GenerateName = in.GetGenerateName()
+	outMeta.Namespace = in.GetNamespace()
+
+	if in.GetLabels() != nil {
+		outMeta.Labels = make(map[string]string)
+		for k, v := range in.GetLabels() {
+			outMeta.Labels[k] = v
+		}
+	}
+
+	outMeta.Annotations = make(map[string]string)
+	for k, v := range in.GetAnnotations() {
+		outMeta.Annotations[k] = v
+	}
+
+	// exclude the annotations added by other components
+	delete(outMeta.Annotations, "kubectl-client-side-apply")
+	delete(outMeta.Annotations, "kubectl.kubernetes.io/last-applied-configuration")
+	delete(outMeta.Annotations, SignatureAnnotation)
+
+	return outMeta
+}
+
+// ComputeSha256Checksum computes the sha256 checksum of the tekton object.
+func ComputeSha256Checksum(obj interface{}) ([]byte, error) {
+	ts, err := json.Marshal(obj)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal the object: %w", err)
+	}
+	h := sha256.New()
+	h.Write(ts)
+	return h.Sum(nil), nil
+}
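// A hedged usage sketch for the new checksum helpers: a verifier can rebuild the filtered
// object (PrepareObjectMeta plus the spec), recompute the sha256 and compare the hex-encoded
// digest against whatever digest was signed. The surrounding verification flow is assumed,
// not shown.
package example

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
)

// hexChecksum marshals obj to JSON and returns the hex-encoded sha256 digest, mirroring
// ComputeSha256Checksum above.
func hexChecksum(obj interface{}) (string, error) {
	b, err := json.Marshal(obj)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(b)
	return hex.EncodeToString(sum[:]), nil
}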
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go
index e0ce5ec677..19f97cbf22 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go
@@ -18,6 +18,7 @@ package v1
 
 import (
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline"
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum"
 	"github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -70,6 +71,27 @@ func (*Pipeline) GetGroupVersionKind() schema.GroupVersionKind {
 	return SchemeGroupVersion.WithKind(pipeline.PipelineControllerName)
 }
 
+// Checksum computes the sha256 checksum of the pipeline object.
+// Prior to computing the checksum, it performs some preprocessing on the
+// metadata of the object where it removes system provided annotations.
+// Only the name, namespace, generateName, user-provided labels and annotations
+// and the pipelineSpec are included for the checksum computation.
+func (p *Pipeline) Checksum() ([]byte, error) {
+	objectMeta := checksum.PrepareObjectMeta(p)
+	preprocessedPipeline := Pipeline{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "tekton.dev/v1",
+			Kind:       "Pipeline"},
+		ObjectMeta: objectMeta,
+		Spec:       p.Spec,
+	}
+	sha256Checksum, err := checksum.ComputeSha256Checksum(preprocessedPipeline)
+	if err != nil {
+		return nil, err
+	}
+	return sha256Checksum, nil
+}
+
 // PipelineSpec defines the desired state of Pipeline.
 type PipelineSpec struct {
 	// DisplayName is a user-facing name of the pipeline that may be
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go
index e6c7299c8e..1f56c2050e 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go
@@ -109,6 +109,10 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError)
 		errs = errs.Also(validateTaskRunSpec(ctx, trs).ViaIndex(idx).ViaField("taskRunSpecs"))
 	}
 
+	if ps.TaskRunTemplate.PodTemplate != nil {
+		errs = errs.Also(validatePodTemplateEnv(ctx, *ps.TaskRunTemplate.PodTemplate).ViaField("taskRunTemplate"))
+	}
+
 	return errs
 }
 
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go
index 9a46de41b8..894590508f 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go
@@ -18,6 +18,7 @@ package v1
 
 import (
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline"
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -52,6 +53,27 @@ func (*Task) GetGroupVersionKind() schema.GroupVersionKind {
 	return SchemeGroupVersion.WithKind(pipeline.TaskControllerName)
 }
 
+// Checksum computes the sha256 checksum of the task object.
+// Prior to computing the checksum, it performs some preprocessing on the
+// metadata of the object where it removes system provided annotations.
+// Only the name, namespace, generateName, user-provided labels and annotations
+// and the taskSpec are included for the checksum computation.
+func (t *Task) Checksum() ([]byte, error) {
+	objectMeta := checksum.PrepareObjectMeta(t)
+	preprocessedTask := Task{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "tekton.dev/v1",
+			Kind:       "Task"},
+		ObjectMeta: objectMeta,
+		Spec:       t.Spec,
+	}
+	sha256Checksum, err := checksum.ComputeSha256Checksum(preprocessedTask)
+	if err != nil {
+		return nil, err
+	}
+	return sha256Checksum, nil
+}
+
 // TaskSpec defines the desired state of Task.
 type TaskSpec struct {
 	// Params is a list of input parameters required to run the task. Params
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go
index 4426d5ddb7..eba6cba272 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go
@@ -18,6 +18,7 @@ package v1beta1
 
 import (
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline"
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum"
 	"github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -77,6 +78,27 @@ func (*Pipeline) GetGroupVersionKind() schema.GroupVersionKind {
 	return SchemeGroupVersion.WithKind(pipeline.PipelineControllerName)
 }
 
+// Checksum computes the sha256 checksum of the pipeline object.
+// Prior to computing the checksum, it performs some preprocessing on the
+// metadata of the object where it removes system provided annotations.
+// Only the name, namespace, generateName, user-provided labels and annotations
+// and the pipelineSpec are included for the checksum computation.
+func (p *Pipeline) Checksum() ([]byte, error) {
+	objectMeta := checksum.PrepareObjectMeta(p)
+	preprocessedPipeline := Pipeline{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "tekton.dev/v1beta1",
+			Kind:       "Pipeline"},
+		ObjectMeta: objectMeta,
+		Spec:       p.Spec,
+	}
+	sha256Checksum, err := checksum.ComputeSha256Checksum(preprocessedPipeline)
+	if err != nil {
+		return nil, err
+	}
+	return sha256Checksum, nil
+}
+
 // PipelineSpec defines the desired state of Pipeline.
 type PipelineSpec struct {
 	// DisplayName is a user-facing name of the pipeline that may be
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go
index 8553092566..2961ade3be 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go
@@ -18,6 +18,7 @@ package v1beta1
 
 import (
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline"
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -68,6 +69,27 @@ func (*Task) GetGroupVersionKind() schema.GroupVersionKind {
 	return SchemeGroupVersion.WithKind(pipeline.TaskControllerName)
 }
 
+// Checksum computes the sha256 checksum of the task object.
+// Prior to computing the checksum, it performs some preprocessing on the
+// metadata of the object where it removes system provided annotations.
+// Only the name, namespace, generateName, user-provided labels and annotations
+// and the taskSpec are included for the checksum computation.
+func (t *Task) Checksum() ([]byte, error) {
+	objectMeta := checksum.PrepareObjectMeta(t)
+	preprocessedTask := Task{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "tekton.dev/v1beta1",
+			Kind:       "Task"},
+		ObjectMeta: objectMeta,
+		Spec:       t.Spec,
+	}
+	sha256Checksum, err := checksum.ComputeSha256Checksum(preprocessedTask)
+	if err != nil {
+		return nil, err
+	}
+	return sha256Checksum, nil
+}
+
 // TaskSpec defines the desired state of Task.
 type TaskSpec struct {
 	// Resources is a list input and output resource to run the task
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go
index 081e93512c..56848e8d54 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go
@@ -37,8 +37,7 @@ type Interface interface {
 	TektonV1() tektonv1.TektonV1Interface
 }
 
-// Clientset contains the clients for groups. Each group has exactly one
-// version included in a Clientset.
+// Clientset contains the clients for groups.
 type Clientset struct {
 	*discovery.DiscoveryClient
 	tektonV1alpha1 *tektonv1alpha1.TektonV1alpha1Client
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/factory.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/factory.go
index 691cee85d3..b0cd64175b 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/factory.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/factory.go
@@ -47,6 +47,11 @@ type sharedInformerFactory struct {
 	// startedInformers is used for tracking which informers have been started.
 	// This allows Start() to be called multiple times safely.
 	startedInformers map[reflect.Type]bool
+	// wg tracks how many goroutines were started.
+	wg sync.WaitGroup
+	// shuttingDown is true when Shutdown has been called. It may still be running
+	// because it needs to wait for goroutines.
+	shuttingDown bool
 }
 
 // WithCustomResyncConfig sets a custom resync period for the specified informer types.
@@ -107,20 +112,39 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy
 	return factory
 }
 
-// Start initializes all requested informers.
 func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
 
+	if f.shuttingDown {
+		return
+	}
+
 	for informerType, informer := range f.informers {
 		if !f.startedInformers[informerType] {
-			go informer.Run(stopCh)
+			f.wg.Add(1)
+			// We need a new variable in each loop iteration,
+			// otherwise the goroutine would use the loop variable
+			// and that keeps changing.
+			informer := informer
+			go func() {
+				defer f.wg.Done()
+				informer.Run(stopCh)
+			}()
 			f.startedInformers[informerType] = true
 		}
 	}
 }
 
-// WaitForCacheSync waits for all started informers' cache were synced.
+func (f *sharedInformerFactory) Shutdown() {
+	f.lock.Lock()
+	f.shuttingDown = true
+	f.lock.Unlock()
+
+	// Will return immediately if there is nothing to wait for.
+	f.wg.Wait()
+}
+
 func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
 	informers := func() map[reflect.Type]cache.SharedIndexInformer {
 		f.lock.Lock()
@@ -167,11 +191,58 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
 
 // SharedInformerFactory provides shared informers for resources in all known
 // API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.Shutdown()       // Waits for informer goroutines; returns immediately if nothing was started.
+//	defer cancel()                 // Runs first (LIFO) and closes the stop channel so Shutdown can return.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done())          // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//	    if !ok {
+//	        fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//	        return
+//	    }
+//	}
+//
+//	// Informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
 type SharedInformerFactory interface {
 	internalinterfaces.SharedInformerFactory
-	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// Start initializes all requested informers. They are handled in goroutines
+	// which run until the stop channel gets closed.
+	Start(stopCh <-chan struct{})
+
+	// Shutdown marks a factory as shutting down. At that point no new
+	// informers can be started anymore and Start will return without
+	// doing anything.
+	//
+	// In addition, Shutdown blocks until all goroutines have terminated. For that
+	// to happen, the close channel(s) that they were started with must be closed,
+	// either before Shutdown gets called or while it is waiting.
+	//
+	// Shutdown may be called multiple times, even concurrently. All such calls will
+	// block until all goroutines have terminated.
+	Shutdown()
+
+	// WaitForCacheSync blocks until all started informers' caches were synced
+	// or the stop channel gets closed.
 	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
 
+	// ForResource gives generic access to a shared informer of the matching type.
+	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// InternalInformerFor returns the SharedIndexInformer for obj using an internal
+	// client.
+	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
 	Tekton() pipeline.Interface
 }
 
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go
index 67cd130ad4..af3642aaa3 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go
@@ -20,28 +20,10 @@ package client
 
 import (
 	context "context"
-	json "encoding/json"
-	errors "errors"
-	fmt "fmt"
 
-	pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
-	v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
-	v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
 	versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
-	typedtektonv1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1"
-	typedtektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1"
-	typedtektonv1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	discovery "k8s.io/client-go/discovery"
-	dynamic "k8s.io/client-go/dynamic"
 	rest "k8s.io/client-go/rest"
 	injection "knative.dev/pkg/injection"
-	dynamicclient "knative.dev/pkg/injection/clients/dynamicclient"
 	logging "knative.dev/pkg/logging"
 )
 
@@ -50,7 +32,6 @@ func init() {
 	injection.Default.RegisterClientFetcher(func(ctx context.Context) interface{} {
 		return Get(ctx)
 	})
-	injection.Dynamic.RegisterDynamicClient(withClientFromDynamic)
 }
 
 // Key is used as the key for associating information with a context.Context.
@@ -60,10 +41,6 @@ func withClientFromConfig(ctx context.Context, cfg *rest.Config) context.Context
 	return context.WithValue(ctx, Key{}, versioned.NewForConfigOrDie(cfg))
 }
 
-func withClientFromDynamic(ctx context.Context) context.Context {
-	return context.WithValue(ctx, Key{}, &wrapClient{dyn: dynamicclient.Get(ctx)})
-}
-
 // Get extracts the versioned.Interface client from the context.
 func Get(ctx context.Context) versioned.Interface {
 	untyped := ctx.Value(Key{})
@@ -78,1637 +55,3 @@ func Get(ctx context.Context) versioned.Interface {
 	}
 	return untyped.(versioned.Interface)
 }
-
-type wrapClient struct {
-	dyn dynamic.Interface
-}
-
-var _ versioned.Interface = (*wrapClient)(nil)
-
-func (w *wrapClient) Discovery() discovery.DiscoveryInterface {
-	panic("Discovery called on dynamic client!")
-}
-
-func convert(from interface{}, to runtime.Object) error {
-	bs, err := json.Marshal(from)
-	if err != nil {
-		return fmt.Errorf("Marshal() = %w", err)
-	}
-	if err := json.Unmarshal(bs, to); err != nil {
-		return fmt.Errorf("Unmarshal() = %w", err)
-	}
-	return nil
-}
-
-// TektonV1alpha1 retrieves the TektonV1alpha1Client
-func (w *wrapClient) TektonV1alpha1() typedtektonv1alpha1.TektonV1alpha1Interface {
-	return &wrapTektonV1alpha1{
-		dyn: w.dyn,
-	}
-}
-
-type wrapTektonV1alpha1 struct {
-	dyn dynamic.Interface
-}
-
-func (w *wrapTektonV1alpha1) RESTClient() rest.Interface {
-	panic("RESTClient called on dynamic client!")
-}
-
-func (w *wrapTektonV1alpha1) Runs(namespace string) typedtektonv1alpha1.RunInterface {
-	return &wrapTektonV1alpha1RunImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "tekton.dev",
-			Version:  "v1alpha1",
-			Resource: "runs",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapTektonV1alpha1RunImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedtektonv1alpha1.RunInterface = (*wrapTektonV1alpha1RunImpl)(nil)
-
-func (w *wrapTektonV1alpha1RunImpl) Create(ctx context.Context, in *v1alpha1.Run, opts v1.CreateOptions) (*v1alpha1.Run, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1alpha1",
-		Kind:    "Run",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.Run{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1alpha1RunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapTektonV1alpha1RunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapTektonV1alpha1RunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Run, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.Run{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1alpha1RunImpl) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RunList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.RunList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1alpha1RunImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Run, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.Run{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1alpha1RunImpl) Update(ctx context.Context, in *v1alpha1.Run, opts v1.UpdateOptions) (*v1alpha1.Run, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1alpha1",
-		Kind:    "Run",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.Run{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1alpha1RunImpl) UpdateStatus(ctx context.Context, in *v1alpha1.Run, opts v1.UpdateOptions) (*v1alpha1.Run, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1alpha1",
-		Kind:    "Run",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.Run{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1alpha1RunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
-
-func (w *wrapTektonV1alpha1) VerificationPolicies(namespace string) typedtektonv1alpha1.VerificationPolicyInterface {
-	return &wrapTektonV1alpha1VerificationPolicyImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "tekton.dev",
-			Version:  "v1alpha1",
-			Resource: "verificationpolicies",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapTektonV1alpha1VerificationPolicyImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedtektonv1alpha1.VerificationPolicyInterface = (*wrapTektonV1alpha1VerificationPolicyImpl)(nil)
-
-func (w *wrapTektonV1alpha1VerificationPolicyImpl) Create(ctx context.Context, in *v1alpha1.VerificationPolicy, opts v1.CreateOptions) (*v1alpha1.VerificationPolicy, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1alpha1",
-		Kind:    "VerificationPolicy",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.VerificationPolicy{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1alpha1VerificationPolicyImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapTektonV1alpha1VerificationPolicyImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapTektonV1alpha1VerificationPolicyImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VerificationPolicy, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.VerificationPolicy{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1alpha1VerificationPolicyImpl) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VerificationPolicyList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.VerificationPolicyList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1alpha1VerificationPolicyImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VerificationPolicy, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.VerificationPolicy{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1alpha1VerificationPolicyImpl) Update(ctx context.Context, in *v1alpha1.VerificationPolicy, opts v1.UpdateOptions) (*v1alpha1.VerificationPolicy, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1alpha1",
-		Kind:    "VerificationPolicy",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.VerificationPolicy{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1alpha1VerificationPolicyImpl) UpdateStatus(ctx context.Context, in *v1alpha1.VerificationPolicy, opts v1.UpdateOptions) (*v1alpha1.VerificationPolicy, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1alpha1",
-		Kind:    "VerificationPolicy",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.VerificationPolicy{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1alpha1VerificationPolicyImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
-
-// TektonV1beta1 retrieves the TektonV1beta1Client
-func (w *wrapClient) TektonV1beta1() typedtektonv1beta1.TektonV1beta1Interface {
-	return &wrapTektonV1beta1{
-		dyn: w.dyn,
-	}
-}
-
-type wrapTektonV1beta1 struct {
-	dyn dynamic.Interface
-}
-
-func (w *wrapTektonV1beta1) RESTClient() rest.Interface {
-	panic("RESTClient called on dynamic client!")
-}
-
-func (w *wrapTektonV1beta1) ClusterTasks() typedtektonv1beta1.ClusterTaskInterface {
-	return &wrapTektonV1beta1ClusterTaskImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "tekton.dev",
-			Version:  "v1beta1",
-			Resource: "clustertasks",
-		}),
-	}
-}
-
-type wrapTektonV1beta1ClusterTaskImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-}
-
-var _ typedtektonv1beta1.ClusterTaskInterface = (*wrapTektonV1beta1ClusterTaskImpl)(nil)
-
-func (w *wrapTektonV1beta1ClusterTaskImpl) Create(ctx context.Context, in *v1beta1.ClusterTask, opts v1.CreateOptions) (*v1beta1.ClusterTask, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "ClusterTask",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.ClusterTask{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1ClusterTaskImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Delete(ctx, name, opts)
-}
-
-func (w *wrapTektonV1beta1ClusterTaskImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapTektonV1beta1ClusterTaskImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterTask, error) {
-	uo, err := w.dyn.Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.ClusterTask{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1ClusterTaskImpl) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterTaskList, error) {
-	uo, err := w.dyn.List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.ClusterTaskList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1ClusterTaskImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterTask, err error) {
-	uo, err := w.dyn.Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.ClusterTask{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1ClusterTaskImpl) Update(ctx context.Context, in *v1beta1.ClusterTask, opts v1.UpdateOptions) (*v1beta1.ClusterTask, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "ClusterTask",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.ClusterTask{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1ClusterTaskImpl) UpdateStatus(ctx context.Context, in *v1beta1.ClusterTask, opts v1.UpdateOptions) (*v1beta1.ClusterTask, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "ClusterTask",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.ClusterTask{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1ClusterTaskImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
-
-func (w *wrapTektonV1beta1) CustomRuns(namespace string) typedtektonv1beta1.CustomRunInterface {
-	return &wrapTektonV1beta1CustomRunImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "tekton.dev",
-			Version:  "v1beta1",
-			Resource: "customruns",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapTektonV1beta1CustomRunImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedtektonv1beta1.CustomRunInterface = (*wrapTektonV1beta1CustomRunImpl)(nil)
-
-func (w *wrapTektonV1beta1CustomRunImpl) Create(ctx context.Context, in *v1beta1.CustomRun, opts v1.CreateOptions) (*v1beta1.CustomRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "CustomRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.CustomRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1CustomRunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapTektonV1beta1CustomRunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapTektonV1beta1CustomRunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CustomRun, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.CustomRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1CustomRunImpl) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CustomRunList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.CustomRunList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1CustomRunImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomRun, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.CustomRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1CustomRunImpl) Update(ctx context.Context, in *v1beta1.CustomRun, opts v1.UpdateOptions) (*v1beta1.CustomRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "CustomRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.CustomRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1CustomRunImpl) UpdateStatus(ctx context.Context, in *v1beta1.CustomRun, opts v1.UpdateOptions) (*v1beta1.CustomRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "CustomRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.CustomRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1CustomRunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
-
-func (w *wrapTektonV1beta1) Pipelines(namespace string) typedtektonv1beta1.PipelineInterface {
-	return &wrapTektonV1beta1PipelineImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "tekton.dev",
-			Version:  "v1beta1",
-			Resource: "pipelines",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapTektonV1beta1PipelineImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedtektonv1beta1.PipelineInterface = (*wrapTektonV1beta1PipelineImpl)(nil)
-
-func (w *wrapTektonV1beta1PipelineImpl) Create(ctx context.Context, in *v1beta1.Pipeline, opts v1.CreateOptions) (*v1beta1.Pipeline, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "Pipeline",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.Pipeline{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1PipelineImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapTektonV1beta1PipelineImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapTektonV1beta1PipelineImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Pipeline, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.Pipeline{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1PipelineImpl) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PipelineList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.PipelineList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1PipelineImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Pipeline, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.Pipeline{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1PipelineImpl) Update(ctx context.Context, in *v1beta1.Pipeline, opts v1.UpdateOptions) (*v1beta1.Pipeline, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "Pipeline",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.Pipeline{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1PipelineImpl) UpdateStatus(ctx context.Context, in *v1beta1.Pipeline, opts v1.UpdateOptions) (*v1beta1.Pipeline, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "Pipeline",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.Pipeline{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1PipelineImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
-
-func (w *wrapTektonV1beta1) PipelineRuns(namespace string) typedtektonv1beta1.PipelineRunInterface {
-	return &wrapTektonV1beta1PipelineRunImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "tekton.dev",
-			Version:  "v1beta1",
-			Resource: "pipelineruns",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapTektonV1beta1PipelineRunImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedtektonv1beta1.PipelineRunInterface = (*wrapTektonV1beta1PipelineRunImpl)(nil)
-
-func (w *wrapTektonV1beta1PipelineRunImpl) Create(ctx context.Context, in *v1beta1.PipelineRun, opts v1.CreateOptions) (*v1beta1.PipelineRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "PipelineRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.PipelineRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1PipelineRunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapTektonV1beta1PipelineRunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapTektonV1beta1PipelineRunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PipelineRun, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.PipelineRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1PipelineRunImpl) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PipelineRunList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.PipelineRunList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1PipelineRunImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PipelineRun, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.PipelineRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1PipelineRunImpl) Update(ctx context.Context, in *v1beta1.PipelineRun, opts v1.UpdateOptions) (*v1beta1.PipelineRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "PipelineRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.PipelineRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1PipelineRunImpl) UpdateStatus(ctx context.Context, in *v1beta1.PipelineRun, opts v1.UpdateOptions) (*v1beta1.PipelineRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "PipelineRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.PipelineRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1PipelineRunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
-
-func (w *wrapTektonV1beta1) Tasks(namespace string) typedtektonv1beta1.TaskInterface {
-	return &wrapTektonV1beta1TaskImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "tekton.dev",
-			Version:  "v1beta1",
-			Resource: "tasks",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapTektonV1beta1TaskImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedtektonv1beta1.TaskInterface = (*wrapTektonV1beta1TaskImpl)(nil)
-
-func (w *wrapTektonV1beta1TaskImpl) Create(ctx context.Context, in *v1beta1.Task, opts v1.CreateOptions) (*v1beta1.Task, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "Task",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.Task{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1TaskImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapTektonV1beta1TaskImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapTektonV1beta1TaskImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Task, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.Task{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1TaskImpl) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.TaskList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.TaskList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1TaskImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Task, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.Task{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1TaskImpl) Update(ctx context.Context, in *v1beta1.Task, opts v1.UpdateOptions) (*v1beta1.Task, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "Task",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.Task{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1TaskImpl) UpdateStatus(ctx context.Context, in *v1beta1.Task, opts v1.UpdateOptions) (*v1beta1.Task, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "Task",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.Task{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1TaskImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
-
-func (w *wrapTektonV1beta1) TaskRuns(namespace string) typedtektonv1beta1.TaskRunInterface {
-	return &wrapTektonV1beta1TaskRunImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "tekton.dev",
-			Version:  "v1beta1",
-			Resource: "taskruns",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapTektonV1beta1TaskRunImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedtektonv1beta1.TaskRunInterface = (*wrapTektonV1beta1TaskRunImpl)(nil)
-
-func (w *wrapTektonV1beta1TaskRunImpl) Create(ctx context.Context, in *v1beta1.TaskRun, opts v1.CreateOptions) (*v1beta1.TaskRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "TaskRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.TaskRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1TaskRunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapTektonV1beta1TaskRunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapTektonV1beta1TaskRunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.TaskRun, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.TaskRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1TaskRunImpl) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.TaskRunList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.TaskRunList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1TaskRunImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.TaskRun, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.TaskRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1TaskRunImpl) Update(ctx context.Context, in *v1beta1.TaskRun, opts v1.UpdateOptions) (*v1beta1.TaskRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "TaskRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.TaskRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1TaskRunImpl) UpdateStatus(ctx context.Context, in *v1beta1.TaskRun, opts v1.UpdateOptions) (*v1beta1.TaskRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1beta1",
-		Kind:    "TaskRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.TaskRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1beta1TaskRunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
-
-// TektonV1 retrieves the TektonV1Client
-func (w *wrapClient) TektonV1() typedtektonv1.TektonV1Interface {
-	return &wrapTektonV1{
-		dyn: w.dyn,
-	}
-}
-
-type wrapTektonV1 struct {
-	dyn dynamic.Interface
-}
-
-func (w *wrapTektonV1) RESTClient() rest.Interface {
-	panic("RESTClient called on dynamic client!")
-}
-
-func (w *wrapTektonV1) Pipelines(namespace string) typedtektonv1.PipelineInterface {
-	return &wrapTektonV1PipelineImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "tekton.dev",
-			Version:  "v1",
-			Resource: "pipelines",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapTektonV1PipelineImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedtektonv1.PipelineInterface = (*wrapTektonV1PipelineImpl)(nil)
-
-func (w *wrapTektonV1PipelineImpl) Create(ctx context.Context, in *pipelinev1.Pipeline, opts v1.CreateOptions) (*pipelinev1.Pipeline, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1",
-		Kind:    "Pipeline",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.Pipeline{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1PipelineImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapTektonV1PipelineImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapTektonV1PipelineImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1.Pipeline, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.Pipeline{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1PipelineImpl) List(ctx context.Context, opts v1.ListOptions) (*pipelinev1.PipelineList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.PipelineList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1PipelineImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1.Pipeline, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.Pipeline{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1PipelineImpl) Update(ctx context.Context, in *pipelinev1.Pipeline, opts v1.UpdateOptions) (*pipelinev1.Pipeline, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1",
-		Kind:    "Pipeline",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.Pipeline{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1PipelineImpl) UpdateStatus(ctx context.Context, in *pipelinev1.Pipeline, opts v1.UpdateOptions) (*pipelinev1.Pipeline, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1",
-		Kind:    "Pipeline",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.Pipeline{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1PipelineImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
-
-func (w *wrapTektonV1) PipelineRuns(namespace string) typedtektonv1.PipelineRunInterface {
-	return &wrapTektonV1PipelineRunImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "tekton.dev",
-			Version:  "v1",
-			Resource: "pipelineruns",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapTektonV1PipelineRunImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedtektonv1.PipelineRunInterface = (*wrapTektonV1PipelineRunImpl)(nil)
-
-func (w *wrapTektonV1PipelineRunImpl) Create(ctx context.Context, in *pipelinev1.PipelineRun, opts v1.CreateOptions) (*pipelinev1.PipelineRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1",
-		Kind:    "PipelineRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.PipelineRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1PipelineRunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapTektonV1PipelineRunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapTektonV1PipelineRunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1.PipelineRun, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.PipelineRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1PipelineRunImpl) List(ctx context.Context, opts v1.ListOptions) (*pipelinev1.PipelineRunList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.PipelineRunList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1PipelineRunImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1.PipelineRun, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.PipelineRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1PipelineRunImpl) Update(ctx context.Context, in *pipelinev1.PipelineRun, opts v1.UpdateOptions) (*pipelinev1.PipelineRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1",
-		Kind:    "PipelineRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.PipelineRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1PipelineRunImpl) UpdateStatus(ctx context.Context, in *pipelinev1.PipelineRun, opts v1.UpdateOptions) (*pipelinev1.PipelineRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1",
-		Kind:    "PipelineRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.PipelineRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1PipelineRunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
-
-func (w *wrapTektonV1) Tasks(namespace string) typedtektonv1.TaskInterface {
-	return &wrapTektonV1TaskImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "tekton.dev",
-			Version:  "v1",
-			Resource: "tasks",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapTektonV1TaskImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedtektonv1.TaskInterface = (*wrapTektonV1TaskImpl)(nil)
-
-func (w *wrapTektonV1TaskImpl) Create(ctx context.Context, in *pipelinev1.Task, opts v1.CreateOptions) (*pipelinev1.Task, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1",
-		Kind:    "Task",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.Task{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1TaskImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapTektonV1TaskImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapTektonV1TaskImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1.Task, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.Task{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1TaskImpl) List(ctx context.Context, opts v1.ListOptions) (*pipelinev1.TaskList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.TaskList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1TaskImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1.Task, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.Task{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1TaskImpl) Update(ctx context.Context, in *pipelinev1.Task, opts v1.UpdateOptions) (*pipelinev1.Task, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1",
-		Kind:    "Task",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.Task{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1TaskImpl) UpdateStatus(ctx context.Context, in *pipelinev1.Task, opts v1.UpdateOptions) (*pipelinev1.Task, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1",
-		Kind:    "Task",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.Task{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1TaskImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
-
-func (w *wrapTektonV1) TaskRuns(namespace string) typedtektonv1.TaskRunInterface {
-	return &wrapTektonV1TaskRunImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "tekton.dev",
-			Version:  "v1",
-			Resource: "taskruns",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapTektonV1TaskRunImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedtektonv1.TaskRunInterface = (*wrapTektonV1TaskRunImpl)(nil)
-
-func (w *wrapTektonV1TaskRunImpl) Create(ctx context.Context, in *pipelinev1.TaskRun, opts v1.CreateOptions) (*pipelinev1.TaskRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1",
-		Kind:    "TaskRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.TaskRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1TaskRunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapTektonV1TaskRunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapTektonV1TaskRunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1.TaskRun, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.TaskRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1TaskRunImpl) List(ctx context.Context, opts v1.ListOptions) (*pipelinev1.TaskRunList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.TaskRunList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1TaskRunImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1.TaskRun, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.TaskRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1TaskRunImpl) Update(ctx context.Context, in *pipelinev1.TaskRun, opts v1.UpdateOptions) (*pipelinev1.TaskRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1",
-		Kind:    "TaskRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.TaskRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1TaskRunImpl) UpdateStatus(ctx context.Context, in *pipelinev1.TaskRun, opts v1.UpdateOptions) (*pipelinev1.TaskRun, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "tekton.dev",
-		Version: "v1",
-		Kind:    "TaskRun",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &pipelinev1.TaskRun{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapTektonV1TaskRunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipeline/pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipeline/pipeline.go
index a58d4037bd..7a79f9115a 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipeline/pipeline.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipeline/pipeline.go
@@ -21,15 +21,8 @@ package pipeline
 import (
 	context "context"
 
-	apispipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
-	versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 	v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
-	client "github.com/tektoncd/pipeline/pkg/client/injection/client"
 	factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
-	pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	cache "k8s.io/client-go/tools/cache"
 	controller "knative.dev/pkg/controller"
 	injection "knative.dev/pkg/injection"
 	logging "knative.dev/pkg/logging"
@@ -37,7 +30,6 @@ import (
 
 func init() {
 	injection.Default.RegisterInformer(withInformer)
-	injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
 }
 
 // Key is used for associating the Informer inside the context.Context.
@@ -49,11 +41,6 @@ func withInformer(ctx context.Context) (context.Context, controller.Informer) {
 	return context.WithValue(ctx, Key{}, inf), inf.Informer()
 }
 
-func withDynamicInformer(ctx context.Context) context.Context {
-	inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)}
-	return context.WithValue(ctx, Key{}, inf)
-}
-
 // Get extracts the typed informer from the context.
 func Get(ctx context.Context) v1.PipelineInformer {
 	untyped := ctx.Value(Key{})
@@ -63,54 +50,3 @@ func Get(ctx context.Context) v1.PipelineInformer {
 	}
 	return untyped.(v1.PipelineInformer)
 }
-
-type wrapper struct {
-	client versioned.Interface
-
-	namespace string
-
-	resourceVersion string
-}
-
-var _ v1.PipelineInformer = (*wrapper)(nil)
-var _ pipelinev1.PipelineLister = (*wrapper)(nil)
-
-func (w *wrapper) Informer() cache.SharedIndexInformer {
-	return cache.NewSharedIndexInformer(nil, &apispipelinev1.Pipeline{}, 0, nil)
-}
-
-func (w *wrapper) Lister() pipelinev1.PipelineLister {
-	return w
-}
-
-func (w *wrapper) Pipelines(namespace string) pipelinev1.PipelineNamespaceLister {
-	return &wrapper{client: w.client, namespace: namespace, resourceVersion: w.resourceVersion}
-}
-
-// SetResourceVersion allows consumers to adjust the minimum resourceVersion
-// used by the underlying client.  It is not accessible via the standard
-// lister interface, but can be accessed through a user-defined interface and
-// an implementation check e.g. rvs, ok := foo.(ResourceVersionSetter)
-func (w *wrapper) SetResourceVersion(resourceVersion string) {
-	w.resourceVersion = resourceVersion
-}
-
-func (w *wrapper) List(selector labels.Selector) (ret []*apispipelinev1.Pipeline, err error) {
-	lo, err := w.client.TektonV1().Pipelines(w.namespace).List(context.TODO(), metav1.ListOptions{
-		LabelSelector:   selector.String(),
-		ResourceVersion: w.resourceVersion,
-	})
-	if err != nil {
-		return nil, err
-	}
-	for idx := range lo.Items {
-		ret = append(ret, &lo.Items[idx])
-	}
-	return ret, nil
-}
-
-func (w *wrapper) Get(name string) (*apispipelinev1.Pipeline, error) {
-	return w.client.TektonV1().Pipelines(w.namespace).Get(context.TODO(), name, metav1.GetOptions{
-		ResourceVersion: w.resourceVersion,
-	})
-}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun/pipelinerun.go
index 44b15d5230..0e08505315 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun/pipelinerun.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun/pipelinerun.go
@@ -21,15 +21,8 @@ package pipelinerun
 import (
 	context "context"
 
-	apispipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
-	versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 	v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
-	client "github.com/tektoncd/pipeline/pkg/client/injection/client"
 	factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
-	pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	cache "k8s.io/client-go/tools/cache"
 	controller "knative.dev/pkg/controller"
 	injection "knative.dev/pkg/injection"
 	logging "knative.dev/pkg/logging"
@@ -37,7 +30,6 @@ import (
 
 func init() {
 	injection.Default.RegisterInformer(withInformer)
-	injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
 }
 
 // Key is used for associating the Informer inside the context.Context.
@@ -49,11 +41,6 @@ func withInformer(ctx context.Context) (context.Context, controller.Informer) {
 	return context.WithValue(ctx, Key{}, inf), inf.Informer()
 }
 
-func withDynamicInformer(ctx context.Context) context.Context {
-	inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)}
-	return context.WithValue(ctx, Key{}, inf)
-}
-
 // Get extracts the typed informer from the context.
 func Get(ctx context.Context) v1.PipelineRunInformer {
 	untyped := ctx.Value(Key{})
@@ -63,54 +50,3 @@ func Get(ctx context.Context) v1.PipelineRunInformer {
 	}
 	return untyped.(v1.PipelineRunInformer)
 }
-
-type wrapper struct {
-	client versioned.Interface
-
-	namespace string
-
-	resourceVersion string
-}
-
-var _ v1.PipelineRunInformer = (*wrapper)(nil)
-var _ pipelinev1.PipelineRunLister = (*wrapper)(nil)
-
-func (w *wrapper) Informer() cache.SharedIndexInformer {
-	return cache.NewSharedIndexInformer(nil, &apispipelinev1.PipelineRun{}, 0, nil)
-}
-
-func (w *wrapper) Lister() pipelinev1.PipelineRunLister {
-	return w
-}
-
-func (w *wrapper) PipelineRuns(namespace string) pipelinev1.PipelineRunNamespaceLister {
-	return &wrapper{client: w.client, namespace: namespace, resourceVersion: w.resourceVersion}
-}
-
-// SetResourceVersion allows consumers to adjust the minimum resourceVersion
-// used by the underlying client.  It is not accessible via the standard
-// lister interface, but can be accessed through a user-defined interface and
-// an implementation check e.g. rvs, ok := foo.(ResourceVersionSetter)
-func (w *wrapper) SetResourceVersion(resourceVersion string) {
-	w.resourceVersion = resourceVersion
-}
-
-func (w *wrapper) List(selector labels.Selector) (ret []*apispipelinev1.PipelineRun, err error) {
-	lo, err := w.client.TektonV1().PipelineRuns(w.namespace).List(context.TODO(), metav1.ListOptions{
-		LabelSelector:   selector.String(),
-		ResourceVersion: w.resourceVersion,
-	})
-	if err != nil {
-		return nil, err
-	}
-	for idx := range lo.Items {
-		ret = append(ret, &lo.Items[idx])
-	}
-	return ret, nil
-}
-
-func (w *wrapper) Get(name string) (*apispipelinev1.PipelineRun, error) {
-	return w.client.TektonV1().PipelineRuns(w.namespace).Get(context.TODO(), name, metav1.GetOptions{
-		ResourceVersion: w.resourceVersion,
-	})
-}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/task/task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/task/task.go
index fb9021cb36..93e7c166dd 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/task/task.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/task/task.go
@@ -21,15 +21,8 @@ package task
 import (
 	context "context"
 
-	apispipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
-	versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 	v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
-	client "github.com/tektoncd/pipeline/pkg/client/injection/client"
 	factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
-	pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	cache "k8s.io/client-go/tools/cache"
 	controller "knative.dev/pkg/controller"
 	injection "knative.dev/pkg/injection"
 	logging "knative.dev/pkg/logging"
@@ -37,7 +30,6 @@ import (
 
 func init() {
 	injection.Default.RegisterInformer(withInformer)
-	injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
 }
 
 // Key is used for associating the Informer inside the context.Context.
@@ -49,11 +41,6 @@ func withInformer(ctx context.Context) (context.Context, controller.Informer) {
 	return context.WithValue(ctx, Key{}, inf), inf.Informer()
 }
 
-func withDynamicInformer(ctx context.Context) context.Context {
-	inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)}
-	return context.WithValue(ctx, Key{}, inf)
-}
-
 // Get extracts the typed informer from the context.
 func Get(ctx context.Context) v1.TaskInformer {
 	untyped := ctx.Value(Key{})
@@ -63,54 +50,3 @@ func Get(ctx context.Context) v1.TaskInformer {
 	}
 	return untyped.(v1.TaskInformer)
 }
-
-type wrapper struct {
-	client versioned.Interface
-
-	namespace string
-
-	resourceVersion string
-}
-
-var _ v1.TaskInformer = (*wrapper)(nil)
-var _ pipelinev1.TaskLister = (*wrapper)(nil)
-
-func (w *wrapper) Informer() cache.SharedIndexInformer {
-	return cache.NewSharedIndexInformer(nil, &apispipelinev1.Task{}, 0, nil)
-}
-
-func (w *wrapper) Lister() pipelinev1.TaskLister {
-	return w
-}
-
-func (w *wrapper) Tasks(namespace string) pipelinev1.TaskNamespaceLister {
-	return &wrapper{client: w.client, namespace: namespace, resourceVersion: w.resourceVersion}
-}
-
-// SetResourceVersion allows consumers to adjust the minimum resourceVersion
-// used by the underlying client.  It is not accessible via the standard
-// lister interface, but can be accessed through a user-defined interface and
-// an implementation check e.g. rvs, ok := foo.(ResourceVersionSetter)
-func (w *wrapper) SetResourceVersion(resourceVersion string) {
-	w.resourceVersion = resourceVersion
-}
-
-func (w *wrapper) List(selector labels.Selector) (ret []*apispipelinev1.Task, err error) {
-	lo, err := w.client.TektonV1().Tasks(w.namespace).List(context.TODO(), metav1.ListOptions{
-		LabelSelector:   selector.String(),
-		ResourceVersion: w.resourceVersion,
-	})
-	if err != nil {
-		return nil, err
-	}
-	for idx := range lo.Items {
-		ret = append(ret, &lo.Items[idx])
-	}
-	return ret, nil
-}
-
-func (w *wrapper) Get(name string) (*apispipelinev1.Task, error) {
-	return w.client.TektonV1().Tasks(w.namespace).Get(context.TODO(), name, metav1.GetOptions{
-		ResourceVersion: w.resourceVersion,
-	})
-}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun/taskrun.go
index 0b7cb1be30..bd575e14c1 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun/taskrun.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun/taskrun.go
@@ -21,15 +21,8 @@ package taskrun
 import (
 	context "context"
 
-	apispipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
-	versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 	v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
-	client "github.com/tektoncd/pipeline/pkg/client/injection/client"
 	factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
-	pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	cache "k8s.io/client-go/tools/cache"
 	controller "knative.dev/pkg/controller"
 	injection "knative.dev/pkg/injection"
 	logging "knative.dev/pkg/logging"
@@ -37,7 +30,6 @@ import (
 
 func init() {
 	injection.Default.RegisterInformer(withInformer)
-	injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
 }
 
 // Key is used for associating the Informer inside the context.Context.
@@ -49,11 +41,6 @@ func withInformer(ctx context.Context) (context.Context, controller.Informer) {
 	return context.WithValue(ctx, Key{}, inf), inf.Informer()
 }
 
-func withDynamicInformer(ctx context.Context) context.Context {
-	inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)}
-	return context.WithValue(ctx, Key{}, inf)
-}
-
 // Get extracts the typed informer from the context.
 func Get(ctx context.Context) v1.TaskRunInformer {
 	untyped := ctx.Value(Key{})
@@ -63,54 +50,3 @@ func Get(ctx context.Context) v1.TaskRunInformer {
 	}
 	return untyped.(v1.TaskRunInformer)
 }
-
-type wrapper struct {
-	client versioned.Interface
-
-	namespace string
-
-	resourceVersion string
-}
-
-var _ v1.TaskRunInformer = (*wrapper)(nil)
-var _ pipelinev1.TaskRunLister = (*wrapper)(nil)
-
-func (w *wrapper) Informer() cache.SharedIndexInformer {
-	return cache.NewSharedIndexInformer(nil, &apispipelinev1.TaskRun{}, 0, nil)
-}
-
-func (w *wrapper) Lister() pipelinev1.TaskRunLister {
-	return w
-}
-
-func (w *wrapper) TaskRuns(namespace string) pipelinev1.TaskRunNamespaceLister {
-	return &wrapper{client: w.client, namespace: namespace, resourceVersion: w.resourceVersion}
-}
-
-// SetResourceVersion allows consumers to adjust the minimum resourceVersion
-// used by the underlying client.  It is not accessible via the standard
-// lister interface, but can be accessed through a user-defined interface and
-// an implementation check e.g. rvs, ok := foo.(ResourceVersionSetter)
-func (w *wrapper) SetResourceVersion(resourceVersion string) {
-	w.resourceVersion = resourceVersion
-}
-
-func (w *wrapper) List(selector labels.Selector) (ret []*apispipelinev1.TaskRun, err error) {
-	lo, err := w.client.TektonV1().TaskRuns(w.namespace).List(context.TODO(), metav1.ListOptions{
-		LabelSelector:   selector.String(),
-		ResourceVersion: w.resourceVersion,
-	})
-	if err != nil {
-		return nil, err
-	}
-	for idx := range lo.Items {
-		ret = append(ret, &lo.Items[idx])
-	}
-	return ret, nil
-}
-
-func (w *wrapper) Get(name string) (*apispipelinev1.TaskRun, error) {
-	return w.client.TektonV1().TaskRuns(w.namespace).Get(context.TODO(), name, metav1.GetOptions{
-		ResourceVersion: w.resourceVersion,
-	})
-}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/verificationpolicy.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/verificationpolicy.go
index 04480ee8ea..9962547fc1 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/verificationpolicy.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/verificationpolicy.go
@@ -21,15 +21,8 @@ package verificationpolicy
 import (
 	context "context"
 
-	apispipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
-	versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 	v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1"
-	client "github.com/tektoncd/pipeline/pkg/client/injection/client"
 	factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
-	pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	cache "k8s.io/client-go/tools/cache"
 	controller "knative.dev/pkg/controller"
 	injection "knative.dev/pkg/injection"
 	logging "knative.dev/pkg/logging"
@@ -37,7 +30,6 @@ import (
 
 func init() {
 	injection.Default.RegisterInformer(withInformer)
-	injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
 }
 
 // Key is used for associating the Informer inside the context.Context.
@@ -49,11 +41,6 @@ func withInformer(ctx context.Context) (context.Context, controller.Informer) {
 	return context.WithValue(ctx, Key{}, inf), inf.Informer()
 }
 
-func withDynamicInformer(ctx context.Context) context.Context {
-	inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)}
-	return context.WithValue(ctx, Key{}, inf)
-}
-
 // Get extracts the typed informer from the context.
 func Get(ctx context.Context) v1alpha1.VerificationPolicyInformer {
 	untyped := ctx.Value(Key{})
@@ -63,54 +50,3 @@ func Get(ctx context.Context) v1alpha1.VerificationPolicyInformer {
 	}
 	return untyped.(v1alpha1.VerificationPolicyInformer)
 }
-
-type wrapper struct {
-	client versioned.Interface
-
-	namespace string
-
-	resourceVersion string
-}
-
-var _ v1alpha1.VerificationPolicyInformer = (*wrapper)(nil)
-var _ pipelinev1alpha1.VerificationPolicyLister = (*wrapper)(nil)
-
-func (w *wrapper) Informer() cache.SharedIndexInformer {
-	return cache.NewSharedIndexInformer(nil, &apispipelinev1alpha1.VerificationPolicy{}, 0, nil)
-}
-
-func (w *wrapper) Lister() pipelinev1alpha1.VerificationPolicyLister {
-	return w
-}
-
-func (w *wrapper) VerificationPolicies(namespace string) pipelinev1alpha1.VerificationPolicyNamespaceLister {
-	return &wrapper{client: w.client, namespace: namespace, resourceVersion: w.resourceVersion}
-}
-
-// SetResourceVersion allows consumers to adjust the minimum resourceVersion
-// used by the underlying client.  It is not accessible via the standard
-// lister interface, but can be accessed through a user-defined interface and
-// an implementation check e.g. rvs, ok := foo.(ResourceVersionSetter)
-func (w *wrapper) SetResourceVersion(resourceVersion string) {
-	w.resourceVersion = resourceVersion
-}
-
-func (w *wrapper) List(selector labels.Selector) (ret []*apispipelinev1alpha1.VerificationPolicy, err error) {
-	lo, err := w.client.TektonV1alpha1().VerificationPolicies(w.namespace).List(context.TODO(), v1.ListOptions{
-		LabelSelector:   selector.String(),
-		ResourceVersion: w.resourceVersion,
-	})
-	if err != nil {
-		return nil, err
-	}
-	for idx := range lo.Items {
-		ret = append(ret, &lo.Items[idx])
-	}
-	return ret, nil
-}
-
-func (w *wrapper) Get(name string) (*apispipelinev1alpha1.VerificationPolicy, error) {
-	return w.client.TektonV1alpha1().VerificationPolicies(w.namespace).Get(context.TODO(), name, v1.GetOptions{
-		ResourceVersion: w.resourceVersion,
-	})
-}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/clustertask/clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/clustertask/clustertask.go
index 5aafda6891..7626993d1d 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/clustertask/clustertask.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/clustertask/clustertask.go
@@ -21,15 +21,8 @@ package clustertask
 import (
 	context "context"
 
-	apispipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
-	versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 	v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
-	client "github.com/tektoncd/pipeline/pkg/client/injection/client"
 	factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
-	pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	cache "k8s.io/client-go/tools/cache"
 	controller "knative.dev/pkg/controller"
 	injection "knative.dev/pkg/injection"
 	logging "knative.dev/pkg/logging"
@@ -37,7 +30,6 @@ import (
 
 func init() {
 	injection.Default.RegisterInformer(withInformer)
-	injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
 }
 
 // Key is used for associating the Informer inside the context.Context.
@@ -49,11 +41,6 @@ func withInformer(ctx context.Context) (context.Context, controller.Informer) {
 	return context.WithValue(ctx, Key{}, inf), inf.Informer()
 }
 
-func withDynamicInformer(ctx context.Context) context.Context {
-	inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)}
-	return context.WithValue(ctx, Key{}, inf)
-}
-
 // Get extracts the typed informer from the context.
 func Get(ctx context.Context) v1beta1.ClusterTaskInformer {
 	untyped := ctx.Value(Key{})
@@ -63,48 +50,3 @@ func Get(ctx context.Context) v1beta1.ClusterTaskInformer {
 	}
 	return untyped.(v1beta1.ClusterTaskInformer)
 }
-
-type wrapper struct {
-	client versioned.Interface
-
-	resourceVersion string
-}
-
-var _ v1beta1.ClusterTaskInformer = (*wrapper)(nil)
-var _ pipelinev1beta1.ClusterTaskLister = (*wrapper)(nil)
-
-func (w *wrapper) Informer() cache.SharedIndexInformer {
-	return cache.NewSharedIndexInformer(nil, &apispipelinev1beta1.ClusterTask{}, 0, nil)
-}
-
-func (w *wrapper) Lister() pipelinev1beta1.ClusterTaskLister {
-	return w
-}
-
-// SetResourceVersion allows consumers to adjust the minimum resourceVersion
-// used by the underlying client.  It is not accessible via the standard
-// lister interface, but can be accessed through a user-defined interface and
-// an implementation check e.g. rvs, ok := foo.(ResourceVersionSetter)
-func (w *wrapper) SetResourceVersion(resourceVersion string) {
-	w.resourceVersion = resourceVersion
-}
-
-func (w *wrapper) List(selector labels.Selector) (ret []*apispipelinev1beta1.ClusterTask, err error) {
-	lo, err := w.client.TektonV1beta1().ClusterTasks().List(context.TODO(), v1.ListOptions{
-		LabelSelector:   selector.String(),
-		ResourceVersion: w.resourceVersion,
-	})
-	if err != nil {
-		return nil, err
-	}
-	for idx := range lo.Items {
-		ret = append(ret, &lo.Items[idx])
-	}
-	return ret, nil
-}
-
-func (w *wrapper) Get(name string) (*apispipelinev1beta1.ClusterTask, error) {
-	return w.client.TektonV1beta1().ClusterTasks().Get(context.TODO(), name, v1.GetOptions{
-		ResourceVersion: w.resourceVersion,
-	})
-}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun/customrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun/customrun.go
index 5f97b83c0a..a35b017b39 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun/customrun.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun/customrun.go
@@ -21,15 +21,8 @@ package customrun
 import (
 	context "context"
 
-	apispipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
-	versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 	v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
-	client "github.com/tektoncd/pipeline/pkg/client/injection/client"
 	factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
-	pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	cache "k8s.io/client-go/tools/cache"
 	controller "knative.dev/pkg/controller"
 	injection "knative.dev/pkg/injection"
 	logging "knative.dev/pkg/logging"
@@ -37,7 +30,6 @@ import (
 
 func init() {
 	injection.Default.RegisterInformer(withInformer)
-	injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
 }
 
 // Key is used for associating the Informer inside the context.Context.
@@ -49,11 +41,6 @@ func withInformer(ctx context.Context) (context.Context, controller.Informer) {
 	return context.WithValue(ctx, Key{}, inf), inf.Informer()
 }
 
-func withDynamicInformer(ctx context.Context) context.Context {
-	inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)}
-	return context.WithValue(ctx, Key{}, inf)
-}
-
 // Get extracts the typed informer from the context.
 func Get(ctx context.Context) v1beta1.CustomRunInformer {
 	untyped := ctx.Value(Key{})
@@ -63,54 +50,3 @@ func Get(ctx context.Context) v1beta1.CustomRunInformer {
 	}
 	return untyped.(v1beta1.CustomRunInformer)
 }
-
-type wrapper struct {
-	client versioned.Interface
-
-	namespace string
-
-	resourceVersion string
-}
-
-var _ v1beta1.CustomRunInformer = (*wrapper)(nil)
-var _ pipelinev1beta1.CustomRunLister = (*wrapper)(nil)
-
-func (w *wrapper) Informer() cache.SharedIndexInformer {
-	return cache.NewSharedIndexInformer(nil, &apispipelinev1beta1.CustomRun{}, 0, nil)
-}
-
-func (w *wrapper) Lister() pipelinev1beta1.CustomRunLister {
-	return w
-}
-
-func (w *wrapper) CustomRuns(namespace string) pipelinev1beta1.CustomRunNamespaceLister {
-	return &wrapper{client: w.client, namespace: namespace, resourceVersion: w.resourceVersion}
-}
-
-// SetResourceVersion allows consumers to adjust the minimum resourceVersion
-// used by the underlying client.  It is not accessible via the standard
-// lister interface, but can be accessed through a user-defined interface and
-// an implementation check e.g. rvs, ok := foo.(ResourceVersionSetter)
-func (w *wrapper) SetResourceVersion(resourceVersion string) {
-	w.resourceVersion = resourceVersion
-}
-
-func (w *wrapper) List(selector labels.Selector) (ret []*apispipelinev1beta1.CustomRun, err error) {
-	lo, err := w.client.TektonV1beta1().CustomRuns(w.namespace).List(context.TODO(), v1.ListOptions{
-		LabelSelector:   selector.String(),
-		ResourceVersion: w.resourceVersion,
-	})
-	if err != nil {
-		return nil, err
-	}
-	for idx := range lo.Items {
-		ret = append(ret, &lo.Items[idx])
-	}
-	return ret, nil
-}
-
-func (w *wrapper) Get(name string) (*apispipelinev1beta1.CustomRun, error) {
-	return w.client.TektonV1beta1().CustomRuns(w.namespace).Get(context.TODO(), name, v1.GetOptions{
-		ResourceVersion: w.resourceVersion,
-	})
-}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/pipelinerun.go
index 8ffb08c78e..29efdfecd4 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/pipelinerun.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/pipelinerun.go
@@ -21,15 +21,8 @@ package pipelinerun
 import (
 	context "context"
 
-	apispipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
-	versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 	v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
-	client "github.com/tektoncd/pipeline/pkg/client/injection/client"
 	factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
-	pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	cache "k8s.io/client-go/tools/cache"
 	controller "knative.dev/pkg/controller"
 	injection "knative.dev/pkg/injection"
 	logging "knative.dev/pkg/logging"
@@ -37,7 +30,6 @@ import (
 
 func init() {
 	injection.Default.RegisterInformer(withInformer)
-	injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
 }
 
 // Key is used for associating the Informer inside the context.Context.
@@ -49,11 +41,6 @@ func withInformer(ctx context.Context) (context.Context, controller.Informer) {
 	return context.WithValue(ctx, Key{}, inf), inf.Informer()
 }
 
-func withDynamicInformer(ctx context.Context) context.Context {
-	inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)}
-	return context.WithValue(ctx, Key{}, inf)
-}
-
 // Get extracts the typed informer from the context.
 func Get(ctx context.Context) v1beta1.PipelineRunInformer {
 	untyped := ctx.Value(Key{})
@@ -63,54 +50,3 @@ func Get(ctx context.Context) v1beta1.PipelineRunInformer {
 	}
 	return untyped.(v1beta1.PipelineRunInformer)
 }
-
-type wrapper struct {
-	client versioned.Interface
-
-	namespace string
-
-	resourceVersion string
-}
-
-var _ v1beta1.PipelineRunInformer = (*wrapper)(nil)
-var _ pipelinev1beta1.PipelineRunLister = (*wrapper)(nil)
-
-func (w *wrapper) Informer() cache.SharedIndexInformer {
-	return cache.NewSharedIndexInformer(nil, &apispipelinev1beta1.PipelineRun{}, 0, nil)
-}
-
-func (w *wrapper) Lister() pipelinev1beta1.PipelineRunLister {
-	return w
-}
-
-func (w *wrapper) PipelineRuns(namespace string) pipelinev1beta1.PipelineRunNamespaceLister {
-	return &wrapper{client: w.client, namespace: namespace, resourceVersion: w.resourceVersion}
-}
-
-// SetResourceVersion allows consumers to adjust the minimum resourceVersion
-// used by the underlying client.  It is not accessible via the standard
-// lister interface, but can be accessed through a user-defined interface and
-// an implementation check e.g. rvs, ok := foo.(ResourceVersionSetter)
-func (w *wrapper) SetResourceVersion(resourceVersion string) {
-	w.resourceVersion = resourceVersion
-}
-
-func (w *wrapper) List(selector labels.Selector) (ret []*apispipelinev1beta1.PipelineRun, err error) {
-	lo, err := w.client.TektonV1beta1().PipelineRuns(w.namespace).List(context.TODO(), v1.ListOptions{
-		LabelSelector:   selector.String(),
-		ResourceVersion: w.resourceVersion,
-	})
-	if err != nil {
-		return nil, err
-	}
-	for idx := range lo.Items {
-		ret = append(ret, &lo.Items[idx])
-	}
-	return ret, nil
-}
-
-func (w *wrapper) Get(name string) (*apispipelinev1beta1.PipelineRun, error) {
-	return w.client.TektonV1beta1().PipelineRuns(w.namespace).Get(context.TODO(), name, v1.GetOptions{
-		ResourceVersion: w.resourceVersion,
-	})
-}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/taskrun.go
index 70f4da7a1a..7cab8456f7 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/taskrun.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/taskrun.go
@@ -21,15 +21,8 @@ package taskrun
 import (
 	context "context"
 
-	apispipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
-	versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 	v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
-	client "github.com/tektoncd/pipeline/pkg/client/injection/client"
 	factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
-	pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	cache "k8s.io/client-go/tools/cache"
 	controller "knative.dev/pkg/controller"
 	injection "knative.dev/pkg/injection"
 	logging "knative.dev/pkg/logging"
@@ -37,7 +30,6 @@ import (
 
 func init() {
 	injection.Default.RegisterInformer(withInformer)
-	injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
 }
 
 // Key is used for associating the Informer inside the context.Context.
@@ -49,11 +41,6 @@ func withInformer(ctx context.Context) (context.Context, controller.Informer) {
 	return context.WithValue(ctx, Key{}, inf), inf.Informer()
 }
 
-func withDynamicInformer(ctx context.Context) context.Context {
-	inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)}
-	return context.WithValue(ctx, Key{}, inf)
-}
-
 // Get extracts the typed informer from the context.
 func Get(ctx context.Context) v1beta1.TaskRunInformer {
 	untyped := ctx.Value(Key{})
@@ -63,54 +50,3 @@ func Get(ctx context.Context) v1beta1.TaskRunInformer {
 	}
 	return untyped.(v1beta1.TaskRunInformer)
 }
-
-type wrapper struct {
-	client versioned.Interface
-
-	namespace string
-
-	resourceVersion string
-}
-
-var _ v1beta1.TaskRunInformer = (*wrapper)(nil)
-var _ pipelinev1beta1.TaskRunLister = (*wrapper)(nil)
-
-func (w *wrapper) Informer() cache.SharedIndexInformer {
-	return cache.NewSharedIndexInformer(nil, &apispipelinev1beta1.TaskRun{}, 0, nil)
-}
-
-func (w *wrapper) Lister() pipelinev1beta1.TaskRunLister {
-	return w
-}
-
-func (w *wrapper) TaskRuns(namespace string) pipelinev1beta1.TaskRunNamespaceLister {
-	return &wrapper{client: w.client, namespace: namespace, resourceVersion: w.resourceVersion}
-}
-
-// SetResourceVersion allows consumers to adjust the minimum resourceVersion
-// used by the underlying client.  It is not accessible via the standard
-// lister interface, but can be accessed through a user-defined interface and
-// an implementation check e.g. rvs, ok := foo.(ResourceVersionSetter)
-func (w *wrapper) SetResourceVersion(resourceVersion string) {
-	w.resourceVersion = resourceVersion
-}
-
-func (w *wrapper) List(selector labels.Selector) (ret []*apispipelinev1beta1.TaskRun, err error) {
-	lo, err := w.client.TektonV1beta1().TaskRuns(w.namespace).List(context.TODO(), v1.ListOptions{
-		LabelSelector:   selector.String(),
-		ResourceVersion: w.resourceVersion,
-	})
-	if err != nil {
-		return nil, err
-	}
-	for idx := range lo.Items {
-		ret = append(ret, &lo.Items[idx])
-	}
-	return ret, nil
-}
-
-func (w *wrapper) Get(name string) (*apispipelinev1beta1.TaskRun, error) {
-	return w.client.TektonV1beta1().TaskRuns(w.namespace).Get(context.TODO(), name, v1.GetOptions{
-		ResourceVersion: w.resourceVersion,
-	})
-}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/clientset.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/clientset.go
index df12dfc026..48ab004778 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/clientset.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/clientset.go
@@ -35,8 +35,7 @@ type Interface interface {
 	ResolutionV1beta1() resolutionv1beta1.ResolutionV1beta1Interface
 }
 
-// Clientset contains the clients for groups. Each group has exactly one
-// version included in a Clientset.
+// Clientset contains the clients for groups.
 type Clientset struct {
 	*discovery.DiscoveryClient
 	resolutionV1alpha1 *resolutionv1alpha1.ResolutionV1alpha1Client
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/factory.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/factory.go
index 39c9230ec8..49918ae947 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/factory.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/factory.go
@@ -47,6 +47,11 @@ type sharedInformerFactory struct {
 	// startedInformers is used for tracking which informers have been started.
 	// This allows Start() to be called multiple times safely.
 	startedInformers map[reflect.Type]bool
+	// wg tracks how many goroutines were started.
+	wg sync.WaitGroup
+	// shuttingDown is true once Shutdown has been called. Shutdown itself may
+	// still be in progress, because it waits for the informer goroutines to finish.
+	shuttingDown bool
 }
 
 // WithCustomResyncConfig sets a custom resync period for the specified informer types.
@@ -107,20 +112,39 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy
 	return factory
 }
 
-// Start initializes all requested informers.
 func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
 
+	if f.shuttingDown {
+		return
+	}
+
 	for informerType, informer := range f.informers {
 		if !f.startedInformers[informerType] {
-			go informer.Run(stopCh)
+			f.wg.Add(1)
+			// We need a new variable in each loop iteration,
+			// otherwise the goroutine would use the loop variable
+			// and that keeps changing.
+			informer := informer
+			go func() {
+				defer f.wg.Done()
+				informer.Run(stopCh)
+			}()
 			f.startedInformers[informerType] = true
 		}
 	}
 }
 
-// WaitForCacheSync waits for all started informers' cache were synced.
+func (f *sharedInformerFactory) Shutdown() {
+	f.lock.Lock()
+	f.shuttingDown = true
+	f.lock.Unlock()
+
+	// Will return immediately if there is nothing to wait for.
+	f.wg.Wait()
+}
+
 func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
 	informers := func() map[reflect.Type]cache.SharedIndexInformer {
 		f.lock.Lock()
@@ -167,11 +191,58 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
 
 // SharedInformerFactory provides shared informers for resources in all known
 // API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.Shutdown()    // Returns immediately if nothing was started.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done())          // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//	    if !ok {
+//	        fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//	        return
+//	    }
+//	}
+//
+//	// Informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
 type SharedInformerFactory interface {
 	internalinterfaces.SharedInformerFactory
-	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// Start initializes all requested informers. They are handled in goroutines
+	// which run until the stop channel gets closed.
+	Start(stopCh <-chan struct{})
+
+	// Shutdown marks a factory as shutting down. At that point no new
+	// informers can be started anymore and Start will return without
+	// doing anything.
+	//
+	// In addition, Shutdown blocks until all goroutines have terminated. For that
+	// to happen, the close channel(s) that they were started with must be closed,
+	// either before Shutdown gets called or while it is waiting.
+	//
+	// Shutdown may be called multiple times, even concurrently. All such calls will
+	// block until all goroutines have terminated.
+	Shutdown()
+
+	// WaitForCacheSync blocks until all started informers' caches were synced
+	// or the stop channel gets closed.
 	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
 
+	// ForResource gives generic access to a shared informer of the matching type.
+	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// InformerFor returns the SharedIndexInformer for obj using an internal
+	// client.
+	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
 	Resolution() resolution.Interface
 }
 
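The generated factory now exposes an explicit `Shutdown()` next to `Start()` and `WaitForCacheSync()`. Below is a minimal sketch of the intended lifecycle using the Tekton resolution clientset touched in this diff; the `startInformers` helper and the 10-minute resync period are illustrative and not part of the generated code.

```go
package example

import (
	"context"
	"fmt"
	"os"
	"time"

	versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
	externalversions "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions"
	"k8s.io/client-go/rest"
)

// startInformers is an illustrative helper: build the factory, register an
// informer, start it, wait for its cache, and shut everything down on return.
func startInformers(cfg *rest.Config) {
	client := versioned.NewForConfigOrDie(cfg)
	factory := externalversions.NewSharedInformerFactory(client, 10*time.Minute)

	ctx, cancel := context.WithCancel(context.Background())
	defer factory.Shutdown() // runs last: waits for the informer goroutines to exit
	defer cancel()           // runs first: closes ctx.Done() so those goroutines stop

	// Requesting the typed informer registers it with the factory before Start.
	rrInformer := factory.Resolution().V1beta1().ResolutionRequests()
	_ = rrInformer.Informer()

	factory.Start(ctx.Done())
	for typ, ok := range factory.WaitForCacheSync(ctx.Done()) {
		if !ok {
			fmt.Fprintf(os.Stderr, "cache failed to sync: %v\n", typ)
			return
		}
	}

	_ = rrInformer.Lister() // caches are synced; the lister is ready to use
}
```

Note the defer ordering: `cancel()` must run before `Shutdown()`, otherwise `Shutdown()` would block forever waiting for goroutines that never see a closed stop channel.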
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/client.go
index 2b357f8253..498e20322f 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/client.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/client.go
@@ -20,26 +20,10 @@ package client
 
 import (
 	context "context"
-	json "encoding/json"
-	errors "errors"
-	fmt "fmt"
 
-	v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
-	v1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
 	versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
-	typedresolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1"
-	typedresolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	discovery "k8s.io/client-go/discovery"
-	dynamic "k8s.io/client-go/dynamic"
 	rest "k8s.io/client-go/rest"
 	injection "knative.dev/pkg/injection"
-	dynamicclient "knative.dev/pkg/injection/clients/dynamicclient"
 	logging "knative.dev/pkg/logging"
 )
 
@@ -48,7 +32,6 @@ func init() {
 	injection.Default.RegisterClientFetcher(func(ctx context.Context) interface{} {
 		return Get(ctx)
 	})
-	injection.Dynamic.RegisterDynamicClient(withClientFromDynamic)
 }
 
 // Key is used as the key for associating information with a context.Context.
@@ -58,10 +41,6 @@ func withClientFromConfig(ctx context.Context, cfg *rest.Config) context.Context
 	return context.WithValue(ctx, Key{}, versioned.NewForConfigOrDie(cfg))
 }
 
-func withClientFromDynamic(ctx context.Context) context.Context {
-	return context.WithValue(ctx, Key{}, &wrapClient{dyn: dynamicclient.Get(ctx)})
-}
-
 // Get extracts the versioned.Interface client from the context.
 func Get(ctx context.Context) versioned.Interface {
 	untyped := ctx.Value(Key{})
@@ -76,316 +55,3 @@ func Get(ctx context.Context) versioned.Interface {
 	}
 	return untyped.(versioned.Interface)
 }
-
-type wrapClient struct {
-	dyn dynamic.Interface
-}
-
-var _ versioned.Interface = (*wrapClient)(nil)
-
-func (w *wrapClient) Discovery() discovery.DiscoveryInterface {
-	panic("Discovery called on dynamic client!")
-}
-
-func convert(from interface{}, to runtime.Object) error {
-	bs, err := json.Marshal(from)
-	if err != nil {
-		return fmt.Errorf("Marshal() = %w", err)
-	}
-	if err := json.Unmarshal(bs, to); err != nil {
-		return fmt.Errorf("Unmarshal() = %w", err)
-	}
-	return nil
-}
-
-// ResolutionV1alpha1 retrieves the ResolutionV1alpha1Client
-func (w *wrapClient) ResolutionV1alpha1() typedresolutionv1alpha1.ResolutionV1alpha1Interface {
-	return &wrapResolutionV1alpha1{
-		dyn: w.dyn,
-	}
-}
-
-type wrapResolutionV1alpha1 struct {
-	dyn dynamic.Interface
-}
-
-func (w *wrapResolutionV1alpha1) RESTClient() rest.Interface {
-	panic("RESTClient called on dynamic client!")
-}
-
-func (w *wrapResolutionV1alpha1) ResolutionRequests(namespace string) typedresolutionv1alpha1.ResolutionRequestInterface {
-	return &wrapResolutionV1alpha1ResolutionRequestImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "resolution.tekton.dev",
-			Version:  "v1alpha1",
-			Resource: "resolutionrequests",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapResolutionV1alpha1ResolutionRequestImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedresolutionv1alpha1.ResolutionRequestInterface = (*wrapResolutionV1alpha1ResolutionRequestImpl)(nil)
-
-func (w *wrapResolutionV1alpha1ResolutionRequestImpl) Create(ctx context.Context, in *v1alpha1.ResolutionRequest, opts v1.CreateOptions) (*v1alpha1.ResolutionRequest, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "resolution.tekton.dev",
-		Version: "v1alpha1",
-		Kind:    "ResolutionRequest",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.ResolutionRequest{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapResolutionV1alpha1ResolutionRequestImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapResolutionV1alpha1ResolutionRequestImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapResolutionV1alpha1ResolutionRequestImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResolutionRequest, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.ResolutionRequest{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapResolutionV1alpha1ResolutionRequestImpl) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResolutionRequestList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.ResolutionRequestList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapResolutionV1alpha1ResolutionRequestImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResolutionRequest, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.ResolutionRequest{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapResolutionV1alpha1ResolutionRequestImpl) Update(ctx context.Context, in *v1alpha1.ResolutionRequest, opts v1.UpdateOptions) (*v1alpha1.ResolutionRequest, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "resolution.tekton.dev",
-		Version: "v1alpha1",
-		Kind:    "ResolutionRequest",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.ResolutionRequest{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapResolutionV1alpha1ResolutionRequestImpl) UpdateStatus(ctx context.Context, in *v1alpha1.ResolutionRequest, opts v1.UpdateOptions) (*v1alpha1.ResolutionRequest, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "resolution.tekton.dev",
-		Version: "v1alpha1",
-		Kind:    "ResolutionRequest",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1alpha1.ResolutionRequest{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapResolutionV1alpha1ResolutionRequestImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
-
-// ResolutionV1beta1 retrieves the ResolutionV1beta1Client
-func (w *wrapClient) ResolutionV1beta1() typedresolutionv1beta1.ResolutionV1beta1Interface {
-	return &wrapResolutionV1beta1{
-		dyn: w.dyn,
-	}
-}
-
-type wrapResolutionV1beta1 struct {
-	dyn dynamic.Interface
-}
-
-func (w *wrapResolutionV1beta1) RESTClient() rest.Interface {
-	panic("RESTClient called on dynamic client!")
-}
-
-func (w *wrapResolutionV1beta1) ResolutionRequests(namespace string) typedresolutionv1beta1.ResolutionRequestInterface {
-	return &wrapResolutionV1beta1ResolutionRequestImpl{
-		dyn: w.dyn.Resource(schema.GroupVersionResource{
-			Group:    "resolution.tekton.dev",
-			Version:  "v1beta1",
-			Resource: "resolutionrequests",
-		}),
-
-		namespace: namespace,
-	}
-}
-
-type wrapResolutionV1beta1ResolutionRequestImpl struct {
-	dyn dynamic.NamespaceableResourceInterface
-
-	namespace string
-}
-
-var _ typedresolutionv1beta1.ResolutionRequestInterface = (*wrapResolutionV1beta1ResolutionRequestImpl)(nil)
-
-func (w *wrapResolutionV1beta1ResolutionRequestImpl) Create(ctx context.Context, in *v1beta1.ResolutionRequest, opts v1.CreateOptions) (*v1beta1.ResolutionRequest, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "resolution.tekton.dev",
-		Version: "v1beta1",
-		Kind:    "ResolutionRequest",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.ResolutionRequest{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapResolutionV1beta1ResolutionRequestImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts)
-}
-
-func (w *wrapResolutionV1beta1ResolutionRequestImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts)
-}
-
-func (w *wrapResolutionV1beta1ResolutionRequestImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ResolutionRequest, error) {
-	uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.ResolutionRequest{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapResolutionV1beta1ResolutionRequestImpl) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ResolutionRequestList, error) {
-	uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.ResolutionRequestList{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapResolutionV1beta1ResolutionRequestImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ResolutionRequest, err error) {
-	uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.ResolutionRequest{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapResolutionV1beta1ResolutionRequestImpl) Update(ctx context.Context, in *v1beta1.ResolutionRequest, opts v1.UpdateOptions) (*v1beta1.ResolutionRequest, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "resolution.tekton.dev",
-		Version: "v1beta1",
-		Kind:    "ResolutionRequest",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.ResolutionRequest{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapResolutionV1beta1ResolutionRequestImpl) UpdateStatus(ctx context.Context, in *v1beta1.ResolutionRequest, opts v1.UpdateOptions) (*v1beta1.ResolutionRequest, error) {
-	in.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "resolution.tekton.dev",
-		Version: "v1beta1",
-		Kind:    "ResolutionRequest",
-	})
-	uo := &unstructured.Unstructured{}
-	if err := convert(in, uo); err != nil {
-		return nil, err
-	}
-	uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts)
-	if err != nil {
-		return nil, err
-	}
-	out := &v1beta1.ResolutionRequest{}
-	if err := convert(uo, out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (w *wrapResolutionV1beta1ResolutionRequestImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return nil, errors.New("NYI: Watch")
-}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest/resolutionrequest.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest/resolutionrequest.go
index 0ae786c677..973e1f7efa 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest/resolutionrequest.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest/resolutionrequest.go
@@ -21,15 +21,8 @@ package resolutionrequest
 import (
 	context "context"
 
-	apisresolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
-	versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
 	v1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1"
-	client "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client"
 	factory "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory"
-	resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	cache "k8s.io/client-go/tools/cache"
 	controller "knative.dev/pkg/controller"
 	injection "knative.dev/pkg/injection"
 	logging "knative.dev/pkg/logging"
@@ -37,7 +30,6 @@ import (
 
 func init() {
 	injection.Default.RegisterInformer(withInformer)
-	injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
 }
 
 // Key is used for associating the Informer inside the context.Context.
@@ -49,11 +41,6 @@ func withInformer(ctx context.Context) (context.Context, controller.Informer) {
 	return context.WithValue(ctx, Key{}, inf), inf.Informer()
 }
 
-func withDynamicInformer(ctx context.Context) context.Context {
-	inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)}
-	return context.WithValue(ctx, Key{}, inf)
-}
-
 // Get extracts the typed informer from the context.
 func Get(ctx context.Context) v1beta1.ResolutionRequestInformer {
 	untyped := ctx.Value(Key{})
@@ -63,54 +50,3 @@ func Get(ctx context.Context) v1beta1.ResolutionRequestInformer {
 	}
 	return untyped.(v1beta1.ResolutionRequestInformer)
 }
-
-type wrapper struct {
-	client versioned.Interface
-
-	namespace string
-
-	resourceVersion string
-}
-
-var _ v1beta1.ResolutionRequestInformer = (*wrapper)(nil)
-var _ resolutionv1beta1.ResolutionRequestLister = (*wrapper)(nil)
-
-func (w *wrapper) Informer() cache.SharedIndexInformer {
-	return cache.NewSharedIndexInformer(nil, &apisresolutionv1beta1.ResolutionRequest{}, 0, nil)
-}
-
-func (w *wrapper) Lister() resolutionv1beta1.ResolutionRequestLister {
-	return w
-}
-
-func (w *wrapper) ResolutionRequests(namespace string) resolutionv1beta1.ResolutionRequestNamespaceLister {
-	return &wrapper{client: w.client, namespace: namespace, resourceVersion: w.resourceVersion}
-}
-
-// SetResourceVersion allows consumers to adjust the minimum resourceVersion
-// used by the underlying client.  It is not accessible via the standard
-// lister interface, but can be accessed through a user-defined interface and
-// an implementation check e.g. rvs, ok := foo.(ResourceVersionSetter)
-func (w *wrapper) SetResourceVersion(resourceVersion string) {
-	w.resourceVersion = resourceVersion
-}
-
-func (w *wrapper) List(selector labels.Selector) (ret []*apisresolutionv1beta1.ResolutionRequest, err error) {
-	lo, err := w.client.ResolutionV1beta1().ResolutionRequests(w.namespace).List(context.TODO(), v1.ListOptions{
-		LabelSelector:   selector.String(),
-		ResourceVersion: w.resourceVersion,
-	})
-	if err != nil {
-		return nil, err
-	}
-	for idx := range lo.Items {
-		ret = append(ret, &lo.Items[idx])
-	}
-	return ret, nil
-}
-
-func (w *wrapper) Get(name string) (*apisresolutionv1beta1.ResolutionRequest, error) {
-	return w.client.ResolutionV1beta1().ResolutionRequests(w.namespace).Get(context.TODO(), name, v1.GetOptions{
-		ResourceVersion: w.resourceVersion,
-	})
-}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent/cloudeventclient.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent/cloudeventclient.go
index 0e398a60eb..90a6d89997 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent/cloudeventclient.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent/cloudeventclient.go
@@ -33,7 +33,6 @@ func init() {
 	injection.Default.RegisterClient(func(ctx context.Context, _ *rest.Config) context.Context {
 		return withCloudEventClient(ctx)
 	})
-	injection.Dynamic.RegisterDynamicClient(withCloudEventClient)
 }
 
 // ceKey is used to associate the CloudEventClient inside the context.Context
diff --git a/vendor/github.com/tektoncd/plumbing/.gitignore b/vendor/github.com/tektoncd/plumbing/.gitignore
index 861c813c13..38f8cb40f2 100644
--- a/vendor/github.com/tektoncd/plumbing/.gitignore
+++ b/vendor/github.com/tektoncd/plumbing/.gitignore
@@ -12,3 +12,4 @@
 .venv
 
 **/.bin
+**/.DS_Store
diff --git a/vendor/github.com/tektoncd/plumbing/scripts/README.md b/vendor/github.com/tektoncd/plumbing/scripts/README.md
index 94c6cb638e..d87f32f4b0 100644
--- a/vendor/github.com/tektoncd/plumbing/scripts/README.md
+++ b/vendor/github.com/tektoncd/plumbing/scripts/README.md
@@ -243,12 +243,12 @@ Where:
 
 - `version` is the release number i.e. `v0.13.2`
 
-- `bucket` is the URL of the bucket where the release fiel is stored, `gs://tekton-releases` by default
+- `bucket` is the URL of the bucket where the release file is stored, `gs://tekton-releases` by default
 
-- `extra-path` is the root path within the bucket where release are stored, empty by default
+- `extra-path` is the root path within the bucket where releases are stored, empty by default
 
 - `file` is the name of the release file, `release.yaml` by default
 
 - `post-file` is the name of the 2nd release file, none by default, `interceptors.yaml` by default for triggers
 
-To summarize, the deployment job will look for the release file into `<bucket>/<extra-path>/<project>/previous/<version>/<file>`
+To summarize, the deployment job will look for the release file in `<bucket>/<extra-path>/<project>/previous/<version>/<file>`
diff --git a/vendor/github.com/tektoncd/plumbing/scripts/deploy-release.sh b/vendor/github.com/tektoncd/plumbing/scripts/deploy-release.sh
index 5511d3514c..ae17ed4a69 100644
--- a/vendor/github.com/tektoncd/plumbing/scripts/deploy-release.sh
+++ b/vendor/github.com/tektoncd/plumbing/scripts/deploy-release.sh
@@ -65,11 +65,7 @@ if [ -z "$TEKTON_VERSION" ]; then
 fi
 RELEASE_BUCKET=${RELEASE_BUCKET_OPT:-gs://tekton-releases}
 if [ -z "$RELEASE_FILE" ]; then
-    if [ "$TEKTON_PROJECT" == "dashboard" ]; then
-        RELEASE_FILE="tekton-dashboard-release-readonly.yaml"
-    else
-        RELEASE_FILE="release.yaml"
-    fi
+    RELEASE_FILE="release.yaml"
 fi
 if [ -z "$POST_RELEASE_FILE" ]; then
     if [ "$TEKTON_PROJECT" == "triggers" ]; then
@@ -78,6 +74,7 @@ if [ -z "$POST_RELEASE_FILE" ]; then
 fi
 CONTEXT=${CONTEXT:-gke_tekton-nightly_europe-north1-a_robocat}
 CLUSTER_RESOURCE=${CLUSTER_RESOURCE:-dogfooding-tekton-deployer}
+TARGET_NAMESPACE=${TARGET_NAMESPACE:-tekton-pipelines}
 
 # Deploy the release
 # cat <<EOF | tee
@@ -107,7 +104,7 @@ spec:
             "trigger-template": "tekton",
             "params": {
               "target": {
-                "namespace": "tekton-pipelines",
+                "namespace": "$TARGET_NAMESPACE",
                 "cluster-resource": "$CLUSTER_RESOURCE"
               },
               "tekton": {
diff --git a/vendor/github.com/tektoncd/plumbing/scripts/verified-catalog-e2e-common.sh b/vendor/github.com/tektoncd/plumbing/scripts/verified-catalog-e2e-common.sh
new file mode 100644
index 0000000000..34e75b346b
--- /dev/null
+++ b/vendor/github.com/tektoncd/plumbing/scripts/verified-catalog-e2e-common.sh
@@ -0,0 +1,301 @@
+#!/usr/bin/env bash
+
+# Copyright 2023 The Tekton Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Helper functions for E2E tests.
+source $(dirname ${BASH_SOURCE})/e2e-tests.sh
+
+# Define a custom kubectl path if you like
+KUBECTL_CMD=${KUBECTL_CMD:-kubectl}
+
+# Dependency checks
+## Bash must be 4 or greater to support associative arrays
+if [ "${BASH_VERSINFO:-0}" -lt 4 ];then
+    echo "this script must be executed in bash >= 4"
+    exit 1
+fi
+
+## Commands
+function require_command() {
+    local cmd
+    for cmd in "$@"; do
+        if ! command -v "${cmd}" &> /dev/null;then
+            echo "required command '${cmd}' could not be found"
+            exit 1
+        fi
+    done
+}
+
+require_command ${KUBECTL_CMD} python3
+
+function test_yaml_can_install() {
+    # Validate that all the Task CRDs in this repo are valid by creating them in a NS.
+    readonly ns="task-ns"
+    all_tasks="$*"
+    ${KUBECTL_CMD} create ns "${ns}" || true
+    local runtest
+    for runtest in ${all_tasks}; do
+        # remove task/ from beginning
+        local runtestdir=${runtest#*/}
+        # remove /tests from end
+        local testname=${runtestdir%%/*}
+        runtest=${runtest//tests}
+
+        # in case a task is being removed, its directory
+        # doesn't exist, so skip the YAML check
+        [ ! -d "${runtest%%/*}/${testname}" ] && continue
+
+        input="${runtest}${testname}.yaml"
+
+        echo "Checking ${testname}"
+        ${KUBECTL_CMD} -n ${ns} apply -f <(sed "s/namespace:.*/namespace: task-ns/" "${input}")
+    done
+
+    ${KUBECTL_CMD} delete ns ${ns} >/dev/null || true
+}
+
+function create_task() {
+    local tns=${1}
+    local taskdir=${2}
+
+    # In case of rerun it's fine to ignore this error
+    ${KUBECTL_CMD} create namespace ${tns} >/dev/null 2>/dev/null || :
+
+    # Install the task itself first. We can only have one YAML file
+    yaml=$(printf  ${taskdir}/*.yaml)
+    started=$(date '+%Hh%M:%S')
+    echo "${started} STARTING: ${testname}/${version} "
+    # dry-run this YAML to validate and also get formatting side-effects.
+    ${KUBECTL_CMD} -n ${tns} create -f ${yaml} --dry-run=client -o yaml >${TMPF}
+
+    # Make sure we have deleted the content, this is in case of rerun
+    # and namespace hasn't been cleaned up or there is some Cluster*
+    # stuff, which really should not be allowed.
+    ${KUBECTL_CMD} -n ${tns} delete -f ${TMPF} >/dev/null 2>/dev/null || true
+    ${KUBECTL_CMD} -n ${tns} create -f ${TMPF}
+
+    # Install resource and run
+    for yaml in ${runtest}/*.yaml;do
+        cp ${yaml} ${TMPF}
+
+        # Make sure we have deleted the content, this is in case of rerun
+        # and namespace hasn't been cleaned up or there is some Cluster*
+        # stuff, which really should not be allowed.
+        ${KUBECTL_CMD} -n ${tns} delete -f ${TMPF} >/dev/null 2>/dev/null || true
+        ${KUBECTL_CMD} -n ${tns} create -f ${TMPF}
+    done
+}
+
+# Check whether the test folder exists inside the task dir;
+# if not, skip this task and move on to the next one (if any)
+function check_test_folder_exist() {
+    local runtest=${1}
+    local taskdir=${runtest%/*}
+
+    [ ! -d "${runtest}" ] && return 1
+
+    ls ${taskdir}/*.yaml 2>/dev/null >/dev/null || return 1
+
+    return 0
+}
+
+function test_task_creation() {
+    local runtest
+    declare -A task_to_wait_for
+
+    for runtest in $@;do
+        # remove task/ from beginning
+        local runtestdir=${runtest#*/}
+        # remove /0.1/tests from end
+        local testname=${runtestdir%%/*}
+        # remove /tests from end
+        local taskdir=${runtest%/*}
+        local version=$(basename $(basename $(dirname $runtest)))
+        local tns="${testname}-${version}"
+
+        # check whether the test folder exists inside the task dir;
+        # if not, skip this task and move on to the next one (if any)
+        check_test_folder_exist ${runtest} || continue
+
+        create_task "${tns}" "${taskdir}"
+
+        task_to_wait_for["$testname/${version}"]="${tns}|$started" 
+    done
+
+    # This would be better refactored into a function, but bash makes it too ugly
+    # to pass a hashmap to a function: https://stackoverflow.com/a/17557904/145125
+    local cnt=0
+    local all_status=''
+    local reason=''
+    local maxloop=60 # 10 minutes max
+
+    set +x
+    while true;do
+        # If we have timed out, show failures for whatever remains in
+        # task_to_wait_for; we assume only the first one fails here.
+        [[ ${cnt} == "${maxloop}" ]] && {
+            for testname in "${!task_to_wait_for[@]}";do
+                target_ns=${task_to_wait_for[$testname]}
+                show_failure "${testname}" "${target_ns}"
+            done
+        }
+        [[ -z ${task_to_wait_for[*]} ]] && {
+            break
+        }
+
+        for testname in "${!task_to_wait_for[@]}";do
+            target_ns=${task_to_wait_for[$testname]%|*}
+            started=${task_to_wait_for[$testname]#*|}
+            # sometimes we don't get all_status and reason in one go, so
+            # retry for up to 10 iterations until both are set
+            for tektontype in pipelinerun taskrun;do
+                for _ in {1..10}; do
+                    all_status=$(${KUBECTL_CMD} get -n ${target_ns} ${tektontype} --output=jsonpath='{.items[*].status.conditions[*].status}')
+                    reason=$(${KUBECTL_CMD} get -n ${target_ns} ${tektontype} --output=jsonpath='{.items[*].status.conditions[*].reason}')
+                    [[ ! -z ${all_status} ]] && [[ ! -z ${reason} ]] && break
+                    sleep 1
+                done
+                # No need to check taskrun if pipelinerun has been set
+                [[ ! -z ${all_status} ]] && [[ ! -z ${reason} ]] && break
+            done
+
+            if [[ -z ${all_status} || -z ${reason} ]];then
+                echo "Could not find a created taskrun or pipelinerun in ${target_ns}"
+            fi
+
+            breakit=True
+            for status in ${all_status};do
+                [[ ${status} == *ERROR || ${reason} == *Fail* || ${reason} == Couldnt* ]] && show_failure ${testname} ${target_ns}
+                if [[ ${status} != True ]];then
+                    breakit=
+                fi
+            done
+
+            if [[ ${breakit} == True ]];then
+                unset task_to_wait_for[$testname]
+                [[ -z ${CATALOG_TEST_SKIP_CLEANUP} ]] && ${KUBECTL_CMD} delete ns ${target_ns} >/dev/null
+                echo "${started}::$(date '+%Hh%M:%S') SUCCESS: ${testname} testrun has successfully executed" ;
+            fi
+        done
+
+        sleep 10
+        cnt=$((cnt+1))
+    done
+    set -x 
+}
+
+function show_failure() {
+    local testname=$1 tns=$2
+
+    echo "FAILED: ${testname} task has failed to come back properly" ;
+    echo "--- Task Dump"
+    ${KUBECTL_CMD} get -n ${tns} task -o yaml
+    echo "--- Pipeline Dump"
+    ${KUBECTL_CMD} get -n ${tns} pipeline -o yaml
+    echo "--- PipelineRun Dump"
+    ${KUBECTL_CMD} get -n ${tns} pipelinerun -o yaml
+    echo "--- TaskRun Dump"
+    ${KUBECTL_CMD} get -n ${tns} taskrun -o yaml
+    echo "--- Container Logs"
+    for pod in $(${KUBECTL_CMD} get pod -o name -n ${tns}); do
+        ${KUBECTL_CMD} logs --all-containers -n ${tns} ${pod} || true
+    done
+    exit 1
+}
+
+function install_pipeline_crd() {
+  local latestreleaseyaml
+  echo ">> Deploying Tekton Pipelines"
+  if [[ -n ${RELEASE_YAML} ]];then
+	latestreleaseyaml=${RELEASE_YAML}
+  else
+    latestreleaseyaml="https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml"
+  fi
+  [[ -z ${latestreleaseyaml} ]] && fail_test "Could not get latest released release.yaml"
+  ${KUBECTL_CMD} apply -f ${latestreleaseyaml} ||
+      fail_test "Build pipeline installation failed"
+
+  # Make sure that everything is cleaned up in the current namespace.
+  for res in pipelineresources tasks pipelines taskruns pipelineruns; do
+    ${KUBECTL_CMD} delete --ignore-not-found=true ${res}.tekton.dev --all
+  done
+
+  # Wait for pods to be running in the namespaces we are deploying to
+  wait_until_pods_running tekton-pipelines || fail_test "Tekton Pipeline did not come up"
+}
+
+function test_tasks {
+    local cnt=0
+    local task_to_tests=""
+
+    for runtest in $@;do
+        task_to_tests="${task_to_tests} ${runtest}"
+        if [[ ${cnt} == "${MAX_NUMBERS_OF_PARALLEL_TASKS}" ]];then
+            test_task_creation "${task_to_tests}"
+            cnt=0
+            task_to_tests=""
+            continue
+        fi
+        cnt=$((cnt+1))
+    done
+
+    # in case there are some remaining tasks
+    if [[ -n ${task_to_tests} ]];then
+        test_task_creation "${task_to_tests}"
+    fi
+}
+
+function convert_directory_structure() {
+    # Copy the resources to a temp directory for further processing.
+    # Temp directory structure: ${TMPD}/${resource}/${version} (e.g. /tmp/xxx/task/golang-build/dev)
+    if [[ ! -z ${TEST_RUN_NIGHTLY_TESTS} ]];then
+        # Iterate through all releases when running the nightly test, copying the content of each release (version)
+        # to a flattened temp directory
+        git fetch --tags --force
+        cur_branch=$(git rev-parse --abbrev-ref HEAD)
+
+        for version_tag in $(git tag) 
+        do
+            git checkout "tags/${version_tag}"
+
+            version="$( echo $version_tag | tr '.' '-' )"
+            resources=$(ls -d task/*)
+
+            for resource in ${resources};do
+                cp_dir=${TMPD}/${resource}/${version}
+                mkdir -p ${cp_dir}
+                cp -r ./${resource}/* ${cp_dir}
+            done
+        done
+        git checkout ${cur_branch}
+    else 
+        # If the test is triggered as a PR merge check, just test the content at the branch head
+        version="dev"
+        resources=$(ls -d task/*)
+
+        for resource in ${resources};do
+            cp_dir=${TMPD}/${resource}/${version}
+            mkdir -p ${cp_dir}
+            cp -r ./${resource}/* ${cp_dir}
+        done
+    fi
+}
+
+function echo_local_test_helper_info {
+    cat <<EOF
+This script runs a single task to help developers test a task directly
+without sending it to CI.
+You need to specify the task name as the argument. For example:
+${0} golang-build
+will run the tests for golang-build. You can add the --nightly flag to do a local nightly test.
+EOF
+}
diff --git a/vendor/github.com/theupdateframework/go-tuf/.golangci.yml b/vendor/github.com/theupdateframework/go-tuf/.golangci.yml
index 570c05d60d..992c1190a5 100644
--- a/vendor/github.com/theupdateframework/go-tuf/.golangci.yml
+++ b/vendor/github.com/theupdateframework/go-tuf/.golangci.yml
@@ -1,9 +1,3 @@
-run:
-  # Lint using Go 1.17, since some linters are disabled by default for Go 1.18
-  # until generics are supported.
-  # See https://github.com/golangci/golangci-lint/issues/2649
-  go: '1.17'
-
 linters:
   disable-all: true
   enable:
diff --git a/vendor/github.com/theupdateframework/go-tuf/README.md b/vendor/github.com/theupdateframework/go-tuf/README.md
index 125978c1cf..fe2836743d 100644
--- a/vendor/github.com/theupdateframework/go-tuf/README.md
+++ b/vendor/github.com/theupdateframework/go-tuf/README.md
@@ -35,7 +35,7 @@ The directories contain the following files:
 `go-tuf` is tested on Go versions 1.18.
 
 ```bash
-go get github.com/theupdateframework/go-tuf/cmd/tuf
+go install github.com/theupdateframework/go-tuf/cmd/tuf@latest
 ```
 
 ### Commands
@@ -653,3 +653,10 @@ install tuf`). To update the data for these tests requires Docker and make (see
 test data [README.md](client/python_interop/testdata/README.md) for details).
 
 Please see [CONTRIBUTING.md](docs/CONTRIBUTING.md) for contribution guidelines before making your first contribution!
+
+## Comparison to other implementations
+
+There are TUF implementations in a variety of programming languages. Some other Go implementations of TUF include:
+
+* [Notary](https://github.com/notaryproject/notary): A version of TUF designed specifically for publishing and managing trusted collections of content. It was used by Docker Content Trust, and has since been superseded by the [Notation](https://github.com/notaryproject/notation) project. In contrast, go-tuf is a direct implementation of TUF and has been updated to conform to 1.0.0 of the TUF specification.
+
diff --git a/vendor/github.com/theupdateframework/go-tuf/data/types.go b/vendor/github.com/theupdateframework/go-tuf/data/types.go
index 3e1806bde5..eb00489b67 100644
--- a/vendor/github.com/theupdateframework/go-tuf/data/types.go
+++ b/vendor/github.com/theupdateframework/go-tuf/data/types.go
@@ -24,9 +24,12 @@ type HashAlgorithm string
 const (
 	KeyIDLength = sha256.Size * 2
 
-	KeyTypeEd25519           KeyType = "ed25519"
-	KeyTypeECDSA_SHA2_P256   KeyType = "ecdsa-sha2-nistp256"
-	KeyTypeRSASSA_PSS_SHA256 KeyType = "rsa"
+	KeyTypeEd25519 KeyType = "ed25519"
+	// From version 1.0.32, the reference implementation defines 'ecdsa',
+	// not 'ecdsa-sha2-nistp256' for NIST P-256 curves.
+	KeyTypeECDSA_SHA2_P256         KeyType = "ecdsa"
+	KeyTypeECDSA_SHA2_P256_OLD_FMT KeyType = "ecdsa-sha2-nistp256"
+	KeyTypeRSASSA_PSS_SHA256       KeyType = "rsa"
 
 	KeySchemeEd25519           KeyScheme = "ed25519"
 	KeySchemeECDSA_SHA2_P256   KeyScheme = "ecdsa-sha2-nistp256"
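With this change, newly generated metadata uses the `"ecdsa"` key type, while the old `"ecdsa-sha2-nistp256"` spelling stays available as `KeyTypeECDSA_SHA2_P256_OLD_FMT`. A small sketch of how a consumer might normalize the legacy spelling follows; the `normalizeKeyType` helper is hypothetical, only the two constants come from the change above.

```go
package main

import (
	"fmt"

	"github.com/theupdateframework/go-tuf/data"
)

// normalizeKeyType is a hypothetical helper: it maps the pre-1.0.32 spelling of
// the NIST P-256 ECDSA key type onto the new canonical "ecdsa" value.
func normalizeKeyType(t data.KeyType) data.KeyType {
	if t == data.KeyTypeECDSA_SHA2_P256_OLD_FMT {
		return data.KeyTypeECDSA_SHA2_P256 // "ecdsa"
	}
	return t
}

func main() {
	fmt.Println(normalizeKeyType("ecdsa-sha2-nistp256")) // ecdsa
	fmt.Println(normalizeKeyType("ed25519"))             // ed25519
}
```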
diff --git a/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go b/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go
index 4d174d61f9..b884d611e4 100644
--- a/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go
+++ b/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go
@@ -3,6 +3,10 @@
 //
 // It uses scrypt derive a key from the passphrase and the NaCl secret box
 // cipher for authenticated encryption.
+//
+// Deprecated: The encrypted package from go-tuf has moved to
+// https://github.com/secure-systems-lab/go-securesystemslib and is deprecated here.
+// Use github.com/secure-systems-lab/go-securesystemslib/encrypted instead.
 package encrypted
 
 import (
@@ -23,13 +27,46 @@ const (
 	boxNonceSize = 24
 )
 
+// KDFParameterStrength defines the KDF parameter strength level to be used for
+// encryption key derivation.
+type KDFParameterStrength uint8
+
 const (
-	// N parameter was chosen to be ~100ms of work using the default implementation
-	// on the 2.3GHz Core i7 Haswell processor in a late-2013 Apple Retina Macbook
-	// Pro (it takes ~113ms).
-	scryptN = 32768
-	scryptR = 8
-	scryptP = 1
+	// Legacy defines legacy scrypt parameters (N:2^15, r:8, p:1)
+	Legacy KDFParameterStrength = iota + 1
+	// Standard defines standard scrypt parameters, which target roughly 100ms of computation (N:2^16, r:8, p:1)
+	Standard
+	// OWASP defines OWASP recommended scrypt parameters (N:2^17, r:8, p:1)
+	OWASP
+)
+
+var (
+	// legacyParams represents old scrypt derivation parameters for backward
+	// compatibility.
+	legacyParams = scryptParams{
+		N: 32768, // 2^15
+		R: 8,
+		P: 1,
+	}
+
+	// standardParams defines scrypt parameters based on the scrypt creator's
+	// recommendation to keep key derivation time to roughly 100ms.
+	standardParams = scryptParams{
+		N: 65536, // 2^16
+		R: 8,
+		P: 1,
+	}
+
+	// owaspParams defines scrypt parameters recommended by OWASP
+	owaspParams = scryptParams{
+		N: 131072, // 2^17
+		R: 8,
+		P: 1,
+	}
+
+	// defaultParams defines scrypt parameters which will be used to generate a
+	// new key.
+	defaultParams = standardParams
 )
 
 const (
@@ -49,19 +86,33 @@ type scryptParams struct {
 	P int `json:"p"`
 }
 
-func newScryptKDF() (scryptKDF, error) {
+func (sp *scryptParams) Equal(in *scryptParams) bool {
+	return in != nil && sp.N == in.N && sp.P == in.P && sp.R == in.R
+}
+
+func newScryptKDF(level KDFParameterStrength) (scryptKDF, error) {
 	salt := make([]byte, saltSize)
 	if err := fillRandom(salt); err != nil {
-		return scryptKDF{}, err
+		return scryptKDF{}, fmt.Errorf("unable to generate a random salt: %w", err)
+	}
+
+	var params scryptParams
+	switch level {
+	case Legacy:
+		params = legacyParams
+	case Standard:
+		params = standardParams
+	case OWASP:
+		params = owaspParams
+	default:
+		// Fallback to default parameters
+		params = defaultParams
 	}
+
 	return scryptKDF{
-		Name: nameScrypt,
-		Params: scryptParams{
-			N: scryptN,
-			R: scryptR,
-			P: scryptP,
-		},
-		Salt: salt,
+		Name:   nameScrypt,
+		Params: params,
+		Salt:   salt,
 	}, nil
 }
 
@@ -79,9 +130,14 @@ func (s *scryptKDF) Key(passphrase []byte) ([]byte, error) {
 // be. If we do not do this, an attacker could cause a DoS by tampering with
 // them.
 func (s *scryptKDF) CheckParams() error {
-	if s.Params.N != scryptN || s.Params.R != scryptR || s.Params.P != scryptP {
-		return errors.New("encrypted: unexpected kdf parameters")
+	switch {
+	case legacyParams.Equal(&s.Params):
+	case standardParams.Equal(&s.Params):
+	case owaspParams.Equal(&s.Params):
+	default:
+		return errors.New("unsupported scrypt parameters")
 	}
+
 	return nil
 }
 
@@ -151,7 +207,14 @@ func (s *secretBoxCipher) Decrypt(ciphertext, key []byte) ([]byte, error) {
 // Encrypt takes a passphrase and plaintext, and returns a JSON object
 // containing ciphertext and the details necessary to decrypt it.
 func Encrypt(plaintext, passphrase []byte) ([]byte, error) {
-	k, err := newScryptKDF()
+	return EncryptWithCustomKDFParameters(plaintext, passphrase, Standard)
+}
+
+// EncryptWithCustomKDFParameters takes a passphrase, the plaintext and a KDF
+// parameter level (Legacy, Standard, or OWASP), and returns a JSON object
+// containing ciphertext and the details necessary to decrypt it.
+func EncryptWithCustomKDFParameters(plaintext, passphrase []byte, kdfLevel KDFParameterStrength) ([]byte, error) {
+	k, err := newScryptKDF(kdfLevel)
 	if err != nil {
 		return nil, err
 	}
@@ -176,11 +239,16 @@ func Encrypt(plaintext, passphrase []byte) ([]byte, error) {
 
 // Marshal encrypts the JSON encoding of v using passphrase.
 func Marshal(v interface{}, passphrase []byte) ([]byte, error) {
+	return MarshalWithCustomKDFParameters(v, passphrase, Standard)
+}
+
+// MarshalWithCustomKDFParameters encrypts the JSON encoding of v using passphrase.
+func MarshalWithCustomKDFParameters(v interface{}, passphrase []byte, kdfLevel KDFParameterStrength) ([]byte, error) {
 	data, err := json.MarshalIndent(v, "", "\t")
 	if err != nil {
 		return nil, err
 	}
-	return Encrypt(data, passphrase)
+	return EncryptWithCustomKDFParameters(data, passphrase, kdfLevel)
 }
 
 // Decrypt takes a JSON-encoded ciphertext object encrypted using Encrypt and
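A short usage sketch of the new KDF-strength API added above, assuming the vendored `encrypted` package as shown (and keeping in mind the package itself is now deprecated in favor of `go-securesystemslib/encrypted`); the passphrase and payload are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/theupdateframework/go-tuf/encrypted"
)

func main() {
	passphrase := []byte("correct horse battery staple")

	// OWASP picks the strongest of the three predefined scrypt levels
	// (N:2^17, r:8, p:1); plain Encrypt defaults to Standard.
	ciphertext, err := encrypted.EncryptWithCustomKDFParameters(
		[]byte(`{"hello":"world"}`), passphrase, encrypted.OWASP)
	if err != nil {
		log.Fatal(err)
	}

	// Decrypt reads the KDF parameters back out of the ciphertext envelope.
	plaintext, err := encrypted.Decrypt(ciphertext, passphrase)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(plaintext))
}
```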
diff --git a/vendor/github.com/theupdateframework/go-tuf/local_store.go b/vendor/github.com/theupdateframework/go-tuf/local_store.go
index fee03f314e..b59721e61c 100644
--- a/vendor/github.com/theupdateframework/go-tuf/local_store.go
+++ b/vendor/github.com/theupdateframework/go-tuf/local_store.go
@@ -12,8 +12,8 @@ import (
 	"path/filepath"
 	"strings"
 
+	"github.com/secure-systems-lab/go-securesystemslib/encrypted"
 	"github.com/theupdateframework/go-tuf/data"
-	"github.com/theupdateframework/go-tuf/encrypted"
 	"github.com/theupdateframework/go-tuf/internal/fsutil"
 	"github.com/theupdateframework/go-tuf/internal/sets"
 	"github.com/theupdateframework/go-tuf/pkg/keys"
diff --git a/vendor/github.com/theupdateframework/go-tuf/pkg/keys/ecdsa.go b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/ecdsa.go
index ee93e33007..9740d1f33c 100644
--- a/vendor/github.com/theupdateframework/go-tuf/pkg/keys/ecdsa.go
+++ b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/ecdsa.go
@@ -20,7 +20,9 @@ func init() {
 	// Note: we use LoadOrStore here to prevent accidentally overriding the
 	// an explicit deprecated ECDSA verifier.
 	// TODO: When deprecated ECDSA is removed, this can switch back to Store.
+	VerifierMap.LoadOrStore(data.KeyTypeECDSA_SHA2_P256_OLD_FMT, NewEcdsaVerifier)
 	VerifierMap.LoadOrStore(data.KeyTypeECDSA_SHA2_P256, NewEcdsaVerifier)
+	SignerMap.Store(data.KeyTypeECDSA_SHA2_P256_OLD_FMT, newEcdsaSigner)
 	SignerMap.Store(data.KeyTypeECDSA_SHA2_P256, newEcdsaSigner)
 }
 
diff --git a/vendor/github.com/theupdateframework/go-tuf/repo.go b/vendor/github.com/theupdateframework/go-tuf/repo.go
index c6a23deea4..db2ac66369 100644
--- a/vendor/github.com/theupdateframework/go-tuf/repo.go
+++ b/vendor/github.com/theupdateframework/go-tuf/repo.go
@@ -782,11 +782,13 @@ func (r *Repo) setMeta(roleFilename string, meta interface{}) error {
 	return r.local.SetMeta(roleFilename, b)
 }
 
-// SignPayload signs the given payload using the key(s) associated with role.
+// CanonicalizeAndSign canonicalizes the signed portion of signed, then signs it using the key(s) associated with role.
+//
+// It appends the signature to signed.
 //
 // It returns the total number of keys used for signing, 0 (along with
 // ErrNoKeys) if no keys were found, or -1 (along with an error) in error cases.
-func (r *Repo) SignPayload(role string, payload *data.Signed) (int, error) {
+func (r *Repo) CanonicalizeAndSign(role string, signed *data.Signed) (int, error) {
 	keys, err := r.signersForRole(role)
 	if err != nil {
 		return -1, err
@@ -795,13 +797,46 @@ func (r *Repo) SignPayload(role string, payload *data.Signed) (int, error) {
 		return 0, ErrNoKeys{role}
 	}
 	for _, k := range keys {
-		if err = sign.Sign(payload, k); err != nil {
+		if err = sign.Sign(signed, k); err != nil {
 			return -1, err
 		}
 	}
 	return len(keys), nil
 }
 
+// SignPayload canonicalizes the signed portion of payload, then signs it using the key(s) associated with role.
+//
+// It returns the total number of keys used for signing, 0 (along with
+// ErrNoKeys) if no keys were found, or -1 (along with an error) in error cases.
+//
+// DEPRECATED: please use CanonicalizeAndSign instead.
+func (r *Repo) SignPayload(role string, payload *data.Signed) (int, error) {
+	return r.CanonicalizeAndSign(role, payload)
+}
+
+// SignRaw signs the given (pre-canonicalized) payload using the key(s) associated with role.
+//
+// It returns the new data.Signatures.
+func (r *Repo) SignRaw(role string, payload []byte) ([]data.Signature, error) {
+	keys, err := r.signersForRole(role)
+	if err != nil {
+		return nil, err
+	}
+	if len(keys) == 0 {
+		return nil, ErrNoKeys{role}
+	}
+
+	allSigs := make([]data.Signature, 0, len(keys))
+	for _, k := range keys {
+		sigs, err := sign.MakeSignatures(payload, k)
+		if err != nil {
+			return nil, err
+		}
+		allSigs = append(allSigs, sigs...)
+	}
+	return allSigs, nil
+}
+
 func (r *Repo) Sign(roleFilename string) error {
 	signed, err := r.SignedMeta(roleFilename)
 	if err != nil {
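A hedged sketch of the renamed signing entry points in this hunk, assuming a `*tuf.Repo` has already been constructed (for example via `tuf.NewRepo` over a local store); the `signTargets` helper and the "targets" role are illustrative.

```go
package example

import (
	"fmt"

	tuf "github.com/theupdateframework/go-tuf"
	"github.com/theupdateframework/go-tuf/data"
)

// signTargets is a hypothetical helper: CanonicalizeAndSign appends signatures
// to the metadata in place, while SignRaw returns detached signatures over a
// payload the caller has already canonicalized.
func signTargets(repo *tuf.Repo, signed *data.Signed, canonical []byte) error {
	n, err := repo.CanonicalizeAndSign("targets", signed)
	if err != nil {
		return err
	}
	fmt.Printf("appended signatures from %d key(s)\n", n)

	sigs, err := repo.SignRaw("targets", canonical)
	if err != nil {
		return err
	}
	fmt.Printf("produced %d detached signature(s)\n", len(sigs))
	return nil
}
```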
diff --git a/vendor/github.com/theupdateframework/go-tuf/requirements-test.txt b/vendor/github.com/theupdateframework/go-tuf/requirements-test.txt
index 00f20734bf..23822eecf4 100644
--- a/vendor/github.com/theupdateframework/go-tuf/requirements-test.txt
+++ b/vendor/github.com/theupdateframework/go-tuf/requirements-test.txt
@@ -1,5 +1,5 @@
-iso8601==1.1.0
-requests==2.28.2
-securesystemslib==0.26.0
+iso8601==2.0.0
+requests==2.31.0
+securesystemslib==0.28.0
 six==1.16.0
-tuf==2.0.0
+tuf==3.0.0
diff --git a/vendor/github.com/theupdateframework/go-tuf/sign/sign.go b/vendor/github.com/theupdateframework/go-tuf/sign/sign.go
index 6b15b6b4f7..e31b5465d9 100644
--- a/vendor/github.com/theupdateframework/go-tuf/sign/sign.go
+++ b/vendor/github.com/theupdateframework/go-tuf/sign/sign.go
@@ -2,46 +2,65 @@ package sign
 
 import (
 	"encoding/json"
+	"errors"
 
 	"github.com/secure-systems-lab/go-securesystemslib/cjson"
 	"github.com/theupdateframework/go-tuf/data"
 	"github.com/theupdateframework/go-tuf/pkg/keys"
 )
 
-func Sign(s *data.Signed, k keys.Signer) error {
+const maxSignatures = 1024
+
+// MakeSignatures creates data.Signatures for canonical using signer k.
+//
+// There will be one data.Signature for each of k's IDs, each with the same
+// signature data.
+func MakeSignatures(canonical []byte, k keys.Signer) ([]data.Signature, error) {
+	sigData, err := k.SignMessage(canonical)
+	if err != nil {
+		return nil, err
+	}
+
 	ids := k.PublicData().IDs()
-	signatures := make([]data.Signature, 0, len(s.Signatures)+1)
-	for _, sig := range s.Signatures {
-		found := false
-		for _, id := range ids {
-			if sig.KeyID == id {
-				found = true
-				break
-			}
-		}
-		if !found {
-			signatures = append(signatures, sig)
-		}
+	signatures := make([]data.Signature, 0, len(ids))
+	for _, id := range ids {
+		signatures = append(signatures, data.Signature{
+			KeyID:     id,
+			Signature: sigData,
+		})
 	}
 
+	return signatures, nil
+}
+
+// Sign signs the to-be-signed part of s using the signer k.
+//
+// The new signature(s) (one for each of k's key IDs) are appended to
+// s.Signatures. Existing signatures for the Key IDs are replaced.
+func Sign(s *data.Signed, k keys.Signer) error {
 	canonical, err := cjson.EncodeCanonical(s.Signed)
 	if err != nil {
 		return err
 	}
 
-	sig, err := k.SignMessage(canonical)
+	size := len(s.Signatures)
+	if size > maxSignatures-1 {
+		return errors.New("value too large")
+	}
+	signatures := make([]data.Signature, 0, size+1)
+	for _, oldSig := range s.Signatures {
+		if !k.PublicData().ContainsID(oldSig.KeyID) {
+			signatures = append(signatures, oldSig)
+		}
+	}
+
+	newSigs, err := MakeSignatures(canonical, k)
 	if err != nil {
 		return err
 	}
+	signatures = append(signatures, newSigs...)
 
 	s.Signatures = signatures
-	for _, id := range ids {
-		s.Signatures = append(s.Signatures, data.Signature{
-			KeyID:     id,
-			Signature: sig,
-		})
-	}
-
 	return nil
 }
 
diff --git a/vendor/github.com/tinylib/msgp/LICENSE b/vendor/github.com/tinylib/msgp/LICENSE
new file mode 100644
index 0000000000..14d60424e8
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/LICENSE
@@ -0,0 +1,8 @@
+Copyright (c) 2014 Philip Hofer
+Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
new file mode 100644
index 0000000000..d2a66857be
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
@@ -0,0 +1,25 @@
+//go:build linux && !appengine && !tinygo
+// +build linux,!appengine,!tinygo
+
+package msgp
+
+import (
+	"os"
+	"syscall"
+)
+
+func adviseRead(mem []byte) {
+	syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED)
+}
+
+func adviseWrite(mem []byte) {
+	syscall.Madvise(mem, syscall.MADV_SEQUENTIAL)
+}
+
+func fallocate(f *os.File, sz int64) error {
+	err := syscall.Fallocate(int(f.Fd()), 0, 0, sz)
+	if err == syscall.ENOTSUP {
+		return f.Truncate(sz)
+	}
+	return err
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go
new file mode 100644
index 0000000000..1b6ed57277
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/advise_other.go
@@ -0,0 +1,18 @@
+//go:build (!linux && !tinygo) || appengine
+// +build !linux,!tinygo appengine
+
+package msgp
+
+import (
+	"os"
+)
+
+// TODO: darwin, BSD support
+
+func adviseRead(mem []byte) {}
+
+func adviseWrite(mem []byte) {}
+
+func fallocate(f *os.File, sz int64) error {
+	return f.Truncate(sz)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go
new file mode 100644
index 0000000000..a0434c7ea1
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/circular.go
@@ -0,0 +1,39 @@
+package msgp
+
+type timer interface {
+	StartTimer()
+	StopTimer()
+}
+
+// EndlessReader is an io.Reader
+// that loops over the same data
+// endlessly. It is used for benchmarking.
+type EndlessReader struct {
+	tb     timer
+	data   []byte
+	offset int
+}
+
+// NewEndlessReader returns a new endless reader
+func NewEndlessReader(b []byte, tb timer) *EndlessReader {
+	return &EndlessReader{tb: tb, data: b, offset: 0}
+}
+
+// Read implements io.Reader. In practice, it
+// always returns (len(p), nil), although it
+// fills the supplied slice while the benchmark
+// timer is stopped.
+func (c *EndlessReader) Read(p []byte) (int, error) {
+	c.tb.StopTimer()
+	var n int
+	l := len(p)
+	m := len(c.data)
+	for n < l {
+		nn := copy(p[n:], c.data[c.offset:])
+		n += nn
+		c.offset += nn
+		c.offset %= m
+	}
+	c.tb.StartTimer()
+	return n, nil
+}
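The EndlessReader above only exists to feed streaming benchmarks without ever hitting EOF. A minimal sketch of how it is typically wired into a Go benchmark, assuming the package's exported Reader helpers (NewReader, ReadString, AppendString), which are defined in files not shown in this hunk:

package msgp_test

import (
	"testing"

	"github.com/tinylib/msgp/msgp"
)

// BenchmarkReadString decodes the same encoded string repeatedly.
// EndlessReader stops the benchmark timer while it refills the
// destination slice, so only decode time is measured.
func BenchmarkReadString(b *testing.B) {
	data := msgp.AppendString(nil, "payload") // hypothetical fixture
	r := msgp.NewReader(msgp.NewEndlessReader(data, b))
	b.SetBytes(int64(len(data)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := r.ReadString(); err != nil {
			b.Fatal(err)
		}
	}
}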
diff --git a/vendor/github.com/tinylib/msgp/msgp/defs.go b/vendor/github.com/tinylib/msgp/msgp/defs.go
new file mode 100644
index 0000000000..e265aa4f85
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/defs.go
@@ -0,0 +1,147 @@
+// This package is the support library for the msgp code generator (http://github.com/tinylib/msgp).
+//
+// This package defines the utilities used by the msgp code generator for encoding and decoding MessagePack
+// from []byte and io.Reader/io.Writer types. Much of this package is devoted to helping the msgp code
+// generator implement the Marshaler/Unmarshaler and Encodable/Decodable interfaces.
+//
+// This package defines four "families" of functions:
+//   - AppendXxxx() appends an object to a []byte in MessagePack encoding.
+//   - ReadXxxxBytes() reads an object from a []byte and returns the remaining bytes.
+//   - (*Writer).WriteXxxx() writes an object to the buffered *Writer type.
+//   - (*Reader).ReadXxxx() reads an object from a buffered *Reader type.
+//
+// Once a type has satisfied the `Encodable` and `Decodable` interfaces,
+// it can be written and read from arbitrary `io.Writer`s and `io.Reader`s using
+//
+//	msgp.Encode(io.Writer, msgp.Encodable)
+//
+// and
+//
+//	msgp.Decode(io.Reader, msgp.Decodable)
+//
+// There are also methods for converting MessagePack to JSON without
+// an explicit de-serialization step.
+//
+// For additional tips, tricks, and gotchas, please visit
+// the wiki at http://github.com/tinylib/msgp
+package msgp
+
+const (
+	last4  = 0x0f
+	first4 = 0xf0
+	last5  = 0x1f
+	first3 = 0xe0
+	last7  = 0x7f
+)
+
+func isfixint(b byte) bool {
+	return b>>7 == 0
+}
+
+func isnfixint(b byte) bool {
+	return b&first3 == mnfixint
+}
+
+func isfixmap(b byte) bool {
+	return b&first4 == mfixmap
+}
+
+func isfixarray(b byte) bool {
+	return b&first4 == mfixarray
+}
+
+func isfixstr(b byte) bool {
+	return b&first3 == mfixstr
+}
+
+func wfixint(u uint8) byte {
+	return u & last7
+}
+
+func rfixint(b byte) uint8 {
+	return b
+}
+
+func wnfixint(i int8) byte {
+	return byte(i) | mnfixint
+}
+
+func rnfixint(b byte) int8 {
+	return int8(b)
+}
+
+func rfixmap(b byte) uint8 {
+	return b & last4
+}
+
+func wfixmap(u uint8) byte {
+	return mfixmap | (u & last4)
+}
+
+func rfixstr(b byte) uint8 {
+	return b & last5
+}
+
+func wfixstr(u uint8) byte {
+	return (u & last5) | mfixstr
+}
+
+func rfixarray(b byte) uint8 {
+	return (b & last4)
+}
+
+func wfixarray(u uint8) byte {
+	return (u & last4) | mfixarray
+}
+
+// These are all the byte
+// prefixes defined by the
+// msgpack standard
+const (
+	// 0XXXXXXX
+	mfixint uint8 = 0x00
+
+	// 111XXXXX
+	mnfixint uint8 = 0xe0
+
+	// 1000XXXX
+	mfixmap uint8 = 0x80
+
+	// 1001XXXX
+	mfixarray uint8 = 0x90
+
+	// 101XXXXX
+	mfixstr uint8 = 0xa0
+
+	mnil      uint8 = 0xc0
+	mfalse    uint8 = 0xc2
+	mtrue     uint8 = 0xc3
+	mbin8     uint8 = 0xc4
+	mbin16    uint8 = 0xc5
+	mbin32    uint8 = 0xc6
+	mext8     uint8 = 0xc7
+	mext16    uint8 = 0xc8
+	mext32    uint8 = 0xc9
+	mfloat32  uint8 = 0xca
+	mfloat64  uint8 = 0xcb
+	muint8    uint8 = 0xcc
+	muint16   uint8 = 0xcd
+	muint32   uint8 = 0xce
+	muint64   uint8 = 0xcf
+	mint8     uint8 = 0xd0
+	mint16    uint8 = 0xd1
+	mint32    uint8 = 0xd2
+	mint64    uint8 = 0xd3
+	mfixext1  uint8 = 0xd4
+	mfixext2  uint8 = 0xd5
+	mfixext4  uint8 = 0xd6
+	mfixext8  uint8 = 0xd7
+	mfixext16 uint8 = 0xd8
+	mstr8     uint8 = 0xd9
+	mstr16    uint8 = 0xda
+	mstr32    uint8 = 0xdb
+	marray16  uint8 = 0xdc
+	marray32  uint8 = 0xdd
+	mmap16    uint8 = 0xde
+	mmap32    uint8 = 0xdf
+)
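The doc comment at the top of defs.go lists four families of helpers. A minimal sketch of the []byte-oriented pair (AppendXxxx / ReadXxxxBytes), assuming the package's exported primitive helpers (AppendMapHeader, AppendString, AppendInt64 and their Read*Bytes counterparts), which live in files not included in this hunk:

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Append family: build a one-entry MessagePack map directly in a []byte.
	raw := msgp.AppendMapHeader(nil, 1)
	raw = msgp.AppendString(raw, "count")
	raw = msgp.AppendInt64(raw, 42)

	// ReadXxxxBytes family: walk the buffer back; each call returns the
	// decoded value plus the remaining, unread bytes.
	sz, rest, _ := msgp.ReadMapHeaderBytes(raw)
	key, rest, _ := msgp.ReadStringBytes(rest)
	val, _, _ := msgp.ReadInt64Bytes(rest)
	fmt.Println(sz, key, val) // 1 count 42
}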
diff --git a/vendor/github.com/tinylib/msgp/msgp/edit.go b/vendor/github.com/tinylib/msgp/msgp/edit.go
new file mode 100644
index 0000000000..b473a6f668
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/edit.go
@@ -0,0 +1,242 @@
+package msgp
+
+import (
+	"math"
+)
+
+// Locate returns a []byte pointing to the field
+// in a messagepack map with the provided key. (The returned []byte
+// points to a sub-slice of 'raw'; Locate does no allocations.) If the
+// key doesn't exist in the map, a zero-length []byte will be returned.
+func Locate(key string, raw []byte) []byte {
+	s, n := locate(raw, key)
+	return raw[s:n]
+}
+
+// Replace takes a key ("key") in a messagepack map ("raw")
+// and replaces its value with the one provided and returns
+// the new []byte. The returned []byte may point to the same
+// memory as "raw". Replace makes no effort to evaluate the validity
+// of the contents of 'val'. It may use up to the full capacity of 'raw.'
+// Replace returns 'nil' if the field doesn't exist or if the object in 'raw'
+// is not a map.
+func Replace(key string, raw []byte, val []byte) []byte {
+	start, end := locate(raw, key)
+	if start == end {
+		return nil
+	}
+	return replace(raw, start, end, val, true)
+}
+
+// CopyReplace works similarly to Replace except that the returned
+// byte slice does not point to the same memory as 'raw'. CopyReplace
+// returns 'nil' if the field doesn't exist or 'raw' isn't a map.
+func CopyReplace(key string, raw []byte, val []byte) []byte {
+	start, end := locate(raw, key)
+	if start == end {
+		return nil
+	}
+	return replace(raw, start, end, val, false)
+}
+
+// Remove removes a key-value pair from 'raw'. It returns
+// 'raw' unchanged if the key didn't exist.
+func Remove(key string, raw []byte) []byte {
+	start, end := locateKV(raw, key)
+	if start == end {
+		return raw
+	}
+	raw = raw[:start+copy(raw[start:], raw[end:])]
+	return resizeMap(raw, -1)
+}
+
+// HasKey returns whether the map in 'raw' has
+// a field with key 'key'
+func HasKey(key string, raw []byte) bool {
+	sz, bts, err := ReadMapHeaderBytes(raw)
+	if err != nil {
+		return false
+	}
+	var field []byte
+	for i := uint32(0); i < sz; i++ {
+		field, bts, err = ReadStringZC(bts)
+		if err != nil {
+			return false
+		}
+		if UnsafeString(field) == key {
+			return true
+		}
+	}
+	return false
+}
+
+func replace(raw []byte, start int, end int, val []byte, inplace bool) []byte {
+	ll := end - start // length of segment to replace
+	lv := len(val)
+
+	if inplace {
+		extra := lv - ll
+
+		// fastest case: we're doing
+		// a 1:1 replacement
+		if extra == 0 {
+			copy(raw[start:], val)
+			return raw
+
+		} else if extra < 0 {
+			// 'val' smaller than replaced value
+			// copy in place and shift back
+
+			x := copy(raw[start:], val)
+			y := copy(raw[start+x:], raw[end:])
+			return raw[:start+x+y]
+
+		} else if extra < cap(raw)-len(raw) {
+			// 'val' less than (cap-len) extra bytes
+			// copy in place and shift forward
+			raw = raw[0 : len(raw)+extra]
+			// shift end forward
+			copy(raw[end+extra:], raw[end:])
+			copy(raw[start:], val)
+			return raw
+		}
+	}
+
+	// we have to allocate new space
+	out := make([]byte, len(raw)+len(val)-ll)
+	x := copy(out, raw[:start])
+	y := copy(out[x:], val)
+	copy(out[x+y:], raw[end:])
+	return out
+}
+
+// locate does a naive O(n) search for the map key; returns start, end
+// (returns 0,0 on error)
+func locate(raw []byte, key string) (start int, end int) {
+	var (
+		sz    uint32
+		bts   []byte
+		field []byte
+		err   error
+	)
+	sz, bts, err = ReadMapHeaderBytes(raw)
+	if err != nil {
+		return
+	}
+
+	// loop and locate field
+	for i := uint32(0); i < sz; i++ {
+		field, bts, err = ReadStringZC(bts)
+		if err != nil {
+			return 0, 0
+		}
+		if UnsafeString(field) == key {
+			// start location
+			l := len(raw)
+			start = l - len(bts)
+			bts, err = Skip(bts)
+			if err != nil {
+				return 0, 0
+			}
+			end = l - len(bts)
+			return
+		}
+		bts, err = Skip(bts)
+		if err != nil {
+			return 0, 0
+		}
+	}
+	return 0, 0
+}
+
+// locate key AND value
+func locateKV(raw []byte, key string) (start int, end int) {
+	var (
+		sz    uint32
+		bts   []byte
+		field []byte
+		err   error
+	)
+	sz, bts, err = ReadMapHeaderBytes(raw)
+	if err != nil {
+		return 0, 0
+	}
+
+	for i := uint32(0); i < sz; i++ {
+		tmp := len(bts)
+		field, bts, err = ReadStringZC(bts)
+		if err != nil {
+			return 0, 0
+		}
+		if UnsafeString(field) == key {
+			start = len(raw) - tmp
+			bts, err = Skip(bts)
+			if err != nil {
+				return 0, 0
+			}
+			end = len(raw) - len(bts)
+			return
+		}
+		bts, err = Skip(bts)
+		if err != nil {
+			return 0, 0
+		}
+	}
+	return 0, 0
+}
+
+// delta is delta on map size
+func resizeMap(raw []byte, delta int64) []byte {
+	var sz int64
+	switch raw[0] {
+	case mmap16:
+		sz = int64(big.Uint16(raw[1:]))
+		if sz+delta <= math.MaxUint16 {
+			big.PutUint16(raw[1:], uint16(sz+delta))
+			return raw
+		}
+		if cap(raw)-len(raw) >= 2 {
+			raw = raw[0 : len(raw)+2]
+			copy(raw[5:], raw[3:])
+			raw[0] = mmap32
+			big.PutUint32(raw[1:], uint32(sz+delta))
+			return raw
+		}
+		n := make([]byte, 0, len(raw)+5)
+		n = AppendMapHeader(n, uint32(sz+delta))
+		return append(n, raw[3:]...)
+
+	case mmap32:
+		sz = int64(big.Uint32(raw[1:]))
+		big.PutUint32(raw[1:], uint32(sz+delta))
+		return raw
+
+	default:
+		sz = int64(rfixmap(raw[0]))
+		if sz+delta < 16 {
+			raw[0] = wfixmap(uint8(sz + delta))
+			return raw
+		} else if sz+delta <= math.MaxUint16 {
+			if cap(raw)-len(raw) >= 2 {
+				raw = raw[0 : len(raw)+2]
+				copy(raw[3:], raw[1:])
+				raw[0] = mmap16
+				big.PutUint16(raw[1:], uint16(sz+delta))
+				return raw
+			}
+			n := make([]byte, 0, len(raw)+5)
+			n = AppendMapHeader(n, uint32(sz+delta))
+			return append(n, raw[1:]...)
+		}
+		if cap(raw)-len(raw) >= 4 {
+			raw = raw[0 : len(raw)+4]
+			copy(raw[5:], raw[1:])
+			raw[0] = mmap32
+			big.PutUint32(raw[1:], uint32(sz+delta))
+			return raw
+		}
+		n := make([]byte, 0, len(raw)+5)
+		n = AppendMapHeader(n, uint32(sz+delta))
+		return append(n, raw[1:]...)
+	}
+}
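A minimal sketch of the raw-map editing helpers defined above (HasKey, Locate, Replace, Remove), assuming the map is first built with the package's Append helpers from other files:

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Encode {"name": "old"} as raw MessagePack.
	raw := msgp.AppendMapHeader(nil, 1)
	raw = msgp.AppendString(raw, "name")
	raw = msgp.AppendString(raw, "old")

	fmt.Println(msgp.HasKey("name", raw))      // true
	fmt.Println(len(msgp.Locate("name", raw))) // length of the encoded "old" value

	// Replace expects 'val' to already be valid MessagePack.
	raw = msgp.Replace("name", raw, msgp.AppendString(nil, "new"))

	// Remove drops the key/value pair and shrinks the map header.
	raw = msgp.Remove("name", raw)
	fmt.Println(msgp.HasKey("name", raw)) // false
}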
diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize.go b/vendor/github.com/tinylib/msgp/msgp/elsize.go
new file mode 100644
index 0000000000..a05b0b21c2
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/elsize.go
@@ -0,0 +1,128 @@
+package msgp
+
+func calcBytespec(v byte) bytespec {
+	// single byte values
+	switch v {
+
+	case mnil:
+		return bytespec{size: 1, extra: constsize, typ: NilType}
+	case mfalse:
+		return bytespec{size: 1, extra: constsize, typ: BoolType}
+	case mtrue:
+		return bytespec{size: 1, extra: constsize, typ: BoolType}
+	case mbin8:
+		return bytespec{size: 2, extra: extra8, typ: BinType}
+	case mbin16:
+		return bytespec{size: 3, extra: extra16, typ: BinType}
+	case mbin32:
+		return bytespec{size: 5, extra: extra32, typ: BinType}
+	case mext8:
+		return bytespec{size: 3, extra: extra8, typ: ExtensionType}
+	case mext16:
+		return bytespec{size: 4, extra: extra16, typ: ExtensionType}
+	case mext32:
+		return bytespec{size: 6, extra: extra32, typ: ExtensionType}
+	case mfloat32:
+		return bytespec{size: 5, extra: constsize, typ: Float32Type}
+	case mfloat64:
+		return bytespec{size: 9, extra: constsize, typ: Float64Type}
+	case muint8:
+		return bytespec{size: 2, extra: constsize, typ: UintType}
+	case muint16:
+		return bytespec{size: 3, extra: constsize, typ: UintType}
+	case muint32:
+		return bytespec{size: 5, extra: constsize, typ: UintType}
+	case muint64:
+		return bytespec{size: 9, extra: constsize, typ: UintType}
+	case mint8:
+		return bytespec{size: 2, extra: constsize, typ: IntType}
+	case mint16:
+		return bytespec{size: 3, extra: constsize, typ: IntType}
+	case mint32:
+		return bytespec{size: 5, extra: constsize, typ: IntType}
+	case mint64:
+		return bytespec{size: 9, extra: constsize, typ: IntType}
+	case mfixext1:
+		return bytespec{size: 3, extra: constsize, typ: ExtensionType}
+	case mfixext2:
+		return bytespec{size: 4, extra: constsize, typ: ExtensionType}
+	case mfixext4:
+		return bytespec{size: 6, extra: constsize, typ: ExtensionType}
+	case mfixext8:
+		return bytespec{size: 10, extra: constsize, typ: ExtensionType}
+	case mfixext16:
+		return bytespec{size: 18, extra: constsize, typ: ExtensionType}
+	case mstr8:
+		return bytespec{size: 2, extra: extra8, typ: StrType}
+	case mstr16:
+		return bytespec{size: 3, extra: extra16, typ: StrType}
+	case mstr32:
+		return bytespec{size: 5, extra: extra32, typ: StrType}
+	case marray16:
+		return bytespec{size: 3, extra: array16v, typ: ArrayType}
+	case marray32:
+		return bytespec{size: 5, extra: array32v, typ: ArrayType}
+	case mmap16:
+		return bytespec{size: 3, extra: map16v, typ: MapType}
+	case mmap32:
+		return bytespec{size: 5, extra: map32v, typ: MapType}
+	}
+
+	switch {
+
+	// fixint
+	case v >= mfixint && v < 0x80:
+		return bytespec{size: 1, extra: constsize, typ: IntType}
+
+	// fixstr gets constsize, since the prefix yields the size
+	case v >= mfixstr && v < 0xc0:
+		return bytespec{size: 1 + rfixstr(v), extra: constsize, typ: StrType}
+
+	// fixmap
+	case v >= mfixmap && v < 0x90:
+		return bytespec{size: 1, extra: varmode(2 * rfixmap(v)), typ: MapType}
+
+	// fixarray
+	case v >= mfixarray && v < 0xa0:
+		return bytespec{size: 1, extra: varmode(rfixarray(v)), typ: ArrayType}
+
+	// nfixint
+	case v >= mnfixint && uint16(v) < 0x100:
+		return bytespec{size: 1, extra: constsize, typ: IntType}
+
+	}
+
+	// 0xC1 is unused per the spec and falls through to here,
+	// everything else is covered above
+
+	return bytespec{}
+}
+
+func getType(v byte) Type {
+	return getBytespec(v).typ
+}
+
+// a valid bytespec has
+// non-zero 'size' and
+// non-zero 'typ'
+type bytespec struct {
+	size  uint8   // prefix size information
+	extra varmode // extra size information
+	typ   Type    // type
+	_     byte    // makes bytespec 4 bytes (yes, this matters)
+}
+
+// size mode
+// if positive, # elements for composites
+type varmode int8
+
+const (
+	constsize varmode = 0  // constant size (size bytes + uint8(varmode) objects)
+	extra8    varmode = -1 // has uint8(p[1]) extra bytes
+	extra16   varmode = -2 // has be16(p[1:]) extra bytes
+	extra32   varmode = -3 // has be32(p[1:]) extra bytes
+	map16v    varmode = -4 // use map16
+	map32v    varmode = -5 // use map32
+	array16v  varmode = -6 // use array16
+	array32v  varmode = -7 // use array32
+)
diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize_default.go b/vendor/github.com/tinylib/msgp/msgp/elsize_default.go
new file mode 100644
index 0000000000..e7e8b547a9
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/elsize_default.go
@@ -0,0 +1,21 @@
+//go:build !tinygo
+// +build !tinygo
+
+package msgp
+
+// size of every object on the wire,
+// plus type information. gives us
+// constant-time type information
+// for traversing composite objects.
+var sizes [256]bytespec
+
+func init() {
+	for i := 0; i < 256; i++ {
+		sizes[i] = calcBytespec(byte(i))
+	}
+}
+
+// getBytespec gets inlined to a simple array index
+func getBytespec(v byte) bytespec {
+	return sizes[v]
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize_tinygo.go b/vendor/github.com/tinylib/msgp/msgp/elsize_tinygo.go
new file mode 100644
index 0000000000..041f4ad694
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/elsize_tinygo.go
@@ -0,0 +1,13 @@
+//go:build tinygo
+// +build tinygo
+
+package msgp
+
+// for tinygo, getBytespec just calls calcBytespec
+// a simple/slow function with a switch statement -
+// doesn't require any heap alloc, moves the space
+// requirements into code instead of RAM
+
+func getBytespec(v byte) bytespec {
+	return calcBytespec(v)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/errors.go b/vendor/github.com/tinylib/msgp/msgp/errors.go
new file mode 100644
index 0000000000..4f19359a23
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/errors.go
@@ -0,0 +1,359 @@
+package msgp
+
+import (
+	"reflect"
+	"strconv"
+)
+
+const resumableDefault = false
+
+var (
+	// ErrShortBytes is returned when the
+	// slice being decoded is too short to
+	// contain the contents of the message
+	ErrShortBytes error = errShort{}
+
+	// this error is only returned
+	// if we reach code that should
+	// be unreachable
+	fatal error = errFatal{}
+)
+
+// Error is the interface satisfied
+// by all of the errors that originate
+// from this package.
+type Error interface {
+	error
+
+	// Resumable returns whether
+	// or not the error means that
+	// the stream of data is malformed
+	// and the information is unrecoverable.
+	Resumable() bool
+}
+
+// contextError allows msgp Error instances to be enhanced with additional
+// context about their origin.
+type contextError interface {
+	Error
+
+	// withContext must not modify the error instance - it must clone and
+	// return a new error with the context added.
+	withContext(ctx string) error
+}
+
+// Cause returns the underlying cause of an error that has been wrapped
+// with additional context.
+func Cause(e error) error {
+	out := e
+	if e, ok := e.(errWrapped); ok && e.cause != nil {
+		out = e.cause
+	}
+	return out
+}
+
+// Resumable returns whether or not the error means that the stream of data is
+// malformed and the information is unrecoverable.
+func Resumable(e error) bool {
+	if e, ok := e.(Error); ok {
+		return e.Resumable()
+	}
+	return resumableDefault
+}
+
+// WrapError wraps an error with additional context that allows the part of the
+// serialized type that caused the problem to be identified. Underlying errors
+// can be retrieved using Cause()
+//
+// The input error is not modified - a new error should be returned.
+//
+// ErrShortBytes is not wrapped with any context due to backward compatibility
+// issues with the public API.
+func WrapError(err error, ctx ...interface{}) error {
+	switch e := err.(type) {
+	case errShort:
+		return e
+	case contextError:
+		return e.withContext(ctxString(ctx))
+	default:
+		return errWrapped{cause: err, ctx: ctxString(ctx)}
+	}
+}
+
+func addCtx(ctx, add string) string {
+	if ctx != "" {
+		return add + "/" + ctx
+	} else {
+		return add
+	}
+}
+
+// errWrapped allows arbitrary errors passed to WrapError to be enhanced with
+// context and unwrapped with Cause()
+type errWrapped struct {
+	cause error
+	ctx   string
+}
+
+func (e errWrapped) Error() string {
+	if e.ctx != "" {
+		return e.cause.Error() + " at " + e.ctx
+	} else {
+		return e.cause.Error()
+	}
+}
+
+func (e errWrapped) Resumable() bool {
+	if e, ok := e.cause.(Error); ok {
+		return e.Resumable()
+	}
+	return resumableDefault
+}
+
+// Unwrap returns the cause.
+func (e errWrapped) Unwrap() error { return e.cause }
+
+type errShort struct{}
+
+func (e errShort) Error() string   { return "msgp: too few bytes left to read object" }
+func (e errShort) Resumable() bool { return false }
+
+type errFatal struct {
+	ctx string
+}
+
+func (f errFatal) Error() string {
+	out := "msgp: fatal decoding error (unreachable code)"
+	if f.ctx != "" {
+		out += " at " + f.ctx
+	}
+	return out
+}
+
+func (f errFatal) Resumable() bool { return false }
+
+func (f errFatal) withContext(ctx string) error { f.ctx = addCtx(f.ctx, ctx); return f }
+
+// ArrayError is an error returned
+// when decoding a fix-sized array
+// of the wrong size
+type ArrayError struct {
+	Wanted uint32
+	Got    uint32
+	ctx    string
+}
+
+// Error implements the error interface
+func (a ArrayError) Error() string {
+	out := "msgp: wanted array of size " + strconv.Itoa(int(a.Wanted)) + "; got " + strconv.Itoa(int(a.Got))
+	if a.ctx != "" {
+		out += " at " + a.ctx
+	}
+	return out
+}
+
+// Resumable is always 'true' for ArrayErrors
+func (a ArrayError) Resumable() bool { return true }
+
+func (a ArrayError) withContext(ctx string) error { a.ctx = addCtx(a.ctx, ctx); return a }
+
+// IntOverflow is returned when a call
+// would downcast an integer to a type
+// with too few bits to hold its value.
+type IntOverflow struct {
+	Value         int64 // the value of the integer
+	FailedBitsize int   // the bit size that the int64 could not fit into
+	ctx           string
+}
+
+// Error implements the error interface
+func (i IntOverflow) Error() string {
+	str := "msgp: " + strconv.FormatInt(i.Value, 10) + " overflows int" + strconv.Itoa(i.FailedBitsize)
+	if i.ctx != "" {
+		str += " at " + i.ctx
+	}
+	return str
+}
+
+// Resumable is always 'true' for overflows
+func (i IntOverflow) Resumable() bool { return true }
+
+func (i IntOverflow) withContext(ctx string) error { i.ctx = addCtx(i.ctx, ctx); return i }
+
+// UintOverflow is returned when a call
+// would downcast an unsigned integer to a type
+// with too few bits to hold its value
+type UintOverflow struct {
+	Value         uint64 // value of the uint
+	FailedBitsize int    // the bit size that couldn't fit the value
+	ctx           string
+}
+
+// Error implements the error interface
+func (u UintOverflow) Error() string {
+	str := "msgp: " + strconv.FormatUint(u.Value, 10) + " overflows uint" + strconv.Itoa(u.FailedBitsize)
+	if u.ctx != "" {
+		str += " at " + u.ctx
+	}
+	return str
+}
+
+// Resumable is always 'true' for overflows
+func (u UintOverflow) Resumable() bool { return true }
+
+func (u UintOverflow) withContext(ctx string) error { u.ctx = addCtx(u.ctx, ctx); return u }
+
+// UintBelowZero is returned when a call
+// would cast a signed integer below zero
+// to an unsigned integer.
+type UintBelowZero struct {
+	Value int64 // value of the incoming int
+	ctx   string
+}
+
+// Error implements the error interface
+func (u UintBelowZero) Error() string {
+	str := "msgp: attempted to cast int " + strconv.FormatInt(u.Value, 10) + " to unsigned"
+	if u.ctx != "" {
+		str += " at " + u.ctx
+	}
+	return str
+}
+
+// Resumable is always 'true' for overflows
+func (u UintBelowZero) Resumable() bool { return true }
+
+func (u UintBelowZero) withContext(ctx string) error {
+	u.ctx = ctx
+	return u
+}
+
+// A TypeError is returned when a particular
+// decoding method is unsuitable for decoding
+// a particular MessagePack value.
+type TypeError struct {
+	Method  Type // Type expected by method
+	Encoded Type // Type actually encoded
+
+	ctx string
+}
+
+// Error implements the error interface
+func (t TypeError) Error() string {
+	out := "msgp: attempted to decode type " + quoteStr(t.Encoded.String()) + " with method for " + quoteStr(t.Method.String())
+	if t.ctx != "" {
+		out += " at " + t.ctx
+	}
+	return out
+}
+
+// Resumable returns 'true' for TypeErrors
+func (t TypeError) Resumable() bool { return true }
+
+func (t TypeError) withContext(ctx string) error { t.ctx = addCtx(t.ctx, ctx); return t }
+
+// returns either InvalidPrefixError or
+// TypeError depending on whether or not
+// the prefix is recognized
+func badPrefix(want Type, lead byte) error {
+	t := getType(lead)
+	if t == InvalidType {
+		return InvalidPrefixError(lead)
+	}
+	return TypeError{Method: want, Encoded: t}
+}
+
+// InvalidPrefixError is returned when a bad encoding
+// uses a prefix that is not recognized in the MessagePack standard.
+// This kind of error is unrecoverable.
+type InvalidPrefixError byte
+
+// Error implements the error interface
+func (i InvalidPrefixError) Error() string {
+	return "msgp: unrecognized type prefix 0x" + strconv.FormatInt(int64(i), 16)
+}
+
+// Resumable returns 'false' for InvalidPrefixErrors
+func (i InvalidPrefixError) Resumable() bool { return false }
+
+// ErrUnsupportedType is returned
+// when a bad argument is supplied
+// to a function that takes `interface{}`.
+type ErrUnsupportedType struct {
+	T reflect.Type
+
+	ctx string
+}
+
+// Error implements error
+func (e *ErrUnsupportedType) Error() string {
+	out := "msgp: type " + quoteStr(e.T.String()) + " not supported"
+	if e.ctx != "" {
+		out += " at " + e.ctx
+	}
+	return out
+}
+
+// Resumable returns 'true' for ErrUnsupportedType
+func (e *ErrUnsupportedType) Resumable() bool { return true }
+
+func (e *ErrUnsupportedType) withContext(ctx string) error {
+	o := *e
+	o.ctx = addCtx(o.ctx, ctx)
+	return &o
+}
+
+// simpleQuoteStr is a simplified version of strconv.Quote for TinyGo,
+// which takes up a lot less code space by escaping all non-ASCII
+// (UTF-8) bytes with \x.  Saves about 4k of code size
+// (unicode tables, needed for IsPrint(), are big).
+// It lives in errors.go just so we can test it in errors_test.go
+func simpleQuoteStr(s string) string {
+	const (
+		lowerhex = "0123456789abcdef"
+	)
+
+	sb := make([]byte, 0, len(s)+2)
+
+	sb = append(sb, `"`...)
+
+l: // loop through string bytes (not UTF-8 characters)
+	for i := 0; i < len(s); i++ {
+		b := s[i]
+		// specific escape chars
+		switch b {
+		case '\\':
+			sb = append(sb, `\\`...)
+		case '"':
+			sb = append(sb, `\"`...)
+		case '\a':
+			sb = append(sb, `\a`...)
+		case '\b':
+			sb = append(sb, `\b`...)
+		case '\f':
+			sb = append(sb, `\f`...)
+		case '\n':
+			sb = append(sb, `\n`...)
+		case '\r':
+			sb = append(sb, `\r`...)
+		case '\t':
+			sb = append(sb, `\t`...)
+		case '\v':
+			sb = append(sb, `\v`...)
+		default:
+			// no escaping needed (printable ASCII)
+			if b >= 0x20 && b <= 0x7E {
+				sb = append(sb, b)
+				continue l
+			}
+			// anything else is \x
+			sb = append(sb, `\x`...)
+			sb = append(sb, lowerhex[byte(b)>>4])
+			sb = append(sb, lowerhex[byte(b)&0xF])
+			continue l
+		}
+	}
+
+	sb = append(sb, `"`...)
+	return string(sb)
+}
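A minimal sketch of the error-context helpers above (WrapError, Cause, Resumable). Generated code passes field names as the variadic context; here a plain error stands in for a decode failure:

package main

import (
	"errors"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	base := errors.New("boom")

	// Annotate the error with the path of the field being decoded.
	wrapped := msgp.WrapError(base, "user", "name")
	fmt.Println(wrapped) // boom at user/name

	// Cause strips the context again; Resumable falls back to 'false'
	// for errors that don't implement the msgp.Error interface.
	fmt.Println(msgp.Cause(wrapped) == base) // true
	fmt.Println(msgp.Resumable(wrapped))     // false
}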
diff --git a/vendor/github.com/tinylib/msgp/msgp/errors_default.go b/vendor/github.com/tinylib/msgp/msgp/errors_default.go
new file mode 100644
index 0000000000..e45c00a8b8
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/errors_default.go
@@ -0,0 +1,25 @@
+//go:build !tinygo
+// +build !tinygo
+
+package msgp
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// ctxString converts the incoming interface{} slice into a single string.
+func ctxString(ctx []interface{}) string {
+	out := ""
+	for idx, cv := range ctx {
+		if idx > 0 {
+			out += "/"
+		}
+		out += fmt.Sprintf("%v", cv)
+	}
+	return out
+}
+
+func quoteStr(s string) string {
+	return strconv.Quote(s)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/errors_tinygo.go b/vendor/github.com/tinylib/msgp/msgp/errors_tinygo.go
new file mode 100644
index 0000000000..8691cd387e
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/errors_tinygo.go
@@ -0,0 +1,42 @@
+//go:build tinygo
+// +build tinygo
+
+package msgp
+
+import (
+	"reflect"
+)
+
+// ctxString converts the incoming interface{} slice into a single string,
+// without using fmt under tinygo
+func ctxString(ctx []interface{}) string {
+	out := ""
+	for idx, cv := range ctx {
+		if idx > 0 {
+			out += "/"
+		}
+		out += ifToStr(cv)
+	}
+	return out
+}
+
+type stringer interface {
+	String() string
+}
+
+func ifToStr(i interface{}) string {
+	switch v := i.(type) {
+	case stringer:
+		return v.String()
+	case error:
+		return v.Error()
+	case string:
+		return v
+	default:
+		return reflect.ValueOf(i).String()
+	}
+}
+
+func quoteStr(s string) string {
+	return simpleQuoteStr(s)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go
new file mode 100644
index 0000000000..b5ef3a4e3d
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/extension.go
@@ -0,0 +1,550 @@
+package msgp
+
+import (
+	"errors"
+	"math"
+	"strconv"
+)
+
+const (
+	// Complex64Extension is the extension number used for complex64
+	Complex64Extension = 3
+
+	// Complex128Extension is the extension number used for complex128
+	Complex128Extension = 4
+
+	// TimeExtension is the extension number used for time.Time
+	TimeExtension = 5
+)
+
+// our extensions live here
+var extensionReg = make(map[int8]func() Extension)
+
+// RegisterExtension registers extensions so that they
+// can be initialized and returned by methods that
+// decode `interface{}` values. This should only
+// be called during initialization. f() should return
+// a newly-initialized zero value of the extension. Keep in
+// mind that extensions 3, 4, and 5 are reserved for
+// complex64, complex128, and time.Time, respectively,
+// and that MessagePack reserves extension types from -127 to -1.
+//
+// For example, if you wanted to register a user-defined struct:
+//
+//	msgp.RegisterExtension(10, func() msgp.Extension { return &MyExtension{} })
+//
+// RegisterExtension will panic if you call it multiple times
+// with the same 'typ' argument, or if you use a reserved
+// type (3, 4, or 5).
+func RegisterExtension(typ int8, f func() Extension) {
+	switch typ {
+	case Complex64Extension, Complex128Extension, TimeExtension:
+		panic(errors.New("msgp: forbidden extension type: " + strconv.Itoa(int(typ))))
+	}
+	if _, ok := extensionReg[typ]; ok {
+		panic(errors.New("msgp: RegisterExtension() called with typ " + strconv.Itoa(int(typ)) + " more than once"))
+	}
+	extensionReg[typ] = f
+}
+
+// ExtensionTypeError is an error type returned
+// when there is a mis-match between an extension type
+// and the type encoded on the wire
+type ExtensionTypeError struct {
+	Got  int8
+	Want int8
+}
+
+// Error implements the error interface
+func (e ExtensionTypeError) Error() string {
+	return "msgp: error decoding extension: wanted type " + strconv.Itoa(int(e.Want)) + "; got type " + strconv.Itoa(int(e.Got))
+}
+
+// Resumable returns 'true' for ExtensionTypeErrors
+func (e ExtensionTypeError) Resumable() bool { return true }
+
+func errExt(got int8, wanted int8) error {
+	return ExtensionTypeError{Got: got, Want: wanted}
+}
+
+// Extension is the interface fulfilled
+// by types that want to define their
+// own binary encoding.
+type Extension interface {
+	// ExtensionType should return
+	// a int8 that identifies the concrete
+	// type of the extension. (Types <0 are
+	// officially reserved by the MessagePack
+	// specifications.)
+	ExtensionType() int8
+
+	// Len should return the length
+	// of the data to be encoded
+	Len() int
+
+	// MarshalBinaryTo should copy
+	// the data into the supplied slice,
+	// assuming that the slice has length Len()
+	MarshalBinaryTo([]byte) error
+
+	UnmarshalBinary([]byte) error
+}
+
+// RawExtension implements the Extension interface
+type RawExtension struct {
+	Data []byte
+	Type int8
+}
+
+// ExtensionType implements Extension.ExtensionType, and returns r.Type
+func (r *RawExtension) ExtensionType() int8 { return r.Type }
+
+// Len implements Extension.Len, and returns len(r.Data)
+func (r *RawExtension) Len() int { return len(r.Data) }
+
+// MarshalBinaryTo implements Extension.MarshalBinaryTo,
+// and returns a copy of r.Data
+func (r *RawExtension) MarshalBinaryTo(d []byte) error {
+	copy(d, r.Data)
+	return nil
+}
+
+// UnmarshalBinary implements Extension.UnmarshalBinary,
+// and sets r.Data to the contents of the provided slice
+func (r *RawExtension) UnmarshalBinary(b []byte) error {
+	if cap(r.Data) >= len(b) {
+		r.Data = r.Data[0:len(b)]
+	} else {
+		r.Data = make([]byte, len(b))
+	}
+	copy(r.Data, b)
+	return nil
+}
+
+// WriteExtension writes an extension type to the writer
+func (mw *Writer) WriteExtension(e Extension) error {
+	l := e.Len()
+	var err error
+	switch l {
+	case 0:
+		o, err := mw.require(3)
+		if err != nil {
+			return err
+		}
+		mw.buf[o] = mext8
+		mw.buf[o+1] = 0
+		mw.buf[o+2] = byte(e.ExtensionType())
+	case 1:
+		o, err := mw.require(2)
+		if err != nil {
+			return err
+		}
+		mw.buf[o] = mfixext1
+		mw.buf[o+1] = byte(e.ExtensionType())
+	case 2:
+		o, err := mw.require(2)
+		if err != nil {
+			return err
+		}
+		mw.buf[o] = mfixext2
+		mw.buf[o+1] = byte(e.ExtensionType())
+	case 4:
+		o, err := mw.require(2)
+		if err != nil {
+			return err
+		}
+		mw.buf[o] = mfixext4
+		mw.buf[o+1] = byte(e.ExtensionType())
+	case 8:
+		o, err := mw.require(2)
+		if err != nil {
+			return err
+		}
+		mw.buf[o] = mfixext8
+		mw.buf[o+1] = byte(e.ExtensionType())
+	case 16:
+		o, err := mw.require(2)
+		if err != nil {
+			return err
+		}
+		mw.buf[o] = mfixext16
+		mw.buf[o+1] = byte(e.ExtensionType())
+	default:
+		switch {
+		case l < math.MaxUint8:
+			o, err := mw.require(3)
+			if err != nil {
+				return err
+			}
+			mw.buf[o] = mext8
+			mw.buf[o+1] = byte(uint8(l))
+			mw.buf[o+2] = byte(e.ExtensionType())
+		case l < math.MaxUint16:
+			o, err := mw.require(4)
+			if err != nil {
+				return err
+			}
+			mw.buf[o] = mext16
+			big.PutUint16(mw.buf[o+1:], uint16(l))
+			mw.buf[o+3] = byte(e.ExtensionType())
+		default:
+			o, err := mw.require(6)
+			if err != nil {
+				return err
+			}
+			mw.buf[o] = mext32
+			big.PutUint32(mw.buf[o+1:], uint32(l))
+			mw.buf[o+5] = byte(e.ExtensionType())
+		}
+	}
+	// we can only write directly to the
+	// buffer if we're sure that it
+	// fits the object
+	if l <= mw.bufsize() {
+		o, err := mw.require(l)
+		if err != nil {
+			return err
+		}
+		return e.MarshalBinaryTo(mw.buf[o:])
+	}
+	// here we create a new buffer
+	// just large enough for the body
+	// and save it as the write buffer
+	err = mw.flush()
+	if err != nil {
+		return err
+	}
+	buf := make([]byte, l)
+	err = e.MarshalBinaryTo(buf)
+	if err != nil {
+		return err
+	}
+	mw.buf = buf
+	mw.wloc = l
+	return nil
+}
+
+// peek at the extension type, assuming the next
+// kind to be read is Extension
+func (m *Reader) peekExtensionType() (int8, error) {
+	p, err := m.R.Peek(2)
+	if err != nil {
+		return 0, err
+	}
+	spec := getBytespec(p[0])
+	if spec.typ != ExtensionType {
+		return 0, badPrefix(ExtensionType, p[0])
+	}
+	if spec.extra == constsize {
+		return int8(p[1]), nil
+	}
+	size := spec.size
+	p, err = m.R.Peek(int(size))
+	if err != nil {
+		return 0, err
+	}
+	return int8(p[size-1]), nil
+}
+
+// peekExtension peeks at the extension encoding type
+// (must guarantee at least 1 byte in 'b')
+func peekExtension(b []byte) (int8, error) {
+	spec := getBytespec(b[0])
+	size := spec.size
+	if spec.typ != ExtensionType {
+		return 0, badPrefix(ExtensionType, b[0])
+	}
+	if len(b) < int(size) {
+		return 0, ErrShortBytes
+	}
+	// for fixed extensions,
+	// the type information is in
+	// the second byte
+	if spec.extra == constsize {
+		return int8(b[1]), nil
+	}
+	// otherwise, it's in the last
+	// part of the prefix
+	return int8(b[size-1]), nil
+}
+
+// ReadExtension reads the next object from the reader
+// as an extension. ReadExtension will fail if the next
+// object in the stream is not an extension, or if
+// e.Type() is not the same as the wire type.
+func (m *Reader) ReadExtension(e Extension) (err error) {
+	var p []byte
+	p, err = m.R.Peek(2)
+	if err != nil {
+		return
+	}
+	lead := p[0]
+	var read int
+	var off int
+	switch lead {
+	case mfixext1:
+		if int8(p[1]) != e.ExtensionType() {
+			err = errExt(int8(p[1]), e.ExtensionType())
+			return
+		}
+		p, err = m.R.Peek(3)
+		if err != nil {
+			return
+		}
+		err = e.UnmarshalBinary(p[2:])
+		if err == nil {
+			_, err = m.R.Skip(3)
+		}
+		return
+
+	case mfixext2:
+		if int8(p[1]) != e.ExtensionType() {
+			err = errExt(int8(p[1]), e.ExtensionType())
+			return
+		}
+		p, err = m.R.Peek(4)
+		if err != nil {
+			return
+		}
+		err = e.UnmarshalBinary(p[2:])
+		if err == nil {
+			_, err = m.R.Skip(4)
+		}
+		return
+
+	case mfixext4:
+		if int8(p[1]) != e.ExtensionType() {
+			err = errExt(int8(p[1]), e.ExtensionType())
+			return
+		}
+		p, err = m.R.Peek(6)
+		if err != nil {
+			return
+		}
+		err = e.UnmarshalBinary(p[2:])
+		if err == nil {
+			_, err = m.R.Skip(6)
+		}
+		return
+
+	case mfixext8:
+		if int8(p[1]) != e.ExtensionType() {
+			err = errExt(int8(p[1]), e.ExtensionType())
+			return
+		}
+		p, err = m.R.Peek(10)
+		if err != nil {
+			return
+		}
+		err = e.UnmarshalBinary(p[2:])
+		if err == nil {
+			_, err = m.R.Skip(10)
+		}
+		return
+
+	case mfixext16:
+		if int8(p[1]) != e.ExtensionType() {
+			err = errExt(int8(p[1]), e.ExtensionType())
+			return
+		}
+		p, err = m.R.Peek(18)
+		if err != nil {
+			return
+		}
+		err = e.UnmarshalBinary(p[2:])
+		if err == nil {
+			_, err = m.R.Skip(18)
+		}
+		return
+
+	case mext8:
+		p, err = m.R.Peek(3)
+		if err != nil {
+			return
+		}
+		if int8(p[2]) != e.ExtensionType() {
+			err = errExt(int8(p[2]), e.ExtensionType())
+			return
+		}
+		read = int(uint8(p[1]))
+		off = 3
+
+	case mext16:
+		p, err = m.R.Peek(4)
+		if err != nil {
+			return
+		}
+		if int8(p[3]) != e.ExtensionType() {
+			err = errExt(int8(p[3]), e.ExtensionType())
+			return
+		}
+		read = int(big.Uint16(p[1:]))
+		off = 4
+
+	case mext32:
+		p, err = m.R.Peek(6)
+		if err != nil {
+			return
+		}
+		if int8(p[5]) != e.ExtensionType() {
+			err = errExt(int8(p[5]), e.ExtensionType())
+			return
+		}
+		read = int(big.Uint32(p[1:]))
+		off = 6
+
+	default:
+		err = badPrefix(ExtensionType, lead)
+		return
+	}
+
+	p, err = m.R.Peek(read + off)
+	if err != nil {
+		return
+	}
+	err = e.UnmarshalBinary(p[off:])
+	if err == nil {
+		_, err = m.R.Skip(read + off)
+	}
+	return
+}
+
+// AppendExtension appends a MessagePack extension to the provided slice
+func AppendExtension(b []byte, e Extension) ([]byte, error) {
+	l := e.Len()
+	var o []byte
+	var n int
+	switch l {
+	case 0:
+		o, n = ensure(b, 3)
+		o[n] = mext8
+		o[n+1] = 0
+		o[n+2] = byte(e.ExtensionType())
+		return o[:n+3], nil
+	case 1:
+		o, n = ensure(b, 3)
+		o[n] = mfixext1
+		o[n+1] = byte(e.ExtensionType())
+		n += 2
+	case 2:
+		o, n = ensure(b, 4)
+		o[n] = mfixext2
+		o[n+1] = byte(e.ExtensionType())
+		n += 2
+	case 4:
+		o, n = ensure(b, 6)
+		o[n] = mfixext4
+		o[n+1] = byte(e.ExtensionType())
+		n += 2
+	case 8:
+		o, n = ensure(b, 10)
+		o[n] = mfixext8
+		o[n+1] = byte(e.ExtensionType())
+		n += 2
+	case 16:
+		o, n = ensure(b, 18)
+		o[n] = mfixext16
+		o[n+1] = byte(e.ExtensionType())
+		n += 2
+	default:
+		switch {
+		case l < math.MaxUint8:
+			o, n = ensure(b, l+3)
+			o[n] = mext8
+			o[n+1] = byte(uint8(l))
+			o[n+2] = byte(e.ExtensionType())
+			n += 3
+		case l < math.MaxUint16:
+			o, n = ensure(b, l+4)
+			o[n] = mext16
+			big.PutUint16(o[n+1:], uint16(l))
+			o[n+3] = byte(e.ExtensionType())
+			n += 4
+		default:
+			o, n = ensure(b, l+6)
+			o[n] = mext32
+			big.PutUint32(o[n+1:], uint32(l))
+			o[n+5] = byte(e.ExtensionType())
+			n += 6
+		}
+	}
+	return o, e.MarshalBinaryTo(o[n:])
+}
+
+// ReadExtensionBytes reads an extension from 'b' into 'e'
+// and returns any remaining bytes.
+// Possible errors:
+// - ErrShortBytes ('b' not long enough)
+// - ExtensionTypeError{} (wire type not the same as e.Type())
+// - TypeError{} (next object not an extension)
+// - InvalidPrefixError
+// - An unmarshal error returned from e.UnmarshalBinary
+func ReadExtensionBytes(b []byte, e Extension) ([]byte, error) {
+	l := len(b)
+	if l < 3 {
+		return b, ErrShortBytes
+	}
+	lead := b[0]
+	var (
+		sz  int // size of 'data'
+		off int // offset of 'data'
+		typ int8
+	)
+	switch lead {
+	case mfixext1:
+		typ = int8(b[1])
+		sz = 1
+		off = 2
+	case mfixext2:
+		typ = int8(b[1])
+		sz = 2
+		off = 2
+	case mfixext4:
+		typ = int8(b[1])
+		sz = 4
+		off = 2
+	case mfixext8:
+		typ = int8(b[1])
+		sz = 8
+		off = 2
+	case mfixext16:
+		typ = int8(b[1])
+		sz = 16
+		off = 2
+	case mext8:
+		sz = int(uint8(b[1]))
+		typ = int8(b[2])
+		off = 3
+		if sz == 0 {
+			return b[3:], e.UnmarshalBinary(b[3:3])
+		}
+	case mext16:
+		if l < 4 {
+			return b, ErrShortBytes
+		}
+		sz = int(big.Uint16(b[1:]))
+		typ = int8(b[3])
+		off = 4
+	case mext32:
+		if l < 6 {
+			return b, ErrShortBytes
+		}
+		sz = int(big.Uint32(b[1:]))
+		typ = int8(b[5])
+		off = 6
+	default:
+		return b, badPrefix(ExtensionType, lead)
+	}
+
+	if typ != e.ExtensionType() {
+		return b, errExt(typ, e.ExtensionType())
+	}
+
+	// the data of the extension starts
+	// at 'off' and is 'sz' bytes long
+	if len(b[off:]) < sz {
+		return b, ErrShortBytes
+	}
+	tot := off + sz
+	return b[tot:], e.UnmarshalBinary(b[off:tot])
+}
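A minimal sketch of the extension round trip using the RawExtension, AppendExtension, and ReadExtensionBytes definitions above; extension type 42 is an arbitrary assumption (anything outside the reserved 3, 4, 5 and the negative range works):

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Encode an application-defined extension payload.
	in := &msgp.RawExtension{Type: 42, Data: []byte("hello")}
	b, err := msgp.AppendExtension(nil, in)
	if err != nil {
		panic(err)
	}

	// Decode it back. ReadExtensionBytes verifies that the wire type
	// matches out.ExtensionType(), so Type must be set before reading.
	out := msgp.RawExtension{Type: 42}
	if _, err := msgp.ReadExtensionBytes(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(string(out.Data)) // hello
}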
diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go
new file mode 100644
index 0000000000..0f2c375209
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/file.go
@@ -0,0 +1,93 @@
+//go:build (linux || darwin || dragonfly || freebsd || netbsd || openbsd) && !appengine && !tinygo
+// +build linux darwin dragonfly freebsd netbsd openbsd
+// +build !appengine
+// +build !tinygo
+
+package msgp
+
+import (
+	"os"
+	"syscall"
+)
+
+// ReadFile reads a file into 'dst' using
+// a read-only memory mapping. Consequently,
+// the file must be mmap-able, and the
+// Unmarshaler should never write to
+// the source memory. (Methods generated
+// by the msgp tool obey that constraint, but
+// user-defined implementations may not.)
+//
+// Reading and writing through file mappings
+// is only efficient for large files; small
+// files are best read and written using
+// the ordinary streaming interfaces.
+func ReadFile(dst Unmarshaler, file *os.File) error {
+	stat, err := file.Stat()
+	if err != nil {
+		return err
+	}
+	data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
+	if err != nil {
+		return err
+	}
+	adviseRead(data)
+	_, err = dst.UnmarshalMsg(data)
+	uerr := syscall.Munmap(data)
+	if err == nil {
+		err = uerr
+	}
+	return err
+}
+
+// MarshalSizer is the combination
+// of the Marshaler and Sizer
+// interfaces.
+type MarshalSizer interface {
+	Marshaler
+	Sizer
+}
+
+// WriteFile writes a file from 'src' using
+// memory mapping. It overwrites the entire
+// contents of the previous file.
+// The mapping size is calculated
+// using the `Msgsize()` method
+// of 'src', so it must produce a result
+// equal to or greater than the actual encoded
+// size of the object. Otherwise,
+// a fault (SIGBUS) will occur.
+//
+// Reading and writing through file mappings
+// is only efficient for large files; small
+// files are best read and written using
+// the ordinary streaming interfaces.
+//
+// NOTE: The performance of this call
+// is highly OS- and filesystem-dependent.
+// Users should take care to test that this
+// performs as expected in a production environment.
+// (Linux users should run a kernel and filesystem
+// that support fallocate(2) for the best results.)
+func WriteFile(src MarshalSizer, file *os.File) error {
+	sz := src.Msgsize()
+	err := fallocate(file, int64(sz))
+	if err != nil {
+		return err
+	}
+	data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
+	if err != nil {
+		return err
+	}
+	adviseWrite(data)
+	chunk := data[:0]
+	chunk, err = src.MarshalMsg(chunk)
+	if err != nil {
+		return err
+	}
+	uerr := syscall.Munmap(data)
+	if uerr != nil {
+		return uerr
+	}
+	return file.Truncate(int64(len(chunk)))
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go
new file mode 100644
index 0000000000..2bbb3ad13a
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go
@@ -0,0 +1,48 @@
+//go:build windows || appengine || tinygo
+// +build windows appengine tinygo
+
+package msgp
+
+import (
+	"io/ioutil"
+	"os"
+)
+
+// MarshalSizer is the combination
+// of the Marshaler and Sizer
+// interfaces.
+type MarshalSizer interface {
+	Marshaler
+	Sizer
+}
+
+func ReadFile(dst Unmarshaler, file *os.File) error {
+	if u, ok := dst.(Decodable); ok {
+		return u.DecodeMsg(NewReader(file))
+	}
+
+	data, err := ioutil.ReadAll(file)
+	if err != nil {
+		return err
+	}
+	_, err = dst.UnmarshalMsg(data)
+	return err
+}
+
+func WriteFile(src MarshalSizer, file *os.File) error {
+	if e, ok := src.(Encodable); ok {
+		w := NewWriter(file)
+		err := e.EncodeMsg(w)
+		if err == nil {
+			err = w.Flush()
+		}
+		return err
+	}
+
+	raw, err := src.MarshalMsg(nil)
+	if err != nil {
+		return err
+	}
+	_, err = file.Write(raw)
+	return err
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/integers.go b/vendor/github.com/tinylib/msgp/msgp/integers.go
new file mode 100644
index 0000000000..f817d77598
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/integers.go
@@ -0,0 +1,174 @@
+package msgp
+
+/* ----------------------------------
+	integer encoding utilities
+	(inline-able)
+
+	TODO(tinylib): there are faster,
+	albeit non-portable solutions
+	to the code below. implement
+	byteswap?
+   ---------------------------------- */
+
+func putMint64(b []byte, i int64) {
+	b[0] = mint64
+	b[1] = byte(i >> 56)
+	b[2] = byte(i >> 48)
+	b[3] = byte(i >> 40)
+	b[4] = byte(i >> 32)
+	b[5] = byte(i >> 24)
+	b[6] = byte(i >> 16)
+	b[7] = byte(i >> 8)
+	b[8] = byte(i)
+}
+
+func getMint64(b []byte) int64 {
+	return (int64(b[1]) << 56) | (int64(b[2]) << 48) |
+		(int64(b[3]) << 40) | (int64(b[4]) << 32) |
+		(int64(b[5]) << 24) | (int64(b[6]) << 16) |
+		(int64(b[7]) << 8) | (int64(b[8]))
+}
+
+func putMint32(b []byte, i int32) {
+	b[0] = mint32
+	b[1] = byte(i >> 24)
+	b[2] = byte(i >> 16)
+	b[3] = byte(i >> 8)
+	b[4] = byte(i)
+}
+
+func getMint32(b []byte) int32 {
+	return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4]))
+}
+
+func putMint16(b []byte, i int16) {
+	b[0] = mint16
+	b[1] = byte(i >> 8)
+	b[2] = byte(i)
+}
+
+func getMint16(b []byte) (i int16) {
+	return (int16(b[1]) << 8) | int16(b[2])
+}
+
+func putMint8(b []byte, i int8) {
+	b[0] = mint8
+	b[1] = byte(i)
+}
+
+func getMint8(b []byte) (i int8) {
+	return int8(b[1])
+}
+
+func putMuint64(b []byte, u uint64) {
+	b[0] = muint64
+	b[1] = byte(u >> 56)
+	b[2] = byte(u >> 48)
+	b[3] = byte(u >> 40)
+	b[4] = byte(u >> 32)
+	b[5] = byte(u >> 24)
+	b[6] = byte(u >> 16)
+	b[7] = byte(u >> 8)
+	b[8] = byte(u)
+}
+
+func getMuint64(b []byte) uint64 {
+	return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) |
+		(uint64(b[3]) << 40) | (uint64(b[4]) << 32) |
+		(uint64(b[5]) << 24) | (uint64(b[6]) << 16) |
+		(uint64(b[7]) << 8) | (uint64(b[8]))
+}
+
+func putMuint32(b []byte, u uint32) {
+	b[0] = muint32
+	b[1] = byte(u >> 24)
+	b[2] = byte(u >> 16)
+	b[3] = byte(u >> 8)
+	b[4] = byte(u)
+}
+
+func getMuint32(b []byte) uint32 {
+	return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4]))
+}
+
+func putMuint16(b []byte, u uint16) {
+	b[0] = muint16
+	b[1] = byte(u >> 8)
+	b[2] = byte(u)
+}
+
+func getMuint16(b []byte) uint16 {
+	return (uint16(b[1]) << 8) | uint16(b[2])
+}
+
+func putMuint8(b []byte, u uint8) {
+	b[0] = muint8
+	b[1] = byte(u)
+}
+
+func getMuint8(b []byte) uint8 {
+	return uint8(b[1])
+}
+
+func getUnix(b []byte) (sec int64, nsec int32) {
+	sec = (int64(b[0]) << 56) | (int64(b[1]) << 48) |
+		(int64(b[2]) << 40) | (int64(b[3]) << 32) |
+		(int64(b[4]) << 24) | (int64(b[5]) << 16) |
+		(int64(b[6]) << 8) | (int64(b[7]))
+
+	nsec = (int32(b[8]) << 24) | (int32(b[9]) << 16) | (int32(b[10]) << 8) | (int32(b[11]))
+	return
+}
+
+func putUnix(b []byte, sec int64, nsec int32) {
+	b[0] = byte(sec >> 56)
+	b[1] = byte(sec >> 48)
+	b[2] = byte(sec >> 40)
+	b[3] = byte(sec >> 32)
+	b[4] = byte(sec >> 24)
+	b[5] = byte(sec >> 16)
+	b[6] = byte(sec >> 8)
+	b[7] = byte(sec)
+	b[8] = byte(nsec >> 24)
+	b[9] = byte(nsec >> 16)
+	b[10] = byte(nsec >> 8)
+	b[11] = byte(nsec)
+}
+
+/* -----------------------------
+		prefix utilities
+   ----------------------------- */
+
+// write prefix and uint8
+func prefixu8(b []byte, pre byte, sz uint8) {
+	b[0] = pre
+	b[1] = byte(sz)
+}
+
+// write prefix and big-endian uint16
+func prefixu16(b []byte, pre byte, sz uint16) {
+	b[0] = pre
+	b[1] = byte(sz >> 8)
+	b[2] = byte(sz)
+}
+
+// write prefix and big-endian uint32
+func prefixu32(b []byte, pre byte, sz uint32) {
+	b[0] = pre
+	b[1] = byte(sz >> 24)
+	b[2] = byte(sz >> 16)
+	b[3] = byte(sz >> 8)
+	b[4] = byte(sz)
+}
+
+func prefixu64(b []byte, pre byte, sz uint64) {
+	b[0] = pre
+	b[1] = byte(sz >> 56)
+	b[2] = byte(sz >> 48)
+	b[3] = byte(sz >> 40)
+	b[4] = byte(sz >> 32)
+	b[5] = byte(sz >> 24)
+	b[6] = byte(sz >> 16)
+	b[7] = byte(sz >> 8)
+	b[8] = byte(sz)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go
new file mode 100644
index 0000000000..0e11e603c0
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/json.go
@@ -0,0 +1,568 @@
+package msgp
+
+import (
+	"bufio"
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"strconv"
+	"unicode/utf8"
+)
+
+var (
+	null = []byte("null")
+	hex  = []byte("0123456789abcdef")
+)
+
+var defuns [_maxtype]func(jsWriter, *Reader) (int, error)
+
+// note: there is an initialization loop if
+// this isn't set up during init()
+func init() {
+	// since none of these functions are inline-able,
+	// there is not much of a penalty to the indirect
+	// call. however, this is best expressed as a jump-table...
+	defuns = [_maxtype]func(jsWriter, *Reader) (int, error){
+		StrType:        rwString,
+		BinType:        rwBytes,
+		MapType:        rwMap,
+		ArrayType:      rwArray,
+		Float64Type:    rwFloat64,
+		Float32Type:    rwFloat32,
+		BoolType:       rwBool,
+		IntType:        rwInt,
+		UintType:       rwUint,
+		NilType:        rwNil,
+		ExtensionType:  rwExtension,
+		Complex64Type:  rwExtension,
+		Complex128Type: rwExtension,
+		TimeType:       rwTime,
+	}
+}
+
+// this is the interface
+// used to write json
+type jsWriter interface {
+	io.Writer
+	io.ByteWriter
+	WriteString(string) (int, error)
+}
+
+// CopyToJSON reads MessagePack from 'src' and copies it
+// as JSON to 'dst' until EOF.
+func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) {
+	r := NewReader(src)
+	n, err = r.WriteToJSON(dst)
+	freeR(r)
+	return
+}
+
+// WriteToJSON translates MessagePack from 'r' and writes it as
+// JSON to 'w' until the underlying reader returns io.EOF. It returns
+// the number of bytes written, and an error if it stopped before EOF.
+func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) {
+	var j jsWriter
+	var bf *bufio.Writer
+	if jsw, ok := w.(jsWriter); ok {
+		j = jsw
+	} else {
+		bf = bufio.NewWriter(w)
+		j = bf
+	}
+	var nn int
+	for err == nil {
+		nn, err = rwNext(j, r)
+		n += int64(nn)
+	}
+	if err != io.EOF {
+		if bf != nil {
+			bf.Flush()
+		}
+		return
+	}
+	err = nil
+	if bf != nil {
+		err = bf.Flush()
+	}
+	return
+}
+
+func rwNext(w jsWriter, src *Reader) (int, error) {
+	t, err := src.NextType()
+	if err != nil {
+		return 0, err
+	}
+	return defuns[t](w, src)
+}
+
+func rwMap(dst jsWriter, src *Reader) (n int, err error) {
+	var comma bool
+	var sz uint32
+	var field []byte
+
+	sz, err = src.ReadMapHeader()
+	if err != nil {
+		return
+	}
+
+	if sz == 0 {
+		return dst.WriteString("{}")
+	}
+
+	err = dst.WriteByte('{')
+	if err != nil {
+		return
+	}
+	n++
+	var nn int
+	for i := uint32(0); i < sz; i++ {
+		if comma {
+			err = dst.WriteByte(',')
+			if err != nil {
+				return
+			}
+			n++
+		}
+
+		field, err = src.ReadMapKeyPtr()
+		if err != nil {
+			return
+		}
+		nn, err = rwquoted(dst, field)
+		n += nn
+		if err != nil {
+			return
+		}
+
+		err = dst.WriteByte(':')
+		if err != nil {
+			return
+		}
+		n++
+		nn, err = rwNext(dst, src)
+		n += nn
+		if err != nil {
+			return
+		}
+		if !comma {
+			comma = true
+		}
+	}
+
+	err = dst.WriteByte('}')
+	if err != nil {
+		return
+	}
+	n++
+	return
+}
+
+func rwArray(dst jsWriter, src *Reader) (n int, err error) {
+	err = dst.WriteByte('[')
+	if err != nil {
+		return
+	}
+	var sz uint32
+	var nn int
+	sz, err = src.ReadArrayHeader()
+	if err != nil {
+		return
+	}
+	comma := false
+	for i := uint32(0); i < sz; i++ {
+		if comma {
+			err = dst.WriteByte(',')
+			if err != nil {
+				return
+			}
+			n++
+		}
+		nn, err = rwNext(dst, src)
+		n += nn
+		if err != nil {
+			return
+		}
+		comma = true
+	}
+
+	err = dst.WriteByte(']')
+	if err != nil {
+		return
+	}
+	n++
+	return
+}
+
+func rwNil(dst jsWriter, src *Reader) (int, error) {
+	err := src.ReadNil()
+	if err != nil {
+		return 0, err
+	}
+	return dst.Write(null)
+}
+
+func rwFloat32(dst jsWriter, src *Reader) (int, error) {
+	f, err := src.ReadFloat32()
+	if err != nil {
+		return 0, err
+	}
+	src.scratch = strconv.AppendFloat(src.scratch[:0], float64(f), 'f', -1, 32)
+	return dst.Write(src.scratch)
+}
+
+func rwFloat64(dst jsWriter, src *Reader) (int, error) {
+	f, err := src.ReadFloat64()
+	if err != nil {
+		return 0, err
+	}
+	src.scratch = strconv.AppendFloat(src.scratch[:0], f, 'f', -1, 64)
+	return dst.Write(src.scratch)
+}
+
+func rwInt(dst jsWriter, src *Reader) (int, error) {
+	i, err := src.ReadInt64()
+	if err != nil {
+		return 0, err
+	}
+	src.scratch = strconv.AppendInt(src.scratch[:0], i, 10)
+	return dst.Write(src.scratch)
+}
+
+func rwUint(dst jsWriter, src *Reader) (int, error) {
+	u, err := src.ReadUint64()
+	if err != nil {
+		return 0, err
+	}
+	src.scratch = strconv.AppendUint(src.scratch[:0], u, 10)
+	return dst.Write(src.scratch)
+}
+
+func rwBool(dst jsWriter, src *Reader) (int, error) {
+	b, err := src.ReadBool()
+	if err != nil {
+		return 0, err
+	}
+	if b {
+		return dst.WriteString("true")
+	}
+	return dst.WriteString("false")
+}
+
+func rwTime(dst jsWriter, src *Reader) (int, error) {
+	t, err := src.ReadTime()
+	if err != nil {
+		return 0, err
+	}
+	bts, err := t.MarshalJSON()
+	if err != nil {
+		return 0, err
+	}
+	return dst.Write(bts)
+}
+
+func rwExtension(dst jsWriter, src *Reader) (n int, err error) {
+	et, err := src.peekExtensionType()
+	if err != nil {
+		return 0, err
+	}
+
+	// registered extensions can override
+	// the JSON encoding
+	if j, ok := extensionReg[et]; ok {
+		var bts []byte
+		e := j()
+		err = src.ReadExtension(e)
+		if err != nil {
+			return
+		}
+		bts, err = json.Marshal(e)
+		if err != nil {
+			return
+		}
+		return dst.Write(bts)
+	}
+
+	e := RawExtension{}
+	e.Type = et
+	err = src.ReadExtension(&e)
+	if err != nil {
+		return
+	}
+
+	var nn int
+	err = dst.WriteByte('{')
+	if err != nil {
+		return
+	}
+	n++
+
+	nn, err = dst.WriteString(`"type":`)
+	n += nn
+	if err != nil {
+		return
+	}
+
+	src.scratch = strconv.AppendInt(src.scratch[0:0], int64(e.Type), 10)
+	nn, err = dst.Write(src.scratch)
+	n += nn
+	if err != nil {
+		return
+	}
+
+	nn, err = dst.WriteString(`,"data":"`)
+	n += nn
+	if err != nil {
+		return
+	}
+
+	enc := base64.NewEncoder(base64.StdEncoding, dst)
+
+	nn, err = enc.Write(e.Data)
+	n += nn
+	if err != nil {
+		return
+	}
+	err = enc.Close()
+	if err != nil {
+		return
+	}
+	nn, err = dst.WriteString(`"}`)
+	n += nn
+	return
+}
+
+func rwString(dst jsWriter, src *Reader) (n int, err error) {
+	var p []byte
+	p, err = src.R.Peek(1)
+	if err != nil {
+		return
+	}
+	lead := p[0]
+	var read int
+
+	if isfixstr(lead) {
+		read = int(rfixstr(lead))
+		src.R.Skip(1)
+		goto write
+	}
+
+	switch lead {
+	case mstr8:
+		p, err = src.R.Next(2)
+		if err != nil {
+			return
+		}
+		read = int(uint8(p[1]))
+	case mstr16:
+		p, err = src.R.Next(3)
+		if err != nil {
+			return
+		}
+		read = int(big.Uint16(p[1:]))
+	case mstr32:
+		p, err = src.R.Next(5)
+		if err != nil {
+			return
+		}
+		read = int(big.Uint32(p[1:]))
+	default:
+		err = badPrefix(StrType, lead)
+		return
+	}
+write:
+	p, err = src.R.Next(read)
+	if err != nil {
+		return
+	}
+	n, err = rwquoted(dst, p)
+	return
+}
+
+func rwBytes(dst jsWriter, src *Reader) (n int, err error) {
+	var nn int
+	err = dst.WriteByte('"')
+	if err != nil {
+		return
+	}
+	n++
+	src.scratch, err = src.ReadBytes(src.scratch[:0])
+	if err != nil {
+		return
+	}
+	enc := base64.NewEncoder(base64.StdEncoding, dst)
+	nn, err = enc.Write(src.scratch)
+	n += nn
+	if err != nil {
+		return
+	}
+	err = enc.Close()
+	if err != nil {
+		return
+	}
+	err = dst.WriteByte('"')
+	if err != nil {
+		return
+	}
+	n++
+	return
+}
+
+// Below (c) The Go Authors, 2009-2014
+// Subject to the BSD-style license found at http://golang.org
+//
+// see: encoding/json/encode.go:(*encodeState).stringbytes()
+func rwquoted(dst jsWriter, s []byte) (n int, err error) {
+	var nn int
+	err = dst.WriteByte('"')
+	if err != nil {
+		return
+	}
+	n++
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+				i++
+				continue
+			}
+			if start < i {
+				nn, err = dst.Write(s[start:i])
+				n += nn
+				if err != nil {
+					return
+				}
+			}
+			switch b {
+			case '\\', '"':
+				err = dst.WriteByte('\\')
+				if err != nil {
+					return
+				}
+				n++
+				err = dst.WriteByte(b)
+				if err != nil {
+					return
+				}
+				n++
+			case '\n':
+				err = dst.WriteByte('\\')
+				if err != nil {
+					return
+				}
+				n++
+				err = dst.WriteByte('n')
+				if err != nil {
+					return
+				}
+				n++
+			case '\r':
+				err = dst.WriteByte('\\')
+				if err != nil {
+					return
+				}
+				n++
+				err = dst.WriteByte('r')
+				if err != nil {
+					return
+				}
+				n++
+			case '\t':
+				err = dst.WriteByte('\\')
+				if err != nil {
+					return
+				}
+				n++
+				err = dst.WriteByte('t')
+				if err != nil {
+					return
+				}
+				n++
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// It also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				nn, err = dst.WriteString(`\u00`)
+				n += nn
+				if err != nil {
+					return
+				}
+				err = dst.WriteByte(hex[b>>4])
+				if err != nil {
+					return
+				}
+				n++
+				err = dst.WriteByte(hex[b&0xF])
+				if err != nil {
+					return
+				}
+				n++
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRune(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				nn, err = dst.Write(s[start:i])
+				n += nn
+				if err != nil {
+					return
+				}
+			}
+			nn, err = dst.WriteString(`\ufffd`)
+			n += nn
+			if err != nil {
+				return
+			}
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				nn, err = dst.Write(s[start:i])
+				n += nn
+				if err != nil {
+					return
+				}
+			}
+			nn, err = dst.WriteString(`\u202`)
+			n += nn
+			if err != nil {
+				return
+			}
+			err = dst.WriteByte(hex[c&0xF])
+			if err != nil {
+				return
+			}
+			n++
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		nn, err = dst.Write(s[start:])
+		n += nn
+		if err != nil {
+			return
+		}
+	}
+	err = dst.WriteByte('"')
+	if err != nil {
+		return
+	}
+	n++
+	return
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go
new file mode 100644
index 0000000000..e6162d0a60
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go
@@ -0,0 +1,341 @@
+package msgp
+
+import (
+	"bufio"
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"strconv"
+	"time"
+)
+
+var unfuns [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error)
+
+func init() {
+	// NOTE(pmh): this is best expressed as a jump table,
+	// but gc doesn't do that yet. revisit post-go1.5.
+	unfuns = [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error){
+		StrType:        rwStringBytes,
+		BinType:        rwBytesBytes,
+		MapType:        rwMapBytes,
+		ArrayType:      rwArrayBytes,
+		Float64Type:    rwFloat64Bytes,
+		Float32Type:    rwFloat32Bytes,
+		BoolType:       rwBoolBytes,
+		IntType:        rwIntBytes,
+		UintType:       rwUintBytes,
+		NilType:        rwNullBytes,
+		ExtensionType:  rwExtensionBytes,
+		Complex64Type:  rwExtensionBytes,
+		Complex128Type: rwExtensionBytes,
+		TimeType:       rwTimeBytes,
+	}
+}
+
+// UnmarshalAsJSON takes raw messagepack and writes
+// it as JSON to 'w'. If an error is returned, the
+// bytes not translated will also be returned. If
+// no errors are encountered, the length of the returned
+// slice will be zero.
+func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) {
+	var (
+		scratch []byte
+		cast    bool
+		dst     jsWriter
+		err     error
+	)
+	if jsw, ok := w.(jsWriter); ok {
+		dst = jsw
+		cast = true
+	} else {
+		dst = bufio.NewWriterSize(w, 512)
+	}
+	for len(msg) > 0 && err == nil {
+		msg, scratch, err = writeNext(dst, msg, scratch)
+	}
+	if !cast && err == nil {
+		err = dst.(*bufio.Writer).Flush()
+	}
+	return msg, err
+}
+
+func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	if len(msg) < 1 {
+		return msg, scratch, ErrShortBytes
+	}
+	t := getType(msg[0])
+	if t == InvalidType {
+		return msg, scratch, InvalidPrefixError(msg[0])
+	}
+	if t == ExtensionType {
+		et, err := peekExtension(msg)
+		if err != nil {
+			return nil, scratch, err
+		}
+		if et == TimeExtension {
+			t = TimeType
+		}
+	}
+	return unfuns[t](w, msg, scratch)
+}
+
+func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	sz, msg, err := ReadArrayHeaderBytes(msg)
+	if err != nil {
+		return msg, scratch, err
+	}
+	err = w.WriteByte('[')
+	if err != nil {
+		return msg, scratch, err
+	}
+	for i := uint32(0); i < sz; i++ {
+		if i != 0 {
+			err = w.WriteByte(',')
+			if err != nil {
+				return msg, scratch, err
+			}
+		}
+		msg, scratch, err = writeNext(w, msg, scratch)
+		if err != nil {
+			return msg, scratch, err
+		}
+	}
+	err = w.WriteByte(']')
+	return msg, scratch, err
+}
+
+func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	sz, msg, err := ReadMapHeaderBytes(msg)
+	if err != nil {
+		return msg, scratch, err
+	}
+	err = w.WriteByte('{')
+	if err != nil {
+		return msg, scratch, err
+	}
+	for i := uint32(0); i < sz; i++ {
+		if i != 0 {
+			err = w.WriteByte(',')
+			if err != nil {
+				return msg, scratch, err
+			}
+		}
+		msg, scratch, err = rwMapKeyBytes(w, msg, scratch)
+		if err != nil {
+			return msg, scratch, err
+		}
+		err = w.WriteByte(':')
+		if err != nil {
+			return msg, scratch, err
+		}
+		msg, scratch, err = writeNext(w, msg, scratch)
+		if err != nil {
+			return msg, scratch, err
+		}
+	}
+	err = w.WriteByte('}')
+	return msg, scratch, err
+}
+
+func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	msg, scratch, err := rwStringBytes(w, msg, scratch)
+	if err != nil {
+		if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType {
+			return rwBytesBytes(w, msg, scratch)
+		}
+	}
+	return msg, scratch, err
+}
+
+func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	str, msg, err := ReadStringZC(msg)
+	if err != nil {
+		return msg, scratch, err
+	}
+	_, err = rwquoted(w, str)
+	return msg, scratch, err
+}
+
+func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	bts, msg, err := ReadBytesZC(msg)
+	if err != nil {
+		return msg, scratch, err
+	}
+	l := base64.StdEncoding.EncodedLen(len(bts))
+	if cap(scratch) >= l {
+		scratch = scratch[0:l]
+	} else {
+		scratch = make([]byte, l)
+	}
+	base64.StdEncoding.Encode(scratch, bts)
+	err = w.WriteByte('"')
+	if err != nil {
+		return msg, scratch, err
+	}
+	_, err = w.Write(scratch)
+	if err != nil {
+		return msg, scratch, err
+	}
+	err = w.WriteByte('"')
+	return msg, scratch, err
+}
+
+func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	msg, err := ReadNilBytes(msg)
+	if err != nil {
+		return msg, scratch, err
+	}
+	_, err = w.Write(null)
+	return msg, scratch, err
+}
+
+func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	b, msg, err := ReadBoolBytes(msg)
+	if err != nil {
+		return msg, scratch, err
+	}
+	if b {
+		_, err = w.WriteString("true")
+		return msg, scratch, err
+	}
+	_, err = w.WriteString("false")
+	return msg, scratch, err
+}
+
+func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	i, msg, err := ReadInt64Bytes(msg)
+	if err != nil {
+		return msg, scratch, err
+	}
+	scratch = strconv.AppendInt(scratch[0:0], i, 10)
+	_, err = w.Write(scratch)
+	return msg, scratch, err
+}
+
+func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	u, msg, err := ReadUint64Bytes(msg)
+	if err != nil {
+		return msg, scratch, err
+	}
+	scratch = strconv.AppendUint(scratch[0:0], u, 10)
+	_, err = w.Write(scratch)
+	return msg, scratch, err
+}
+
+func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	var f float32
+	var err error
+	f, msg, err = ReadFloat32Bytes(msg)
+	if err != nil {
+		return msg, scratch, err
+	}
+	scratch = strconv.AppendFloat(scratch[:0], float64(f), 'f', -1, 32)
+	_, err = w.Write(scratch)
+	return msg, scratch, err
+}
+
+func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	var f float64
+	var err error
+	f, msg, err = ReadFloat64Bytes(msg)
+	if err != nil {
+		return msg, scratch, err
+	}
+	scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)
+	_, err = w.Write(scratch)
+	return msg, scratch, err
+}
+
+func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	var t time.Time
+	var err error
+	t, msg, err = ReadTimeBytes(msg)
+	if err != nil {
+		return msg, scratch, err
+	}
+	bts, err := t.MarshalJSON()
+	if err != nil {
+		return msg, scratch, err
+	}
+	_, err = w.Write(bts)
+	return msg, scratch, err
+}
+
+func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+	var err error
+	var et int8
+	et, err = peekExtension(msg)
+	if err != nil {
+		return msg, scratch, err
+	}
+
+	// if it's time.Time
+	if et == TimeExtension {
+		var tm time.Time
+		tm, msg, err = ReadTimeBytes(msg)
+		if err != nil {
+			return msg, scratch, err
+		}
+		bts, err := tm.MarshalJSON()
+		if err != nil {
+			return msg, scratch, err
+		}
+		_, err = w.Write(bts)
+		return msg, scratch, err
+	}
+
+	// if the extension is registered,
+	// use its canonical JSON form
+	if f, ok := extensionReg[et]; ok {
+		e := f()
+		msg, err = ReadExtensionBytes(msg, e)
+		if err != nil {
+			return msg, scratch, err
+		}
+		bts, err := json.Marshal(e)
+		if err != nil {
+			return msg, scratch, err
+		}
+		_, err = w.Write(bts)
+		return msg, scratch, err
+	}
+
+	// otherwise, write `{"type": <num>, "data": "<base64data>"}`
+	r := RawExtension{}
+	r.Type = et
+	msg, err = ReadExtensionBytes(msg, &r)
+	if err != nil {
+		return msg, scratch, err
+	}
+	scratch, err = writeExt(w, r, scratch)
+	return msg, scratch, err
+}
+
+func writeExt(w jsWriter, r RawExtension, scratch []byte) ([]byte, error) {
+	_, err := w.WriteString(`{"type":`)
+	if err != nil {
+		return scratch, err
+	}
+	scratch = strconv.AppendInt(scratch[0:0], int64(r.Type), 10)
+	_, err = w.Write(scratch)
+	if err != nil {
+		return scratch, err
+	}
+	_, err = w.WriteString(`,"data":"`)
+	if err != nil {
+		return scratch, err
+	}
+	l := base64.StdEncoding.EncodedLen(len(r.Data))
+	if cap(scratch) >= l {
+		scratch = scratch[0:l]
+	} else {
+		scratch = make([]byte, l)
+	}
+	base64.StdEncoding.Encode(scratch, r.Data)
+	_, err = w.Write(scratch)
+	if err != nil {
+		return scratch, err
+	}
+	_, err = w.WriteString(`"}`)
+	return scratch, err
+}
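
As context for reviewers (not part of the vendored file): a minimal sketch of how the UnmarshalAsJSON entry point above can be exercised. The Append* helpers used to build the raw MessagePack (AppendMapHeader, AppendString, AppendInt) come from the same msgp package but are not shown in this hunk, so treat this as an illustrative sketch rather than part of the patch.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Build a tiny MessagePack map {"name":"chains","count":3} and render it as JSON.
	var raw []byte
	raw = msgp.AppendMapHeader(raw, 2)
	raw = msgp.AppendString(raw, "name")
	raw = msgp.AppendString(raw, "chains")
	raw = msgp.AppendString(raw, "count")
	raw = msgp.AppendInt(raw, 3)

	var out bytes.Buffer
	// UnmarshalAsJSON returns any untranslated bytes; empty on success.
	if _, err := msgp.UnmarshalAsJSON(&out, raw); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // {"name":"chains","count":3}
}
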
diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go
new file mode 100644
index 0000000000..edfe328b44
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/number.go
@@ -0,0 +1,266 @@
+package msgp
+
+import (
+	"math"
+	"strconv"
+)
+
+// The portable parts of the Number implementation
+
+// Number can be
+// an int64, uint64, float32,
+// or float64 internally.
+// It can decode itself
+// from any of the native
+// messagepack number types.
+// The zero-value of Number
+// is Int(0). Using the equality
+// operator with Number compares
+// both the type and the value
+// of the number.
+type Number struct {
+	// internally, this
+	// is just a tagged union.
+	// the raw bits of the number
+	// are stored the same way regardless.
+	bits uint64
+	typ  Type
+}
+
+// AsInt sets the number to an int64.
+func (n *Number) AsInt(i int64) {
+	// we always store int(0)
+	// as {0, InvalidType} in
+	// order to preserve
+	// the behavior of the == operator
+	if i == 0 {
+		n.typ = InvalidType
+		n.bits = 0
+		return
+	}
+
+	n.typ = IntType
+	n.bits = uint64(i)
+}
+
+// AsUint sets the number to a uint64.
+func (n *Number) AsUint(u uint64) {
+	n.typ = UintType
+	n.bits = u
+}
+
+// AsFloat32 sets the value of the number
+// to a float32.
+func (n *Number) AsFloat32(f float32) {
+	n.typ = Float32Type
+	n.bits = uint64(math.Float32bits(f))
+}
+
+// AsFloat64 sets the value of the
+// number to a float64.
+func (n *Number) AsFloat64(f float64) {
+	n.typ = Float64Type
+	n.bits = math.Float64bits(f)
+}
+
+// Int casts the number as an int64, and
+// returns whether or not that was the
+// underlying type.
+func (n *Number) Int() (int64, bool) {
+	return int64(n.bits), n.typ == IntType || n.typ == InvalidType
+}
+
+// Uint casts the number as a uint64, and returns
+// whether or not that was the underlying type.
+func (n *Number) Uint() (uint64, bool) {
+	return n.bits, n.typ == UintType
+}
+
+// Float casts the number to a float64, and
+// returns whether or not that was the underlying
+// type (either a float64 or a float32).
+func (n *Number) Float() (float64, bool) {
+	switch n.typ {
+	case Float32Type:
+		return float64(math.Float32frombits(uint32(n.bits))), true
+	case Float64Type:
+		return math.Float64frombits(n.bits), true
+	default:
+		return 0.0, false
+	}
+}
+
+// Type will return one of:
+// Float64Type, Float32Type, UintType, or IntType.
+func (n *Number) Type() Type {
+	if n.typ == InvalidType {
+		return IntType
+	}
+	return n.typ
+}
+
+// DecodeMsg implements msgp.Decodable
+func (n *Number) DecodeMsg(r *Reader) error {
+	typ, err := r.NextType()
+	if err != nil {
+		return err
+	}
+	switch typ {
+	case Float32Type:
+		f, err := r.ReadFloat32()
+		if err != nil {
+			return err
+		}
+		n.AsFloat32(f)
+		return nil
+	case Float64Type:
+		f, err := r.ReadFloat64()
+		if err != nil {
+			return err
+		}
+		n.AsFloat64(f)
+		return nil
+	case IntType:
+		i, err := r.ReadInt64()
+		if err != nil {
+			return err
+		}
+		n.AsInt(i)
+		return nil
+	case UintType:
+		u, err := r.ReadUint64()
+		if err != nil {
+			return err
+		}
+		n.AsUint(u)
+		return nil
+	default:
+		return TypeError{Encoded: typ, Method: IntType}
+	}
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (n *Number) UnmarshalMsg(b []byte) ([]byte, error) {
+	typ := NextType(b)
+	switch typ {
+	case IntType:
+		i, o, err := ReadInt64Bytes(b)
+		if err != nil {
+			return b, err
+		}
+		n.AsInt(i)
+		return o, nil
+	case UintType:
+		u, o, err := ReadUint64Bytes(b)
+		if err != nil {
+			return b, err
+		}
+		n.AsUint(u)
+		return o, nil
+	case Float64Type:
+		f, o, err := ReadFloat64Bytes(b)
+		if err != nil {
+			return b, err
+		}
+		n.AsFloat64(f)
+		return o, nil
+	case Float32Type:
+		f, o, err := ReadFloat32Bytes(b)
+		if err != nil {
+			return b, err
+		}
+		n.AsFloat32(f)
+		return o, nil
+	default:
+		return b, TypeError{Method: IntType, Encoded: typ}
+	}
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (n *Number) MarshalMsg(b []byte) ([]byte, error) {
+	switch n.typ {
+	case IntType:
+		return AppendInt64(b, int64(n.bits)), nil
+	case UintType:
+		return AppendUint64(b, uint64(n.bits)), nil
+	case Float64Type:
+		return AppendFloat64(b, math.Float64frombits(n.bits)), nil
+	case Float32Type:
+		return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil
+	default:
+		return AppendInt64(b, 0), nil
+	}
+}
+
+// EncodeMsg implements msgp.Encodable
+func (n *Number) EncodeMsg(w *Writer) error {
+	switch n.typ {
+	case IntType:
+		return w.WriteInt64(int64(n.bits))
+	case UintType:
+		return w.WriteUint64(n.bits)
+	case Float64Type:
+		return w.WriteFloat64(math.Float64frombits(n.bits))
+	case Float32Type:
+		return w.WriteFloat32(math.Float32frombits(uint32(n.bits)))
+	default:
+		return w.WriteInt64(0)
+	}
+}
+
+// Msgsize implements msgp.Sizer
+func (n *Number) Msgsize() int {
+	switch n.typ {
+	case Float32Type:
+		return Float32Size
+	case Float64Type:
+		return Float64Size
+	case IntType:
+		return Int64Size
+	case UintType:
+		return Uint64Size
+	default:
+		return 1 // fixint(0)
+	}
+}
+
+// MarshalJSON implements json.Marshaler
+func (n *Number) MarshalJSON() ([]byte, error) {
+	t := n.Type()
+	if t == InvalidType {
+		return []byte{'0'}, nil
+	}
+	out := make([]byte, 0, 32)
+	switch t {
+	case Float32Type, Float64Type:
+		f, _ := n.Float()
+		return strconv.AppendFloat(out, f, 'f', -1, 64), nil
+	case IntType:
+		i, _ := n.Int()
+		return strconv.AppendInt(out, i, 10), nil
+	case UintType:
+		u, _ := n.Uint()
+		return strconv.AppendUint(out, u, 10), nil
+	default:
+		panic("(*Number).typ is invalid")
+	}
+}
+
+// String implements fmt.Stringer
+func (n *Number) String() string {
+	switch n.typ {
+	case InvalidType:
+		return "0"
+	case Float32Type, Float64Type:
+		f, _ := n.Float()
+		return strconv.FormatFloat(f, 'f', -1, 64)
+	case IntType:
+		i, _ := n.Int()
+		return strconv.FormatInt(i, 10)
+	case UintType:
+		u, _ := n.Uint()
+		return strconv.FormatUint(u, 10)
+	default:
+		panic("(*Number).typ is invalid")
+	}
+}
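
Again purely illustrative (not part of the patch): a short sketch of the Number round-trip described above, using only the methods defined in this file plus the Float64Type constant from read.go.

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Number remembers the wire type it was set (or decoded) with.
	var n msgp.Number
	n.AsFloat64(3.5)

	// MarshalMsg appends the MessagePack encoding; UnmarshalMsg reads it back.
	buf, err := n.MarshalMsg(nil)
	if err != nil {
		panic(err)
	}

	var m msgp.Number
	if _, err := m.UnmarshalMsg(buf); err != nil {
		panic(err)
	}

	f, ok := m.Float()
	fmt.Println(f, ok, m.Type() == msgp.Float64Type) // 3.5 true true
}
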
diff --git a/vendor/github.com/tinylib/msgp/msgp/purego.go b/vendor/github.com/tinylib/msgp/msgp/purego.go
new file mode 100644
index 0000000000..2cd35c3e1d
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/purego.go
@@ -0,0 +1,16 @@
+//go:build purego || appengine
+// +build purego appengine
+
+package msgp
+
+// let's just assume appengine
+// uses 64-bit hardware...
+const smallint = false
+
+func UnsafeString(b []byte) string {
+	return string(b)
+}
+
+func UnsafeBytes(s string) []byte {
+	return []byte(s)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go
new file mode 100644
index 0000000000..e6d72f17d1
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/read.go
@@ -0,0 +1,1374 @@
+package msgp
+
+import (
+	"io"
+	"math"
+	"sync"
+	"time"
+
+	"github.com/philhofer/fwd"
+)
+
+// where we keep old *Readers
+var readerPool = sync.Pool{New: func() interface{} { return &Reader{} }}
+
+// Type is a MessagePack wire type,
+// including this package's built-in
+// extension types.
+type Type byte
+
+// MessagePack Types
+//
+// The zero value of Type
+// is InvalidType.
+const (
+	InvalidType Type = iota
+
+	// MessagePack built-in types
+
+	StrType
+	BinType
+	MapType
+	ArrayType
+	Float64Type
+	Float32Type
+	BoolType
+	IntType
+	UintType
+	NilType
+	DurationType
+	ExtensionType
+
+	// pseudo-types provided
+	// by extensions
+
+	Complex64Type
+	Complex128Type
+	TimeType
+
+	_maxtype
+)
+
+// String implements fmt.Stringer
+func (t Type) String() string {
+	switch t {
+	case StrType:
+		return "str"
+	case BinType:
+		return "bin"
+	case MapType:
+		return "map"
+	case ArrayType:
+		return "array"
+	case Float64Type:
+		return "float64"
+	case Float32Type:
+		return "float32"
+	case BoolType:
+		return "bool"
+	case UintType:
+		return "uint"
+	case IntType:
+		return "int"
+	case ExtensionType:
+		return "ext"
+	case NilType:
+		return "nil"
+	default:
+		return "<invalid>"
+	}
+}
+
+func freeR(m *Reader) {
+	readerPool.Put(m)
+}
+
+// Unmarshaler is the interface fulfilled
+// by objects that know how to unmarshal
+// themselves from MessagePack.
+// UnmarshalMsg unmarshals the object
+// from binary, returning any leftover
+// bytes and any errors encountered.
+type Unmarshaler interface {
+	UnmarshalMsg([]byte) ([]byte, error)
+}
+
+// Decodable is the interface fulfilled
+// by objects that know how to read
+// themselves from a *Reader.
+type Decodable interface {
+	DecodeMsg(*Reader) error
+}
+
+// Decode decodes 'd' from 'r'.
+func Decode(r io.Reader, d Decodable) error {
+	rd := NewReader(r)
+	err := d.DecodeMsg(rd)
+	freeR(rd)
+	return err
+}
+
+// NewReader returns a *Reader that
+// reads from the provided reader. The
+// reader will be buffered.
+func NewReader(r io.Reader) *Reader {
+	p := readerPool.Get().(*Reader)
+	if p.R == nil {
+		p.R = fwd.NewReader(r)
+	} else {
+		p.R.Reset(r)
+	}
+	return p
+}
+
+// NewReaderSize returns a *Reader with a buffer of the given size.
+// (This is vastly preferable to passing the decoder a reader that is already buffered.)
+func NewReaderSize(r io.Reader, sz int) *Reader {
+	return &Reader{R: fwd.NewReaderSize(r, sz)}
+}
+
+// NewReaderBuf returns a *Reader with a provided buffer.
+func NewReaderBuf(r io.Reader, buf []byte) *Reader {
+	return &Reader{R: fwd.NewReaderBuf(r, buf)}
+}
+
+// Reader wraps an io.Reader and provides
+// methods to read MessagePack-encoded values
+// from it. Readers are buffered.
+type Reader struct {
+	// R is the buffered reader
+	// that the Reader uses
+	// to decode MessagePack.
+	// The Reader itself
+	// is stateless; all the
+	// buffering is done
+	// within R.
+	R       *fwd.Reader
+	scratch []byte
+}
+
+// Read implements `io.Reader`
+func (m *Reader) Read(p []byte) (int, error) {
+	return m.R.Read(p)
+}
+
+// CopyNext reads the next object from m without decoding it and writes it to w.
+// It avoids unnecessary copies internally.
+func (m *Reader) CopyNext(w io.Writer) (int64, error) {
+	sz, o, err := getNextSize(m.R)
+	if err != nil {
+		return 0, err
+	}
+
+	var n int64
+	// Opportunistic optimization: if we can fit the whole thing in the m.R
+	// buffer, then just get a pointer to that, and pass it to w.Write,
+	// avoiding an allocation.
+	if int(sz) <= m.R.BufferSize() {
+		var nn int
+		var buf []byte
+		buf, err = m.R.Next(int(sz))
+		if err != nil {
+			if err == io.ErrUnexpectedEOF {
+				err = ErrShortBytes
+			}
+			return 0, err
+		}
+		nn, err = w.Write(buf)
+		n += int64(nn)
+	} else {
+		// Fall back to io.CopyN.
+		// May avoid allocating if w is a ReaderFrom (e.g. bytes.Buffer)
+		n, err = io.CopyN(w, m.R, int64(sz))
+		if err == io.ErrUnexpectedEOF {
+			err = ErrShortBytes
+		}
+	}
+	if err != nil {
+		return n, err
+	} else if n < int64(sz) {
+		return n, io.ErrShortWrite
+	}
+
+	// for maps and slices, read elements
+	for x := uintptr(0); x < o; x++ {
+		var n2 int64
+		n2, err = m.CopyNext(w)
+		if err != nil {
+			return n, err
+		}
+		n += n2
+	}
+	return n, nil
+}
+
+// ReadFull implements `io.ReadFull`
+func (m *Reader) ReadFull(p []byte) (int, error) {
+	return m.R.ReadFull(p)
+}
+
+// Reset resets the underlying reader.
+func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) }
+
+// Buffered returns the number of bytes currently in the read buffer.
+func (m *Reader) Buffered() int { return m.R.Buffered() }
+
+// BufferSize returns the capacity of the read buffer.
+func (m *Reader) BufferSize() int { return m.R.BufferSize() }
+
+// NextType returns the next object type to be decoded.
+func (m *Reader) NextType() (Type, error) {
+	p, err := m.R.Peek(1)
+	if err != nil {
+		return InvalidType, err
+	}
+	t := getType(p[0])
+	if t == InvalidType {
+		return t, InvalidPrefixError(p[0])
+	}
+	if t == ExtensionType {
+		v, err := m.peekExtensionType()
+		if err != nil {
+			return InvalidType, err
+		}
+		switch v {
+		case Complex64Extension:
+			return Complex64Type, nil
+		case Complex128Extension:
+			return Complex128Type, nil
+		case TimeExtension:
+			return TimeType, nil
+		}
+	}
+	return t, nil
+}
+
+// IsNil returns whether or not
+// the next byte is a null messagepack byte
+func (m *Reader) IsNil() bool {
+	p, err := m.R.Peek(1)
+	return err == nil && p[0] == mnil
+}
+
+// getNextSize returns the size of the next object on the wire.
+// returns (obj size, obj elements, error)
+// only maps and arrays have non-zero obj elements
+// for maps and arrays, obj size does not include elements
+//
+// use uintptr b/c it's guaranteed to be large enough
+// to hold whatever we can fit in memory.
+func getNextSize(r *fwd.Reader) (uintptr, uintptr, error) {
+	b, err := r.Peek(1)
+	if err != nil {
+		return 0, 0, err
+	}
+	lead := b[0]
+	spec := getBytespec(lead)
+	size, mode := spec.size, spec.extra
+	if size == 0 {
+		return 0, 0, InvalidPrefixError(lead)
+	}
+	if mode >= 0 {
+		return uintptr(size), uintptr(mode), nil
+	}
+	b, err = r.Peek(int(size))
+	if err != nil {
+		return 0, 0, err
+	}
+	switch mode {
+	case extra8:
+		return uintptr(size) + uintptr(b[1]), 0, nil
+	case extra16:
+		return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil
+	case extra32:
+		return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil
+	case map16v:
+		return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil
+	case map32v:
+		return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil
+	case array16v:
+		return uintptr(size), uintptr(big.Uint16(b[1:])), nil
+	case array32v:
+		return uintptr(size), uintptr(big.Uint32(b[1:])), nil
+	default:
+		return 0, 0, fatal
+	}
+}
+
+// Skip skips over the next object, regardless of
+// its type. If it is an array or map, the whole array
+// or map will be skipped.
+func (m *Reader) Skip() error {
+	var (
+		v   uintptr // bytes
+		o   uintptr // objects
+		err error
+		p   []byte
+	)
+
+	// we can use the faster
+	// method if we have enough
+	// buffered data
+	if m.R.Buffered() >= 5 {
+		p, err = m.R.Peek(5)
+		if err != nil {
+			return err
+		}
+		v, o, err = getSize(p)
+		if err != nil {
+			return err
+		}
+	} else {
+		v, o, err = getNextSize(m.R)
+		if err != nil {
+			return err
+		}
+	}
+
+	// 'v' is always non-zero
+	// if err == nil
+	_, err = m.R.Skip(int(v))
+	if err != nil {
+		return err
+	}
+
+	// for maps and slices, skip elements
+	for x := uintptr(0); x < o; x++ {
+		err = m.Skip()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ReadMapHeader reads the next object
+// as a map header and returns the size
+// of the map and the number of bytes read.
+// It will return a TypeError{} if the next
+// object is not a map.
+func (m *Reader) ReadMapHeader() (sz uint32, err error) {
+	var p []byte
+	var lead byte
+	p, err = m.R.Peek(1)
+	if err != nil {
+		return
+	}
+	lead = p[0]
+	if isfixmap(lead) {
+		sz = uint32(rfixmap(lead))
+		_, err = m.R.Skip(1)
+		return
+	}
+	switch lead {
+	case mmap16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		sz = uint32(big.Uint16(p[1:]))
+		return
+	case mmap32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		sz = big.Uint32(p[1:])
+		return
+	default:
+		err = badPrefix(MapType, lead)
+		return
+	}
+}
+
+// ReadMapKey reads either a 'str' or 'bin' field from
+// the reader and returns the value as a []byte. It uses
+// scratch for storage if it is large enough.
+func (m *Reader) ReadMapKey(scratch []byte) ([]byte, error) {
+	out, err := m.ReadStringAsBytes(scratch)
+	if err != nil {
+		if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType {
+			return m.ReadBytes(scratch)
+		}
+		return nil, err
+	}
+	return out, nil
+}
+
+// ReadMapKeyPtr returns a []byte pointing to the contents
+// of a valid map key. The key cannot be empty, and it
+// must be shorter than the total buffer size of the
+// *Reader. Additionally, the returned slice is only
+// valid until the next *Reader method call. Users
+// should exercise extreme care when using this
+// method; writing into the returned slice may
+// corrupt future reads.
+func (m *Reader) ReadMapKeyPtr() ([]byte, error) {
+	p, err := m.R.Peek(1)
+	if err != nil {
+		return nil, err
+	}
+	lead := p[0]
+	var read int
+	if isfixstr(lead) {
+		read = int(rfixstr(lead))
+		m.R.Skip(1)
+		goto fill
+	}
+	switch lead {
+	case mstr8, mbin8:
+		p, err = m.R.Next(2)
+		if err != nil {
+			return nil, err
+		}
+		read = int(p[1])
+	case mstr16, mbin16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return nil, err
+		}
+		read = int(big.Uint16(p[1:]))
+	case mstr32, mbin32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return nil, err
+		}
+		read = int(big.Uint32(p[1:]))
+	default:
+		return nil, badPrefix(StrType, lead)
+	}
+fill:
+	if read == 0 {
+		return nil, ErrShortBytes
+	}
+	return m.R.Next(read)
+}
+
+// ReadArrayHeader reads the next object as an
+// array header and returns the size of the array
+// and the number of bytes read.
+func (m *Reader) ReadArrayHeader() (sz uint32, err error) {
+	var lead byte
+	var p []byte
+	p, err = m.R.Peek(1)
+	if err != nil {
+		return
+	}
+	lead = p[0]
+	if isfixarray(lead) {
+		sz = uint32(rfixarray(lead))
+		_, err = m.R.Skip(1)
+		return
+	}
+	switch lead {
+	case marray16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		sz = uint32(big.Uint16(p[1:]))
+		return
+
+	case marray32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		sz = big.Uint32(p[1:])
+		return
+
+	default:
+		err = badPrefix(ArrayType, lead)
+		return
+	}
+}
+
+// ReadNil reads a 'nil' MessagePack byte from the reader
+func (m *Reader) ReadNil() error {
+	p, err := m.R.Peek(1)
+	if err != nil {
+		return err
+	}
+	if p[0] != mnil {
+		return badPrefix(NilType, p[0])
+	}
+	_, err = m.R.Skip(1)
+	return err
+}
+
+// ReadFloat64 reads a float64 from the reader.
+// (If the value on the wire is encoded as a float32,
+// it will be up-cast to a float64.)
+func (m *Reader) ReadFloat64() (f float64, err error) {
+	var p []byte
+	p, err = m.R.Peek(9)
+	if err != nil {
+		// we'll allow a conversion from float32 to float64,
+		// since we don't lose any precision
+		if err == io.EOF && len(p) > 0 && p[0] == mfloat32 {
+			ef, err := m.ReadFloat32()
+			return float64(ef), err
+		}
+		return
+	}
+	if p[0] != mfloat64 {
+		// see above
+		if p[0] == mfloat32 {
+			ef, err := m.ReadFloat32()
+			return float64(ef), err
+		}
+		err = badPrefix(Float64Type, p[0])
+		return
+	}
+	f = math.Float64frombits(getMuint64(p))
+	_, err = m.R.Skip(9)
+	return
+}
+
+// ReadFloat32 reads a float32 from the reader
+func (m *Reader) ReadFloat32() (f float32, err error) {
+	var p []byte
+	p, err = m.R.Peek(5)
+	if err != nil {
+		return
+	}
+	if p[0] != mfloat32 {
+		err = badPrefix(Float32Type, p[0])
+		return
+	}
+	f = math.Float32frombits(getMuint32(p))
+	_, err = m.R.Skip(5)
+	return
+}
+
+// ReadBool reads a bool from the reader
+func (m *Reader) ReadBool() (b bool, err error) {
+	var p []byte
+	p, err = m.R.Peek(1)
+	if err != nil {
+		return
+	}
+	switch p[0] {
+	case mtrue:
+		b = true
+	case mfalse:
+	default:
+		err = badPrefix(BoolType, p[0])
+		return
+	}
+	_, err = m.R.Skip(1)
+	return
+}
+
+// ReadDuration reads a time.Duration from the reader
+func (m *Reader) ReadDuration() (d time.Duration, err error) {
+	i, err := m.ReadInt64()
+	return time.Duration(i), err
+}
+
+// ReadInt64 reads an int64 from the reader
+func (m *Reader) ReadInt64() (i int64, err error) {
+	var p []byte
+	var lead byte
+	p, err = m.R.Peek(1)
+	if err != nil {
+		return
+	}
+	lead = p[0]
+
+	if isfixint(lead) {
+		i = int64(rfixint(lead))
+		_, err = m.R.Skip(1)
+		return
+	} else if isnfixint(lead) {
+		i = int64(rnfixint(lead))
+		_, err = m.R.Skip(1)
+		return
+	}
+
+	switch lead {
+	case mint8:
+		p, err = m.R.Next(2)
+		if err != nil {
+			return
+		}
+		i = int64(getMint8(p))
+		return
+
+	case muint8:
+		p, err = m.R.Next(2)
+		if err != nil {
+			return
+		}
+		i = int64(getMuint8(p))
+		return
+
+	case mint16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		i = int64(getMint16(p))
+		return
+
+	case muint16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		i = int64(getMuint16(p))
+		return
+
+	case mint32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		i = int64(getMint32(p))
+		return
+
+	case muint32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		i = int64(getMuint32(p))
+		return
+
+	case mint64:
+		p, err = m.R.Next(9)
+		if err != nil {
+			return
+		}
+		i = getMint64(p)
+		return
+
+	case muint64:
+		p, err = m.R.Next(9)
+		if err != nil {
+			return
+		}
+		u := getMuint64(p)
+		if u > math.MaxInt64 {
+			err = UintOverflow{Value: u, FailedBitsize: 64}
+			return
+		}
+		i = int64(u)
+		return
+
+	default:
+		err = badPrefix(IntType, lead)
+		return
+	}
+}
+
+// ReadInt32 reads an int32 from the reader
+func (m *Reader) ReadInt32() (i int32, err error) {
+	var in int64
+	in, err = m.ReadInt64()
+	if in > math.MaxInt32 || in < math.MinInt32 {
+		err = IntOverflow{Value: in, FailedBitsize: 32}
+		return
+	}
+	i = int32(in)
+	return
+}
+
+// ReadInt16 reads an int16 from the reader
+func (m *Reader) ReadInt16() (i int16, err error) {
+	var in int64
+	in, err = m.ReadInt64()
+	if in > math.MaxInt16 || in < math.MinInt16 {
+		err = IntOverflow{Value: in, FailedBitsize: 16}
+		return
+	}
+	i = int16(in)
+	return
+}
+
+// ReadInt8 reads an int8 from the reader
+func (m *Reader) ReadInt8() (i int8, err error) {
+	var in int64
+	in, err = m.ReadInt64()
+	if in > math.MaxInt8 || in < math.MinInt8 {
+		err = IntOverflow{Value: in, FailedBitsize: 8}
+		return
+	}
+	i = int8(in)
+	return
+}
+
+// ReadInt reads an int from the reader
+func (m *Reader) ReadInt() (i int, err error) {
+	if smallint {
+		var in int32
+		in, err = m.ReadInt32()
+		i = int(in)
+		return
+	}
+	var in int64
+	in, err = m.ReadInt64()
+	i = int(in)
+	return
+}
+
+// ReadUint64 reads a uint64 from the reader
+func (m *Reader) ReadUint64() (u uint64, err error) {
+	var p []byte
+	var lead byte
+	p, err = m.R.Peek(1)
+	if err != nil {
+		return
+	}
+	lead = p[0]
+	if isfixint(lead) {
+		u = uint64(rfixint(lead))
+		_, err = m.R.Skip(1)
+		return
+	}
+	switch lead {
+	case mint8:
+		p, err = m.R.Next(2)
+		if err != nil {
+			return
+		}
+		v := int64(getMint8(p))
+		if v < 0 {
+			err = UintBelowZero{Value: v}
+			return
+		}
+		u = uint64(v)
+		return
+
+	case muint8:
+		p, err = m.R.Next(2)
+		if err != nil {
+			return
+		}
+		u = uint64(getMuint8(p))
+		return
+
+	case mint16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		v := int64(getMint16(p))
+		if v < 0 {
+			err = UintBelowZero{Value: v}
+			return
+		}
+		u = uint64(v)
+		return
+
+	case muint16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		u = uint64(getMuint16(p))
+		return
+
+	case mint32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		v := int64(getMint32(p))
+		if v < 0 {
+			err = UintBelowZero{Value: v}
+			return
+		}
+		u = uint64(v)
+		return
+
+	case muint32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		u = uint64(getMuint32(p))
+		return
+
+	case mint64:
+		p, err = m.R.Next(9)
+		if err != nil {
+			return
+		}
+		v := int64(getMint64(p))
+		if v < 0 {
+			err = UintBelowZero{Value: v}
+			return
+		}
+		u = uint64(v)
+		return
+
+	case muint64:
+		p, err = m.R.Next(9)
+		if err != nil {
+			return
+		}
+		u = getMuint64(p)
+		return
+
+	default:
+		if isnfixint(lead) {
+			err = UintBelowZero{Value: int64(rnfixint(lead))}
+		} else {
+			err = badPrefix(UintType, lead)
+		}
+		return
+
+	}
+}
+
+// ReadUint32 reads a uint32 from the reader
+func (m *Reader) ReadUint32() (u uint32, err error) {
+	var in uint64
+	in, err = m.ReadUint64()
+	if in > math.MaxUint32 {
+		err = UintOverflow{Value: in, FailedBitsize: 32}
+		return
+	}
+	u = uint32(in)
+	return
+}
+
+// ReadUint16 reads a uint16 from the reader
+func (m *Reader) ReadUint16() (u uint16, err error) {
+	var in uint64
+	in, err = m.ReadUint64()
+	if in > math.MaxUint16 {
+		err = UintOverflow{Value: in, FailedBitsize: 16}
+		return
+	}
+	u = uint16(in)
+	return
+}
+
+// ReadUint8 reads a uint8 from the reader
+func (m *Reader) ReadUint8() (u uint8, err error) {
+	var in uint64
+	in, err = m.ReadUint64()
+	if in > math.MaxUint8 {
+		err = UintOverflow{Value: in, FailedBitsize: 8}
+		return
+	}
+	u = uint8(in)
+	return
+}
+
+// ReadUint reads a uint from the reader
+func (m *Reader) ReadUint() (u uint, err error) {
+	if smallint {
+		var un uint32
+		un, err = m.ReadUint32()
+		u = uint(un)
+		return
+	}
+	var un uint64
+	un, err = m.ReadUint64()
+	u = uint(un)
+	return
+}
+
+// ReadByte is analogous to ReadUint8.
+//
+// NOTE: this is *not* an implementation
+// of io.ByteReader.
+func (m *Reader) ReadByte() (b byte, err error) {
+	var in uint64
+	in, err = m.ReadUint64()
+	if in > math.MaxUint8 {
+		err = UintOverflow{Value: in, FailedBitsize: 8}
+		return
+	}
+	b = byte(in)
+	return
+}
+
+// ReadBytes reads a MessagePack 'bin' object
+// from the reader and returns its value. It may
+// use 'scratch' for storage if it is non-nil.
+func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) {
+	var p []byte
+	var lead byte
+	p, err = m.R.Peek(2)
+	if err != nil {
+		return
+	}
+	lead = p[0]
+	var read int64
+	switch lead {
+	case mbin8:
+		read = int64(p[1])
+		m.R.Skip(2)
+	case mbin16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		read = int64(big.Uint16(p[1:]))
+	case mbin32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		read = int64(big.Uint32(p[1:]))
+	default:
+		err = badPrefix(BinType, lead)
+		return
+	}
+	if int64(cap(scratch)) < read {
+		b = make([]byte, read)
+	} else {
+		b = scratch[0:read]
+	}
+	_, err = m.R.ReadFull(b)
+	return
+}
+
+// ReadBytesHeader reads the size header
+// of a MessagePack 'bin' object. The user
+// is responsible for dealing with the next
+// 'sz' bytes from the reader in an application-specific
+// way.
+func (m *Reader) ReadBytesHeader() (sz uint32, err error) {
+	var p []byte
+	p, err = m.R.Peek(1)
+	if err != nil {
+		return
+	}
+	switch p[0] {
+	case mbin8:
+		p, err = m.R.Next(2)
+		if err != nil {
+			return
+		}
+		sz = uint32(p[1])
+		return
+	case mbin16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		sz = uint32(big.Uint16(p[1:]))
+		return
+	case mbin32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		sz = uint32(big.Uint32(p[1:]))
+		return
+	default:
+		err = badPrefix(BinType, p[0])
+		return
+	}
+}
+
+// ReadExactBytes reads a MessagePack 'bin'-encoded
+// object off of the wire into the provided slice. An
+// ArrayError will be returned if the object is not
+// exactly the length of the input slice.
+func (m *Reader) ReadExactBytes(into []byte) error {
+	p, err := m.R.Peek(2)
+	if err != nil {
+		return err
+	}
+	lead := p[0]
+	var read int64 // bytes to read
+	var skip int   // prefix size to skip
+	switch lead {
+	case mbin8:
+		read = int64(p[1])
+		skip = 2
+	case mbin16:
+		p, err = m.R.Peek(3)
+		if err != nil {
+			return err
+		}
+		read = int64(big.Uint16(p[1:]))
+		skip = 3
+	case mbin32:
+		p, err = m.R.Peek(5)
+		if err != nil {
+			return err
+		}
+		read = int64(big.Uint32(p[1:]))
+		skip = 5
+	default:
+		return badPrefix(BinType, lead)
+	}
+	if read != int64(len(into)) {
+		return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)}
+	}
+	m.R.Skip(skip)
+	_, err = m.R.ReadFull(into)
+	return err
+}
+
+// ReadStringAsBytes reads a MessagePack 'str' (utf-8) string
+// and returns its value as bytes. It may use 'scratch' for storage
+// if it is non-nil.
+func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) {
+	var p []byte
+	var lead byte
+	p, err = m.R.Peek(1)
+	if err != nil {
+		return
+	}
+	lead = p[0]
+	var read int64
+
+	if isfixstr(lead) {
+		read = int64(rfixstr(lead))
+		m.R.Skip(1)
+		goto fill
+	}
+
+	switch lead {
+	case mstr8:
+		p, err = m.R.Next(2)
+		if err != nil {
+			return
+		}
+		read = int64(uint8(p[1]))
+	case mstr16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		read = int64(big.Uint16(p[1:]))
+	case mstr32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		read = int64(big.Uint32(p[1:]))
+	default:
+		err = badPrefix(StrType, lead)
+		return
+	}
+fill:
+	if int64(cap(scratch)) < read {
+		b = make([]byte, read)
+	} else {
+		b = scratch[0:read]
+	}
+	_, err = m.R.ReadFull(b)
+	return
+}
+
+// ReadStringHeader reads a string header
+// off of the wire. The user is then responsible
+// for dealing with the next 'sz' bytes from
+// the reader in an application-specific manner.
+func (m *Reader) ReadStringHeader() (sz uint32, err error) {
+	var p []byte
+	p, err = m.R.Peek(1)
+	if err != nil {
+		return
+	}
+	lead := p[0]
+	if isfixstr(lead) {
+		sz = uint32(rfixstr(lead))
+		m.R.Skip(1)
+		return
+	}
+	switch lead {
+	case mstr8:
+		p, err = m.R.Next(2)
+		if err != nil {
+			return
+		}
+		sz = uint32(p[1])
+		return
+	case mstr16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		sz = uint32(big.Uint16(p[1:]))
+		return
+	case mstr32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		sz = big.Uint32(p[1:])
+		return
+	default:
+		err = badPrefix(StrType, lead)
+		return
+	}
+}
+
+// ReadString reads a utf-8 string from the reader
+func (m *Reader) ReadString() (s string, err error) {
+	var p []byte
+	var lead byte
+	var read int64
+	p, err = m.R.Peek(1)
+	if err != nil {
+		return
+	}
+	lead = p[0]
+
+	if isfixstr(lead) {
+		read = int64(rfixstr(lead))
+		m.R.Skip(1)
+		goto fill
+	}
+
+	switch lead {
+	case mstr8:
+		p, err = m.R.Next(2)
+		if err != nil {
+			return
+		}
+		read = int64(uint8(p[1]))
+	case mstr16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		read = int64(big.Uint16(p[1:]))
+	case mstr32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		read = int64(big.Uint32(p[1:]))
+	default:
+		err = badPrefix(StrType, lead)
+		return
+	}
+fill:
+	if read == 0 {
+		s, err = "", nil
+		return
+	}
+	// reading into the memory
+	// that will become the string
+	// itself has vastly superior
+	// worst-case performance, because
+	// the reader buffer doesn't have
+	// to be large enough to hold the string.
+	// the idea here is to make it more
+	// difficult for someone malicious
+	// to cause the system to run out of
+	// memory by sending very large strings.
+	//
+	// NOTE: this works because the argument
+	// passed to (*fwd.Reader).ReadFull escapes
+	// to the heap; its argument may, in turn,
+	// be passed to the underlying reader, and
+	// thus escape analysis *must* conclude that
+	// 'out' escapes.
+	out := make([]byte, read)
+	_, err = m.R.ReadFull(out)
+	if err != nil {
+		return
+	}
+	s = UnsafeString(out)
+	return
+}
+
+// ReadComplex64 reads a complex64 from the reader
+func (m *Reader) ReadComplex64() (f complex64, err error) {
+	var p []byte
+	p, err = m.R.Peek(10)
+	if err != nil {
+		return
+	}
+	if p[0] != mfixext8 {
+		err = badPrefix(Complex64Type, p[0])
+		return
+	}
+	if int8(p[1]) != Complex64Extension {
+		err = errExt(int8(p[1]), Complex64Extension)
+		return
+	}
+	f = complex(math.Float32frombits(big.Uint32(p[2:])),
+		math.Float32frombits(big.Uint32(p[6:])))
+	_, err = m.R.Skip(10)
+	return
+}
+
+// ReadComplex128 reads a complex128 from the reader
+func (m *Reader) ReadComplex128() (f complex128, err error) {
+	var p []byte
+	p, err = m.R.Peek(18)
+	if err != nil {
+		return
+	}
+	if p[0] != mfixext16 {
+		err = badPrefix(Complex128Type, p[0])
+		return
+	}
+	if int8(p[1]) != Complex128Extension {
+		err = errExt(int8(p[1]), Complex128Extension)
+		return
+	}
+	f = complex(math.Float64frombits(big.Uint64(p[2:])),
+		math.Float64frombits(big.Uint64(p[10:])))
+	_, err = m.R.Skip(18)
+	return
+}
+
+// ReadMapStrIntf reads a MessagePack map into a map[string]interface{}.
+// (You must pass a non-nil map into the function.)
+func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) {
+	var sz uint32
+	sz, err = m.ReadMapHeader()
+	if err != nil {
+		return
+	}
+	for key := range mp {
+		delete(mp, key)
+	}
+	for i := uint32(0); i < sz; i++ {
+		var key string
+		var val interface{}
+		key, err = m.ReadString()
+		if err != nil {
+			return
+		}
+		val, err = m.ReadIntf()
+		if err != nil {
+			return
+		}
+		mp[key] = val
+	}
+	return
+}
+
+// ReadTime reads a time.Time object from the reader.
+// The returned time's location will be set to time.Local.
+func (m *Reader) ReadTime() (t time.Time, err error) {
+	var p []byte
+	p, err = m.R.Peek(15)
+	if err != nil {
+		return
+	}
+	if p[0] != mext8 || p[1] != 12 {
+		err = badPrefix(TimeType, p[0])
+		return
+	}
+	if int8(p[2]) != TimeExtension {
+		err = errExt(int8(p[2]), TimeExtension)
+		return
+	}
+	sec, nsec := getUnix(p[3:])
+	t = time.Unix(sec, int64(nsec)).Local()
+	_, err = m.R.Skip(15)
+	return
+}
+
+// ReadIntf reads out the next object as a raw interface{}.
+// Arrays are decoded as []interface{}, and maps are decoded
+// as map[string]interface{}. Integers are decoded as int64
+// and unsigned integers are decoded as uint64.
+func (m *Reader) ReadIntf() (i interface{}, err error) {
+	var t Type
+	t, err = m.NextType()
+	if err != nil {
+		return
+	}
+	switch t {
+	case BoolType:
+		i, err = m.ReadBool()
+		return
+
+	case IntType:
+		i, err = m.ReadInt64()
+		return
+
+	case UintType:
+		i, err = m.ReadUint64()
+		return
+
+	case BinType:
+		i, err = m.ReadBytes(nil)
+		return
+
+	case StrType:
+		i, err = m.ReadString()
+		return
+
+	case Complex64Type:
+		i, err = m.ReadComplex64()
+		return
+
+	case Complex128Type:
+		i, err = m.ReadComplex128()
+		return
+
+	case TimeType:
+		i, err = m.ReadTime()
+		return
+
+	case DurationType:
+		i, err = m.ReadDuration()
+		return
+
+	case ExtensionType:
+		var t int8
+		t, err = m.peekExtensionType()
+		if err != nil {
+			return
+		}
+		f, ok := extensionReg[t]
+		if ok {
+			e := f()
+			err = m.ReadExtension(e)
+			i = e
+			return
+		}
+		var e RawExtension
+		e.Type = t
+		err = m.ReadExtension(&e)
+		i = &e
+		return
+
+	case MapType:
+		mp := make(map[string]interface{})
+		err = m.ReadMapStrIntf(mp)
+		i = mp
+		return
+
+	case NilType:
+		err = m.ReadNil()
+		i = nil
+		return
+
+	case Float32Type:
+		i, err = m.ReadFloat32()
+		return
+
+	case Float64Type:
+		i, err = m.ReadFloat64()
+		return
+
+	case ArrayType:
+		var sz uint32
+		sz, err = m.ReadArrayHeader()
+
+		if err != nil {
+			return
+		}
+		out := make([]interface{}, int(sz))
+		for j := range out {
+			out[j], err = m.ReadIntf()
+			if err != nil {
+				return
+			}
+		}
+		i = out
+		return
+
+	default:
+		return nil, fatal // unreachable
+	}
+}
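
One more illustrative sketch (not part of the vendored file): decoding an arbitrary object with the Reader defined above. The Writer half (NewWriter, WriteMapHeader, WriteString, WriteBool, Flush) lives elsewhere in this vendored package and is assumed here; error checks on the writes are elided for brevity.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Encode {"ok": true} with the streaming Writer, then decode it generically.
	var network bytes.Buffer
	w := msgp.NewWriter(&network)
	w.WriteMapHeader(1)
	w.WriteString("ok")
	w.WriteBool(true)
	w.Flush()

	r := msgp.NewReader(&network)
	v, err := r.ReadIntf() // maps decode as map[string]interface{}
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v) // map[string]interface {}{"ok":true}
}
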
diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
new file mode 100644
index 0000000000..a204ac4b9c
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
@@ -0,0 +1,1303 @@
+package msgp
+
+import (
+	"bytes"
+	"encoding/binary"
+	"math"
+	"time"
+)
+
+var big = binary.BigEndian
+
+// NextType returns the type of the next
+// object in the slice. If the length
+// of the input is zero, it returns
+// [InvalidType].
+func NextType(b []byte) Type {
+	if len(b) == 0 {
+		return InvalidType
+	}
+	spec := getBytespec(b[0])
+	t := spec.typ
+	if t == ExtensionType && len(b) > int(spec.size) {
+		var tp int8
+		if spec.extra == constsize {
+			tp = int8(b[1])
+		} else {
+			tp = int8(b[spec.size-1])
+		}
+		switch tp {
+		case TimeExtension:
+			return TimeType
+		case Complex128Extension:
+			return Complex128Type
+		case Complex64Extension:
+			return Complex64Type
+		default:
+			return ExtensionType
+		}
+	}
+	return t
+}
+
+// IsNil returns true if len(b)>0 and
+// the leading byte is a 'nil' MessagePack
+// byte; false otherwise
+func IsNil(b []byte) bool {
+	if len(b) != 0 && b[0] == mnil {
+		return true
+	}
+	return false
+}
+
+// Raw is raw MessagePack.
+// Raw allows you to read and write
+// data without interpreting its contents.
+type Raw []byte
+
+// MarshalMsg implements [Marshaler].
+// It appends the raw contents of 'raw'
+// to the provided byte slice. If 'raw'
+// is 0 bytes, 'nil' will be appended instead.
+func (r Raw) MarshalMsg(b []byte) ([]byte, error) {
+	i := len(r)
+	if i == 0 {
+		return AppendNil(b), nil
+	}
+	o, l := ensure(b, i)
+	copy(o[l:], []byte(r))
+	return o, nil
+}
+
+// UnmarshalMsg implements [Unmarshaler].
+// It sets the contents of *Raw to be the next
+// object in the provided byte slice.
+func (r *Raw) UnmarshalMsg(b []byte) ([]byte, error) {
+	l := len(b)
+	out, err := Skip(b)
+	if err != nil {
+		return b, err
+	}
+	rlen := l - len(out)
+	if IsNil(b[:rlen]) {
+		rlen = 0
+	}
+	if cap(*r) < rlen {
+		*r = make(Raw, rlen)
+	} else {
+		*r = (*r)[0:rlen]
+	}
+	copy(*r, b[:rlen])
+	return out, nil
+}
+
+// EncodeMsg implements [Encodable].
+// It writes the raw bytes to the writer.
+// If r is empty, it writes 'nil' instead.
+func (r Raw) EncodeMsg(w *Writer) error {
+	if len(r) == 0 {
+		return w.WriteNil()
+	}
+	_, err := w.Write([]byte(r))
+	return err
+}
+
+// DecodeMsg implements [Decodable].
+// It sets the value of *Raw to be the
+// next object on the wire.
+func (r *Raw) DecodeMsg(f *Reader) error {
+	*r = (*r)[:0]
+	err := appendNext(f, (*[]byte)(r))
+	if IsNil(*r) {
+		*r = (*r)[:0]
+	}
+	return err
+}
+
+// Msgsize implements [Sizer].
+func (r Raw) Msgsize() int {
+	l := len(r)
+	if l == 0 {
+		return 1 // for 'nil'
+	}
+	return l
+}
+
+func appendNext(f *Reader, d *[]byte) error {
+	amt, o, err := getNextSize(f.R)
+	if err != nil {
+		return err
+	}
+	var i int
+	*d, i = ensure(*d, int(amt))
+	_, err = f.R.ReadFull((*d)[i:])
+	if err != nil {
+		return err
+	}
+	for o > 0 {
+		err = appendNext(f, d)
+		if err != nil {
+			return err
+		}
+		o--
+	}
+	return nil
+}
+
+// MarshalJSON implements [json.Marshaler].
+func (r *Raw) MarshalJSON() ([]byte, error) {
+	var buf bytes.Buffer
+	_, err := UnmarshalAsJSON(&buf, []byte(*r))
+	return buf.Bytes(), err
+}
+
+// ReadMapHeaderBytes reads a map header size
+// from 'b' and returns the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a map)
+func ReadMapHeaderBytes(b []byte) (sz uint32, o []byte, err error) {
+	l := len(b)
+	if l < 1 {
+		err = ErrShortBytes
+		return
+	}
+
+	lead := b[0]
+	if isfixmap(lead) {
+		sz = uint32(rfixmap(lead))
+		o = b[1:]
+		return
+	}
+
+	switch lead {
+	case mmap16:
+		if l < 3 {
+			err = ErrShortBytes
+			return
+		}
+		sz = uint32(big.Uint16(b[1:]))
+		o = b[3:]
+		return
+
+	case mmap32:
+		if l < 5 {
+			err = ErrShortBytes
+			return
+		}
+		sz = big.Uint32(b[1:])
+		o = b[5:]
+		return
+
+	default:
+		err = badPrefix(MapType, lead)
+		return
+	}
+}
+
+// ReadMapKeyZC attempts to read a map key
+// from 'b' and returns the key bytes and the remaining bytes
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a str or bin)
+func ReadMapKeyZC(b []byte) ([]byte, []byte, error) {
+	o, x, err := ReadStringZC(b)
+	if err != nil {
+		if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType {
+			return ReadBytesZC(b)
+		}
+		return nil, b, err
+	}
+	return o, x, nil
+}
+
+// ReadArrayHeaderBytes attempts to read
+// the array header size off of 'b' and return
+// the size and remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not an array)
+func ReadArrayHeaderBytes(b []byte) (sz uint32, o []byte, err error) {
+	if len(b) < 1 {
+		return 0, nil, ErrShortBytes
+	}
+	lead := b[0]
+	if isfixarray(lead) {
+		sz = uint32(rfixarray(lead))
+		o = b[1:]
+		return
+	}
+
+	switch lead {
+	case marray16:
+		if len(b) < 3 {
+			err = ErrShortBytes
+			return
+		}
+		sz = uint32(big.Uint16(b[1:]))
+		o = b[3:]
+		return
+
+	case marray32:
+		if len(b) < 5 {
+			err = ErrShortBytes
+			return
+		}
+		sz = big.Uint32(b[1:])
+		o = b[5:]
+		return
+
+	default:
+		err = badPrefix(ArrayType, lead)
+		return
+	}
+}
+
+// ReadBytesHeader reads the 'bin' header size
+// off of 'b' and returns the size and remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a bin object)
+func ReadBytesHeader(b []byte) (sz uint32, o []byte, err error) {
+	if len(b) < 1 {
+		return 0, nil, ErrShortBytes
+	}
+	switch b[0] {
+	case mbin8:
+		if len(b) < 2 {
+			err = ErrShortBytes
+			return
+		}
+		sz = uint32(b[1])
+		o = b[2:]
+		return
+	case mbin16:
+		if len(b) < 3 {
+			err = ErrShortBytes
+			return
+		}
+		sz = uint32(big.Uint16(b[1:]))
+		o = b[3:]
+		return
+	case mbin32:
+		if len(b) < 5 {
+			err = ErrShortBytes
+			return
+		}
+		sz = big.Uint32(b[1:])
+		o = b[5:]
+		return
+	default:
+		err = badPrefix(BinType, b[0])
+		return
+	}
+}
+
+// ReadNilBytes tries to read a "nil" byte
+// off of 'b' and return the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a 'nil')
+//   - [InvalidPrefixError]
+func ReadNilBytes(b []byte) ([]byte, error) {
+	if len(b) < 1 {
+		return nil, ErrShortBytes
+	}
+	if b[0] != mnil {
+		return b, badPrefix(NilType, b[0])
+	}
+	return b[1:], nil
+}
+
+// ReadFloat64Bytes tries to read a float64
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a float64)
+func ReadFloat64Bytes(b []byte) (f float64, o []byte, err error) {
+	if len(b) < 9 {
+		if len(b) >= 5 && b[0] == mfloat32 {
+			var tf float32
+			tf, o, err = ReadFloat32Bytes(b)
+			f = float64(tf)
+			return
+		}
+		err = ErrShortBytes
+		return
+	}
+
+	if b[0] != mfloat64 {
+		if b[0] == mfloat32 {
+			var tf float32
+			tf, o, err = ReadFloat32Bytes(b)
+			f = float64(tf)
+			return
+		}
+		err = badPrefix(Float64Type, b[0])
+		return
+	}
+
+	f = math.Float64frombits(getMuint64(b))
+	o = b[9:]
+	return
+}
+
+// ReadFloat32Bytes tries to read a float32
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a float32)
+func ReadFloat32Bytes(b []byte) (f float32, o []byte, err error) {
+	if len(b) < 5 {
+		err = ErrShortBytes
+		return
+	}
+
+	if b[0] != mfloat32 {
+		err = TypeError{Method: Float32Type, Encoded: getType(b[0])}
+		return
+	}
+
+	f = math.Float32frombits(getMuint32(b))
+	o = b[5:]
+	return
+}
+
+// ReadBoolBytes tries to read a bool
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a bool)
+func ReadBoolBytes(b []byte) (bool, []byte, error) {
+	if len(b) < 1 {
+		return false, b, ErrShortBytes
+	}
+	switch b[0] {
+	case mtrue:
+		return true, b[1:], nil
+	case mfalse:
+		return false, b[1:], nil
+	default:
+		return false, b, badPrefix(BoolType, b[0])
+	}
+}
+
+// ReadDurationBytes tries to read a time.Duration
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not an int)
+func ReadDurationBytes(b []byte) (d time.Duration, o []byte, err error) {
+	i, o, err := ReadInt64Bytes(b)
+	return time.Duration(i), o, err
+}
+
+// ReadInt64Bytes tries to read an int64
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not an int)
+func ReadInt64Bytes(b []byte) (i int64, o []byte, err error) {
+	l := len(b)
+	if l < 1 {
+		return 0, nil, ErrShortBytes
+	}
+
+	lead := b[0]
+	if isfixint(lead) {
+		i = int64(rfixint(lead))
+		o = b[1:]
+		return
+	}
+	if isnfixint(lead) {
+		i = int64(rnfixint(lead))
+		o = b[1:]
+		return
+	}
+
+	switch lead {
+	case mint8:
+		if l < 2 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMint8(b))
+		o = b[2:]
+		return
+
+	case muint8:
+		if l < 2 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMuint8(b))
+		o = b[2:]
+		return
+
+	case mint16:
+		if l < 3 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMint16(b))
+		o = b[3:]
+		return
+
+	case muint16:
+		if l < 3 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMuint16(b))
+		o = b[3:]
+		return
+
+	case mint32:
+		if l < 5 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMint32(b))
+		o = b[5:]
+		return
+
+	case muint32:
+		if l < 5 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMuint32(b))
+		o = b[5:]
+		return
+
+	case mint64:
+		if l < 9 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMint64(b))
+		o = b[9:]
+		return
+
+	case muint64:
+		if l < 9 {
+			err = ErrShortBytes
+			return
+		}
+		u := getMuint64(b)
+		if u > math.MaxInt64 {
+			err = UintOverflow{Value: u, FailedBitsize: 64}
+			return
+		}
+		i = int64(u)
+		o = b[9:]
+		return
+
+	default:
+		err = badPrefix(IntType, lead)
+		return
+	}
+}
+
+// ReadInt32Bytes tries to read an int32
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not an int)
+//   - [IntOverflow] (value doesn't fit in int32)
+func ReadInt32Bytes(b []byte) (int32, []byte, error) {
+	i, o, err := ReadInt64Bytes(b)
+	if i > math.MaxInt32 || i < math.MinInt32 {
+		return 0, o, IntOverflow{Value: i, FailedBitsize: 32}
+	}
+	return int32(i), o, err
+}
+
+// ReadInt16Bytes tries to read an int16
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not an int)
+//   - [IntOverflow] (value doesn't fit in int16)
+func ReadInt16Bytes(b []byte) (int16, []byte, error) {
+	i, o, err := ReadInt64Bytes(b)
+	if i > math.MaxInt16 || i < math.MinInt16 {
+		return 0, o, IntOverflow{Value: i, FailedBitsize: 16}
+	}
+	return int16(i), o, err
+}
+
+// ReadInt8Bytes tries to read an int8
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not an int)
+//   - [IntOverflow] (value doesn't fit in int8)
+func ReadInt8Bytes(b []byte) (int8, []byte, error) {
+	i, o, err := ReadInt64Bytes(b)
+	if i > math.MaxInt8 || i < math.MinInt8 {
+		return 0, o, IntOverflow{Value: i, FailedBitsize: 8}
+	}
+	return int8(i), o, err
+}
+
+// ReadIntBytes tries to read an int
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not an int)
+//   - [IntOverflow] (value doesn't fit in int; 32-bit platforms only)
+func ReadIntBytes(b []byte) (int, []byte, error) {
+	if smallint {
+		i, b, err := ReadInt32Bytes(b)
+		return int(i), b, err
+	}
+	i, b, err := ReadInt64Bytes(b)
+	return int(i), b, err
+}
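+
+// readTwoExample is an illustrative sketch (not part of upstream msgp):
+// every ReadXxxBytes helper decodes one value from the front of 'b' and
+// returns the unread tail, so successive values are decoded by threading
+// that tail through the next call.
+func readTwoExample(b []byte) (int64, string, error) {
+	i, rest, err := ReadInt64Bytes(b) // first object: an int
+	if err != nil {
+		return 0, "", err
+	}
+	s, _, err := ReadStringBytes(rest) // second object: a str
+	return i, s, err
+}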
+
+// ReadUint64Bytes tries to read a uint64
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a uint)
+func ReadUint64Bytes(b []byte) (u uint64, o []byte, err error) {
+	l := len(b)
+	if l < 1 {
+		return 0, nil, ErrShortBytes
+	}
+
+	lead := b[0]
+	if isfixint(lead) {
+		u = uint64(rfixint(lead))
+		o = b[1:]
+		return
+	}
+
+	switch lead {
+	case mint8:
+		if l < 2 {
+			err = ErrShortBytes
+			return
+		}
+		v := int64(getMint8(b))
+		if v < 0 {
+			err = UintBelowZero{Value: v}
+			return
+		}
+		u = uint64(v)
+		o = b[2:]
+		return
+
+	case muint8:
+		if l < 2 {
+			err = ErrShortBytes
+			return
+		}
+		u = uint64(getMuint8(b))
+		o = b[2:]
+		return
+
+	case mint16:
+		if l < 3 {
+			err = ErrShortBytes
+			return
+		}
+		v := int64(getMint16(b))
+		if v < 0 {
+			err = UintBelowZero{Value: v}
+			return
+		}
+		u = uint64(v)
+		o = b[3:]
+		return
+
+	case muint16:
+		if l < 3 {
+			err = ErrShortBytes
+			return
+		}
+		u = uint64(getMuint16(b))
+		o = b[3:]
+		return
+
+	case mint32:
+		if l < 5 {
+			err = ErrShortBytes
+			return
+		}
+		v := int64(getMint32(b))
+		if v < 0 {
+			err = UintBelowZero{Value: v}
+			return
+		}
+		u = uint64(v)
+		o = b[5:]
+		return
+
+	case muint32:
+		if l < 5 {
+			err = ErrShortBytes
+			return
+		}
+		u = uint64(getMuint32(b))
+		o = b[5:]
+		return
+
+	case mint64:
+		if l < 9 {
+			err = ErrShortBytes
+			return
+		}
+		v := int64(getMint64(b))
+		if v < 0 {
+			err = UintBelowZero{Value: v}
+			return
+		}
+		u = uint64(v)
+		o = b[9:]
+		return
+
+	case muint64:
+		if l < 9 {
+			err = ErrShortBytes
+			return
+		}
+		u = getMuint64(b)
+		o = b[9:]
+		return
+
+	default:
+		if isnfixint(lead) {
+			err = UintBelowZero{Value: int64(rnfixint(lead))}
+		} else {
+			err = badPrefix(UintType, lead)
+		}
+		return
+	}
+}
+
+// ReadUint32Bytes tries to read a uint32
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a uint)
+//   - [UintOverflow] (value too large for uint32)
+func ReadUint32Bytes(b []byte) (uint32, []byte, error) {
+	v, o, err := ReadUint64Bytes(b)
+	if v > math.MaxUint32 {
+		return 0, nil, UintOverflow{Value: v, FailedBitsize: 32}
+	}
+	return uint32(v), o, err
+}
+
+// ReadUint16Bytes tries to read a uint16
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a uint)
+//   - [UintOverflow] (value too large for uint16)
+func ReadUint16Bytes(b []byte) (uint16, []byte, error) {
+	v, o, err := ReadUint64Bytes(b)
+	if v > math.MaxUint16 {
+		return 0, nil, UintOverflow{Value: v, FailedBitsize: 16}
+	}
+	return uint16(v), o, err
+}
+
+// ReadUint8Bytes tries to read a uint8
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a uint)
+//   - [UintOverflow] (value too large for uint8)
+func ReadUint8Bytes(b []byte) (uint8, []byte, error) {
+	v, o, err := ReadUint64Bytes(b)
+	if v > math.MaxUint8 {
+		return 0, nil, UintOverflow{Value: v, FailedBitsize: 8}
+	}
+	return uint8(v), o, err
+}
+
+// ReadUintBytes tries to read a uint
+// from 'b' and return the value and the remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a uint)
+//   - [UintOverflow] (value too large for uint; 32-bit platforms only)
+func ReadUintBytes(b []byte) (uint, []byte, error) {
+	if smallint {
+		u, b, err := ReadUint32Bytes(b)
+		return uint(u), b, err
+	}
+	u, b, err := ReadUint64Bytes(b)
+	return uint(u), b, err
+}
+
+// ReadByteBytes is analogous to ReadUint8Bytes
+func ReadByteBytes(b []byte) (byte, []byte, error) {
+	return ReadUint8Bytes(b)
+}
+
+// ReadBytesBytes reads a 'bin' object
+// from 'b' and returns its value and
+// the remaining bytes in 'b'.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (too few bytes)
+//   - [TypeError] (not a 'bin' object)
+func ReadBytesBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) {
+	return readBytesBytes(b, scratch, false)
+}
+
+func readBytesBytes(b []byte, scratch []byte, zc bool) (v []byte, o []byte, err error) {
+	l := len(b)
+	if l < 1 {
+		return nil, nil, ErrShortBytes
+	}
+
+	lead := b[0]
+	var read int
+	switch lead {
+	case mbin8:
+		if l < 2 {
+			err = ErrShortBytes
+			return
+		}
+
+		read = int(b[1])
+		b = b[2:]
+
+	case mbin16:
+		if l < 3 {
+			err = ErrShortBytes
+			return
+		}
+		read = int(big.Uint16(b[1:]))
+		b = b[3:]
+
+	case mbin32:
+		if l < 5 {
+			err = ErrShortBytes
+			return
+		}
+		read = int(big.Uint32(b[1:]))
+		b = b[5:]
+
+	default:
+		err = badPrefix(BinType, lead)
+		return
+	}
+
+	if len(b) < read {
+		err = ErrShortBytes
+		return
+	}
+
+	// zero-copy
+	if zc {
+		v = b[0:read]
+		o = b[read:]
+		return
+	}
+
+	if cap(scratch) >= read {
+		v = scratch[0:read]
+	} else {
+		v = make([]byte, read)
+	}
+
+	o = b[copy(v, b):]
+	return
+}
+
+// ReadBytesZC extracts the messagepack-encoded
+// binary field without copying. The returned []byte
+// points to the same memory as the input slice.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (b not long enough)
+//   - [TypeError] (object not 'bin')
+func ReadBytesZC(b []byte) (v []byte, o []byte, err error) {
+	return readBytesBytes(b, nil, true)
+}
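+
+// copyFromZC is an illustrative sketch (not part of upstream msgp): the
+// slice returned by ReadBytesZC aliases 'b', so it is only valid while the
+// caller keeps 'b' unchanged; copy it out (or use ReadBytesBytes) when the
+// data must outlive the input buffer.
+func copyFromZC(b []byte) (owned []byte, rest []byte, err error) {
+	v, rest, err := ReadBytesZC(b)
+	if err != nil {
+		return nil, rest, err
+	}
+	owned = append([]byte(nil), v...) // detach from the input slice
+	return owned, rest, nil
+}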
+
+// ReadExactBytes reads a MessagePack 'bin' object
+// from 'b' into 'into' and returns the remaining bytes.
+// If the encoded payload is not exactly len(into) bytes,
+// an ArrayError is returned.
+func ReadExactBytes(b []byte, into []byte) (o []byte, err error) {
+	l := len(b)
+	if l < 1 {
+		err = ErrShortBytes
+		return
+	}
+
+	lead := b[0]
+	var read uint32
+	var skip int
+	switch lead {
+	case mbin8:
+		if l < 2 {
+			err = ErrShortBytes
+			return
+		}
+
+		read = uint32(b[1])
+		skip = 2
+
+	case mbin16:
+		if l < 3 {
+			err = ErrShortBytes
+			return
+		}
+		read = uint32(big.Uint16(b[1:]))
+		skip = 3
+
+	case mbin32:
+		if l < 5 {
+			err = ErrShortBytes
+			return
+		}
+		read = uint32(big.Uint32(b[1:]))
+		skip = 5
+
+	default:
+		err = badPrefix(BinType, lead)
+		return
+	}
+
+	if read != uint32(len(into)) {
+		err = ArrayError{Wanted: uint32(len(into)), Got: read}
+		return
+	}
+
+	o = b[skip+copy(into, b[skip:]):]
+	return
+}
+
+// ReadStringZC reads a messagepack string field
+// without copying. The returned []byte points
+// to the same memory as the input slice.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (b not long enough)
+//   - [TypeError] (object not 'str')
+func ReadStringZC(b []byte) (v []byte, o []byte, err error) {
+	l := len(b)
+	if l < 1 {
+		return nil, nil, ErrShortBytes
+	}
+
+	lead := b[0]
+	var read int
+
+	if isfixstr(lead) {
+		read = int(rfixstr(lead))
+		b = b[1:]
+	} else {
+		switch lead {
+		case mstr8:
+			if l < 2 {
+				err = ErrShortBytes
+				return
+			}
+			read = int(b[1])
+			b = b[2:]
+
+		case mstr16:
+			if l < 3 {
+				err = ErrShortBytes
+				return
+			}
+			read = int(big.Uint16(b[1:]))
+			b = b[3:]
+
+		case mstr32:
+			if l < 5 {
+				err = ErrShortBytes
+				return
+			}
+			read = int(big.Uint32(b[1:]))
+			b = b[5:]
+
+		default:
+			err = TypeError{Method: StrType, Encoded: getType(lead)}
+			return
+		}
+	}
+
+	if len(b) < read {
+		err = ErrShortBytes
+		return
+	}
+
+	v = b[0:read]
+	o = b[read:]
+	return
+}
+
+// ReadStringBytes reads a 'str' object
+// from 'b' and returns its value and the
+// remaining bytes in 'b'.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (b not long enough)
+//   - [TypeError] (not 'str' type)
+//   - [InvalidPrefixError]
+func ReadStringBytes(b []byte) (string, []byte, error) {
+	v, o, err := ReadStringZC(b)
+	return string(v), o, err
+}
+
+// ReadStringAsBytes reads a 'str' object
+// into a slice of bytes. 'v' is the value of
+// the 'str' object, which may reside in memory
+// pointed to by 'scratch'. 'o' is the remaining bytes
+// in 'b'.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (b not long enough)
+//   - [TypeError] (not 'str' type)
+//   - [InvalidPrefixError] (unknown type marker)
+func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) {
+	var tmp []byte
+	tmp, o, err = ReadStringZC(b)
+	v = append(scratch[:0], tmp...)
+	return
+}
+
+// ReadComplex128Bytes reads a complex128
+// extension object from 'b' and returns the
+// remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (not enough bytes in 'b')
+//   - [TypeError] (object not a complex128)
+//   - [InvalidPrefixError]
+//   - [ExtensionTypeError] (object an extension of the correct size, but not a complex128)
+func ReadComplex128Bytes(b []byte) (c complex128, o []byte, err error) {
+	if len(b) < 18 {
+		err = ErrShortBytes
+		return
+	}
+	if b[0] != mfixext16 {
+		err = badPrefix(Complex128Type, b[0])
+		return
+	}
+	if int8(b[1]) != Complex128Extension {
+		err = errExt(int8(b[1]), Complex128Extension)
+		return
+	}
+	c = complex(math.Float64frombits(big.Uint64(b[2:])),
+		math.Float64frombits(big.Uint64(b[10:])))
+	o = b[18:]
+	return
+}
+
+// ReadComplex64Bytes reads a complex64
+// extension object from 'b' and returns the
+// remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (not enough bytes in 'b')
+//   - [TypeError] (object not a complex64)
+//   - [ExtensionTypeError] (object an extension of the correct size, but not a complex64)
+func ReadComplex64Bytes(b []byte) (c complex64, o []byte, err error) {
+	if len(b) < 10 {
+		err = ErrShortBytes
+		return
+	}
+	if b[0] != mfixext8 {
+		err = badPrefix(Complex64Type, b[0])
+		return
+	}
+	if b[1] != Complex64Extension {
+		err = errExt(int8(b[1]), Complex64Extension)
+		return
+	}
+	c = complex(math.Float32frombits(big.Uint32(b[2:])),
+		math.Float32frombits(big.Uint32(b[6:])))
+	o = b[10:]
+	return
+}
+
+// ReadTimeBytes reads a time.Time
+// extension object from 'b' and returns the
+// remaining bytes.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (not enough bytes in 'b')
+//   - [TypeError] (object not a time.Time)
+//   - [ExtensionTypeError] (object an extension of the correct size, but not a time.Time)
+func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) {
+	if len(b) < 15 {
+		err = ErrShortBytes
+		return
+	}
+	if b[0] != mext8 || b[1] != 12 {
+		err = badPrefix(TimeType, b[0])
+		return
+	}
+	if int8(b[2]) != TimeExtension {
+		err = errExt(int8(b[2]), TimeExtension)
+		return
+	}
+	sec, nsec := getUnix(b[3:])
+	t = time.Unix(sec, int64(nsec)).Local()
+	o = b[15:]
+	return
+}
+
+// ReadMapStrIntfBytes reads a map[string]interface{}
+// out of 'b' and returns the map and remaining bytes.
+// If 'old' is non-nil, the values will be read into that map.
+func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) {
+	var sz uint32
+	o = b
+	sz, o, err = ReadMapHeaderBytes(o)
+
+	if err != nil {
+		return
+	}
+
+	if old != nil {
+		for key := range old {
+			delete(old, key)
+		}
+		v = old
+	} else {
+		v = make(map[string]interface{}, int(sz))
+	}
+
+	for z := uint32(0); z < sz; z++ {
+		if len(o) < 1 {
+			err = ErrShortBytes
+			return
+		}
+		var key []byte
+		key, o, err = ReadMapKeyZC(o)
+		if err != nil {
+			return
+		}
+		var val interface{}
+		val, o, err = ReadIntfBytes(o)
+		if err != nil {
+			return
+		}
+		v[string(key)] = val
+	}
+	return
+}
+
+// ReadIntfBytes attempts to read
+// the next object out of 'b' as a raw interface{} and
+// return the remaining bytes.
+func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) {
+	if len(b) < 1 {
+		err = ErrShortBytes
+		return
+	}
+
+	k := NextType(b)
+
+	switch k {
+	case MapType:
+		i, o, err = ReadMapStrIntfBytes(b, nil)
+		return
+
+	case ArrayType:
+		var sz uint32
+		sz, o, err = ReadArrayHeaderBytes(b)
+		if err != nil {
+			return
+		}
+		j := make([]interface{}, int(sz))
+		i = j
+		for d := range j {
+			j[d], o, err = ReadIntfBytes(o)
+			if err != nil {
+				return
+			}
+		}
+		return
+
+	case Float32Type:
+		i, o, err = ReadFloat32Bytes(b)
+		return
+
+	case Float64Type:
+		i, o, err = ReadFloat64Bytes(b)
+		return
+
+	case IntType:
+		i, o, err = ReadInt64Bytes(b)
+		return
+
+	case UintType:
+		i, o, err = ReadUint64Bytes(b)
+		return
+
+	case BoolType:
+		i, o, err = ReadBoolBytes(b)
+		return
+
+	case TimeType:
+		i, o, err = ReadTimeBytes(b)
+		return
+
+	case Complex64Type:
+		i, o, err = ReadComplex64Bytes(b)
+		return
+
+	case Complex128Type:
+		i, o, err = ReadComplex128Bytes(b)
+		return
+
+	case ExtensionType:
+		var t int8
+		t, err = peekExtension(b)
+		if err != nil {
+			return
+		}
+		// use a user-defined extension,
+		// if it's been registered
+		f, ok := extensionReg[t]
+		if ok {
+			e := f()
+			o, err = ReadExtensionBytes(b, e)
+			i = e
+			return
+		}
+		// last resort is a raw extension
+		e := RawExtension{}
+		e.Type = int8(t)
+		o, err = ReadExtensionBytes(b, &e)
+		i = &e
+		return
+
+	case NilType:
+		o, err = ReadNilBytes(b)
+		return
+
+	case BinType:
+		i, o, err = ReadBytesBytes(b, nil)
+		return
+
+	case StrType:
+		i, o, err = ReadStringBytes(b)
+		return
+
+	default:
+		err = InvalidPrefixError(b[0])
+		return
+	}
+}
+
+// Skip skips the next object in 'b' and
+// returns the remaining bytes. If the object
+// is a map or array, all of its elements
+// will be skipped.
+//
+// Possible errors:
+//
+//   - [ErrShortBytes] (not enough bytes in b)
+//   - [InvalidPrefixError] (bad encoding)
+func Skip(b []byte) ([]byte, error) {
+	sz, asz, err := getSize(b)
+	if err != nil {
+		return b, err
+	}
+	if uintptr(len(b)) < sz {
+		return b, ErrShortBytes
+	}
+	b = b[sz:]
+	for asz > 0 {
+		b, err = Skip(b)
+		if err != nil {
+			return b, err
+		}
+		asz--
+	}
+	return b, nil
+}
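+
+// skipThenRead is an illustrative sketch (not part of upstream msgp):
+// Skip lets a decoder step over an object it does not care about (however
+// deeply nested) and continue decoding whatever follows it.
+func skipThenRead(b []byte) (int64, error) {
+	rest, err := Skip(b) // drop the first object entirely
+	if err != nil {
+		return 0, err
+	}
+	i, _, err := ReadInt64Bytes(rest)
+	return i, err
+}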
+
+// returns (skip N bytes, skip M objects, error)
+func getSize(b []byte) (uintptr, uintptr, error) {
+	l := len(b)
+	if l == 0 {
+		return 0, 0, ErrShortBytes
+	}
+	lead := b[0]
+	spec := getBytespec(lead) // get type information
+	size, mode := spec.size, spec.extra
+	if size == 0 {
+		return 0, 0, InvalidPrefixError(lead)
+	}
+	if mode >= 0 { // fixed composites
+		return uintptr(size), uintptr(mode), nil
+	}
+	if l < int(size) {
+		return 0, 0, ErrShortBytes
+	}
+	switch mode {
+	case extra8:
+		return uintptr(size) + uintptr(b[1]), 0, nil
+	case extra16:
+		return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil
+	case extra32:
+		return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil
+	case map16v:
+		return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil
+	case map32v:
+		return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil
+	case array16v:
+		return uintptr(size), uintptr(big.Uint16(b[1:])), nil
+	case array32v:
+		return uintptr(size), uintptr(big.Uint32(b[1:])), nil
+	default:
+		return 0, 0, fatal
+	}
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/size.go b/vendor/github.com/tinylib/msgp/msgp/size.go
new file mode 100644
index 0000000000..e3a613b248
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/size.go
@@ -0,0 +1,39 @@
+package msgp
+
+// The sizes provided are the worst-case encoded sizes for each type.
+// For variable-length types ([]byte, string), the total encoded size
+// is the prefix size plus the length of the object.
+const (
+	Int64Size      = 9
+	IntSize        = Int64Size
+	UintSize       = Int64Size
+	Int8Size       = 2
+	Int16Size      = 3
+	Int32Size      = 5
+	Uint8Size      = 2
+	ByteSize       = Uint8Size
+	Uint16Size     = 3
+	Uint32Size     = 5
+	Uint64Size     = Int64Size
+	Float64Size    = 9
+	Float32Size    = 5
+	Complex64Size  = 10
+	Complex128Size = 18
+
+	DurationSize = Int64Size
+	TimeSize     = 15
+	BoolSize     = 1
+	NilSize      = 1
+
+	MapHeaderSize   = 5
+	ArrayHeaderSize = 5
+
+	BytesPrefixSize     = 5
+	StringPrefixSize    = 5
+	ExtensionPrefixSize = 6
+)
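+
+// guessMapStrStrSize is an illustrative sketch (not part of upstream msgp)
+// of how these constants compose: a map[string]string costs one map header
+// plus a string prefix and the raw bytes for every key and value, which is
+// exactly how GuessSize in write.go estimates it.
+func guessMapStrStrSize(m map[string]string) int {
+	s := MapHeaderSize
+	for k, v := range m {
+		s += 2*StringPrefixSize + len(k) + len(v)
+	}
+	return s
+}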
diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go
new file mode 100644
index 0000000000..06e8d84378
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go
@@ -0,0 +1,37 @@
+//go:build !purego && !appengine
+// +build !purego,!appengine
+
+package msgp
+
+import (
+	"unsafe"
+)
+
+// NOTE:
+// all of the definitions in this file
+// should be repeated in appengine.go,
+// but without using unsafe.
+
+const (
+	// spec says int and uint are always
+	// the same size, but that int/uint
+	// size may not be machine word size
+	smallint = unsafe.Sizeof(int(0)) == 4
+)
+
+// UnsafeString returns the byte slice as a volatile string
+// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
+// THIS IS EVIL CODE.
+// YOU HAVE BEEN WARNED.
+func UnsafeString(b []byte) string {
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+// UnsafeBytes returns the string as a byte slice
+//
+// Deprecated:
+// Since this code is no longer used by the code generator,
+// UnsafeBytes(s) is precisely equivalent to []byte(s)
+func UnsafeBytes(s string) []byte {
+	return []byte(s)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go
new file mode 100644
index 0000000000..ec2f6f528b
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/write.go
@@ -0,0 +1,813 @@
+package msgp
+
+import (
+	"errors"
+	"io"
+	"math"
+	"reflect"
+	"sync"
+	"time"
+)
+
+const (
+	// min buffer size for the writer
+	minWriterSize = 18
+)
+
+// Sizer is an interface implemented
+// by types that can estimate their
+// size when MessagePack encoded.
+// This interface is optional, but
+// encoding/marshaling implementations
+// may use this as a way to pre-allocate
+// memory for serialization.
+type Sizer interface {
+	Msgsize() int
+}
+
+var (
+	// Nowhere is an io.Writer to nowhere
+	Nowhere io.Writer = nwhere{}
+
+	btsType    = reflect.TypeOf(([]byte)(nil))
+	writerPool = sync.Pool{
+		New: func() interface{} {
+			return &Writer{buf: make([]byte, 2048)}
+		},
+	}
+)
+
+func popWriter(w io.Writer) *Writer {
+	wr := writerPool.Get().(*Writer)
+	wr.Reset(w)
+	return wr
+}
+
+func pushWriter(wr *Writer) {
+	wr.w = nil
+	wr.wloc = 0
+	writerPool.Put(wr)
+}
+
+// freeW frees a writer for use
+// by other processes. It is not necessary
+// to call freeW on a writer. However, maintaining
+// a reference to a *Writer after calling freeW on
+// it will cause undefined behavior.
+func freeW(w *Writer) { pushWriter(w) }
+
+// Require ensures that cap(old)-len(old) >= extra.
+func Require(old []byte, extra int) []byte {
+	l := len(old)
+	c := cap(old)
+	r := l + extra
+	if c >= r {
+		return old
+	} else if l == 0 {
+		return make([]byte, 0, extra)
+	}
+	// the new size is the greater
+	// of double the old capacity
+	// and the sum of the old length
+	// and the number of new bytes
+	// necessary.
+	c <<= 1
+	if c < r {
+		c = r
+	}
+	n := make([]byte, l, c)
+	copy(n, old)
+	return n
+}
+
+// nowhere writer
+type nwhere struct{}
+
+func (n nwhere) Write(p []byte) (int, error) { return len(p), nil }
+
+// Marshaler is the interface implemented
+// by types that know how to marshal themselves
+// as MessagePack. MarshalMsg appends the marshalled
+// form of the object to the provided
+// byte slice, returning the extended
+// slice and any errors encountered.
+type Marshaler interface {
+	MarshalMsg([]byte) ([]byte, error)
+}
+
+// Encodable is the interface implemented
+// by types that know how to write themselves
+// as MessagePack using a *msgp.Writer.
+type Encodable interface {
+	EncodeMsg(*Writer) error
+}
+
+// Writer is a buffered writer
+// that can be used to write
+// MessagePack objects to an io.Writer.
+// You must call *Writer.Flush() in order
+// to flush all of the buffered data
+// to the underlying writer.
+type Writer struct {
+	w    io.Writer
+	buf  []byte
+	wloc int
+}
+
+// NewWriter returns a new *Writer.
+func NewWriter(w io.Writer) *Writer {
+	if wr, ok := w.(*Writer); ok {
+		return wr
+	}
+	return popWriter(w)
+}
+
+// NewWriterSize returns a writer with a custom buffer size.
+func NewWriterSize(w io.Writer, sz int) *Writer {
+	// we must be able to require() 'minWriterSize'
+	// contiguous bytes, so that is the
+	// practical minimum buffer size
+	if sz < minWriterSize {
+		sz = minWriterSize
+	}
+	buf := make([]byte, sz)
+	return NewWriterBuf(w, buf)
+}
+
+// NewWriterBuf returns a writer with a provided buffer.
+// If the capacity of 'buf' is smaller than 18 (the minimum writer
+// buffer size), it is not used; a new buffer is allocated instead.
+func NewWriterBuf(w io.Writer, buf []byte) *Writer {
+	if cap(buf) < minWriterSize {
+		buf = make([]byte, minWriterSize)
+	}
+	buf = buf[:cap(buf)]
+	return &Writer{
+		w:   w,
+		buf: buf,
+	}
+}
+
+// Encode encodes an Encodable to an io.Writer.
+func Encode(w io.Writer, e Encodable) error {
+	wr := NewWriter(w)
+	err := e.EncodeMsg(wr)
+	if err == nil {
+		err = wr.Flush()
+	}
+	freeW(wr)
+	return err
+}
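+
+// encodeByHand is an illustrative sketch (not part of upstream msgp):
+// driving a Writer directly instead of going through Encode; nothing
+// reaches 'w' until Flush is called.
+func encodeByHand(w io.Writer) error {
+	mw := NewWriter(w)
+	if err := mw.WriteMapHeader(1); err != nil {
+		return err
+	}
+	if err := mw.WriteString("answer"); err != nil {
+		return err
+	}
+	if err := mw.WriteInt(42); err != nil {
+		return err
+	}
+	return mw.Flush()
+}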
+
+func (mw *Writer) flush() error {
+	if mw.wloc == 0 {
+		return nil
+	}
+	n, err := mw.w.Write(mw.buf[:mw.wloc])
+	if err != nil {
+		if n > 0 {
+			mw.wloc = copy(mw.buf, mw.buf[n:mw.wloc])
+		}
+		return err
+	}
+	mw.wloc = 0
+	return nil
+}
+
+// Flush flushes all of the buffered
+// data to the underlying writer.
+func (mw *Writer) Flush() error { return mw.flush() }
+
+// Buffered returns the number of bytes in the write buffer
+func (mw *Writer) Buffered() int { return len(mw.buf) - mw.wloc }
+
+func (mw *Writer) avail() int { return len(mw.buf) - mw.wloc }
+
+func (mw *Writer) bufsize() int { return len(mw.buf) }
+
+// NOTE: this should only be called with
+// a number that is guaranteed to be less than
+// len(mw.buf). typically, it is called with a constant.
+//
+// NOTE: this is a hot code path
+func (mw *Writer) require(n int) (int, error) {
+	c := len(mw.buf)
+	wl := mw.wloc
+	if c-wl < n {
+		if err := mw.flush(); err != nil {
+			return 0, err
+		}
+		wl = mw.wloc
+	}
+	mw.wloc += n
+	return wl, nil
+}
+
+// Append appends raw bytes to the writer's buffer.
+func (mw *Writer) Append(b ...byte) error {
+	if mw.avail() < len(b) {
+		err := mw.flush()
+		if err != nil {
+			return err
+		}
+	}
+	mw.wloc += copy(mw.buf[mw.wloc:], b)
+	return nil
+}
+
+// push one byte onto the buffer
+//
+// NOTE: this is a hot code path
+func (mw *Writer) push(b byte) error {
+	if mw.wloc == len(mw.buf) {
+		if err := mw.flush(); err != nil {
+			return err
+		}
+	}
+	mw.buf[mw.wloc] = b
+	mw.wloc++
+	return nil
+}
+
+func (mw *Writer) prefix8(b byte, u uint8) error {
+	const need = 2
+	if len(mw.buf)-mw.wloc < need {
+		if err := mw.flush(); err != nil {
+			return err
+		}
+	}
+	prefixu8(mw.buf[mw.wloc:], b, u)
+	mw.wloc += need
+	return nil
+}
+
+func (mw *Writer) prefix16(b byte, u uint16) error {
+	const need = 3
+	if len(mw.buf)-mw.wloc < need {
+		if err := mw.flush(); err != nil {
+			return err
+		}
+	}
+	prefixu16(mw.buf[mw.wloc:], b, u)
+	mw.wloc += need
+	return nil
+}
+
+func (mw *Writer) prefix32(b byte, u uint32) error {
+	const need = 5
+	if len(mw.buf)-mw.wloc < need {
+		if err := mw.flush(); err != nil {
+			return err
+		}
+	}
+	prefixu32(mw.buf[mw.wloc:], b, u)
+	mw.wloc += need
+	return nil
+}
+
+func (mw *Writer) prefix64(b byte, u uint64) error {
+	const need = 9
+	if len(mw.buf)-mw.wloc < need {
+		if err := mw.flush(); err != nil {
+			return err
+		}
+	}
+	prefixu64(mw.buf[mw.wloc:], b, u)
+	mw.wloc += need
+	return nil
+}
+
+// Write implements io.Writer, and writes
+// data directly to the buffer.
+func (mw *Writer) Write(p []byte) (int, error) {
+	l := len(p)
+	if mw.avail() < l {
+		if err := mw.flush(); err != nil {
+			return 0, err
+		}
+		if l > len(mw.buf) {
+			return mw.w.Write(p)
+		}
+	}
+	mw.wloc += copy(mw.buf[mw.wloc:], p)
+	return l, nil
+}
+
+// writeString mirrors io.WriteString for the internal buffer
+func (mw *Writer) writeString(s string) error {
+	l := len(s)
+	if mw.avail() < l {
+		if err := mw.flush(); err != nil {
+			return err
+		}
+		if l > len(mw.buf) {
+			_, err := io.WriteString(mw.w, s)
+			return err
+		}
+	}
+	mw.wloc += copy(mw.buf[mw.wloc:], s)
+	return nil
+}
+
+// Reset changes the underlying writer used by the Writer
+func (mw *Writer) Reset(w io.Writer) {
+	mw.buf = mw.buf[:cap(mw.buf)]
+	mw.w = w
+	mw.wloc = 0
+}
+
+// WriteMapHeader writes a map header of the given
+// size to the writer
+func (mw *Writer) WriteMapHeader(sz uint32) error {
+	switch {
+	case sz <= 15:
+		return mw.push(wfixmap(uint8(sz)))
+	case sz <= math.MaxUint16:
+		return mw.prefix16(mmap16, uint16(sz))
+	default:
+		return mw.prefix32(mmap32, sz)
+	}
+}
+
+// WriteArrayHeader writes an array header of the
+// given size to the writer
+func (mw *Writer) WriteArrayHeader(sz uint32) error {
+	switch {
+	case sz <= 15:
+		return mw.push(wfixarray(uint8(sz)))
+	case sz <= math.MaxUint16:
+		return mw.prefix16(marray16, uint16(sz))
+	default:
+		return mw.prefix32(marray32, sz)
+	}
+}
+
+// WriteNil writes a nil byte to the buffer
+func (mw *Writer) WriteNil() error {
+	return mw.push(mnil)
+}
+
+// WriteFloat64 writes a float64 to the writer
+func (mw *Writer) WriteFloat64(f float64) error {
+	return mw.prefix64(mfloat64, math.Float64bits(f))
+}
+
+// WriteFloat32 writes a float32 to the writer
+func (mw *Writer) WriteFloat32(f float32) error {
+	return mw.prefix32(mfloat32, math.Float32bits(f))
+}
+
+// WriteDuration writes a time.Duration to the writer
+func (mw *Writer) WriteDuration(d time.Duration) error {
+	return mw.WriteInt64(int64(d))
+}
+
+// WriteInt64 writes an int64 to the writer
+func (mw *Writer) WriteInt64(i int64) error {
+	if i >= 0 {
+		switch {
+		case i <= math.MaxInt8:
+			return mw.push(wfixint(uint8(i)))
+		case i <= math.MaxInt16:
+			return mw.prefix16(mint16, uint16(i))
+		case i <= math.MaxInt32:
+			return mw.prefix32(mint32, uint32(i))
+		default:
+			return mw.prefix64(mint64, uint64(i))
+		}
+	}
+	switch {
+	case i >= -32:
+		return mw.push(wnfixint(int8(i)))
+	case i >= math.MinInt8:
+		return mw.prefix8(mint8, uint8(i))
+	case i >= math.MinInt16:
+		return mw.prefix16(mint16, uint16(i))
+	case i >= math.MinInt32:
+		return mw.prefix32(mint32, uint32(i))
+	default:
+		return mw.prefix64(mint64, uint64(i))
+	}
+}
+
+// WriteInt8 writes an int8 to the writer
+func (mw *Writer) WriteInt8(i int8) error { return mw.WriteInt64(int64(i)) }
+
+// WriteInt16 writes an int16 to the writer
+func (mw *Writer) WriteInt16(i int16) error { return mw.WriteInt64(int64(i)) }
+
+// WriteInt32 writes an int32 to the writer
+func (mw *Writer) WriteInt32(i int32) error { return mw.WriteInt64(int64(i)) }
+
+// WriteInt writes an int to the writer
+func (mw *Writer) WriteInt(i int) error { return mw.WriteInt64(int64(i)) }
+
+// WriteUint64 writes a uint64 to the writer
+func (mw *Writer) WriteUint64(u uint64) error {
+	switch {
+	case u <= (1<<7)-1:
+		return mw.push(wfixint(uint8(u)))
+	case u <= math.MaxUint8:
+		return mw.prefix8(muint8, uint8(u))
+	case u <= math.MaxUint16:
+		return mw.prefix16(muint16, uint16(u))
+	case u <= math.MaxUint32:
+		return mw.prefix32(muint32, uint32(u))
+	default:
+		return mw.prefix64(muint64, u)
+	}
+}
+
+// WriteByte is analogous to WriteUint8
+func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) }
+
+// WriteUint8 writes a uint8 to the writer
+func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) }
+
+// WriteUint16 writes a uint16 to the writer
+func (mw *Writer) WriteUint16(u uint16) error { return mw.WriteUint64(uint64(u)) }
+
+// WriteUint32 writes a uint32 to the writer
+func (mw *Writer) WriteUint32(u uint32) error { return mw.WriteUint64(uint64(u)) }
+
+// WriteUint writes a uint to the writer
+func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) }
+
+// WriteBytes writes binary as 'bin' to the writer
+func (mw *Writer) WriteBytes(b []byte) error {
+	sz := uint32(len(b))
+	var err error
+	switch {
+	case sz <= math.MaxUint8:
+		err = mw.prefix8(mbin8, uint8(sz))
+	case sz <= math.MaxUint16:
+		err = mw.prefix16(mbin16, uint16(sz))
+	default:
+		err = mw.prefix32(mbin32, sz)
+	}
+	if err != nil {
+		return err
+	}
+	_, err = mw.Write(b)
+	return err
+}
+
+// WriteBytesHeader writes just the size header
+// of a MessagePack 'bin' object. The user is responsible
+// for then writing 'sz' more bytes into the stream.
+func (mw *Writer) WriteBytesHeader(sz uint32) error {
+	switch {
+	case sz <= math.MaxUint8:
+		return mw.prefix8(mbin8, uint8(sz))
+	case sz <= math.MaxUint16:
+		return mw.prefix16(mbin16, uint16(sz))
+	default:
+		return mw.prefix32(mbin32, sz)
+	}
+}
+
+// WriteBool writes a bool to the writer
+func (mw *Writer) WriteBool(b bool) error {
+	if b {
+		return mw.push(mtrue)
+	}
+	return mw.push(mfalse)
+}
+
+// WriteString writes a messagepack string to the writer.
+// (This is NOT an implementation of io.StringWriter)
+func (mw *Writer) WriteString(s string) error {
+	sz := uint32(len(s))
+	var err error
+	switch {
+	case sz <= 31:
+		err = mw.push(wfixstr(uint8(sz)))
+	case sz <= math.MaxUint8:
+		err = mw.prefix8(mstr8, uint8(sz))
+	case sz <= math.MaxUint16:
+		err = mw.prefix16(mstr16, uint16(sz))
+	default:
+		err = mw.prefix32(mstr32, sz)
+	}
+	if err != nil {
+		return err
+	}
+	return mw.writeString(s)
+}
+
+// WriteStringHeader writes just the string size
+// header of a MessagePack 'str' object. The user
+// is responsible for writing 'sz' more valid UTF-8
+// bytes to the stream.
+func (mw *Writer) WriteStringHeader(sz uint32) error {
+	switch {
+	case sz <= 31:
+		return mw.push(wfixstr(uint8(sz)))
+	case sz <= math.MaxUint8:
+		return mw.prefix8(mstr8, uint8(sz))
+	case sz <= math.MaxUint16:
+		return mw.prefix16(mstr16, uint16(sz))
+	default:
+		return mw.prefix32(mstr32, sz)
+	}
+}
+
+// WriteStringFromBytes writes a 'str' object
+// from a []byte.
+func (mw *Writer) WriteStringFromBytes(str []byte) error {
+	sz := uint32(len(str))
+	var err error
+	switch {
+	case sz <= 31:
+		err = mw.push(wfixstr(uint8(sz)))
+	case sz <= math.MaxUint8:
+		err = mw.prefix8(mstr8, uint8(sz))
+	case sz <= math.MaxUint16:
+		err = mw.prefix16(mstr16, uint16(sz))
+	default:
+		err = mw.prefix32(mstr32, sz)
+	}
+	if err != nil {
+		return err
+	}
+	_, err = mw.Write(str)
+	return err
+}
+
+// WriteComplex64 writes a complex64 to the writer
+func (mw *Writer) WriteComplex64(f complex64) error {
+	o, err := mw.require(10)
+	if err != nil {
+		return err
+	}
+	mw.buf[o] = mfixext8
+	mw.buf[o+1] = Complex64Extension
+	big.PutUint32(mw.buf[o+2:], math.Float32bits(real(f)))
+	big.PutUint32(mw.buf[o+6:], math.Float32bits(imag(f)))
+	return nil
+}
+
+// WriteComplex128 writes a complex128 to the writer
+func (mw *Writer) WriteComplex128(f complex128) error {
+	o, err := mw.require(18)
+	if err != nil {
+		return err
+	}
+	mw.buf[o] = mfixext16
+	mw.buf[o+1] = Complex128Extension
+	big.PutUint64(mw.buf[o+2:], math.Float64bits(real(f)))
+	big.PutUint64(mw.buf[o+10:], math.Float64bits(imag(f)))
+	return nil
+}
+
+// WriteMapStrStr writes a map[string]string to the writer
+func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) {
+	err = mw.WriteMapHeader(uint32(len(mp)))
+	if err != nil {
+		return
+	}
+	for key, val := range mp {
+		err = mw.WriteString(key)
+		if err != nil {
+			return
+		}
+		err = mw.WriteString(val)
+		if err != nil {
+			return
+		}
+	}
+	return nil
+}
+
+// WriteMapStrIntf writes a map[string]interface{} to the writer
+func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) {
+	err = mw.WriteMapHeader(uint32(len(mp)))
+	if err != nil {
+		return
+	}
+	for key, val := range mp {
+		err = mw.WriteString(key)
+		if err != nil {
+			return
+		}
+		err = mw.WriteIntf(val)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// WriteTime writes a time.Time object to the wire.
+//
+// Time is encoded as Unix time, which means that
+// location (time zone) data is removed from the object.
+// The encoded object itself is 12 bytes: 8 bytes for
+// a big-endian 64-bit integer denoting seconds
+// elapsed since "zero" Unix time, followed by 4 bytes
+// for a big-endian 32-bit signed integer denoting
+// the nanosecond offset of the time. This encoding
+// is intended to ease portability across languages.
+// (Note that this is *not* the standard time.Time
+// binary encoding, because its implementation relies
+// heavily on the internal representation used by the
+// time package.)
+func (mw *Writer) WriteTime(t time.Time) error {
+	t = t.UTC()
+	o, err := mw.require(15)
+	if err != nil {
+		return err
+	}
+	mw.buf[o] = mext8
+	mw.buf[o+1] = 12
+	mw.buf[o+2] = TimeExtension
+	putUnix(mw.buf[o+3:], t.Unix(), int32(t.Nanosecond()))
+	return nil
+}
+
+// WriteIntf writes the concrete type of 'v'.
+// WriteIntf will error if 'v' is not one of the following:
+//   - A bool, float, string, []byte, int, uint, or complex
+//   - A map of supported types (with string keys)
+//   - An array or slice of supported types
+//   - A pointer to a supported type
+//   - A type that satisfies the msgp.Encodable interface
+//   - A type that satisfies the msgp.Extension interface
+func (mw *Writer) WriteIntf(v interface{}) error {
+	if v == nil {
+		return mw.WriteNil()
+	}
+	switch v := v.(type) {
+
+	// preferred interfaces
+
+	case Encodable:
+		return v.EncodeMsg(mw)
+	case Extension:
+		return mw.WriteExtension(v)
+
+	// concrete types
+
+	case bool:
+		return mw.WriteBool(v)
+	case float32:
+		return mw.WriteFloat32(v)
+	case float64:
+		return mw.WriteFloat64(v)
+	case complex64:
+		return mw.WriteComplex64(v)
+	case complex128:
+		return mw.WriteComplex128(v)
+	case uint8:
+		return mw.WriteUint8(v)
+	case uint16:
+		return mw.WriteUint16(v)
+	case uint32:
+		return mw.WriteUint32(v)
+	case uint64:
+		return mw.WriteUint64(v)
+	case uint:
+		return mw.WriteUint(v)
+	case int8:
+		return mw.WriteInt8(v)
+	case int16:
+		return mw.WriteInt16(v)
+	case int32:
+		return mw.WriteInt32(v)
+	case int64:
+		return mw.WriteInt64(v)
+	case int:
+		return mw.WriteInt(v)
+	case string:
+		return mw.WriteString(v)
+	case []byte:
+		return mw.WriteBytes(v)
+	case map[string]string:
+		return mw.WriteMapStrStr(v)
+	case map[string]interface{}:
+		return mw.WriteMapStrIntf(v)
+	case time.Time:
+		return mw.WriteTime(v)
+	case time.Duration:
+		return mw.WriteDuration(v)
+	}
+
+	val := reflect.ValueOf(v)
+	if !isSupported(val.Kind()) || !val.IsValid() {
+		return errors.New("msgp: type " + val.String() + " not supported")
+	}
+
+	switch val.Kind() {
+	case reflect.Ptr:
+		if val.IsNil() {
+			return mw.WriteNil()
+		}
+		return mw.WriteIntf(val.Elem().Interface())
+	case reflect.Slice:
+		return mw.writeSlice(val)
+	case reflect.Map:
+		return mw.writeMap(val)
+	}
+	return &ErrUnsupportedType{T: val.Type()}
+}
+
+func (mw *Writer) writeMap(v reflect.Value) (err error) {
+	if v.Type().Key().Kind() != reflect.String {
+		return errors.New("msgp: map keys must be strings")
+	}
+	ks := v.MapKeys()
+	err = mw.WriteMapHeader(uint32(len(ks)))
+	if err != nil {
+		return
+	}
+	for _, key := range ks {
+		val := v.MapIndex(key)
+		err = mw.WriteString(key.String())
+		if err != nil {
+			return
+		}
+		err = mw.WriteIntf(val.Interface())
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (mw *Writer) writeSlice(v reflect.Value) (err error) {
+	// is []byte
+	if v.Type().ConvertibleTo(btsType) {
+		return mw.WriteBytes(v.Bytes())
+	}
+
+	sz := uint32(v.Len())
+	err = mw.WriteArrayHeader(sz)
+	if err != nil {
+		return
+	}
+	for i := uint32(0); i < sz; i++ {
+		err = mw.WriteIntf(v.Index(int(i)).Interface())
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// is the reflect.Kind encodable?
+func isSupported(k reflect.Kind) bool {
+	switch k {
+	case reflect.Func, reflect.Chan, reflect.Invalid, reflect.UnsafePointer:
+		return false
+	default:
+		return true
+	}
+}
+
+// GuessSize guesses the size of the underlying
+// value of 'i'. If the underlying value is not
+// a simple builtin (or []byte), GuessSize defaults
+// to 512.
+func GuessSize(i interface{}) int {
+	if i == nil {
+		return NilSize
+	}
+
+	switch i := i.(type) {
+	case Sizer:
+		return i.Msgsize()
+	case Extension:
+		return ExtensionPrefixSize + i.Len()
+	case float64:
+		return Float64Size
+	case float32:
+		return Float32Size
+	case uint8, uint16, uint32, uint64, uint:
+		return UintSize
+	case int8, int16, int32, int64, int:
+		return IntSize
+	case []byte:
+		return BytesPrefixSize + len(i)
+	case string:
+		return StringPrefixSize + len(i)
+	case complex64:
+		return Complex64Size
+	case complex128:
+		return Complex128Size
+	case bool:
+		return BoolSize
+	case map[string]interface{}:
+		s := MapHeaderSize
+		for key, val := range i {
+			s += StringPrefixSize + len(key) + GuessSize(val)
+		}
+		return s
+	case map[string]string:
+		s := MapHeaderSize
+		for key, val := range i {
+			s += 2*StringPrefixSize + len(key) + len(val)
+		}
+		return s
+	default:
+		return 512
+	}
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
new file mode 100644
index 0000000000..676a6efe19
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
@@ -0,0 +1,436 @@
+package msgp
+
+import (
+	"math"
+	"reflect"
+	"time"
+)
+
+// ensure 'sz' extra bytes in 'b' between len(b) and cap(b)
+func ensure(b []byte, sz int) ([]byte, int) {
+	l := len(b)
+	c := cap(b)
+	if c-l < sz {
+		o := make([]byte, (2*c)+sz) // exponential growth
+		n := copy(o, b)
+		return o[:n+sz], n
+	}
+	return b[:l+sz], l
+}
+
+// AppendMapHeader appends a map header with the
+// given size to the slice
+func AppendMapHeader(b []byte, sz uint32) []byte {
+	switch {
+	case sz <= 15:
+		return append(b, wfixmap(uint8(sz)))
+
+	case sz <= math.MaxUint16:
+		o, n := ensure(b, 3)
+		prefixu16(o[n:], mmap16, uint16(sz))
+		return o
+
+	default:
+		o, n := ensure(b, 5)
+		prefixu32(o[n:], mmap32, sz)
+		return o
+	}
+}
+
+// AppendArrayHeader appends an array header with
+// the given size to the slice
+func AppendArrayHeader(b []byte, sz uint32) []byte {
+	switch {
+	case sz <= 15:
+		return append(b, wfixarray(uint8(sz)))
+
+	case sz <= math.MaxUint16:
+		o, n := ensure(b, 3)
+		prefixu16(o[n:], marray16, uint16(sz))
+		return o
+
+	default:
+		o, n := ensure(b, 5)
+		prefixu32(o[n:], marray32, sz)
+		return o
+	}
+}
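+
+// appendPairExample is an illustrative sketch (not part of upstream msgp):
+// the Append helpers mirror the Writer methods but build the message
+// directly in a []byte, e.g. a two-element array of an int and a str.
+func appendPairExample(b []byte) []byte {
+	b = AppendArrayHeader(b, 2)
+	b = AppendInt64(b, 42)
+	b = AppendString(b, "hello")
+	return b
+}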
+
+// AppendNil appends a 'nil' byte to the slice
+func AppendNil(b []byte) []byte { return append(b, mnil) }
+
+// AppendFloat64 appends a float64 to the slice
+func AppendFloat64(b []byte, f float64) []byte {
+	o, n := ensure(b, Float64Size)
+	prefixu64(o[n:], mfloat64, math.Float64bits(f))
+	return o
+}
+
+// AppendFloat32 appends a float32 to the slice
+func AppendFloat32(b []byte, f float32) []byte {
+	o, n := ensure(b, Float32Size)
+	prefixu32(o[n:], mfloat32, math.Float32bits(f))
+	return o
+}
+
+// AppendDuration appends a time.Duration to the slice
+func AppendDuration(b []byte, d time.Duration) []byte {
+	return AppendInt64(b, int64(d))
+}
+
+// AppendInt64 appends an int64 to the slice
+func AppendInt64(b []byte, i int64) []byte {
+	if i >= 0 {
+		switch {
+		case i <= math.MaxInt8:
+			return append(b, wfixint(uint8(i)))
+		case i <= math.MaxInt16:
+			o, n := ensure(b, 3)
+			putMint16(o[n:], int16(i))
+			return o
+		case i <= math.MaxInt32:
+			o, n := ensure(b, 5)
+			putMint32(o[n:], int32(i))
+			return o
+		default:
+			o, n := ensure(b, 9)
+			putMint64(o[n:], i)
+			return o
+		}
+	}
+	switch {
+	case i >= -32:
+		return append(b, wnfixint(int8(i)))
+	case i >= math.MinInt8:
+		o, n := ensure(b, 2)
+		putMint8(o[n:], int8(i))
+		return o
+	case i >= math.MinInt16:
+		o, n := ensure(b, 3)
+		putMint16(o[n:], int16(i))
+		return o
+	case i >= math.MinInt32:
+		o, n := ensure(b, 5)
+		putMint32(o[n:], int32(i))
+		return o
+	default:
+		o, n := ensure(b, 9)
+		putMint64(o[n:], i)
+		return o
+	}
+}
+
+// AppendInt appends an int to the slice
+func AppendInt(b []byte, i int) []byte { return AppendInt64(b, int64(i)) }
+
+// AppendInt8 appends an int8 to the slice
+func AppendInt8(b []byte, i int8) []byte { return AppendInt64(b, int64(i)) }
+
+// AppendInt16 appends an int16 to the slice
+func AppendInt16(b []byte, i int16) []byte { return AppendInt64(b, int64(i)) }
+
+// AppendInt32 appends an int32 to the slice
+func AppendInt32(b []byte, i int32) []byte { return AppendInt64(b, int64(i)) }
+
+// AppendUint64 appends a uint64 to the slice
+func AppendUint64(b []byte, u uint64) []byte {
+	switch {
+	case u <= (1<<7)-1:
+		return append(b, wfixint(uint8(u)))
+
+	case u <= math.MaxUint8:
+		o, n := ensure(b, 2)
+		putMuint8(o[n:], uint8(u))
+		return o
+
+	case u <= math.MaxUint16:
+		o, n := ensure(b, 3)
+		putMuint16(o[n:], uint16(u))
+		return o
+
+	case u <= math.MaxUint32:
+		o, n := ensure(b, 5)
+		putMuint32(o[n:], uint32(u))
+		return o
+
+	default:
+		o, n := ensure(b, 9)
+		putMuint64(o[n:], u)
+		return o
+
+	}
+}
+
+// AppendUint appends a uint to the slice
+func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) }
+
+// AppendUint8 appends a uint8 to the slice
+func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) }
+
+// AppendByte is analogous to AppendUint8
+func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) }
+
+// AppendUint16 appends a uint16 to the slice
+func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) }
+
+// AppendUint32 appends a uint32 to the slice
+func AppendUint32(b []byte, u uint32) []byte { return AppendUint64(b, uint64(u)) }
+
+// AppendBytes appends bytes to the slice as MessagePack 'bin' data
+func AppendBytes(b []byte, bts []byte) []byte {
+	sz := len(bts)
+	var o []byte
+	var n int
+	switch {
+	case sz <= math.MaxUint8:
+		o, n = ensure(b, 2+sz)
+		prefixu8(o[n:], mbin8, uint8(sz))
+		n += 2
+	case sz <= math.MaxUint16:
+		o, n = ensure(b, 3+sz)
+		prefixu16(o[n:], mbin16, uint16(sz))
+		n += 3
+	default:
+		o, n = ensure(b, 5+sz)
+		prefixu32(o[n:], mbin32, uint32(sz))
+		n += 5
+	}
+	return o[:n+copy(o[n:], bts)]
+}
+
+// AppendBytesHeader appends a 'bin' header with
+// the given size to the slice.
+func AppendBytesHeader(b []byte, sz uint32) []byte {
+	var o []byte
+	var n int
+	switch {
+	case sz <= math.MaxUint8:
+		o, n = ensure(b, 2)
+		prefixu8(o[n:], mbin8, uint8(sz))
+		return o
+	case sz <= math.MaxUint16:
+		o, n = ensure(b, 3)
+		prefixu16(o[n:], mbin16, uint16(sz))
+		return o
+	}
+	o, n = ensure(b, 5)
+	prefixu32(o[n:], mbin32, sz)
+	return o
+}
+
+// AppendBool appends a bool to the slice
+func AppendBool(b []byte, t bool) []byte {
+	if t {
+		return append(b, mtrue)
+	}
+	return append(b, mfalse)
+}
+
+// AppendString appends a string as a MessagePack 'str' to the slice
+func AppendString(b []byte, s string) []byte {
+	sz := len(s)
+	var n int
+	var o []byte
+	switch {
+	case sz <= 31:
+		o, n = ensure(b, 1+sz)
+		o[n] = wfixstr(uint8(sz))
+		n++
+	case sz <= math.MaxUint8:
+		o, n = ensure(b, 2+sz)
+		prefixu8(o[n:], mstr8, uint8(sz))
+		n += 2
+	case sz <= math.MaxUint16:
+		o, n = ensure(b, 3+sz)
+		prefixu16(o[n:], mstr16, uint16(sz))
+		n += 3
+	default:
+		o, n = ensure(b, 5+sz)
+		prefixu32(o[n:], mstr32, uint32(sz))
+		n += 5
+	}
+	return o[:n+copy(o[n:], s)]
+}
+
+// AppendStringFromBytes appends a []byte
+// as a MessagePack 'str' to the slice 'b'.
+func AppendStringFromBytes(b []byte, str []byte) []byte {
+	sz := len(str)
+	var n int
+	var o []byte
+	switch {
+	case sz <= 31:
+		o, n = ensure(b, 1+sz)
+		o[n] = wfixstr(uint8(sz))
+		n++
+	case sz <= math.MaxUint8:
+		o, n = ensure(b, 2+sz)
+		prefixu8(o[n:], mstr8, uint8(sz))
+		n += 2
+	case sz <= math.MaxUint16:
+		o, n = ensure(b, 3+sz)
+		prefixu16(o[n:], mstr16, uint16(sz))
+		n += 3
+	default:
+		o, n = ensure(b, 5+sz)
+		prefixu32(o[n:], mstr32, uint32(sz))
+		n += 5
+	}
+	return o[:n+copy(o[n:], str)]
+}
+
+// AppendComplex64 appends a complex64 to the slice as a MessagePack extension
+func AppendComplex64(b []byte, c complex64) []byte {
+	o, n := ensure(b, Complex64Size)
+	o[n] = mfixext8
+	o[n+1] = Complex64Extension
+	big.PutUint32(o[n+2:], math.Float32bits(real(c)))
+	big.PutUint32(o[n+6:], math.Float32bits(imag(c)))
+	return o
+}
+
+// AppendComplex128 appends a complex128 to the slice as a MessagePack extension
+func AppendComplex128(b []byte, c complex128) []byte {
+	o, n := ensure(b, Complex128Size)
+	o[n] = mfixext16
+	o[n+1] = Complex128Extension
+	big.PutUint64(o[n+2:], math.Float64bits(real(c)))
+	big.PutUint64(o[n+10:], math.Float64bits(imag(c)))
+	return o
+}
+
+// AppendTime appends a time.Time to the slice as a MessagePack extension
+func AppendTime(b []byte, t time.Time) []byte {
+	o, n := ensure(b, TimeSize)
+	t = t.UTC()
+	o[n] = mext8
+	o[n+1] = 12
+	o[n+2] = TimeExtension
+	putUnix(o[n+3:], t.Unix(), int32(t.Nanosecond()))
+	return o
+}
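+
+// timeRoundTrip is an illustrative sketch (not part of upstream msgp):
+// AppendTime and ReadTimeBytes (read_bytes.go) invert each other, modulo
+// the loss of time-zone information noted for WriteTime.
+func timeRoundTrip(t time.Time) (time.Time, error) {
+	buf := AppendTime(nil, t)
+	got, _, err := ReadTimeBytes(buf)
+	return got, err
+}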
+
+// AppendMapStrStr appends a map[string]string to the slice
+// as a MessagePack map with 'str'-type keys and values
+func AppendMapStrStr(b []byte, m map[string]string) []byte {
+	sz := uint32(len(m))
+	b = AppendMapHeader(b, sz)
+	for key, val := range m {
+		b = AppendString(b, key)
+		b = AppendString(b, val)
+	}
+	return b
+}
+
+// AppendMapStrIntf appends a map[string]interface{} to the slice
+// as a MessagePack map with 'str'-type keys.
+func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) {
+	sz := uint32(len(m))
+	b = AppendMapHeader(b, sz)
+	var err error
+	for key, val := range m {
+		b = AppendString(b, key)
+		b, err = AppendIntf(b, val)
+		if err != nil {
+			return b, err
+		}
+	}
+	return b, nil
+}
+
+// AppendIntf appends the concrete type of 'i' to the
+// provided []byte. 'i' must be one of the following:
+//   - 'nil'
+//   - A bool, float, string, []byte, int, uint, or complex
+//   - A map[string]interface{} or map[string]string
+//   - A []T, where T is another supported type
+//   - A *T, where T is another supported type
+//   - A type that satisfies the msgp.Marshaler interface
+//   - A type that satisfies the msgp.Extension interface
+func AppendIntf(b []byte, i interface{}) ([]byte, error) {
+	if i == nil {
+		return AppendNil(b), nil
+	}
+
+	// all the concrete types
+	// for which we have methods
+	switch i := i.(type) {
+	case Marshaler:
+		return i.MarshalMsg(b)
+	case Extension:
+		return AppendExtension(b, i)
+	case bool:
+		return AppendBool(b, i), nil
+	case float32:
+		return AppendFloat32(b, i), nil
+	case float64:
+		return AppendFloat64(b, i), nil
+	case complex64:
+		return AppendComplex64(b, i), nil
+	case complex128:
+		return AppendComplex128(b, i), nil
+	case string:
+		return AppendString(b, i), nil
+	case []byte:
+		return AppendBytes(b, i), nil
+	case int8:
+		return AppendInt8(b, i), nil
+	case int16:
+		return AppendInt16(b, i), nil
+	case int32:
+		return AppendInt32(b, i), nil
+	case int64:
+		return AppendInt64(b, i), nil
+	case int:
+		return AppendInt64(b, int64(i)), nil
+	case uint:
+		return AppendUint64(b, uint64(i)), nil
+	case uint8:
+		return AppendUint8(b, i), nil
+	case uint16:
+		return AppendUint16(b, i), nil
+	case uint32:
+		return AppendUint32(b, i), nil
+	case uint64:
+		return AppendUint64(b, i), nil
+	case time.Time:
+		return AppendTime(b, i), nil
+	case map[string]interface{}:
+		return AppendMapStrIntf(b, i)
+	case map[string]string:
+		return AppendMapStrStr(b, i), nil
+	case []interface{}:
+		b = AppendArrayHeader(b, uint32(len(i)))
+		var err error
+		for _, k := range i {
+			b, err = AppendIntf(b, k)
+			if err != nil {
+				return b, err
+			}
+		}
+		return b, nil
+	}
+
+	var err error
+	v := reflect.ValueOf(i)
+	switch v.Kind() {
+	case reflect.Array, reflect.Slice:
+		l := v.Len()
+		b = AppendArrayHeader(b, uint32(l))
+		for i := 0; i < l; i++ {
+			b, err = AppendIntf(b, v.Index(i).Interface())
+			if err != nil {
+				return b, err
+			}
+		}
+		return b, nil
+	case reflect.Ptr:
+		if v.IsNil() {
+			return AppendNil(b), err
+		}
+		b, err = AppendIntf(b, v.Elem().Interface())
+		return b, err
+	default:
+		return b, &ErrUnsupportedType{T: v.Type()}
+	}
+}
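+
+// appendIntfSized is an illustrative sketch (not part of upstream msgp):
+// GuessSize and Require (write.go) can pre-reserve capacity so AppendIntf
+// does not have to grow the slice repeatedly.
+func appendIntfSized(b []byte, v interface{}) ([]byte, error) {
+	b = Require(b, GuessSize(v)) // worst-case estimate of the encoded size
+	return AppendIntf(b, v)
+}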
diff --git a/vendor/github.com/xanzy/go-gitlab/README.md b/vendor/github.com/xanzy/go-gitlab/README.md
index 06ddb255d0..1652705af2 100644
--- a/vendor/github.com/xanzy/go-gitlab/README.md
+++ b/vendor/github.com/xanzy/go-gitlab/README.md
@@ -74,6 +74,7 @@ to add new and/or missing endpoints. Currently, the following services are suppo
 - [x] Project Import/export
 - [x] Project Members
 - [x] Project Milestones
+- [x] Project Repository Storage Moves
 - [x] Project Snippets
 - [x] Project Vulnerabilities
 - [x] Project-Level Variables
diff --git a/vendor/github.com/xanzy/go-gitlab/deployments.go b/vendor/github.com/xanzy/go-gitlab/deployments.go
index 40ac4ed513..3ce8ec40fc 100644
--- a/vendor/github.com/xanzy/go-gitlab/deployments.go
+++ b/vendor/github.com/xanzy/go-gitlab/deployments.go
@@ -201,3 +201,21 @@ func (s *DeploymentsService) UpdateProjectDeployment(pid interface{}, deployment
 
 	return d, resp, nil
 }
+
+// DeleteProjectDeployment deletes a project deployment.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/deployments.html#delete-a-specific-deployment
+func (s *DeploymentsService) DeleteProjectDeployment(pid interface{}, deployment int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deployments/%d", PathEscape(project), deployment)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
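+
+// deleteDeploymentExample is an illustrative sketch (not part of the
+// upstream library) showing the new endpoint in use from a configured
+// *Client.
+func deleteDeploymentExample(git *Client, projectID, deploymentID int) error {
+	_, err := git.Deployments.DeleteProjectDeployment(projectID, deploymentID)
+	return err
+}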
diff --git a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go b/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go
index b90f4fa3ed..4c2dd8e042 100644
--- a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go
+++ b/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go
@@ -801,6 +801,7 @@ type PipelineEvent struct {
 		FinishedAt     string   `json:"finished_at"`
 		Duration       int      `json:"duration"`
 		QueuedDuration int      `json:"queued_duration"`
+		URL            string   `json:"url"`
 		Variables      []struct {
 			Key   string `json:"key"`
 			Value string `json:"value"`
diff --git a/vendor/github.com/xanzy/go-gitlab/gitlab.go b/vendor/github.com/xanzy/go-gitlab/gitlab.go
index 81cf3ff9e6..322f1a4a03 100644
--- a/vendor/github.com/xanzy/go-gitlab/gitlab.go
+++ b/vendor/github.com/xanzy/go-gitlab/gitlab.go
@@ -33,8 +33,9 @@ import (
 	"sync"
 	"time"
 
-	"github.com/google/go-querystring/query"
 	"github.com/hashicorp/go-cleanhttp"
+
+	"github.com/google/go-querystring/query"
 	retryablehttp "github.com/hashicorp/go-retryablehttp"
 	"golang.org/x/oauth2"
 	"golang.org/x/time/rate"
@@ -103,115 +104,120 @@ type Client struct {
 	UserAgent string
 
 	// Services used for talking to different parts of the GitLab API.
-	AccessRequests          *AccessRequestsService
-	Applications            *ApplicationsService
-	AuditEvents             *AuditEventsService
-	Avatar                  *AvatarRequestsService
-	AwardEmoji              *AwardEmojiService
-	Boards                  *IssueBoardsService
-	Branches                *BranchesService
-	BroadcastMessage        *BroadcastMessagesService
-	CIYMLTemplate           *CIYMLTemplatesService
-	ClusterAgents           *ClusterAgentsService
-	Commits                 *CommitsService
-	ContainerRegistry       *ContainerRegistryService
-	CustomAttribute         *CustomAttributesService
-	DeployKeys              *DeployKeysService
-	DeployTokens            *DeployTokensService
-	DeploymentMergeRequests *DeploymentMergeRequestsService
-	Deployments             *DeploymentsService
-	Discussions             *DiscussionsService
-	DockerfileTemplate      *DockerfileTemplatesService
-	Environments            *EnvironmentsService
-	EpicIssues              *EpicIssuesService
-	Epics                   *EpicsService
-	ErrorTracking           *ErrorTrackingService
-	Events                  *EventsService
-	ExternalStatusChecks    *ExternalStatusChecksService
-	Features                *FeaturesService
-	FreezePeriods           *FreezePeriodsService
-	GenericPackages         *GenericPackagesService
-	GeoNodes                *GeoNodesService
-	GitIgnoreTemplates      *GitIgnoreTemplatesService
-	GroupAccessTokens       *GroupAccessTokensService
-	GroupBadges             *GroupBadgesService
-	GroupCluster            *GroupClustersService
-	GroupImportExport       *GroupImportExportService
-	GroupIssueBoards        *GroupIssueBoardsService
-	GroupIterations         *GroupIterationsService
-	GroupLabels             *GroupLabelsService
-	GroupMembers            *GroupMembersService
-	GroupMilestones         *GroupMilestonesService
-	GroupVariables          *GroupVariablesService
-	GroupWikis              *GroupWikisService
-	Groups                  *GroupsService
-	InstanceCluster         *InstanceClustersService
-	InstanceVariables       *InstanceVariablesService
-	Invites                 *InvitesService
-	IssueLinks              *IssueLinksService
-	Issues                  *IssuesService
-	IssuesStatistics        *IssuesStatisticsService
-	Jobs                    *JobsService
-	Keys                    *KeysService
-	Labels                  *LabelsService
-	License                 *LicenseService
-	LicenseTemplates        *LicenseTemplatesService
-	ManagedLicenses         *ManagedLicensesService
-	Markdown                *MarkdownService
-	MergeRequestApprovals   *MergeRequestApprovalsService
-	MergeRequests           *MergeRequestsService
-	Metadata                *MetadataService
-	Milestones              *MilestonesService
-	Namespaces              *NamespacesService
-	Notes                   *NotesService
-	NotificationSettings    *NotificationSettingsService
-	Packages                *PackagesService
-	Pages                   *PagesService
-	PagesDomains            *PagesDomainsService
-	PersonalAccessTokens    *PersonalAccessTokensService
-	PipelineSchedules       *PipelineSchedulesService
-	PipelineTriggers        *PipelineTriggersService
-	Pipelines               *PipelinesService
-	PlanLimits              *PlanLimitsService
-	ProjectAccessTokens     *ProjectAccessTokensService
-	ProjectBadges           *ProjectBadgesService
-	ProjectCluster          *ProjectClustersService
-	ProjectFeatureFlags     *ProjectFeatureFlagService
-	ProjectImportExport     *ProjectImportExportService
-	ProjectIterations       *ProjectIterationsService
-	ProjectMembers          *ProjectMembersService
-	ProjectMirrors          *ProjectMirrorService
-	ProjectSnippets         *ProjectSnippetsService
-	ProjectTemplates        *ProjectTemplatesService
-	ProjectVariables        *ProjectVariablesService
-	ProjectVulnerabilities  *ProjectVulnerabilitiesService
-	Projects                *ProjectsService
-	ProtectedBranches       *ProtectedBranchesService
-	ProtectedEnvironments   *ProtectedEnvironmentsService
-	ProtectedTags           *ProtectedTagsService
-	ReleaseLinks            *ReleaseLinksService
-	Releases                *ReleasesService
-	Repositories            *RepositoriesService
-	RepositoryFiles         *RepositoryFilesService
-	RepositorySubmodules    *RepositorySubmodulesService
-	ResourceLabelEvents     *ResourceLabelEventsService
-	ResourceMilestoneEvents *ResourceMilestoneEventsService
-	ResourceStateEvents     *ResourceStateEventsService
-	ResourceWeightEvents    *ResourceWeightEventsService
-	Runners                 *RunnersService
-	Search                  *SearchService
-	Services                *ServicesService
-	Settings                *SettingsService
-	Sidekiq                 *SidekiqService
-	Snippets                *SnippetsService
-	SystemHooks             *SystemHooksService
-	Tags                    *TagsService
-	Todos                   *TodosService
-	Topics                  *TopicsService
-	Users                   *UsersService
-	Validate                *ValidateService
-	Version                 *VersionService
-	Wikis                   *WikisService
+	AccessRequests               *AccessRequestsService
+	Applications                 *ApplicationsService
+	AuditEvents                  *AuditEventsService
+	Avatar                       *AvatarRequestsService
+	AwardEmoji                   *AwardEmojiService
+	Boards                       *IssueBoardsService
+	Branches                     *BranchesService
+	BroadcastMessage             *BroadcastMessagesService
+	CIYMLTemplate                *CIYMLTemplatesService
+	ClusterAgents                *ClusterAgentsService
+	Commits                      *CommitsService
+	ContainerRegistry            *ContainerRegistryService
+	CustomAttribute              *CustomAttributesService
+	DeployKeys                   *DeployKeysService
+	DeployTokens                 *DeployTokensService
+	DeploymentMergeRequests      *DeploymentMergeRequestsService
+	Deployments                  *DeploymentsService
+	Discussions                  *DiscussionsService
+	DockerfileTemplate           *DockerfileTemplatesService
+	Environments                 *EnvironmentsService
+	EpicIssues                   *EpicIssuesService
+	Epics                        *EpicsService
+	ErrorTracking                *ErrorTrackingService
+	Events                       *EventsService
+	ExternalStatusChecks         *ExternalStatusChecksService
+	Features                     *FeaturesService
+	FreezePeriods                *FreezePeriodsService
+	GenericPackages              *GenericPackagesService
+	GeoNodes                     *GeoNodesService
+	GitIgnoreTemplates           *GitIgnoreTemplatesService
+	GroupAccessTokens            *GroupAccessTokensService
+	GroupBadges                  *GroupBadgesService
+	GroupCluster                 *GroupClustersService
+	GroupEpicBoards              *GroupEpicBoardsService
+	GroupImportExport            *GroupImportExportService
+	GroupIssueBoards             *GroupIssueBoardsService
+	GroupIterations              *GroupIterationsService
+	GroupLabels                  *GroupLabelsService
+	GroupMembers                 *GroupMembersService
+	GroupMilestones              *GroupMilestonesService
+	GroupRepositoryStorageMove   *GroupRepositoryStorageMoveService
+	GroupVariables               *GroupVariablesService
+	GroupWikis                   *GroupWikisService
+	Groups                       *GroupsService
+	InstanceCluster              *InstanceClustersService
+	InstanceVariables            *InstanceVariablesService
+	Invites                      *InvitesService
+	IssueLinks                   *IssueLinksService
+	Issues                       *IssuesService
+	IssuesStatistics             *IssuesStatisticsService
+	Jobs                         *JobsService
+	JobTokenScope                *JobTokenScopeService
+	Keys                         *KeysService
+	Labels                       *LabelsService
+	License                      *LicenseService
+	LicenseTemplates             *LicenseTemplatesService
+	ManagedLicenses              *ManagedLicensesService
+	Markdown                     *MarkdownService
+	MergeRequestApprovals        *MergeRequestApprovalsService
+	MergeRequests                *MergeRequestsService
+	Metadata                     *MetadataService
+	Milestones                   *MilestonesService
+	Namespaces                   *NamespacesService
+	Notes                        *NotesService
+	NotificationSettings         *NotificationSettingsService
+	Packages                     *PackagesService
+	Pages                        *PagesService
+	PagesDomains                 *PagesDomainsService
+	PersonalAccessTokens         *PersonalAccessTokensService
+	PipelineSchedules            *PipelineSchedulesService
+	PipelineTriggers             *PipelineTriggersService
+	Pipelines                    *PipelinesService
+	PlanLimits                   *PlanLimitsService
+	ProjectAccessTokens          *ProjectAccessTokensService
+	ProjectBadges                *ProjectBadgesService
+	ProjectCluster               *ProjectClustersService
+	ProjectFeatureFlags          *ProjectFeatureFlagService
+	ProjectImportExport          *ProjectImportExportService
+	ProjectIterations            *ProjectIterationsService
+	ProjectMembers               *ProjectMembersService
+	ProjectMirrors               *ProjectMirrorService
+	ProjectRepositoryStorageMove *ProjectRepositoryStorageMoveService
+	ProjectSnippets              *ProjectSnippetsService
+	ProjectTemplates             *ProjectTemplatesService
+	ProjectVariables             *ProjectVariablesService
+	ProjectVulnerabilities       *ProjectVulnerabilitiesService
+	Projects                     *ProjectsService
+	ProtectedBranches            *ProtectedBranchesService
+	ProtectedEnvironments        *ProtectedEnvironmentsService
+	ProtectedTags                *ProtectedTagsService
+	ReleaseLinks                 *ReleaseLinksService
+	Releases                     *ReleasesService
+	Repositories                 *RepositoriesService
+	RepositoryFiles              *RepositoryFilesService
+	RepositorySubmodules         *RepositorySubmodulesService
+	ResourceLabelEvents          *ResourceLabelEventsService
+	ResourceMilestoneEvents      *ResourceMilestoneEventsService
+	ResourceStateEvents          *ResourceStateEventsService
+	ResourceWeightEvents         *ResourceWeightEventsService
+	Runners                      *RunnersService
+	Search                       *SearchService
+	Services                     *ServicesService
+	Settings                     *SettingsService
+	Sidekiq                      *SidekiqService
+	SnippetRepositoryStorageMove *SnippetRepositoryStorageMoveService
+	Snippets                     *SnippetsService
+	SystemHooks                  *SystemHooksService
+	Tags                         *TagsService
+	Todos                        *TodosService
+	Topics                       *TopicsService
+	Users                        *UsersService
+	Validate                     *ValidateService
+	Version                      *VersionService
+	Wikis                        *WikisService
 }
 
 // ListOptions specifies the optional parameters to various List methods that
@@ -352,12 +358,14 @@ func newClient(options ...ClientOptionFunc) (*Client, error) {
 	c.GroupAccessTokens = &GroupAccessTokensService{client: c}
 	c.GroupBadges = &GroupBadgesService{client: c}
 	c.GroupCluster = &GroupClustersService{client: c}
+	c.GroupEpicBoards = &GroupEpicBoardsService{client: c}
 	c.GroupImportExport = &GroupImportExportService{client: c}
 	c.GroupIssueBoards = &GroupIssueBoardsService{client: c}
 	c.GroupIterations = &GroupIterationsService{client: c}
 	c.GroupLabels = &GroupLabelsService{client: c}
 	c.GroupMembers = &GroupMembersService{client: c}
 	c.GroupMilestones = &GroupMilestonesService{client: c}
+	c.GroupRepositoryStorageMove = &GroupRepositoryStorageMoveService{client: c}
 	c.GroupVariables = &GroupVariablesService{client: c}
 	c.GroupWikis = &GroupWikisService{client: c}
 	c.Groups = &GroupsService{client: c}
@@ -368,6 +376,7 @@ func newClient(options ...ClientOptionFunc) (*Client, error) {
 	c.Issues = &IssuesService{client: c, timeStats: timeStats}
 	c.IssuesStatistics = &IssuesStatisticsService{client: c}
 	c.Jobs = &JobsService{client: c}
+	c.JobTokenScope = &JobTokenScopeService{client: c}
 	c.Keys = &KeysService{client: c}
 	c.Labels = &LabelsService{client: c}
 	c.License = &LicenseService{client: c}
@@ -397,6 +406,7 @@ func newClient(options ...ClientOptionFunc) (*Client, error) {
 	c.ProjectIterations = &ProjectIterationsService{client: c}
 	c.ProjectMembers = &ProjectMembersService{client: c}
 	c.ProjectMirrors = &ProjectMirrorService{client: c}
+	c.ProjectRepositoryStorageMove = &ProjectRepositoryStorageMoveService{client: c}
 	c.ProjectSnippets = &ProjectSnippetsService{client: c}
 	c.ProjectTemplates = &ProjectTemplatesService{client: c}
 	c.ProjectVariables = &ProjectVariablesService{client: c}
@@ -420,6 +430,7 @@ func newClient(options ...ClientOptionFunc) (*Client, error) {
 	c.Settings = &SettingsService{client: c}
 	c.Sidekiq = &SidekiqService{client: c}
 	c.Snippets = &SnippetsService{client: c}
+	c.SnippetRepositoryStorageMove = &SnippetRepositoryStorageMoveService{client: c}
 	c.SystemHooks = &SystemHooksService{client: c}
 	c.Tags = &TagsService{client: c}
 	c.Todos = &TodosService{client: c}
diff --git a/vendor/github.com/xanzy/go-gitlab/group_badges.go b/vendor/github.com/xanzy/go-gitlab/group_badges.go
index 1ae3ebcc68..c648a74432 100644
--- a/vendor/github.com/xanzy/go-gitlab/group_badges.go
+++ b/vendor/github.com/xanzy/go-gitlab/group_badges.go
@@ -44,6 +44,7 @@ const (
 // https://docs.gitlab.com/ee/api/group_badges.html
 type GroupBadge struct {
 	ID               int       `json:"id"`
+	Name             string    `json:"name"`
 	LinkURL          string    `json:"link_url"`
 	ImageURL         string    `json:"image_url"`
 	RenderedLinkURL  string    `json:"rendered_link_url"`
@@ -55,7 +56,10 @@ type GroupBadge struct {
 //
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/group_badges.html#list-all-badges-of-a-group
-type ListGroupBadgesOptions ListOptions
+type ListGroupBadgesOptions struct {
+	ListOptions
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+}
 
 // ListGroupBadges gets a list of a group's badges.
 //
@@ -114,6 +118,7 @@ func (s *GroupBadgesService) GetGroupBadge(gid interface{}, badge int, options .
 type AddGroupBadgeOptions struct {
 	LinkURL  *string `url:"link_url,omitempty" json:"link_url,omitempty"`
 	ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"`
+	Name     *string `url:"name,omitempty" json:"name,omitempty"`
 }
 
 // AddGroupBadge adds a badge to a group.
@@ -148,6 +153,7 @@ func (s *GroupBadgesService) AddGroupBadge(gid interface{}, opt *AddGroupBadgeOp
 type EditGroupBadgeOptions struct {
 	LinkURL  *string `url:"link_url,omitempty" json:"link_url,omitempty"`
 	ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"`
+	Name     *string `url:"name,omitempty" json:"name,omitempty"`
 }
 
 // EditGroupBadge updates a badge of a group.
@@ -201,6 +207,7 @@ func (s *GroupBadgesService) DeleteGroupBadge(gid interface{}, badge int, option
 type GroupBadgePreviewOptions struct {
 	LinkURL  *string `url:"link_url,omitempty" json:"link_url,omitempty"`
 	ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"`
+	Name     *string `url:"name,omitempty" json:"name,omitempty"`
 }
 
 // PreviewGroupBadge returns how the link_url and image_url final URLs would be after
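The new `Name` field threads through listing, adding, editing, and previewing badges. A minimal usage sketch, assuming a reachable GitLab instance, a placeholder token, and a placeholder group ID; `NewClient` and `String` are the library's standard constructor and pointer helper:

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// List only the badges of group 42 whose name matches "coverage".
	badges, _, err := git.GroupBadges.ListGroupBadges(42, &gitlab.ListGroupBadgesOptions{
		Name: gitlab.String("coverage"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("matching badges:", len(badges))

	// Create a badge with an explicit name; Name stays optional.
	_, _, err = git.GroupBadges.AddGroupBadge(42, &gitlab.AddGroupBadgeOptions{
		Name:     gitlab.String("coverage"),
		LinkURL:  gitlab.String("https://example.com/%{project_path}"),
		ImageURL: gitlab.String("https://example.com/badge.svg"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```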
diff --git a/vendor/github.com/xanzy/go-gitlab/group_epic_boards.go b/vendor/github.com/xanzy/go-gitlab/group_epic_boards.go
new file mode 100644
index 0000000000..fd8cfd86d2
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_epic_boards.go
@@ -0,0 +1,104 @@
+//
+// Copyright 2021, Patrick Webster
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// GroupEpicBoardsService handles communication with the group epic board
+// related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_epic_boards.html
+type GroupEpicBoardsService struct {
+	client *Client
+}
+
+// GroupEpicBoard represents a GitLab group epic board.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_epic_boards.html
+type GroupEpicBoard struct {
+	ID     int             `json:"id"`
+	Name   string          `json:"name"`
+	Group  *Group          `json:"group"`
+	Labels []*LabelDetails `json:"labels"`
+	Lists  []*BoardList    `json:"lists"`
+}
+
+func (b GroupEpicBoard) String() string {
+	return Stringify(b)
+}
+
+// ListGroupEpicBoardsOptions represents the available
+// ListGroupEpicBoards() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_epic_boards.html#list-all-epic-boards-in-a-group
+type ListGroupEpicBoardsOptions ListOptions
+
+// ListGroupEpicBoards gets a list of all epic boards in a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_epic_boards.html#list-all-epic-boards-in-a-group
+func (s *GroupEpicBoardsService) ListGroupEpicBoards(gid interface{}, opt *ListGroupEpicBoardsOptions, options ...RequestOptionFunc) ([]*GroupEpicBoard, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epic_boards", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gs []*GroupEpicBoard
+	resp, err := s.client.Do(req, &gs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gs, resp, nil
+}
+
+// GetGroupEpicBoard gets a single epic board of a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_epic_boards.html#single-group-epic-board
+func (s *GroupEpicBoardsService) GetGroupEpicBoard(gid interface{}, board int, options ...RequestOptionFunc) (*GroupEpicBoard, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epic_boards/%d", PathEscape(group), board)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gib := new(GroupEpicBoard)
+	resp, err := s.client.Do(req, gib)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gib, resp, nil
+}
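A minimal usage sketch for the new GroupEpicBoards service; the group path and token are placeholders, and only the service and method names come from the file above:

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// ListGroupEpicBoardsOptions has ListOptions as its underlying type,
	// so pagination fields are set directly on the literal.
	boards, _, err := git.GroupEpicBoards.ListGroupEpicBoards("my-group",
		&gitlab.ListGroupEpicBoardsOptions{PerPage: 20})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range boards {
		fmt.Printf("epic board %d: %s (%d lists)\n", b.ID, b.Name, len(b.Lists))
	}
}
```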
diff --git a/vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go b/vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go
new file mode 100644
index 0000000000..18951a1661
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go
@@ -0,0 +1,195 @@
+//
+// Copyright 2023, Nick Westbury
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// GroupRepositoryStorageMoveService handles communication with the
+// group repositories related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html
+type GroupRepositoryStorageMoveService struct {
+	client *Client
+}
+
+// GroupRepositoryStorageMove represents the status of a repository move.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html
+type GroupRepositoryStorageMove struct {
+	ID                     int              `json:"id"`
+	CreatedAt              *time.Time       `json:"created_at"`
+	State                  string           `json:"state"`
+	SourceStorageName      string           `json:"source_storage_name"`
+	DestinationStorageName string           `json:"destination_storage_name"`
+	Group                  *RepositoryGroup `json:"group"`
+}
+
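+// RepositoryGroup represents a group referenced by a GroupRepositoryStorageMove.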
+type RepositoryGroup struct {
+	ID     int    `json:"id"`
+	Name   string `json:"name"`
+	WebURL string `json:"web_url"`
+}
+
+// RetrieveAllGroupStorageMovesOptions represents the available
+// RetrieveAllStorageMoves() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#retrieve-all-group-repository-storage-moves
+type RetrieveAllGroupStorageMovesOptions ListOptions
+
+// RetrieveAllStorageMoves retrieves all group repository storage moves
+// accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#retrieve-all-group-repository-storage-moves
+func (g GroupRepositoryStorageMoveService) RetrieveAllStorageMoves(opts RetrieveAllGroupStorageMovesOptions, options ...RequestOptionFunc) ([]*GroupRepositoryStorageMove, *Response, error) {
+	req, err := g.client.NewRequest(http.MethodGet, "group_repository_storage_moves", opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gsms []*GroupRepositoryStorageMove
+	resp, err := g.client.Do(req, &gsms)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gsms, resp, err
+}
+
+// RetrieveAllStorageMovesForGroup retrieves all repository storage moves for
+// a single group accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-single-group
+func (g GroupRepositoryStorageMoveService) RetrieveAllStorageMovesForGroup(group int, opts RetrieveAllGroupStorageMovesOptions, options ...RequestOptionFunc) ([]*GroupRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("groups/%d/repository_storage_moves", group)
+
+	req, err := g.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gsms []*GroupRepositoryStorageMove
+	resp, err := g.client.Do(req, &gsms)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gsms, resp, err
+}
+
+// GetStorageMove gets a single group repository storage move.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#get-a-single-group-repository-storage-move
+func (g GroupRepositoryStorageMoveService) GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("group_repository_storage_moves/%d", repositoryStorage)
+
+	req, err := g.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gsm := new(GroupRepositoryStorageMove)
+	resp, err := g.client.Do(req, gsm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gsm, resp, err
+}
+
+// GetStorageMoveForGroup gets a single repository storage move for a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#get-a-single-repository-storage-move-for-a-group
+func (g GroupRepositoryStorageMoveService) GetStorageMoveForGroup(group int, repositoryStorage int, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("groups/%d/repository_storage_moves/%d", group, repositoryStorage)
+
+	req, err := g.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gsm := new(GroupRepositoryStorageMove)
+	resp, err := g.client.Do(req, gsm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gsm, resp, err
+}
+
+// ScheduleStorageMoveForGroupOptions represents the available
+// ScheduleStorageMoveForGroup() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-group
+type ScheduleStorageMoveForGroupOptions struct {
+	DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
+}
+
+// ScheduleStorageMoveForGroup schedules a repository to be moved for a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-group
+func (g GroupRepositoryStorageMoveService) ScheduleStorageMoveForGroup(group int, opts ScheduleStorageMoveForGroupOptions, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("groups/%d/repository_storage_moves", group)
+
+	req, err := g.client.NewRequest(http.MethodPost, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gsm := new(GroupRepositoryStorageMove)
+	resp, err := g.client.Do(req, gsm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gsm, resp, err
+}
+
+// ScheduleAllGroupStorageMovesOptions represents the available
+// ScheduleAllStorageMoves() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#schedule-repository-storage-moves-for-all-groups-on-a-storage-shard
+type ScheduleAllGroupStorageMovesOptions struct {
+	SourceStorageName      *string `url:"source_storage_name,omitempty" json:"source_storage_name,omitempty"`
+	DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
+}
+
+// ScheduleAllStorageMoves schedules all group repositories to be moved.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#schedule-repository-storage-moves-for-all-groups-on-a-storage-shard
+func (g GroupRepositoryStorageMoveService) ScheduleAllStorageMoves(opts ScheduleAllGroupStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) {
+	req, err := g.client.NewRequest(http.MethodPost, "group_repository_storage_moves", opts, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return g.client.Do(req, nil)
+}
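A minimal sketch of scheduling and then inspecting a group repository storage move with the new service; the group ID, storage names, and token are placeholders:

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Ask GitLab to move group 42's repositories to the "storage-02" shard.
	move, _, err := git.GroupRepositoryStorageMove.ScheduleStorageMoveForGroup(42,
		gitlab.ScheduleStorageMoveForGroupOptions{
			DestinationStorageName: gitlab.String("storage-02"),
		})
	if err != nil {
		log.Fatal(err)
	}

	// Look the move up again by ID to check its current state.
	latest, _, err := git.GroupRepositoryStorageMove.GetStorageMove(move.ID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("storage move state:", latest.State)
}
```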
diff --git a/vendor/github.com/xanzy/go-gitlab/groups.go b/vendor/github.com/xanzy/go-gitlab/groups.go
index ceff074112..4a9a996d92 100644
--- a/vendor/github.com/xanzy/go-gitlab/groups.go
+++ b/vendor/github.com/xanzy/go-gitlab/groups.go
@@ -50,6 +50,7 @@ type Group struct {
 	AvatarURL               string                     `json:"avatar_url"`
 	WebURL                  string                     `json:"web_url"`
 	RequestAccessEnabled    bool                       `json:"request_access_enabled"`
+	RepositoryStorage       string                     `json:"repository_storage"`
 	FullName                string                     `json:"full_name"`
 	FullPath                string                     `json:"full_path"`
 	FileTemplateProjectID   int                        `json:"file_template_project_id"`
@@ -75,16 +76,17 @@ type Group struct {
 		GroupAccessLevel int      `json:"group_access_level"`
 		ExpiresAt        *ISOTime `json:"expires_at"`
 	} `json:"shared_with_groups"`
-	LDAPCN                         string           `json:"ldap_cn"`
-	LDAPAccess                     AccessLevelValue `json:"ldap_access"`
-	LDAPGroupLinks                 []*LDAPGroupLink `json:"ldap_group_links"`
-	SAMLGroupLinks                 []*SAMLGroupLink `json:"saml_group_links"`
-	SharedRunnersMinutesLimit      int              `json:"shared_runners_minutes_limit"`
-	ExtraSharedRunnersMinutesLimit int              `json:"extra_shared_runners_minutes_limit"`
-	PreventForkingOutsideGroup     bool             `json:"prevent_forking_outside_group"`
-	MarkedForDeletionOn            *ISOTime         `json:"marked_for_deletion_on"`
-	CreatedAt                      *time.Time       `json:"created_at"`
-	IPRestrictionRanges            string           `json:"ip_restriction_ranges"`
+	LDAPCN                         string             `json:"ldap_cn"`
+	LDAPAccess                     AccessLevelValue   `json:"ldap_access"`
+	LDAPGroupLinks                 []*LDAPGroupLink   `json:"ldap_group_links"`
+	SAMLGroupLinks                 []*SAMLGroupLink   `json:"saml_group_links"`
+	SharedRunnersMinutesLimit      int                `json:"shared_runners_minutes_limit"`
+	ExtraSharedRunnersMinutesLimit int                `json:"extra_shared_runners_minutes_limit"`
+	PreventForkingOutsideGroup     bool               `json:"prevent_forking_outside_group"`
+	MarkedForDeletionOn            *ISOTime           `json:"marked_for_deletion_on"`
+	CreatedAt                      *time.Time         `json:"created_at"`
+	IPRestrictionRanges            string             `json:"ip_restriction_ranges"`
+	WikiAccessLevel                AccessControlValue `json:"wiki_access_level"`
 }
 
 // GroupAvatar represents a GitLab group avatar.
@@ -127,16 +129,17 @@ type SAMLGroupLink struct {
 // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-groups
 type ListGroupsOptions struct {
 	ListOptions
+	SkipGroups           *[]int            `url:"skip_groups,omitempty" del:"," json:"skip_groups,omitempty"`
 	AllAvailable         *bool             `url:"all_available,omitempty" json:"all_available,omitempty"`
-	MinAccessLevel       *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"`
-	OrderBy              *string           `url:"order_by,omitempty" json:"order_by,omitempty"`
-	Owned                *bool             `url:"owned,omitempty" json:"owned,omitempty"`
 	Search               *string           `url:"search,omitempty" json:"search,omitempty"`
-	SkipGroups           *[]int            `url:"skip_groups,omitempty" del:"," json:"skip_groups,omitempty"`
+	OrderBy              *string           `url:"order_by,omitempty" json:"order_by,omitempty"`
 	Sort                 *string           `url:"sort,omitempty" json:"sort,omitempty"`
 	Statistics           *bool             `url:"statistics,omitempty" json:"statistics,omitempty"`
-	TopLevelOnly         *bool             `url:"top_level_only,omitempty" json:"top_level_only,omitempty"`
 	WithCustomAttributes *bool             `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"`
+	Owned                *bool             `url:"owned,omitempty" json:"owned,omitempty"`
+	MinAccessLevel       *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"`
+	TopLevelOnly         *bool             `url:"top_level_only,omitempty" json:"top_level_only,omitempty"`
+	RepositoryStorage    *string           `url:"repository_storage,omitempty" json:"repository_storage,omitempty"`
 }
 
 // ListGroups gets a list of groups (as user: my groups, as admin: all groups).
@@ -353,6 +356,7 @@ type CreateGroupOptions struct {
 	SharedRunnersMinutesLimit      *int                        `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"`
 	ExtraSharedRunnersMinutesLimit *int                        `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"`
 	IPRestrictionRanges            *string                     `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"`
+	WikiAccessLevel                *AccessControlValue         `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"`
 }
 
 // CreateGroup creates a new project group. Available only for users who can
@@ -481,6 +485,7 @@ type UpdateGroupOptions struct {
 	SharedRunnersSetting                 *SharedRunnersSettingValue  `url:"shared_runners_setting,omitempty" json:"shared_runners_setting,omitempty"`
 	PreventSharingGroupsOutsideHierarchy *bool                       `url:"prevent_sharing_groups_outside_hierarchy,omitempty" json:"prevent_sharing_groups_outside_hierarchy,omitempty"`
 	IPRestrictionRanges                  *string                     `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"`
+	WikiAccessLevel                      *AccessControlValue         `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"`
 }
 
 // UpdateGroup updates an existing group; only available to group owners and
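The new `WikiAccessLevel` field on create and update takes the raw access-control values the API accepts ("disabled", "private", "enabled"). A short sketch against a placeholder group, setting the value through a typed variable rather than any helper:

```go
package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Restrict the wiki of group 42 to group members only.
	wiki := gitlab.AccessControlValue("private")
	_, _, err = git.Groups.UpdateGroup(42, &gitlab.UpdateGroupOptions{
		WikiAccessLevel: &wiki,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```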
diff --git a/vendor/github.com/xanzy/go-gitlab/issues.go b/vendor/github.com/xanzy/go-gitlab/issues.go
index 45849293c7..42478c9f9d 100644
--- a/vendor/github.com/xanzy/go-gitlab/issues.go
+++ b/vendor/github.com/xanzy/go-gitlab/issues.go
@@ -445,6 +445,7 @@ type CreateIssueOptions struct {
 	Labels                             *Labels    `url:"labels,comma,omitempty" json:"labels,omitempty"`
 	CreatedAt                          *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
 	DueDate                            *ISOTime   `url:"due_date,omitempty" json:"due_date,omitempty"`
+	EpicID                             *int       `url:"epic_id,omitempty" json:"epic_id,omitempty"`
 	MergeRequestToResolveDiscussionsOf *int       `url:"merge_request_to_resolve_discussions_of,omitempty" json:"merge_request_to_resolve_discussions_of,omitempty"`
 	DiscussionToResolve                *string    `url:"discussion_to_resolve,omitempty" json:"discussion_to_resolve,omitempty"`
 	Weight                             *int       `url:"weight,omitempty" json:"weight,omitempty"`
@@ -490,6 +491,7 @@ type UpdateIssueOptions struct {
 	StateEvent       *string    `url:"state_event,omitempty" json:"state_event,omitempty"`
 	UpdatedAt        *time.Time `url:"updated_at,omitempty" json:"updated_at,omitempty"`
 	DueDate          *ISOTime   `url:"due_date,omitempty" json:"due_date,omitempty"`
+	EpicID           *int       `url:"epic_id,omitempty" json:"epic_id,omitempty"`
 	Weight           *int       `url:"weight,omitempty" json:"weight,omitempty"`
 	DiscussionLocked *bool      `url:"discussion_locked,omitempty" json:"discussion_locked,omitempty"`
 	IssueType        *string    `url:"issue_type,omitempty" json:"issue_type,omitempty"`
diff --git a/vendor/github.com/xanzy/go-gitlab/job_token_scope.go b/vendor/github.com/xanzy/go-gitlab/job_token_scope.go
new file mode 100644
index 0000000000..82dc508194
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/job_token_scope.go
@@ -0,0 +1,125 @@
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// JobTokenScopeService handles communication with project CI settings
+// such as token permissions.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_job_token_scopes.html
+type JobTokenScopeService struct {
+	client *Client
+}
+
+// JobTokenInboundAllowItem represents a single job token inbound allowlist item.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_job_token_scopes.html
+type JobTokenInboundAllowItem struct {
+	SourceProjectID int `json:"source_project_id"`
+	TargetProjectID int `json:"target_project_id"`
+}
+
+// GetJobTokenInboundAllowListOptions represents the available
+// GetJobTokenInboundAllowList() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-inbound-allowlist
+type GetJobTokenInboundAllowListOptions struct {
+	ListOptions
+}
+
+// GetProjectJobTokenInboundAllowList fetches the CI/CD job token inbound
+// allowlist (job token scope) of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-inbound-allowlist
+func (j *JobTokenScopeService) GetProjectJobTokenInboundAllowList(pid interface{}, opt *GetJobTokenInboundAllowListOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(`projects/%s/job_token_scope/allowlist`, PathEscape(project))
+
+	req, err := j.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ps []*Project
+	resp, err := j.client.Do(req, &ps)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ps, resp, nil
+}
+
+// JobTokenInboundAllowOptions represents the available
+// AddProjectToJobScopeAllowList() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#create-a-new-project-to-a-projects-cicd-job-token-inbound-allowlist
+type JobTokenInboundAllowOptions struct {
+	TargetProjectID *int `url:"target_project_id,omitempty" json:"target_project_id,omitempty"`
+}
+
+// AddProjectToJobScopeAllowList adds a new project to a project's job token
+// inbound allow list.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#create-a-new-project-to-a-projects-cicd-job-token-inbound-allowlist
+func (j *JobTokenScopeService) AddProjectToJobScopeAllowList(pid interface{}, opt *JobTokenInboundAllowOptions, options ...RequestOptionFunc) (*JobTokenInboundAllowItem, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(`projects/%s/job_token_scope/allowlist`, PathEscape(project))
+
+	req, err := j.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ai := new(JobTokenInboundAllowItem)
+	resp, err := j.client.Do(req, ai)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ai, resp, nil
+}
+
+// RemoveProjectFromJobScopeAllowList removes a project from a project's job
+// token inbound allow list.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#remove-a-project-from-a-projects-cicd-job-token-inbound-allowlist
+func (j *JobTokenScopeService) RemoveProjectFromJobScopeAllowList(pid interface{}, targetProject int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf(`projects/%s/job_token_scope/allowlist/%d`, PathEscape(project), targetProject)
+
+	req, err := j.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return j.client.Do(req, nil)
+}
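A minimal sketch of managing a project's CI/CD job token inbound allowlist with the new service; the project IDs and token are placeholders:

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Add project 456 to project 123's inbound allowlist, so jobs in 456
	// can reach project 123 with their job token.
	item, _, err := git.JobTokenScope.AddProjectToJobScopeAllowList(123,
		&gitlab.JobTokenInboundAllowOptions{TargetProjectID: gitlab.Int(456)})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("allowlisted %d -> %d\n", item.SourceProjectID, item.TargetProjectID)

	// Read the allowlist back; it is returned as a slice of projects.
	projects, _, err := git.JobTokenScope.GetProjectJobTokenInboundAllowList(123,
		&gitlab.GetJobTokenInboundAllowListOptions{ListOptions: gitlab.ListOptions{PerPage: 20}})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("projects in allowlist:", len(projects))
}
```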
diff --git a/vendor/github.com/xanzy/go-gitlab/keys.go b/vendor/github.com/xanzy/go-gitlab/keys.go
index 0c03f5d6d6..d9cf598333 100644
--- a/vendor/github.com/xanzy/go-gitlab/keys.go
+++ b/vendor/github.com/xanzy/go-gitlab/keys.go
@@ -64,3 +64,34 @@ func (s *KeysService) GetKeyWithUser(key int, options ...RequestOptionFunc) (*Ke
 
 	return k, resp, nil
 }
+
+// GetKeyByFingerprintOptions represents the available GetKeyByFingerprint()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/keys.html#get-user-by-fingerprint-of-ssh-key
+// https://docs.gitlab.com/ee/api/keys.html#get-user-by-deploy-key-fingerprint
+type GetKeyByFingerprintOptions struct {
+	Fingerprint string `url:"fingerprint" json:"fingerprint"`
+}
+
+// GetKeyByFingerprint gets a specific SSH key or deploy key by fingerprint
+// along with the associated user information.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/keys.html#get-user-by-fingerprint-of-ssh-key
+// https://docs.gitlab.com/ee/api/keys.html#get-user-by-deploy-key-fingerprint
+func (s *KeysService) GetKeyByFingerprint(opt *GetKeyByFingerprintOptions, options ...RequestOptionFunc) (*Key, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "keys", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	k := new(Key)
+	resp, err := s.client.Do(req, k)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return k, resp, nil
+}
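A minimal sketch of looking up a key (and its owner) by fingerprint with the new method; the fingerprint value and token are placeholders:

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	key, _, err := git.Keys.GetKeyByFingerprint(&gitlab.GetKeyByFingerprintOptions{
		Fingerprint: "SHA256:placeholder-fingerprint", // placeholder value
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("key ID:", key.ID)
}
```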
diff --git a/vendor/github.com/xanzy/go-gitlab/milestones.go b/vendor/github.com/xanzy/go-gitlab/milestones.go
index 095c9132dc..17c97e031a 100644
--- a/vendor/github.com/xanzy/go-gitlab/milestones.go
+++ b/vendor/github.com/xanzy/go-gitlab/milestones.go
@@ -36,6 +36,7 @@ type MilestonesService struct {
 type Milestone struct {
 	ID          int        `json:"id"`
 	IID         int        `json:"iid"`
+	GroupID     int        `json:"group_id"`
 	ProjectID   int        `json:"project_id"`
 	Title       string     `json:"title"`
 	Description string     `json:"description"`
diff --git a/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go b/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go
index bb27c7737d..63d294e6fa 100644
--- a/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go
+++ b/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go
@@ -120,6 +120,27 @@ func (s *PersonalAccessTokensService) GetSinglePersonalAccessToken(options ...Re
 	return pat, resp, nil
 }
 
+// RotatePersonalAccessToken revokes a token and returns a new token that
+// expires in one week.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#rotate-a-personal-access-token
+func (s *PersonalAccessTokensService) RotatePersonalAccessToken(token int, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+	u := fmt.Sprintf("personal_access_tokens/%d/rotate", token)
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pat := new(PersonalAccessToken)
+	resp, err := s.client.Do(req, pat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pat, resp, nil
+}
+
 // RevokePersonalAccessToken revokes a personal access token.
 //
 // GitLab API docs:
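A minimal sketch of rotating a personal access token by ID with the new method; the token ID and client token are placeholders:

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Rotate token 99: the old token is revoked and a replacement with a
	// one-week expiry is returned.
	rotated, _, err := git.PersonalAccessTokens.RotatePersonalAccessToken(99)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new token ID:", rotated.ID)
}
```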
diff --git a/vendor/github.com/xanzy/go-gitlab/pipelines.go b/vendor/github.com/xanzy/go-gitlab/pipelines.go
index 1bba71e002..75a8b6cb5d 100644
--- a/vendor/github.com/xanzy/go-gitlab/pipelines.go
+++ b/vendor/github.com/xanzy/go-gitlab/pipelines.go
@@ -115,7 +115,7 @@ type PipelineTestCases struct {
 	Classname      string          `json:"classname"`
 	File           string          `json:"file"`
 	ExecutionTime  float64         `json:"execution_time"`
-	SystemOutput   string          `json:"system_output"`
+	SystemOutput   interface{}     `json:"system_output"`
 	StackTrace     string          `json:"stack_trace"`
 	AttachmentURL  string          `json:"attachment_url"`
 	RecentFailures *RecentFailures `json:"recent_failures"`
@@ -135,6 +135,7 @@ func (p PipelineTestReport) String() string {
 // on other assets, like Commit.
 type PipelineInfo struct {
 	ID        int        `json:"id"`
+	IID       int        `json:"iid"`
 	ProjectID int        `json:"project_id"`
 	Status    string     `json:"status"`
 	Source    string     `json:"source"`
diff --git a/vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go b/vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go
new file mode 100644
index 0000000000..3beecb1f75
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go
@@ -0,0 +1,199 @@
+//
+// Copyright 2023, Nick Westbury
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ProjectRepositoryStorageMoveService handles communication with the
+// repositories related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html
+type ProjectRepositoryStorageMoveService struct {
+	client *Client
+}
+
+// ProjectRepositoryStorageMove represents the status of a repository move.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html
+type ProjectRepositoryStorageMove struct {
+	ID                     int                `json:"id"`
+	CreatedAt              *time.Time         `json:"created_at"`
+	State                  string             `json:"state"`
+	SourceStorageName      string             `json:"source_storage_name"`
+	DestinationStorageName string             `json:"destination_storage_name"`
+	Project                *RepositoryProject `json:"project"`
+}
+
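+// RepositoryProject represents a project referenced by a ProjectRepositoryStorageMove.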
+type RepositoryProject struct {
+	ID                int        `json:"id"`
+	Description       string     `json:"description"`
+	Name              string     `json:"name"`
+	NameWithNamespace string     `json:"name_with_namespace"`
+	Path              string     `json:"path"`
+	PathWithNamespace string     `json:"path_with_namespace"`
+	CreatedAt         *time.Time `json:"created_at"`
+}
+
+// RetrieveAllProjectStorageMovesOptions represents the available
+// RetrieveAllStorageMoves() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#retrieve-all-project-repository-storage-moves
+type RetrieveAllProjectStorageMovesOptions ListOptions
+
+// RetrieveAllStorageMoves retrieves all project repository storage moves
+// accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#retrieve-all-project-repository-storage-moves
+func (p ProjectRepositoryStorageMoveService) RetrieveAllStorageMoves(opts RetrieveAllProjectStorageMovesOptions, options ...RequestOptionFunc) ([]*ProjectRepositoryStorageMove, *Response, error) {
+	req, err := p.client.NewRequest(http.MethodGet, "project_repository_storage_moves", opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var psms []*ProjectRepositoryStorageMove
+	resp, err := p.client.Do(req, &psms)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return psms, resp, err
+}
+
+// RetrieveAllStorageMovesForProject retrieves all repository storage moves for
+// a single project accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-project
+func (p ProjectRepositoryStorageMoveService) RetrieveAllStorageMovesForProject(project int, opts RetrieveAllProjectStorageMovesOptions, options ...RequestOptionFunc) ([]*ProjectRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("projects/%d/repository_storage_moves", project)
+
+	req, err := p.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var psms []*ProjectRepositoryStorageMove
+	resp, err := p.client.Do(req, &psms)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return psms, resp, err
+}
+
+// GetStorageMove gets a single project repository storage move.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#get-a-single-project-repository-storage-move
+func (p ProjectRepositoryStorageMoveService) GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("project_repository_storage_moves/%d", repositoryStorage)
+
+	req, err := p.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	psm := new(ProjectRepositoryStorageMove)
+	resp, err := p.client.Do(req, psm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return psm, resp, err
+}
+
+// GetStorageMoveForProject gets a single repository storage move for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#get-a-single-repository-storage-move-for-a-project
+func (p ProjectRepositoryStorageMoveService) GetStorageMoveForProject(project int, repositoryStorage int, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("projects/%d/repository_storage_moves/%d", project, repositoryStorage)
+
+	req, err := p.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	psm := new(ProjectRepositoryStorageMove)
+	resp, err := p.client.Do(req, psm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return psm, resp, err
+}
+
+// ScheduleStorageMoveForProjectOptions represents the available
+// ScheduleStorageMoveForProject() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-project
+type ScheduleStorageMoveForProjectOptions struct {
+	DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
+}
+
+// ScheduleStorageMoveForProject schedules a repository to be moved for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-project
+func (p ProjectRepositoryStorageMoveService) ScheduleStorageMoveForProject(project int, opts ScheduleStorageMoveForProjectOptions, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("projects/%d/repository_storage_moves", project)
+
+	req, err := p.client.NewRequest(http.MethodPost, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	psm := new(ProjectRepositoryStorageMove)
+	resp, err := p.client.Do(req, psm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return psm, resp, err
+}
+
+// ScheduleAllProjectStorageMovesOptions represents the available
+// ScheduleAllStorageMoves() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#schedule-repository-storage-moves-for-all-projects-on-a-storage-shard
+type ScheduleAllProjectStorageMovesOptions struct {
+	SourceStorageName      *string `url:"source_storage_name,omitempty" json:"source_storage_name,omitempty"`
+	DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
+}
+
+// ScheduleAllStorageMoves schedules all repositories to be moved.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#schedule-repository-storage-moves-for-all-projects-on-a-storage-shard
+func (p ProjectRepositoryStorageMoveService) ScheduleAllStorageMoves(opts ScheduleAllProjectStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) {
+	req, err := p.client.NewRequest(http.MethodPost, "project_repository_storage_moves", opts, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.client.Do(req, nil)
+}
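The project-level service mirrors the group-level one above. A short sketch of scheduling moves for every project repository on a source shard; storage names and the token are placeholders:

```go
package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Move every project repository off the "default" shard to "storage-02".
	_, err = git.ProjectRepositoryStorageMove.ScheduleAllStorageMoves(
		gitlab.ScheduleAllProjectStorageMovesOptions{
			SourceStorageName:      gitlab.String("default"),
			DestinationStorageName: gitlab.String("storage-02"),
		})
	if err != nil {
		log.Fatal(err)
	}
}
```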
diff --git a/vendor/github.com/xanzy/go-gitlab/projects.go b/vendor/github.com/xanzy/go-gitlab/projects.go
index cbe67635bd..6d4cfe77ae 100644
--- a/vendor/github.com/xanzy/go-gitlab/projects.go
+++ b/vendor/github.com/xanzy/go-gitlab/projects.go
@@ -158,7 +158,7 @@ type Project struct {
 	AutoDevopsDeployStrategy                 string             `json:"auto_devops_deploy_strategy"`
 	AutoDevopsEnabled                        bool               `json:"auto_devops_enabled"`
 	BuildGitStrategy                         string             `json:"build_git_strategy"`
-	EmailsDisabled                           bool               `json:"emails_disabled"`
+	EmailsEnabled                            bool               `json:"emails_enabled"`
 	ExternalAuthorizationClassificationLabel string             `json:"external_authorization_classification_label"`
 	RequirementsEnabled                      bool               `json:"requirements_enabled"`
 	RequirementsAccessLevel                  AccessControlValue `json:"requirements_access_level"`
@@ -166,6 +166,8 @@ type Project struct {
 	SecurityAndComplianceAccessLevel         AccessControlValue `json:"security_and_compliance_access_level"`
 	MergeRequestDefaultTargetSelf            bool               `json:"mr_default_target_self"`
 
+	// Deprecated: Use EmailsEnabled instead
+	EmailsDisabled bool `json:"emails_disabled"`
 	// Deprecated: This parameter has been renamed to PublicJobs in GitLab 9.0.
 	PublicBuilds bool `json:"public_builds"`
 }
@@ -589,7 +591,7 @@ type CreateProjectOptions struct {
 	ContainerRegistryAccessLevel              *AccessControlValue                  `url:"container_registry_access_level,omitempty" json:"container_registry_access_level,omitempty"`
 	DefaultBranch                             *string                              `url:"default_branch,omitempty" json:"default_branch,omitempty"`
 	Description                               *string                              `url:"description,omitempty" json:"description,omitempty"`
-	EmailsDisabled                            *bool                                `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"`
+	EmailsEnabled                             *bool                                `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"`
 	EnforceAuthChecksOnUploads                *bool                                `url:"enforce_auth_checks_on_uploads,omitempty" json:"enforce_auth_checks_on_uploads,omitempty"`
 	ExternalAuthorizationClassificationLabel  *string                              `url:"external_authorization_classification_label,omitempty" json:"external_authorization_classification_label,omitempty"`
 	ForkingAccessLevel                        *AccessControlValue                  `url:"forking_access_level,omitempty" json:"forking_access_level,omitempty"`
@@ -646,6 +648,8 @@ type CreateProjectOptions struct {
 	CIForwardDeploymentEnabled *bool `url:"ci_forward_deployment_enabled,omitempty" json:"ci_forward_deployment_enabled,omitempty"`
 	// Deprecated: Use ContainerRegistryAccessLevel instead.
 	ContainerRegistryEnabled *bool `url:"container_registry_enabled,omitempty" json:"container_registry_enabled,omitempty"`
+	// Deprecated: Use EmailsEnabled instead
+	EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"`
 	// Deprecated: Use IssuesAccessLevel instead.
 	IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"`
 	// Deprecated: No longer supported in recent versions.
@@ -812,7 +816,7 @@ type EditProjectOptions struct {
 	ContainerRegistryAccessLevel              *AccessControlValue                  `url:"container_registry_access_level,omitempty" json:"container_registry_access_level,omitempty"`
 	DefaultBranch                             *string                              `url:"default_branch,omitempty" json:"default_branch,omitempty"`
 	Description                               *string                              `url:"description,omitempty" json:"description,omitempty"`
-	EmailsDisabled                            *bool                                `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"`
+	EmailsEnabled                             *bool                                `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"`
 	EnforceAuthChecksOnUploads                *bool                                `url:"enforce_auth_checks_on_uploads,omitempty" json:"enforce_auth_checks_on_uploads,omitempty"`
 	ExternalAuthorizationClassificationLabel  *string                              `url:"external_authorization_classification_label,omitempty" json:"external_authorization_classification_label,omitempty"`
 	ForkingAccessLevel                        *AccessControlValue                  `url:"forking_access_level,omitempty" json:"forking_access_level,omitempty"`
@@ -870,6 +874,8 @@ type EditProjectOptions struct {
 
 	// Deprecated: Use ContainerRegistryAccessLevel instead.
 	ContainerRegistryEnabled *bool `url:"container_registry_enabled,omitempty" json:"container_registry_enabled,omitempty"`
+	// Deprecated: Use EmailsEnabled instead
+	EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"`
 	// Deprecated: Use IssuesAccessLevel instead.
 	IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"`
 	// Deprecated: Use BuildsAccessLevel instead.
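Project notification emails are now toggled through `EmailsEnabled`, with `EmailsDisabled` kept only as a deprecated alias. A short migration sketch; the project ID and token are placeholders:

```go
package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Before: EmailsDisabled: gitlab.Bool(true)
	// After:  EmailsEnabled:  gitlab.Bool(false)
	_, _, err = git.Projects.EditProject(123, &gitlab.EditProjectOptions{
		EmailsEnabled: gitlab.Bool(false),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```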
diff --git a/vendor/github.com/xanzy/go-gitlab/protected_tags.go b/vendor/github.com/xanzy/go-gitlab/protected_tags.go
index 62da5c4749..76e8ff4d2a 100644
--- a/vendor/github.com/xanzy/go-gitlab/protected_tags.go
+++ b/vendor/github.com/xanzy/go-gitlab/protected_tags.go
@@ -44,6 +44,7 @@ type ProtectedTag struct {
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/protected_tags.html
 type TagAccessDescription struct {
+	ID                     int              `json:"id"`
 	UserID                 int              `json:"user_id"`
 	GroupID                int              `json:"group_id"`
 	AccessLevel            AccessLevelValue `json:"access_level"`
diff --git a/vendor/github.com/xanzy/go-gitlab/services.go b/vendor/github.com/xanzy/go-gitlab/services.go
index e7acb9e981..69842aec14 100644
--- a/vendor/github.com/xanzy/go-gitlab/services.go
+++ b/vendor/github.com/xanzy/go-gitlab/services.go
@@ -44,6 +44,7 @@ type Service struct {
 	Active                   bool       `json:"active"`
 	PushEvents               bool       `json:"push_events"`
 	IssuesEvents             bool       `json:"issues_events"`
+	AlertEvents              bool       `json:"alert_events"`
 	ConfidentialIssuesEvents bool       `json:"confidential_issues_events"`
 	CommitEvents             bool       `json:"commit_events"`
 	MergeRequestsEvents      bool       `json:"merge_requests_events"`
@@ -54,6 +55,7 @@ type Service struct {
 	PipelineEvents           bool       `json:"pipeline_events"`
 	JobEvents                bool       `json:"job_events"`
 	WikiPageEvents           bool       `json:"wiki_page_events"`
+	VulnerabilityEvents      bool       `json:"vulnerability_events"`
 	DeploymentEvents         bool       `json:"deployment_events"`
 }
 
@@ -191,8 +193,8 @@ type DiscordService struct {
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/services.html#discord
 type DiscordServiceProperties struct {
-	NotifyOnlyBrokenPipelines bool   `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
 	BranchesToBeNotified      string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
+	NotifyOnlyBrokenPipelines bool   `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
 }
 
 // GetDiscordService gets Discord service settings for a project.
@@ -226,7 +228,18 @@ func (s *ServicesService) GetDiscordService(pid interface{}, options ...RequestO
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/services.html#createedit-discord-service
 type SetDiscordServiceOptions struct {
-	WebHook *string `json:"webhook,omitempty"`
+	WebHook                   *string `url:"webhook,omitempty" json:"webhook,omitempty"`
+	BranchesToBeNotified      *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
+	ConfidentialIssuesEvents  *bool   `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
+	ConfidentialNoteEvents    *bool   `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
+	IssuesEvents              *bool   `url:"issues_events,omitempty" json:"issues_events,omitempty"`
+	MergeRequestsEvents       *bool   `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
+	NoteEvents                *bool   `url:"note_events,omitempty" json:"note_events,omitempty"`
+	NotifyOnlyBrokenPipelines *bool   `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
+	PipelineEvents            *bool   `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
+	PushEvents                *bool   `url:"push_events,omitempty" json:"push_events,omitempty"`
+	TagPushEvents             *bool   `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
+	WikiPageEvents            *bool   `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
 }
 
 // SetDiscordService sets Discord service for a project.
@@ -993,6 +1006,7 @@ type MattermostServiceProperties struct {
 	TagPushChannel            string    `json:"tag_push_channel"`
 	PipelineChannel           string    `json:"pipeline_channel"`
 	PushChannel               string    `json:"push_channel"`
+	VulnerabilityChannel      string    `json:"vulnerability_channel"`
 	WikiPageChannel           string    `json:"wiki_page_channel"`
 }
 
@@ -1032,24 +1046,24 @@ type SetMattermostServiceOptions struct {
 	Channel                   *string `url:"channel,omitempty" json:"channel,omitempty"`
 	NotifyOnlyBrokenPipelines *bool   `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
 	BranchesToBeNotified      *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
-	ConfidentialIssueChannel  *string `url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"`
-	ConfidentialIssuesEvents  *bool   `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
-	ConfidentialNoteChannel   *string `json:"confidential_note_channel,omitempty"`
-	ConfidentialNoteEvents    *bool   `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
-	IssueChannel              *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"`
+	PushEvents                *bool   `url:"push_events,omitempty" json:"push_events,omitempty"`
 	IssuesEvents              *bool   `url:"issues_events,omitempty" json:"issues_events,omitempty"`
-	MergeRequestChannel       *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"`
+	ConfidentialIssuesEvents  *bool   `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
 	MergeRequestsEvents       *bool   `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
-	TagPushChannel            *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"`
 	TagPushEvents             *bool   `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
-	NoteChannel               *string `url:"note_channel,omitempty" json:"note_channel,omitempty"`
 	NoteEvents                *bool   `url:"note_events,omitempty" json:"note_events,omitempty"`
-	PipelineChannel           *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"`
+	ConfidentialNoteChannel   *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"`
 	PipelineEvents            *bool   `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
+	WikiPageEvents            *bool   `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
 	PushChannel               *string `url:"push_channel,omitempty" json:"push_channel,omitempty"`
-	PushEvents                *bool   `url:"push_events,omitempty" json:"push_events,omitempty"`
+	IssueChannel              *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"`
+	ConfidentialIssueChannel  *string `url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"`
+	MergeRequestChannel       *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"`
+	NoteChannel               *string `url:"note_channel,omitempty" json:"note_channel,omitempty"`
+	ConfidentialNoteEvents    *bool   `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
+	TagPushChannel            *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"`
+	PipelineChannel           *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"`
 	WikiPageChannel           *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"`
-	WikiPageEvents            *bool   `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
 }
 
 // SetMattermostService sets Mattermost service for a project.
@@ -1409,6 +1423,7 @@ type SlackServiceProperties struct {
 	NotifyOnlyBrokenPipelines BoolValue `json:"notify_only_broken_pipelines"`
 	NotifyOnlyDefaultBranch   BoolValue `json:"notify_only_default_branch"`
 	BranchesToBeNotified      string    `json:"branches_to_be_notified"`
+	AlertChannel              string    `json:"alert_channel"`
 	ConfidentialIssueChannel  string    `json:"confidential_issue_channel"`
 	ConfidentialNoteChannel   string    `json:"confidential_note_channel"`
 	DeploymentChannel         string    `json:"deployment_channel"`
@@ -1418,6 +1433,7 @@ type SlackServiceProperties struct {
 	TagPushChannel            string    `json:"tag_push_channel"`
 	PipelineChannel           string    `json:"pipeline_channel"`
 	PushChannel               string    `json:"push_channel"`
+	VulnerabilityChannel      string    `json:"vulnerability_channel"`
 	WikiPageChannel           string    `json:"wiki_page_channel"`
 }
 
@@ -1458,29 +1474,28 @@ type SetSlackServiceOptions struct {
 	NotifyOnlyBrokenPipelines *bool   `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
 	NotifyOnlyDefaultBranch   *bool   `url:"notify_only_default_branch,omitempty" json:"notify_only_default_branch,omitempty"`
 	BranchesToBeNotified      *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
+	AlertChannel              *string `url:"alert_channel,omitempty" json:"alert_channel,omitempty"`
+	AlertEvents               *bool   `url:"alert_events,omitempty" json:"alert_events,omitempty"`
 	ConfidentialIssueChannel  *string `url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"`
 	ConfidentialIssuesEvents  *bool   `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
-	// TODO: Currently, GitLab ignores this option (not implemented yet?), so
-	// there is no way to set it. Uncomment when this is fixed.
-	// See: https://gitlab.com/gitlab-org/gitlab-ce/issues/49730
-	// ConfidentialNoteChannel   *string `json:"confidential_note_channel,omitempty"`
-	ConfidentialNoteEvents *bool   `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
-	DeploymentChannel      *string `url:"deployment_channel,omitempty" json:"deployment_channel,omitempty"`
-	DeploymentEvents       *bool   `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
-	IssueChannel           *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"`
-	IssuesEvents           *bool   `url:"issues_events,omitempty" json:"issues_events,omitempty"`
-	MergeRequestChannel    *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"`
-	MergeRequestsEvents    *bool   `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
-	TagPushChannel         *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"`
-	TagPushEvents          *bool   `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
-	NoteChannel            *string `url:"note_channel,omitempty" json:"note_channel,omitempty"`
-	NoteEvents             *bool   `url:"note_events,omitempty" json:"note_events,omitempty"`
-	PipelineChannel        *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"`
-	PipelineEvents         *bool   `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
-	PushChannel            *string `url:"push_channel,omitempty" json:"push_channel,omitempty"`
-	PushEvents             *bool   `url:"push_events,omitempty" json:"push_events,omitempty"`
-	WikiPageChannel        *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"`
-	WikiPageEvents         *bool   `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
+	ConfidentialNoteChannel   *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"`
+	ConfidentialNoteEvents    *bool   `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
+	DeploymentChannel         *string `url:"deployment_channel,omitempty" json:"deployment_channel,omitempty"`
+	DeploymentEvents          *bool   `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
+	IssueChannel              *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"`
+	IssuesEvents              *bool   `url:"issues_events,omitempty" json:"issues_events,omitempty"`
+	MergeRequestChannel       *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"`
+	MergeRequestsEvents       *bool   `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
+	NoteChannel               *string `url:"note_channel,omitempty" json:"note_channel,omitempty"`
+	NoteEvents                *bool   `url:"note_events,omitempty" json:"note_events,omitempty"`
+	PipelineChannel           *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"`
+	PipelineEvents            *bool   `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
+	PushChannel               *string `url:"push_channel,omitempty" json:"push_channel,omitempty"`
+	PushEvents                *bool   `url:"push_events,omitempty" json:"push_events,omitempty"`
+	TagPushChannel            *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"`
+	TagPushEvents             *bool   `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
+	WikiPageChannel           *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"`
+	WikiPageEvents            *bool   `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
 }
 
 // SetSlackService sets Slack service for a project
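The reworked SetDiscordServiceOptions above now carries the same per-event toggles as the other chat integrations, so the Discord webhook can be configured beyond just its URL. A minimal sketch follows; the token, project ID, and webhook URL are placeholders rather than values from this change, and it assumes SetDiscordService follows the same signature pattern as the other Set*Service helpers in xanzy/go-gitlab (returning only a *Response and error) along with the usual gitlab.String/gitlab.Bool pointer helpers.

```go
package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Placeholder token and project ID, for illustration only.
	client, err := gitlab.NewClient("glpat-example-token")
	if err != nil {
		log.Fatal(err)
	}

	// Notify a Discord channel about broken pipelines on protected branches.
	_, err = client.Services.SetDiscordService(1234, &gitlab.SetDiscordServiceOptions{
		WebHook:                   gitlab.String("https://discord.com/api/webhooks/example"),
		BranchesToBeNotified:      gitlab.String("protected"),
		PipelineEvents:            gitlab.Bool(true),
		NotifyOnlyBrokenPipelines: gitlab.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```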
diff --git a/vendor/github.com/xanzy/go-gitlab/settings.go b/vendor/github.com/xanzy/go-gitlab/settings.go
index 3ac4eadb4f..92a1ef656f 100644
--- a/vendor/github.com/xanzy/go-gitlab/settings.go
+++ b/vendor/github.com/xanzy/go-gitlab/settings.go
@@ -189,6 +189,7 @@ type Settings struct {
 	HousekeepingFullRepackPeriod                          int               `json:"housekeeping_full_repack_period"`
 	HousekeepingGcPeriod                                  int               `json:"housekeeping_gc_period"`
 	HousekeepingIncrementalRepackPeriod                   int               `json:"housekeeping_incremental_repack_period"`
+	HousekeepingOptimizeRepositoryPeriod                  int               `json:"housekeeping_optimize_repository_period"`
 	ImportSources                                         []string          `json:"import_sources"`
 	InactiveProjectsDeleteAfterMonths                     int               `json:"inactive_projects_delete_after_months"`
 	InactiveProjectsMinSizeMB                             int               `json:"inactive_projects_min_size_mb"`
@@ -567,6 +568,7 @@ type UpdateSettingsOptions struct {
 	HousekeepingFullRepackPeriod                          *int               `url:"housekeeping_full_repack_period,omitempty" json:"housekeeping_full_repack_period,omitempty"`
 	HousekeepingGcPeriod                                  *int               `url:"housekeeping_gc_period,omitempty" json:"housekeeping_gc_period,omitempty"`
 	HousekeepingIncrementalRepackPeriod                   *int               `url:"housekeeping_incremental_repack_period,omitempty" json:"housekeeping_incremental_repack_period,omitempty"`
+	HousekeepingOptimizeRepositoryPeriod                  *int               `url:"housekeeping_optimize_repository_period,omitempty" json:"housekeeping_optimize_repository_period,omitempty"`
 	ImportSources                                         *[]string          `url:"import_sources,omitempty" json:"import_sources,omitempty"`
 	InactiveProjectsDeleteAfterMonths                     *int               `url:"inactive_projects_delete_after_months,omitempty" json:"inactive_projects_delete_after_months,omitempty"`
 	InactiveProjectsMinSizeMB                             *int               `url:"inactive_projects_min_size_mb,omitempty" json:"inactive_projects_min_size_mb,omitempty"`
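The new HousekeepingOptimizeRepositoryPeriod option maps to GitLab's housekeeping_optimize_repository_period application setting. A hedged sketch of setting it through the Settings service (an administrator token is required; the interval of 10 pushes is purely illustrative):

```go
package gitlabexamples

import gitlab "github.com/xanzy/go-gitlab"

// enableOptimizeRepository asks GitLab to run repository optimization
// housekeeping every 10 pushes. The interval is illustrative only.
func enableOptimizeRepository(client *gitlab.Client) error {
	_, _, err := client.Settings.UpdateSettings(&gitlab.UpdateSettingsOptions{
		HousekeepingOptimizeRepositoryPeriod: gitlab.Int(10),
	})
	return err
}
```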
diff --git a/vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go b/vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go
new file mode 100644
index 0000000000..00761ec2ed
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go
@@ -0,0 +1,203 @@
+//
+// Copyright 2023, Nick Westbury
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// SnippetRepositoryStorageMoveService handles communication with the
+// snippets related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html
+type SnippetRepositoryStorageMoveService struct {
+	client *Client
+}
+
+// SnippetRepositoryStorageMove represents the status of a repository move.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html
+type SnippetRepositoryStorageMove struct {
+	ID                     int                `json:"id"`
+	CreatedAt              *time.Time         `json:"created_at"`
+	State                  string             `json:"state"`
+	SourceStorageName      string             `json:"source_storage_name"`
+	DestinationStorageName string             `json:"destination_storage_name"`
+	Snippet                *RepositorySnippet `json:"snippet"`
+}
+
+type RepositorySnippet struct {
+	ID            int             `json:"id"`
+	Title         string          `json:"title"`
+	Description   string          `json:"description"`
+	Visibility    VisibilityValue `json:"visibility"`
+	UpdatedAt     *time.Time      `json:"updated_at"`
+	CreatedAt     *time.Time      `json:"created_at"`
+	ProjectID     int             `json:"project_id"`
+	WebURL        string          `json:"web_url"`
+	RawURL        string          `json:"raw_url"`
+	SSHURLToRepo  string          `json:"ssh_url_to_repo"`
+	HTTPURLToRepo string          `json:"http_url_to_repo"`
+}
+
+// RetrieveAllSnippetStorageMovesOptions represents the available
+// RetrieveAllStorageMoves() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-snippet
+type RetrieveAllSnippetStorageMovesOptions ListOptions
+
+// RetrieveAllStorageMoves retrieves all snippet repository storage moves
+// accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-snippet
+func (s SnippetRepositoryStorageMoveService) RetrieveAllStorageMoves(opts RetrieveAllSnippetStorageMovesOptions, options ...RequestOptionFunc) ([]*SnippetRepositoryStorageMove, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "snippet_repository_storage_moves", opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ssms []*SnippetRepositoryStorageMove
+	resp, err := s.client.Do(req, &ssms)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ssms, resp, err
+}
+
+// RetrieveAllStorageMovesForSnippet retrieves all repository storage moves for
+// a single snippet accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-snippet
+func (s SnippetRepositoryStorageMoveService) RetrieveAllStorageMovesForSnippet(snippet int, opts RetrieveAllSnippetStorageMovesOptions, options ...RequestOptionFunc) ([]*SnippetRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("snippets/%d/repository_storage_moves", snippet)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ssms []*SnippetRepositoryStorageMove
+	resp, err := s.client.Do(req, &ssms)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ssms, resp, err
+}
+
+// GetStorageMove gets a single snippet repository storage move.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#get-a-single-snippet-repository-storage-move
+func (s SnippetRepositoryStorageMoveService) GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("snippet_repository_storage_moves/%d", repositoryStorage)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ssm := new(SnippetRepositoryStorageMove)
+	resp, err := s.client.Do(req, ssm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ssm, resp, err
+}
+
+// GetStorageMoveForSnippet gets a single repository storage move for a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#get-a-single-repository-storage-move-for-a-snippet
+func (s SnippetRepositoryStorageMoveService) GetStorageMoveForSnippet(snippet int, repositoryStorage int, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("snippets/%d/repository_storage_moves/%d", snippet, repositoryStorage)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ssm := new(SnippetRepositoryStorageMove)
+	resp, err := s.client.Do(req, ssm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ssm, resp, err
+}
+
+// ScheduleStorageMoveForSnippetOptions represents the available
+// ScheduleStorageMoveForSnippet() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-snippet
+type ScheduleStorageMoveForSnippetOptions struct {
+	DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
+}
+
+// ScheduleStorageMoveForSnippet schedules a repository to be moved for a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-snippet
+func (s SnippetRepositoryStorageMoveService) ScheduleStorageMoveForSnippet(snippet int, opts ScheduleStorageMoveForSnippetOptions, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("snippets/%d/repository_storage_moves", snippet)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ssm := new(SnippetRepositoryStorageMove)
+	resp, err := s.client.Do(req, ssm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ssm, resp, err
+}
+
+// ScheduleAllSnippetStorageMovesOptions represents the available
+// ScheduleAllStorageMoves() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#schedule-repository-storage-moves-for-all-snippets-on-a-storage-shard
+type ScheduleAllSnippetStorageMovesOptions struct {
+	SourceStorageName      *string `url:"source_storage_name,omitempty" json:"source_storage_name,omitempty"`
+	DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
+}
+
+// ScheduleAllStorageMoves schedules all snippet repositories to be moved.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#schedule-repository-storage-moves-for-all-snippets-on-a-storage-shard
+func (s SnippetRepositoryStorageMoveService) ScheduleAllStorageMoves(opts ScheduleAllSnippetStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) {
+	req, err := s.client.NewRequest(http.MethodPost, "snippet_repository_storage_moves", opts, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
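Taken together, the new service covers scheduling and inspecting snippet repository storage moves. The sketch below assumes the client wires this up as client.SnippetRepositoryStorageMove, mirroring the existing project and group storage-move services; the snippet ID and shard name are placeholders.

```go
package gitlabexamples

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// moveSnippetRepo schedules snippet 42's repository onto another Gitaly
// storage shard and reports the initial state of the move.
func moveSnippetRepo(client *gitlab.Client) error {
	move, _, err := client.SnippetRepositoryStorageMove.ScheduleStorageMoveForSnippet(42,
		gitlab.ScheduleStorageMoveForSnippetOptions{
			DestinationStorageName: gitlab.String("storage-2"),
		})
	if err != nil {
		return err
	}
	fmt.Printf("move %d is %s\n", move.ID, move.State)
	return nil
}
```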
diff --git a/vendor/github.com/xanzy/go-gitlab/todos.go b/vendor/github.com/xanzy/go-gitlab/todos.go
index 4e3c58dee5..7ea26d01b7 100644
--- a/vendor/github.com/xanzy/go-gitlab/todos.go
+++ b/vendor/github.com/xanzy/go-gitlab/todos.go
@@ -59,7 +59,7 @@ type TodoTarget struct {
 	Description          string                 `json:"description"`
 	Downvotes            int                    `json:"downvotes"`
 	ID                   int                    `json:"id"`
-	IID                  int                    `json:"iid"`
+	IID                  interface{}            `json:"iid"`
 	Labels               []string               `json:"labels"`
 	Milestone            *Milestone             `json:"milestone"`
 	ProjectID            int                    `json:"project_id"`
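Loosening TodoTarget.IID from int to interface{} lets the struct decode targets whose iid is not numeric (presumably commit or design todos), but callers that previously treated it as an int now need a small normalization step. A hedged sketch; note that encoding/json decodes JSON numbers into float64 when the destination is interface{}.

```go
package gitlabexamples

import (
	"fmt"
	"strconv"

	gitlab "github.com/xanzy/go-gitlab"
)

// iidString normalizes the loosely typed TodoTarget.IID field.
func iidString(t *gitlab.TodoTarget) string {
	switch v := t.IID.(type) {
	case float64: // numeric iid decoded from JSON
		return strconv.Itoa(int(v))
	case string: // non-numeric iid, e.g. a commit SHA
		return v
	default:
		return fmt.Sprintf("%v", v)
	}
}
```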
diff --git a/vendor/github.com/xanzy/go-gitlab/users.go b/vendor/github.com/xanzy/go-gitlab/users.go
index 68ddd6bd74..d3eff7615e 100644
--- a/vendor/github.com/xanzy/go-gitlab/users.go
+++ b/vendor/github.com/xanzy/go-gitlab/users.go
@@ -398,6 +398,36 @@ func (s *UsersService) SetUserStatus(opt *UserStatusOptions, options ...RequestO
 	return status, resp, nil
 }
 
+// UserAssociationsCount represents the user associations count.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-associations-count-for-user
+type UserAssociationsCount struct {
+	GroupsCount        int `json:"groups_count"`
+	ProjectsCount      int `json:"projects_count"`
+	IssuesCount        int `json:"issues_count"`
+	MergeRequestsCount int `json:"merge_requests_count"`
+}
+
+// GetUserAssociationsCount gets a count of the specified user's associations
+// (groups, projects, issues and merge requests).
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-associations-count-for-user
+func (s *UsersService) GetUserAssociationsCount(user int, options ...RequestOptionFunc) (*UserAssociationsCount, *Response, error) {
+	u := fmt.Sprintf("users/%d/associations_count", user)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	uac := new(UserAssociationsCount)
+	resp, err := s.client.Do(req, uac)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return uac, resp, nil
+}
+
 // SSHKey represents a SSH key.
 //
 // GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-ssh-keys
@@ -409,11 +439,16 @@ type SSHKey struct {
 	ExpiresAt *time.Time `json:"expires_at"`
 }
 
+// ListSSHKeysOptions represents the available ListSSHKeys options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-ssh-keys
+type ListSSHKeysOptions ListOptions
+
 // ListSSHKeys gets a list of currently authenticated user's SSH keys.
 //
 // GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-ssh-keys
-func (s *UsersService) ListSSHKeys(options ...RequestOptionFunc) ([]*SSHKey, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodGet, "user/keys", nil, options)
+func (s *UsersService) ListSSHKeys(opt *ListSSHKeysOptions, options ...RequestOptionFunc) ([]*SSHKey, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "user/keys", opt, options)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -1373,3 +1408,50 @@ func (s *UsersService) DisableTwoFactor(user int, options ...RequestOptionFunc)
 		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
 	}
 }
+
+// UserRunner represents a GitLab runner linked to the current user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#create-a-runner
+type UserRunner struct {
+	ID             int        `json:"id"`
+	Token          string     `json:"token"`
+	TokenExpiresAt *time.Time `json:"token_expires_at"`
+}
+
+// CreateUserRunnerOptions represents the available CreateUserRunner() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#create-a-runner
+type CreateUserRunnerOptions struct {
+	RunnerType      *string   `url:"runner_type,omitempty" json:"runner_type,omitempty"`
+	GroupID         *int      `url:"group_id,omitempty" json:"group_id,omitempty"`
+	ProjectID       *int      `url:"project_id,omitempty" json:"project_id,omitempty"`
+	Description     *string   `url:"description,omitempty" json:"description,omitempty"`
+	Paused          *bool     `url:"paused,omitempty" json:"paused,omitempty"`
+	Locked          *bool     `url:"locked,omitempty" json:"locked,omitempty"`
+	RunUntagged     *bool     `url:"run_untagged,omitempty" json:"run_untagged,omitempty"`
+	TagList         *[]string `url:"tag_list,omitempty" json:"tag_list,omitempty"`
+	AccessLevel     *string   `url:"access_level,omitempty" json:"access_level,omitempty"`
+	MaximumTimeout  *int      `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"`
+	MaintenanceNote *string   `url:"maintenance_note,omitempty" json:"maintenance_note,omitempty"`
+}
+
+// CreateUserRunner creates a runner linked to the current user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#create-a-runner
+func (s *UsersService) CreateUserRunner(opts *CreateUserRunnerOptions, options ...RequestOptionFunc) (*UserRunner, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodPost, "user/runners", opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	r := new(UserRunner)
+	resp, err := s.client.Do(req, r)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return r, resp, nil
+}
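The users API gains association counts and user-owned runner registration, and ListSSHKeys now takes a pagination options argument, a signature change existing callers have to absorb. A rough sketch under those assumptions; the user ID, project ID, and runner description are placeholders.

```go
package gitlabexamples

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// inspectUser prints association counts for user 7, lists the current
// user's SSH keys, and registers a runner for a project.
func inspectUser(client *gitlab.Client) error {
	counts, _, err := client.Users.GetUserAssociationsCount(7)
	if err != nil {
		return err
	}
	fmt.Printf("user 7: %d projects, %d merge requests\n",
		counts.ProjectsCount, counts.MergeRequestsCount)

	// ListSSHKeys now requires an options argument; nil keeps the old behavior.
	keys, _, err := client.Users.ListSSHKeys(nil)
	if err != nil {
		return err
	}
	fmt.Printf("current user has %d SSH keys\n", len(keys))

	_, _, err = client.Users.CreateUserRunner(&gitlab.CreateUserRunnerOptions{
		RunnerType:  gitlab.String("project_type"),
		ProjectID:   gitlab.Int(1234),
		Description: gitlab.String("example runner"),
	})
	return err
}
```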
diff --git a/vendor/go.step.sm/crypto/jose/parse.go b/vendor/go.step.sm/crypto/jose/parse.go
index 8496a6481a..9807af03d6 100644
--- a/vendor/go.step.sm/crypto/jose/parse.go
+++ b/vendor/go.step.sm/crypto/jose/parse.go
@@ -375,7 +375,7 @@ func guessSignatureAlgorithm(key crypto.PrivateKey) SignatureAlgorithm {
 
 // guessKnownJWKAlgorithm sets the algorithm for keys that only have one
 // possible algorithm.
-func guessKnownJWKAlgorithm(ctx *context, jwk *JSONWebKey) {
+func guessKnownJWKAlgorithm(_ *context, jwk *JSONWebKey) {
 	if jwk.Algorithm == "" && jwk.Use != "enc" {
 		switch k := jwk.Key.(type) {
 		case *ecdsa.PrivateKey:
diff --git a/vendor/go.step.sm/crypto/pemutil/pem.go b/vendor/go.step.sm/crypto/pemutil/pem.go
index e02512ebc0..0941323d4a 100644
--- a/vendor/go.step.sm/crypto/pemutil/pem.go
+++ b/vendor/go.step.sm/crypto/pemutil/pem.go
@@ -44,6 +44,9 @@ var PromptPassword PasswordPrompter
 // check if a file exists and prompts the user if it should be overwritten.
 var WriteFile FileWriter = utils.WriteFile
 
+// PEMBlockHeader is the expected header for any PEM formatted block.
+var PEMBlockHeader = []byte("-----BEGIN ")
+
 // context add options to the pem methods.
 type context struct {
 	filename         string
@@ -282,7 +285,7 @@ func ReadCertificate(filename string, opts ...Options) (*x509.Certificate, error
 	}
 
 	// PEM format
-	if bytes.HasPrefix(b, []byte("-----BEGIN ")) {
+	if bytes.Contains(b, PEMBlockHeader) {
 		var crt interface{}
 		crt, err = Read(filename, opts...)
 		if err != nil {
@@ -311,7 +314,7 @@ func ReadCertificateBundle(filename string) ([]*x509.Certificate, error) {
 	}
 
 	// PEM format
-	if bytes.HasPrefix(b, []byte("-----BEGIN ")) {
+	if bytes.Contains(b, PEMBlockHeader) {
 		var block *pem.Block
 		var bundle []*x509.Certificate
 		for len(b) > 0 {
@@ -352,7 +355,7 @@ func ReadCertificateRequest(filename string) (*x509.CertificateRequest, error) {
 	}
 
 	// PEM format
-	if bytes.HasPrefix(b, []byte("-----BEGIN ")) {
+	if bytes.Contains(b, PEMBlockHeader) {
 		csr, err := Parse(b, WithFilename(filename))
 		if err != nil {
 			return nil, err
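Switching from bytes.HasPrefix to bytes.Contains with the exported PEMBlockHeader means PEM detection now tolerates leading bytes (for example the textual subject/issuer dump some tools prepend) before the first block. A small standalone illustration of the difference:

```go
package main

import (
	"bytes"
	"fmt"
)

// pemBlockHeader mirrors the PEMBlockHeader variable exported above.
var pemBlockHeader = []byte("-----BEGIN ")

func main() {
	// A certificate file with explanatory text before the first PEM block.
	data := []byte("subject=CN=example\n-----BEGIN CERTIFICATE-----\nMIIB...\n-----END CERTIFICATE-----\n")

	fmt.Println(bytes.HasPrefix(data, pemBlockHeader)) // false: the old check rejected this file
	fmt.Println(bytes.Contains(data, pemBlockHeader))  // true: the new check accepts it
}
```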
diff --git a/vendor/go4.org/intern/LICENSE b/vendor/go4.org/intern/LICENSE
new file mode 100644
index 0000000000..b0ab8921dc
--- /dev/null
+++ b/vendor/go4.org/intern/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2020, Brad Fitzpatrick
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/go4.org/intern/README.md b/vendor/go4.org/intern/README.md
new file mode 100644
index 0000000000..1db456b735
--- /dev/null
+++ b/vendor/go4.org/intern/README.md
@@ -0,0 +1,4 @@
+# go4.org/intern
+
+See https://godoc.org/go4.org/intern
+
diff --git a/vendor/go4.org/intern/intern.go b/vendor/go4.org/intern/intern.go
new file mode 100644
index 0000000000..536014cd35
--- /dev/null
+++ b/vendor/go4.org/intern/intern.go
@@ -0,0 +1,183 @@
+// Copyright 2020 Brad Fitzpatrick. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package intern lets you make smaller comparable values by boxing
+// a larger comparable value (such as a 16 byte string header) down
+// into a globally unique 8 byte pointer.
+//
+// The globally unique pointers are garbage collected with weak
+// references and finalizers. This package hides that.
+//
+// The GitHub repo is https://github.com/go4org/intern
+package intern // import "go4.org/intern"
+
+import (
+	"os"
+	"runtime"
+	"strconv"
+	"sync"
+	"unsafe"
+
+	_ "go4.org/unsafe/assume-no-moving-gc"
+)
+
+// A Value pointer is the handle to an underlying comparable value.
+// See func Get for how Value pointers may be used.
+type Value struct {
+	_      [0]func() // prevent people from accidentally using value type as comparable
+	cmpVal interface{}
+	// resurrected is guarded by mu (for all instances of Value).
+	// It is set true whenever v is synthesized from a uintptr.
+	resurrected bool
+}
+
+// Get returns the comparable value passed to the Get func
+// that returned v.
+func (v *Value) Get() interface{} { return v.cmpVal }
+
+// key is a key in our global value map.
+// It contains type-specialized fields to avoid allocations
+// when converting common types to empty interfaces.
+type key struct {
+	s      string
+	cmpVal interface{}
+	// isString reports whether key contains a string.
+	// Without it, the zero value of key is ambiguous.
+	isString bool
+}
+
+// keyFor returns a key to use with cmpVal.
+func keyFor(cmpVal interface{}) key {
+	if s, ok := cmpVal.(string); ok {
+		return key{s: s, isString: true}
+	}
+	return key{cmpVal: cmpVal}
+}
+
+// Value returns a *Value built from k.
+func (k key) Value() *Value {
+	if k.isString {
+		return &Value{cmpVal: k.s}
+	}
+	return &Value{cmpVal: k.cmpVal}
+}
+
+var (
+	// mu guards valMap, a weakref map of *Value by underlying value.
+	// It also guards the resurrected field of all *Values.
+	mu      sync.Mutex
+	valMap  = map[key]uintptr{} // to uintptr(*Value)
+	valSafe = safeMap()         // non-nil in safe+leaky mode
+)
+
+// safeMap returns a non-nil map if we're in safe-but-leaky mode,
+// as controlled by GO4_INTERN_SAFE_BUT_LEAKY.
+func safeMap() map[key]*Value {
+	if v, _ := strconv.ParseBool(os.Getenv("GO4_INTERN_SAFE_BUT_LEAKY")); v {
+		return map[key]*Value{}
+	}
+	return nil
+}
+
+// Get returns a pointer representing the comparable value cmpVal.
+//
+// The returned pointer will be the same for Get(v) and Get(v2)
+// if and only if v == v2, and can be used as a map key.
+func Get(cmpVal interface{}) *Value {
+	return get(keyFor(cmpVal))
+}
+
+// GetByString is identical to Get, except that it is specialized for strings.
+// This avoids an allocation from putting a string into an interface{}
+// to pass as an argument to Get.
+func GetByString(s string) *Value {
+	return get(key{s: s, isString: true})
+}
+
+// We play unsafe games that violate Go's rules (and assume a non-moving
+// collector). So we quiet Go here.
+// See the comment below Get for more implementation details.
+//go:nocheckptr
+func get(k key) *Value {
+	mu.Lock()
+	defer mu.Unlock()
+
+	var v *Value
+	if valSafe != nil {
+		v = valSafe[k]
+	} else if addr, ok := valMap[k]; ok {
+		v = (*Value)((unsafe.Pointer)(addr))
+		v.resurrected = true
+	}
+	if v != nil {
+		return v
+	}
+	v = k.Value()
+	if valSafe != nil {
+		valSafe[k] = v
+	} else {
+		// SetFinalizer before uintptr conversion (theoretical concern;
+		// see https://github.com/go4org/intern/issues/13)
+		runtime.SetFinalizer(v, finalize)
+		valMap[k] = uintptr(unsafe.Pointer(v))
+	}
+	return v
+}
+
+func finalize(v *Value) {
+	mu.Lock()
+	defer mu.Unlock()
+	if v.resurrected {
+		// We lost the race. Somebody resurrected it while we
+		// were about to finalize it. Try again next round.
+		v.resurrected = false
+		runtime.SetFinalizer(v, finalize)
+		return
+	}
+	delete(valMap, keyFor(v.cmpVal))
+}
+
+// Interning is simple if you don't require that unused values be
+// garbage collectable. But we do require that; we don't want to be
+// a DoS vector. We do this by using a uintptr to hide the pointer from
+// the garbage collector, and using a finalizer to eliminate the
+// pointer when no other code is using it.
+//
+// The obvious implementation of this is to use a
+// map[interface{}]uintptr-of-*interface{}, and set up a finalizer to
+// delete from the map. Unfortunately, this is racy. Because pointers
+// are being created in violation of Go's unsafety rules, it's
+// possible to create a pointer to a value concurrently with the GC
+// concluding that the value can be collected. There are other races
+// that break the equality invariant as well, but the use-after-free
+// will cause a runtime crash.
+//
+// To make this work, the finalizer needs to know that no references
+// have been unsafely created since the finalizer was set up. To do
+// this, values carry a "resurrected" sentinel, which gets set
+// whenever a pointer is unsafely created. If the finalizer encounters
+// the sentinel, it clears the sentinel and delays collection for one
+// additional GC cycle, by re-installing itself as finalizer. This
+// ensures that the unsafely created pointer is visible to the GC, and
+// will correctly prevent collection.
+//
+// This technique does mean that interned values that get reused take
+// at least 3 GC cycles to fully collect (1 to clear the sentinel, 1
+// to clean up the unsafe map, 1 to be actually deleted).
+//
+// @ianlancetaylor commented in
+// https://github.com/golang/go/issues/41303#issuecomment-717401656
+// that it is possible to implement weak references in terms of
+// finalizers without unsafe. Unfortunately, the approach he outlined
+// does not work here, for two reasons. First, there is no way to
+// construct a strong pointer out of a weak pointer; our map stores
+// weak pointers, but we must return strong pointers to callers.
+// Second, and more fundamentally, we must return not just _a_ strong
+// pointer to callers, but _the same_ strong pointer to callers. In
+// order to return _the same_ strong pointer to callers, we must track
+// it, which is exactly what we cannot do with strong pointers.
+//
+// See https://github.com/inetaf/netaddr/issues/53 for more
+// discussion, and https://github.com/go4org/intern/issues/2 for an
+// illustration of the subtleties at play.
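The package comment above is the whole contract: for a given comparable value, Get always hands back the same pointer, so the pointer itself can serve as a compact map key or identity. A minimal usage sketch:

```go
package main

import (
	"fmt"

	"go4.org/intern"
)

func main() {
	a := intern.GetByString("hello")
	b := intern.GetByString("hello")
	n := intern.Get(42)

	// Equal inputs yield the identical *Value, so pointer comparison
	// stands in for comparing the boxed values.
	fmt.Println(a == b)              // true
	fmt.Println(a.Get().(string))    // "hello"
	fmt.Println(n.Get().(int) == 42) // true
}
```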
diff --git a/vendor/go4.org/unsafe/assume-no-moving-gc/LICENSE b/vendor/go4.org/unsafe/assume-no-moving-gc/LICENSE
new file mode 100644
index 0000000000..b0ab8921dc
--- /dev/null
+++ b/vendor/go4.org/unsafe/assume-no-moving-gc/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2020, Brad Fitzpatrick
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/go4.org/unsafe/assume-no-moving-gc/README.md b/vendor/go4.org/unsafe/assume-no-moving-gc/README.md
new file mode 100644
index 0000000000..920fc9ddab
--- /dev/null
+++ b/vendor/go4.org/unsafe/assume-no-moving-gc/README.md
@@ -0,0 +1,13 @@
+# go4.org/unsafe/assume-no-moving-gc
+
+If your Go package wants to declare that it plays `unsafe` games that only
+work if the Go runtime's garbage collector is not a moving collector, then add:
+
+```go
+import _ "go4.org/unsafe/assume-no-moving-gc"
+```
+
+Then your program will explode if that's no longer the case. (Users can override
+the explosion with a scary sounding environment variable.)
+
+This also gives us a way to find all the really gross unsafe packages.
diff --git a/vendor/go4.org/unsafe/assume-no-moving-gc/assume-no-moving-gc.go b/vendor/go4.org/unsafe/assume-no-moving-gc/assume-no-moving-gc.go
new file mode 100644
index 0000000000..14a41e73ed
--- /dev/null
+++ b/vendor/go4.org/unsafe/assume-no-moving-gc/assume-no-moving-gc.go
@@ -0,0 +1,22 @@
+// Copyright 2020 Brad Fitzpatrick. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package go4.org/unsafe/assume-no-moving-gc exists so you can depend
+// on it from unsafe code that wants to declare that it assumes that
+// the Go runtime does not use a moving garbage collector.
+//
+// This package is then updated for new Go versions when that
+// is still the case and explodes at runtime with a failure
+// otherwise, unless an environment variable overrides it.
+//
+// To use:
+//
+//     import _ "go4.org/unsafe/assume-no-moving-gc"
+//
+// There is no API.
+//
+// The GitHub repo is at https://github.com/go4org/unsafe-assume-no-moving-gc
+package assume_no_moving_gc
+
+const env = "ASSUME_NO_MOVING_GC_UNSAFE_RISK_IT_WITH"
diff --git a/vendor/go4.org/unsafe/assume-no-moving-gc/untested.go b/vendor/go4.org/unsafe/assume-no-moving-gc/untested.go
new file mode 100644
index 0000000000..da4d943f6b
--- /dev/null
+++ b/vendor/go4.org/unsafe/assume-no-moving-gc/untested.go
@@ -0,0 +1,26 @@
+// Copyright 2020 Brad Fitzpatrick. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+// +build go1.20
+
+package assume_no_moving_gc
+
+import (
+	"os"
+	"runtime"
+	"strings"
+)
+
+func init() {
+	dots := strings.SplitN(runtime.Version(), ".", 3)
+	v := runtime.Version()
+	if len(dots) >= 2 {
+		v = dots[0] + "." + dots[1]
+	}
+	if os.Getenv(env) == v {
+		return
+	}
+	panic("Something in this program imports go4.org/unsafe/assume-no-moving-gc to declare that it assumes a non-moving garbage collector, but your version of go4.org/unsafe/assume-no-moving-gc hasn't been updated to assert that it's safe against the " + v + " runtime. If you want to risk it, run with environment variable " + env + "=" + v + " set. Notably, if " + v + " adds a moving garbage collector, this program is unsafe to use.")
+}
diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go
new file mode 100644
index 0000000000..6c8d97b6a5
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/key.go
@@ -0,0 +1,206 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+// Package registry provides access to the Windows registry.
+//
+// Here is a simple example, opening a registry key and reading a string value from it.
+//
+//	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer k.Close()
+//
+//	s, _, err := k.GetStringValue("SystemRoot")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("Windows system root is %q\n", s)
+package registry
+
+import (
+	"io"
+	"runtime"
+	"syscall"
+	"time"
+)
+
+const (
+	// Registry key security and access rights.
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx
+	// for details.
+	ALL_ACCESS         = 0xf003f
+	CREATE_LINK        = 0x00020
+	CREATE_SUB_KEY     = 0x00004
+	ENUMERATE_SUB_KEYS = 0x00008
+	EXECUTE            = 0x20019
+	NOTIFY             = 0x00010
+	QUERY_VALUE        = 0x00001
+	READ               = 0x20019
+	SET_VALUE          = 0x00002
+	WOW64_32KEY        = 0x00200
+	WOW64_64KEY        = 0x00100
+	WRITE              = 0x20006
+)
+
+// Key is a handle to an open Windows registry key.
+// Keys can be obtained by calling OpenKey; there are
+// also some predefined root keys such as CURRENT_USER.
+// Keys can be used directly in the Windows API.
+type Key syscall.Handle
+
+const (
+	// Windows defines some predefined root keys that are always open.
+	// An application can use these keys as entry points to the registry.
+	// Normally these keys are used in OpenKey to open new keys,
+	// but they can also be used anywhere a Key is required.
+	CLASSES_ROOT     = Key(syscall.HKEY_CLASSES_ROOT)
+	CURRENT_USER     = Key(syscall.HKEY_CURRENT_USER)
+	LOCAL_MACHINE    = Key(syscall.HKEY_LOCAL_MACHINE)
+	USERS            = Key(syscall.HKEY_USERS)
+	CURRENT_CONFIG   = Key(syscall.HKEY_CURRENT_CONFIG)
+	PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA)
+)
+
+// Close closes open key k.
+func (k Key) Close() error {
+	return syscall.RegCloseKey(syscall.Handle(k))
+}
+
+// OpenKey opens a new key with path name relative to key k.
+// It accepts any open key, including CURRENT_USER and others,
+// and returns the new key and an error.
+// The access parameter specifies desired access rights to the
+// key to be opened.
+func OpenKey(k Key, path string, access uint32) (Key, error) {
+	p, err := syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return 0, err
+	}
+	var subkey syscall.Handle
+	err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey)
+	if err != nil {
+		return 0, err
+	}
+	return Key(subkey), nil
+}
+
+// OpenRemoteKey opens a predefined registry key on another
+// computer pcname. The key to be opened is specified by k, but
+// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS.
+// If pcname is "", OpenRemoteKey returns local computer key.
+func OpenRemoteKey(pcname string, k Key) (Key, error) {
+	var err error
+	var p *uint16
+	if pcname != "" {
+		p, err = syscall.UTF16PtrFromString(`\\` + pcname)
+		if err != nil {
+			return 0, err
+		}
+	}
+	var remoteKey syscall.Handle
+	err = regConnectRegistry(p, syscall.Handle(k), &remoteKey)
+	if err != nil {
+		return 0, err
+	}
+	return Key(remoteKey), nil
+}
+
+// ReadSubKeyNames returns the names of subkeys of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
+func (k Key) ReadSubKeyNames(n int) ([]string, error) {
+	// RegEnumKeyEx must be called repeatedly and to completion.
+	// During this time, this goroutine cannot migrate away from
+	// its current thread. See https://golang.org/issue/49320 and
+	// https://golang.org/issue/49466.
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	names := make([]string, 0)
+	// Registry key size limit is 255 bytes and described there:
+	// https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx
+	buf := make([]uint16, 256) //plus extra room for terminating zero byte
+loopItems:
+	for i := uint32(0); ; i++ {
+		if n > 0 {
+			if len(names) == n {
+				return names, nil
+			}
+		}
+		l := uint32(len(buf))
+		for {
+			err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+			if err == nil {
+				break
+			}
+			if err == syscall.ERROR_MORE_DATA {
+				// Double buffer size and try again.
+				l = uint32(2 * len(buf))
+				buf = make([]uint16, l)
+				continue
+			}
+			if err == _ERROR_NO_MORE_ITEMS {
+				break loopItems
+			}
+			return names, err
+		}
+		names = append(names, syscall.UTF16ToString(buf[:l]))
+	}
+	if n > len(names) {
+		return names, io.EOF
+	}
+	return names, nil
+}
+
+// CreateKey creates a key named path under open key k.
+// CreateKey returns the new key and a boolean flag that reports
+// whether the key already existed.
+// The access parameter specifies the access rights for the key
+// to be created.
+func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) {
+	var h syscall.Handle
+	var d uint32
+	err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path),
+		0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d)
+	if err != nil {
+		return 0, false, err
+	}
+	return Key(h), d == _REG_OPENED_EXISTING_KEY, nil
+}
+
+// DeleteKey deletes the subkey path of key k and its values.
+func DeleteKey(k Key, path string) error {
+	return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path))
+}
+
+// A KeyInfo describes the statistics of a key. It is returned by Stat.
+type KeyInfo struct {
+	SubKeyCount     uint32
+	MaxSubKeyLen    uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte
+	ValueCount      uint32
+	MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte
+	MaxValueLen     uint32 // longest data component among the key's values, in bytes
+	lastWriteTime   syscall.Filetime
+}
+
+// ModTime returns the key's last write time.
+func (ki *KeyInfo) ModTime() time.Time {
+	return time.Unix(0, ki.lastWriteTime.Nanoseconds())
+}
+
+// Stat retrieves information about the open key k.
+func (k Key) Stat() (*KeyInfo, error) {
+	var ki KeyInfo
+	err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil,
+		&ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount,
+		&ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime)
+	if err != nil {
+		return nil, err
+	}
+	return &ki, nil
+}
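Beyond the OpenKey example in the package comment, the key API also supports enumeration and creation. A Windows-only sketch that lists a handful of subkeys under a well-known hive path (the path and count are illustrative):

```go
//go:build windows

package main

import (
	"fmt"
	"io"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	k, err := registry.OpenKey(registry.LOCAL_MACHINE,
		`SOFTWARE\Microsoft`, registry.ENUMERATE_SUB_KEYS|registry.QUERY_VALUE)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	// Read up to 10 subkey names, analogous to os.File.Readdirnames.
	names, err := k.ReadSubKeyNames(10)
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Println(names)
}
```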
diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
new file mode 100644
index 0000000000..ee74927d3c
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
@@ -0,0 +1,10 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build generate
+// +build generate
+
+package registry
+
+//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go
diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go
new file mode 100644
index 0000000000..4173351230
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/syscall.go
@@ -0,0 +1,33 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+package registry
+
+import "syscall"
+
+const (
+	_REG_OPTION_NON_VOLATILE = 0
+
+	_REG_CREATED_NEW_KEY     = 1
+	_REG_OPENED_EXISTING_KEY = 2
+
+	_ERROR_NO_MORE_ITEMS syscall.Errno = 259
+)
+
+func LoadRegLoadMUIString() error {
+	return procRegLoadMUIStringW.Find()
+}
+
+//sys	regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
+//sys	regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
+//sys	regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
+//sys	regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW
+//sys	regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW
+//sys   regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW
+//sys	regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW
+
+//sys	expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go
new file mode 100644
index 0000000000..2789f6f18d
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/value.go
@@ -0,0 +1,387 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+package registry
+
+import (
+	"errors"
+	"io"
+	"syscall"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	// Registry value types.
+	NONE                       = 0
+	SZ                         = 1
+	EXPAND_SZ                  = 2
+	BINARY                     = 3
+	DWORD                      = 4
+	DWORD_BIG_ENDIAN           = 5
+	LINK                       = 6
+	MULTI_SZ                   = 7
+	RESOURCE_LIST              = 8
+	FULL_RESOURCE_DESCRIPTOR   = 9
+	RESOURCE_REQUIREMENTS_LIST = 10
+	QWORD                      = 11
+)
+
+var (
+	// ErrShortBuffer is returned when the buffer was too short for the operation.
+	ErrShortBuffer = syscall.ERROR_MORE_DATA
+
+	// ErrNotExist is returned when a registry key or value does not exist.
+	ErrNotExist = syscall.ERROR_FILE_NOT_FOUND
+
+	// ErrUnexpectedType is returned by Get*Value when the value's type was unexpected.
+	ErrUnexpectedType = errors.New("unexpected key value type")
+)
+
+// GetValue retrieves the type and data for the specified value associated
+// with an open key k. It fills up buffer buf and returns the retrieved
+// byte count n. If buf is too small to fit the stored value it returns
+// ErrShortBuffer error along with the required buffer size n.
+// If no buffer is provided, it returns true and actual buffer size n.
+// If no buffer is provided, GetValue returns the value's type only.
+// If the value does not exist, the error returned is ErrNotExist.
+//
+// GetValue is a low level function. If value's type is known, use the appropriate
+// Get*Value function instead.
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) {
+	pname, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return 0, 0, err
+	}
+	var pbuf *byte
+	if len(buf) > 0 {
+		pbuf = (*byte)(unsafe.Pointer(&buf[0]))
+	}
+	l := uint32(len(buf))
+	err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l)
+	if err != nil {
+		return int(l), valtype, err
+	}
+	return int(l), valtype, nil
+}
+
+func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
+	p, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return nil, 0, err
+	}
+	var t uint32
+	n := uint32(len(buf))
+	for {
+		err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n)
+		if err == nil {
+			return buf[:n], t, nil
+		}
+		if err != syscall.ERROR_MORE_DATA {
+			return nil, 0, err
+		}
+		if n <= uint32(len(buf)) {
+			return nil, 0, err
+		}
+		buf = make([]byte, n)
+	}
+}
+
+// GetStringValue retrieves the string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringValue returns ErrNotExist.
+// If value is not SZ or EXPAND_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 64))
+	if err2 != nil {
+		return "", typ, err2
+	}
+	switch typ {
+	case SZ, EXPAND_SZ:
+	default:
+		return "", typ, ErrUnexpectedType
+	}
+	if len(data) == 0 {
+		return "", typ, nil
+	}
+	u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+	return syscall.UTF16ToString(u), typ, nil
+}
+
+// GetMUIStringValue retrieves the localized string value for
+// the specified value name associated with an open key k.
+// If the value name doesn't exist or the localized string value
+// can't be resolved, GetMUIStringValue returns ErrNotExist.
+// GetMUIStringValue panics if the system doesn't support
+// regLoadMUIString; use LoadRegLoadMUIString to check if
+// regLoadMUIString is supported before calling this function.
+func (k Key) GetMUIStringValue(name string) (string, error) {
+	pname, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return "", err
+	}
+
+	buf := make([]uint16, 1024)
+	var buflen uint32
+	var pdir *uint16
+
+	err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path
+
+		// Try to resolve the string value using the system directory as
+		// a DLL search path; this assumes the string value is of the form
+		// @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320.
+
+		// This approach works with tzres.dll but may have to be revised
+		// in the future to allow callers to provide custom search paths.
+
+		var s string
+		s, err = ExpandString("%SystemRoot%\\system32\\")
+		if err != nil {
+			return "", err
+		}
+		pdir, err = syscall.UTF16PtrFromString(s)
+		if err != nil {
+			return "", err
+		}
+
+		err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	}
+
+	for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed
+		if buflen <= uint32(len(buf)) {
+			break // Buffer not growing, assume race; break
+		}
+		buf = make([]uint16, buflen)
+		err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	}
+
+	if err != nil {
+		return "", err
+	}
+
+	return syscall.UTF16ToString(buf), nil
+}
+
+// ExpandString expands environment-variable strings and replaces
+// them with the values defined for the current user.
+// Use ExpandString to expand EXPAND_SZ strings.
+func ExpandString(value string) (string, error) {
+	if value == "" {
+		return "", nil
+	}
+	p, err := syscall.UTF16PtrFromString(value)
+	if err != nil {
+		return "", err
+	}
+	r := make([]uint16, 100)
+	for {
+		n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r)))
+		if err != nil {
+			return "", err
+		}
+		if n <= uint32(len(r)) {
+			return syscall.UTF16ToString(r[:n]), nil
+		}
+		r = make([]uint16, n)
+	}
+}
+
+// GetStringsValue retrieves the []string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringsValue returns ErrNotExist.
+// If value is not MULTI_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 64))
+	if err2 != nil {
+		return nil, typ, err2
+	}
+	if typ != MULTI_SZ {
+		return nil, typ, ErrUnexpectedType
+	}
+	if len(data) == 0 {
+		return nil, typ, nil
+	}
+	p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+	if len(p) == 0 {
+		return nil, typ, nil
+	}
+	if p[len(p)-1] == 0 {
+		p = p[:len(p)-1] // remove terminating null
+	}
+	val = make([]string, 0, 5)
+	from := 0
+	for i, c := range p {
+		if c == 0 {
+			val = append(val, string(utf16.Decode(p[from:i])))
+			from = i + 1
+		}
+	}
+	return val, typ, nil
+}
+
+// GetIntegerValue retrieves the integer value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetIntegerValue returns ErrNotExist.
+// If value is not DWORD or QWORD, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 8))
+	if err2 != nil {
+		return 0, typ, err2
+	}
+	switch typ {
+	case DWORD:
+		if len(data) != 4 {
+			return 0, typ, errors.New("DWORD value is not 4 bytes long")
+		}
+		var val32 uint32
+		copy((*[4]byte)(unsafe.Pointer(&val32))[:], data)
+		return uint64(val32), DWORD, nil
+	case QWORD:
+		if len(data) != 8 {
+			return 0, typ, errors.New("QWORD value is not 8 bytes long")
+		}
+		copy((*[8]byte)(unsafe.Pointer(&val))[:], data)
+		return val, QWORD, nil
+	default:
+		return 0, typ, ErrUnexpectedType
+	}
+}
+
+// GetBinaryValue retrieves the binary value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetBinaryValue returns ErrNotExist.
+// If value is not BINARY, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 64))
+	if err2 != nil {
+		return nil, typ, err2
+	}
+	if typ != BINARY {
+		return nil, typ, ErrUnexpectedType
+	}
+	return data, typ, nil
+}
+
+func (k Key) setValue(name string, valtype uint32, data []byte) error {
+	p, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return err
+	}
+	if len(data) == 0 {
+		return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0)
+	}
+	return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data)))
+}
+
+// SetDWordValue sets the data and type of a name value
+// under key k to value and DWORD.
+func (k Key) SetDWordValue(name string, value uint32) error {
+	return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:])
+}
+
+// SetQWordValue sets the data and type of a name value
+// under key k to value and QWORD.
+func (k Key) SetQWordValue(name string, value uint64) error {
+	return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:])
+}
+
+func (k Key) setStringValue(name string, valtype uint32, value string) error {
+	v, err := syscall.UTF16FromString(value)
+	if err != nil {
+		return err
+	}
+	buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+	return k.setValue(name, valtype, buf)
+}
+
+// SetStringValue sets the data and type of a name value
+// under key k to value and SZ. The value must not contain a zero byte.
+func (k Key) SetStringValue(name, value string) error {
+	return k.setStringValue(name, SZ, value)
+}
+
+// SetExpandStringValue sets the data and type of a name value
+// under key k to value and EXPAND_SZ. The value must not contain a zero byte.
+func (k Key) SetExpandStringValue(name, value string) error {
+	return k.setStringValue(name, EXPAND_SZ, value)
+}
+
+// SetStringsValue sets the data and type of a name value
+// under key k to value and MULTI_SZ. The value strings
+// must not contain a zero byte.
+func (k Key) SetStringsValue(name string, value []string) error {
+	ss := ""
+	for _, s := range value {
+		for i := 0; i < len(s); i++ {
+			if s[i] == 0 {
+				return errors.New("string cannot have 0 inside")
+			}
+		}
+		ss += s + "\x00"
+	}
+	v := utf16.Encode([]rune(ss + "\x00"))
+	buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+	return k.setValue(name, MULTI_SZ, buf)
+}
+
+// SetBinaryValue sets the data and type of a name value
+// under key k to value and BINARY.
+func (k Key) SetBinaryValue(name string, value []byte) error {
+	return k.setValue(name, BINARY, value)
+}
+
+// DeleteValue removes a named value from the key k.
+func (k Key) DeleteValue(name string) error {
+	return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name))
+}
+
+// ReadValueNames returns the value names of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
+func (k Key) ReadValueNames(n int) ([]string, error) {
+	ki, err := k.Stat()
+	if err != nil {
+		return nil, err
+	}
+	names := make([]string, 0, ki.ValueCount)
+	buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character
+loopItems:
+	for i := uint32(0); ; i++ {
+		if n > 0 {
+			if len(names) == n {
+				return names, nil
+			}
+		}
+		l := uint32(len(buf))
+		for {
+			err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+			if err == nil {
+				break
+			}
+			if err == syscall.ERROR_MORE_DATA {
+				// Double buffer size and try again.
+				l = uint32(2 * len(buf))
+				buf = make([]uint16, l)
+				continue
+			}
+			if err == _ERROR_NO_MORE_ITEMS {
+				break loopItems
+			}
+			return names, err
+		}
+		names = append(names, syscall.UTF16ToString(buf[:l]))
+	}
+	if n > len(names) {
+		return names, io.EOF
+	}
+	return names, nil
+}
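For orientation, the read helpers added above are normally reached through a Key opened from one of the package's predefined root keys. A minimal, hedged usage sketch — assuming the standard golang.org/x/sys/windows/registry API (OpenKey, CURRENT_USER, QUERY_VALUE) vendored earlier in this change and a Windows build target — could look like:

//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// Open an existing key read-only under HKEY_CURRENT_USER.
	k, err := registry.OpenKey(registry.CURRENT_USER, `Environment`, registry.QUERY_VALUE)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	// GetStringValue returns ErrNotExist for missing values and
	// ErrUnexpectedType when the value is not SZ/EXPAND_SZ.
	tmp, _, err := k.GetStringValue("TEMP")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("TEMP =", tmp)
}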
diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
new file mode 100644
index 0000000000..fc1835d8a2
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
@@ -0,0 +1,117 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package registry
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+	errERROR_EINVAL     error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return errERROR_EINVAL
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+	// all.bat?)
+	return e
+}
+
+var (
+	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+	procRegConnectRegistryW       = modadvapi32.NewProc("RegConnectRegistryW")
+	procRegCreateKeyExW           = modadvapi32.NewProc("RegCreateKeyExW")
+	procRegDeleteKeyW             = modadvapi32.NewProc("RegDeleteKeyW")
+	procRegDeleteValueW           = modadvapi32.NewProc("RegDeleteValueW")
+	procRegEnumValueW             = modadvapi32.NewProc("RegEnumValueW")
+	procRegLoadMUIStringW         = modadvapi32.NewProc("RegLoadMUIStringW")
+	procRegSetValueExW            = modadvapi32.NewProc("RegSetValueExW")
+	procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW")
+)
+
+func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go
index cecbb9ba11..829383f55b 100644
--- a/vendor/google.golang.org/api/internal/cba.go
+++ b/vendor/google.golang.org/api/internal/cba.go
@@ -91,16 +91,10 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) {
 		s2aMTLSEndpoint:  "",
 	}
 
-	// Check the env to determine whether to use S2A.
-	if !isGoogleS2AEnabled() {
+	if !shouldUseS2A(clientCertSource, settings) {
 		return &defaultTransportConfig, nil
 	}
 
-	// If client cert is found, use that over S2A.
-	// If MTLS is not enabled for the endpoint, skip S2A.
-	if clientCertSource != nil || !mtlsEndpointEnabledForS2A() {
-		return &defaultTransportConfig, nil
-	}
 	s2aMTLSEndpoint := settings.DefaultMTLSEndpoint
 	// If there is endpoint override, honor it.
 	if settings.Endpoint != "" {
@@ -118,10 +112,6 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) {
 	}, nil
 }
 
-func isGoogleS2AEnabled() bool {
-	return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true"
-}
-
 // getClientCertificateSource returns a default client certificate source, if
 // not provided by the user.
 //
@@ -275,8 +265,36 @@ func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, fun
 	return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil
 }
 
+func shouldUseS2A(clientCertSource cert.Source, settings *DialSettings) bool {
+	// If client cert is found, use that over S2A.
+	if clientCertSource != nil {
+		return false
+	}
+	// If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A.
+	if !isGoogleS2AEnabled() {
+		return false
+	}
+	// If DefaultMTLSEndpoint is not set and no endpoint override, skip S2A.
+	if settings.DefaultMTLSEndpoint == "" && settings.Endpoint == "" {
+		return false
+	}
+	// If MTLS is not enabled for this endpoint, skip S2A.
+	if !mtlsEndpointEnabledForS2A() {
+		return false
+	}
+	// If custom HTTP client is provided, skip S2A.
+	if settings.HTTPClient != nil {
+		return false
+	}
+	return true
+}
+
 // mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection.
 var mtlsEndpointEnabledForS2A = func() bool {
 	// TODO(xmenxk): determine this via discovery config.
 	return true
 }
+
+func isGoogleS2AEnabled() bool {
+	return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true"
+}
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
index 6fdda3b791..06fd417033 100644
--- a/vendor/google.golang.org/api/internal/version.go
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
 package internal
 
 // Version is the current tagged release of the library.
-const Version = "0.134.0"
+const Version = "0.138.0"
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE b/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE
new file mode 100644
index 0000000000..f760d366c6
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE
@@ -0,0 +1,234 @@
+## License
+
+This work is dual-licensed under Apache 2.0 or BSD3.
+You may select, at your option, one of the above-listed licenses.
+
+`SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause`
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016 Datadog, Inc.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+---
+
+Copyright (c) 2016-Present, Datadog <info@datadoghq.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of Datadog nor the
+      names of its contributors may be used to endorse or promote products
+      derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-3rdparty.csv b/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-3rdparty.csv
new file mode 100644
index 0000000000..1b6a22fa8b
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-3rdparty.csv
@@ -0,0 +1,4 @@
+Component,Origin,License,Copyright
+import,io.opentracing,Apache-2.0,Copyright 2016-2017 The OpenTracing Authors
+appsec,https://github.com/DataDog/libddwaf,Apache-2.0 OR BSD-3-Clause,Copyright (c) 2021 Datadog <info@datadoghq.com>
+golang,https://go.googlesource.com/go,BSD-3-Clause,Copyright (c) 2009 The Go Authors
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-APACHE b/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-APACHE
new file mode 100644
index 0000000000..bff56b5431
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-APACHE
@@ -0,0 +1,200 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016 Datadog, Inc.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-BSD3 b/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-BSD3
new file mode 100644
index 0000000000..9237320990
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-BSD3
@@ -0,0 +1,24 @@
+Copyright (c) 2016-Present, Datadog <info@datadoghq.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of Datadog nor the
+      names of its contributors may be used to endorse or promote products
+      derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/NOTICE b/vendor/gopkg.in/DataDog/dd-trace-go.v1/NOTICE
new file mode 100644
index 0000000000..a53b8aded6
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/NOTICE
@@ -0,0 +1,4 @@
+Datadog dd-trace-go
+Copyright 2016-Present Datadog, Inc.
+
+This product includes software developed at Datadog, Inc. (https://www.datadoghq.com/).
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ddtrace.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ddtrace.go
new file mode 100644
index 0000000000..c4d106458c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ddtrace.go
@@ -0,0 +1,158 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package ddtrace contains the interfaces that specify the implementations of Datadog's
+// tracing library, as well as a set of sub-packages containing various implementations:
+// our native implementation ("tracer"), a wrapper that can be used with Opentracing
+// ("opentracer") and a mock tracer to be used for testing ("mocktracer"). Additionally,
+// package "ext" provides a set of tag names and values specific to Datadog's APM product.
+//
+// To get started, visit the documentation for any of the packages you'd like to begin
+// with by accessing the subdirectories of this package: https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace#pkg-subdirectories.
+package ddtrace // import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+
+import (
+	"context"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// SpanContextW3C represents a SpanContext with an additional method to allow
+// access of the 128-bit trace id of the span, if present.
+type SpanContextW3C interface {
+	SpanContext
+
+	// TraceID128 returns the hex-encoded 128-bit trace ID that this context is carrying.
+	// The string will be exactly 32 bytes and may include leading zeroes.
+	TraceID128() string
+
+	// TraceID128Bytes returns the raw bytes of the 128-bit trace ID that this context is carrying.
+	TraceID128Bytes() [16]byte
+}
+
+// Tracer specifies an implementation of the Datadog tracer which allows starting
+// and propagating spans. The official implementation is exposed as functions
+// within the "tracer" package.
+type Tracer interface {
+	// StartSpan starts a span with the given operation name and options.
+	StartSpan(operationName string, opts ...StartSpanOption) Span
+
+	// Extract extracts a span context from a given carrier. Note that baggage item
+	// keys will always be lower-cased to maintain consistency. It is impossible to
+	// maintain the original casing due to MIME header canonicalization standards.
+	Extract(carrier interface{}) (SpanContext, error)
+
+	// Inject injects a span context into the given carrier.
+	Inject(context SpanContext, carrier interface{}) error
+
+	// Stop stops the tracer. Calls to Stop should be idempotent.
+	Stop()
+}
+
+// Span represents a chunk of computation time. Spans have names, durations,
+// timestamps and other metadata. A Tracer is used to create hierarchies of
+// spans in a request, buffer and submit them to the server.
+type Span interface {
+	// SetTag sets a key/value pair as metadata on the span.
+	SetTag(key string, value interface{})
+
+	// SetOperationName sets the operation name for this span. An operation name should be
+	// a representative name for a group of spans (e.g. "grpc.server" or "http.request").
+	SetOperationName(operationName string)
+
+	// BaggageItem returns the baggage item held by the given key.
+	BaggageItem(key string) string
+
+	// SetBaggageItem sets a new baggage item at the given key. The baggage
+	// item should propagate to all descendant spans, both in- and cross-process.
+	SetBaggageItem(key, val string)
+
+	// Finish finishes the current span with the given options. Finish calls should be idempotent.
+	Finish(opts ...FinishOption)
+
+	// Context returns the SpanContext of this Span.
+	Context() SpanContext
+}
+
+// SpanContext represents a span state that can propagate to descendant spans
+// and across process boundaries. It contains all the information needed to
+// spawn a direct descendant of the span that it belongs to. It can be used
+// to create distributed tracing by propagating it using the provided interfaces.
+type SpanContext interface {
+	// SpanID returns the span ID that this context is carrying.
+	SpanID() uint64
+
+	// TraceID returns the trace ID that this context is carrying.
+	TraceID() uint64
+
+	// ForeachBaggageItem provides an iterator over the key/value pairs set as
+	// baggage within this context. Iteration stops when the handler returns
+	// false.
+	ForeachBaggageItem(handler func(k, v string) bool)
+}
+
+// StartSpanOption is a configuration option that can be used with a Tracer's StartSpan method.
+type StartSpanOption func(cfg *StartSpanConfig)
+
+// FinishOption is a configuration option that can be used with a Span's Finish method.
+type FinishOption func(cfg *FinishConfig)
+
+// FinishConfig holds the configuration for finishing a span. It is usually passed around by
+// reference to one or more FinishOption functions which shape it into its final form.
+type FinishConfig struct {
+	// FinishTime represents the time that should be set as finishing time for the
+	// span. Implementations should use the current time when FinishTime.IsZero().
+	FinishTime time.Time
+
+	// Error holds an optional error that should be set on the span before
+	// finishing.
+	Error error
+
+	// NoDebugStack will prevent any set errors from generating an attached stack trace tag.
+	NoDebugStack bool
+
+	// StackFrames specifies the number of stack frames to be attached in spans that finish with errors.
+	StackFrames uint
+
+	// SkipStackFrames specifies the offset at which to start reporting stack frames from the stack.
+	SkipStackFrames uint
+}
+
+// StartSpanConfig holds the configuration for starting a new span. It is usually passed
+// around by reference to one or more StartSpanOption functions which shape it into its
+// final form.
+type StartSpanConfig struct {
+	// Parent holds the SpanContext that should be used as a parent for the
+	// new span. If nil, implementations should return a root span.
+	Parent SpanContext
+
+	// StartTime holds the time that should be used as the start time of the span.
+	// Implementations should use the current time when StartTime.IsZero().
+	StartTime time.Time
+
+	// Tags holds a set of key/value pairs that should be set as metadata on the
+	// new span.
+	Tags map[string]interface{}
+
+	// SpanID will be the SpanID of the Span, overriding the random number that would
+	// be generated. If no Parent SpanContext is present, then this will also set the
+	// TraceID to the same value.
+	SpanID uint64
+
+	// Context is the parent context where the span should be stored.
+	Context context.Context
+}
+
+// Logger implementations are able to log given messages that the tracer or profiler might output.
+type Logger interface {
+	// Log prints the given message.
+	Log(msg string)
+}
+
+// UseLogger sets l as the logger for all tracer and profiler logs.
+func UseLogger(l Logger) {
+	log.UseLogger(l)
+}
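UseLogger above is the hook the rest of the tracer uses for diagnostic output. A small, hedged sketch of wiring it to the standard library logger — assuming only the Logger interface and UseLogger shown in this file — might be:

package main

import (
	"log"

	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
)

// stdLogger adapts the standard library logger to the ddtrace.Logger interface.
type stdLogger struct{}

func (stdLogger) Log(msg string) { log.Println("dd-trace:", msg) }

func main() {
	// All tracer and profiler diagnostics now flow through stdLogger.
	ddtrace.UseLogger(stdLogger{})
}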
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/app_types.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/app_types.go
new file mode 100644
index 0000000000..eb6ded8f60
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/app_types.go
@@ -0,0 +1,81 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package ext // import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+
+// App types determine how to categorize a trace in the Datadog application.
+// For more fine-grained behaviour, use the SpanType* constants.
+const (
+	// DEPRECATED: Use SpanTypeWeb
+	// AppTypeWeb specifies the Web span type and can be used as a tag value
+	// for a span's SpanType tag.
+	AppTypeWeb = "web"
+
+	// AppTypeDB specifies the DB span type and can be used as a tag value
+	// for a span's SpanType tag. If possible, use one of the SpanType*
+	// constants for a more accurate indication.
+	AppTypeDB = "db"
+
+	// AppTypeCache specifies the Cache span type and can be used as a tag value
+	// for a span's SpanType tag. If possible, consider using SpanTypeRedis or
+	// SpanTypeMemcached.
+	AppTypeCache = "cache"
+
+	// AppTypeRPC specifies the RPC span type and can be used as a tag value
+	// for a span's SpanType tag.
+	AppTypeRPC = "rpc"
+)
+
+// Span types have similar behaviour to "app types" and help categorize
+// traces in the Datadog application. They can also help fine grain agent
+// level behaviours such as obfuscation and quantization, when these are
+// enabled in the agent's configuration.
+const (
+	// SpanTypeWeb marks a span as an HTTP server request.
+	SpanTypeWeb = "web"
+
+	// SpanTypeHTTP marks a span as an HTTP client request.
+	SpanTypeHTTP = "http"
+
+	// SpanTypeSQL marks a span as an SQL operation. These spans may
+	// have an "sql.command" tag.
+	SpanTypeSQL = "sql"
+
+	// SpanTypeCassandra marks a span as a Cassandra operation. These
+	// spans may have an "sql.command" tag.
+	SpanTypeCassandra = "cassandra"
+
+	// SpanTypeRedis marks a span as a Redis operation. These spans may
+	// also have a "redis.raw_command" tag.
+	SpanTypeRedis = "redis"
+
+	// SpanTypeMemcached marks a span as a memcached operation.
+	SpanTypeMemcached = "memcached"
+
+	// SpanTypeMongoDB marks a span as a MongoDB operation.
+	SpanTypeMongoDB = "mongodb"
+
+	// SpanTypeElasticSearch marks a span as an ElasticSearch operation.
+	// These spans may also have an "elasticsearch.body" tag.
+	SpanTypeElasticSearch = "elasticsearch"
+
+	// SpanTypeLevelDB marks a span as a leveldb operation
+	SpanTypeLevelDB = "leveldb"
+
+	// SpanTypeDNS marks a span as a DNS operation.
+	SpanTypeDNS = "dns"
+
+	// SpanTypeMessageConsumer marks a span as a queue operation
+	SpanTypeMessageConsumer = "queue"
+
+	// SpanTypeMessageProducer marks a span as a queue operation.
+	SpanTypeMessageProducer = "queue"
+
+	// SpanTypeConsul marks a span as a Consul operation.
+	SpanTypeConsul = "consul"
+
+	// SpanTypeGraphQL marks a span as a GraphQL operation.
+	SpanTypeGraphQL = "graphql"
+)
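These span-type constants are typically attached when a span is started. A hedged sketch — assuming the tracer subpackage (vendored elsewhere in this change) exposes the usual Start/StartSpan/SpanType helpers — could read:

package main

import (
	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)

func main() {
	tracer.Start()
	defer tracer.Stop()

	// Categorize the span as an HTTP server request in the Datadog UI.
	span := tracer.StartSpan("http.request", tracer.SpanType(ext.SpanTypeWeb))
	defer span.Finish()
}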
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/db.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/db.go
new file mode 100644
index 0000000000..8074342d17
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/db.go
@@ -0,0 +1,87 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package ext
+
+const (
+	// DBApplication indicates the application using the database.
+	DBApplication = "db.application"
+	// DBName indicates the database name.
+	DBName = "db.name"
+	// DBType indicates the type of Database.
+	DBType = "db.type"
+	// DBInstance indicates the instance name of Database.
+	DBInstance = "db.instance"
+	// DBUser indicates the user name of Database, e.g. "readonly_user" or "reporting_user".
+	DBUser = "db.user"
+	// DBStatement records a database statement for the given database type.
+	DBStatement = "db.statement"
+	// DBSystem indicates the database management system (DBMS) product being used.
+	DBSystem = "db.system"
+)
+
+// Available values for db.system.
+const (
+	DBSystemMemcached          = "memcached"
+	DBSystemMySQL              = "mysql"
+	DBSystemPostgreSQL         = "postgresql"
+	DBSystemMicrosoftSQLServer = "mssql"
+	// DBSystemOtherSQL is used for other SQL databases not listed above.
+	DBSystemOtherSQL      = "other_sql"
+	DBSystemElasticsearch = "elasticsearch"
+	DBSystemRedis         = "redis"
+	DBSystemMongoDB       = "mongodb"
+	DBSystemCassandra     = "cassandra"
+	DBSystemConsulKV      = "consul"
+	DBSystemLevelDB       = "leveldb"
+	DBSystemBuntDB        = "buntdb"
+)
+
+// MicrosoftSQLServer tags.
+const (
+	// MicrosoftSQLServerInstanceName indicates the Microsoft SQL Server instance name connecting to.
+	MicrosoftSQLServerInstanceName = "db.mssql.instance_name"
+)
+
+// MongoDB tags.
+const (
+	// MongoDBCollection indicates the collection being accessed.
+	MongoDBCollection = "db.mongodb.collection"
+)
+
+// Redis tags.
+const (
+	// RedisDatabaseIndex indicates the Redis database index connected to.
+	RedisDatabaseIndex = "db.redis.database_index"
+)
+
+// Cassandra tags.
+const (
+	// CassandraQuery is the tag name used for cassandra queries.
+	// Deprecated: this value is no longer used internally and will be removed in future versions.
+	CassandraQuery = "cassandra.query"
+
+	// CassandraBatch is the tag name used for cassandra batches.
+	// Deprecated: this value is no longer used internally and will be removed in future versions.
+	CassandraBatch = "cassandra.batch"
+
+	// CassandraConsistencyLevel is the tag name to set for consistency level.
+	CassandraConsistencyLevel = "cassandra.consistency_level"
+
+	// CassandraCluster specifies the tag name that is used to set the cluster.
+	CassandraCluster = "cassandra.cluster"
+
+	// CassandraRowCount specifies the tag name to use when setting the row count.
+	CassandraRowCount = "cassandra.row_count"
+
+	// CassandraKeyspace is used as tag name for setting the key space.
+	CassandraKeyspace = "cassandra.keyspace"
+
+	// CassandraPaginated specifies the tag name for paginated queries.
+	CassandraPaginated = "cassandra.paginated"
+
+	// CassandraContactPoints holds the list of cassandra initial seed nodes used to discover the cluster.
+	CassandraContactPoints = "db.cassandra.contact.points"
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/messaging.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/messaging.go
new file mode 100644
index 0000000000..553cfaa696
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/messaging.go
@@ -0,0 +1,25 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package ext
+
+const (
+	// MessagingSystem identifies which messaging system created this span (kafka, rabbitmq, amazonsqs, googlepubsub...)
+	MessagingSystem = "messaging.system"
+)
+
+// Available values for messaging.system.
+const (
+	MessagingSystemGCPPubsub = "googlepubsub"
+	MessagingSystemKafka     = "kafka"
+)
+
+// Kafka tags.
+const (
+	// MessagingKafkaPartition defines the Kafka partition the trace is associated with.
+	MessagingKafkaPartition = "messaging.kafka.partition"
+	// KafkaBootstrapServers holds a comma separated list of bootstrap servers as defined in producer or consumer config.
+	KafkaBootstrapServers = "messaging.kafka.bootstrap.servers"
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/peer.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/peer.go
new file mode 100644
index 0000000000..f9909cb8b2
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/peer.go
@@ -0,0 +1,20 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package ext
+
+const (
+	// PeerHostIPV4 records IPv4 host address of the peer.
+	PeerHostIPV4 = "peer.ipv4"
+	// PeerHostIPV6 records the IPv6 host address of the peer.
+	PeerHostIPV6 = "peer.ipv6"
+	// PeerService records the service name of the peer service.
+	PeerService = "peer.service"
+	// PeerHostname records the host name of the peer.
+	// Deprecated: Use NetworkDestinationName instead for hostname and NetworkDestinationIP for IP addresses
+	PeerHostname = "peer.hostname"
+	// PeerPort records the port number of the peer.
+	PeerPort = "peer.port"
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/priority.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/priority.go
new file mode 100644
index 0000000000..8a5c0fc405
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/priority.go
@@ -0,0 +1,27 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package ext
+
+// Priority is a hint given to the backend so that it knows which traces to reject or keep.
+// In a distributed context, it should be set before any context propagation (fork, RPC calls) to be effective.
+
+const (
+	// PriorityUserReject informs the backend that a trace should be rejected and not stored.
+	// This should be used by user code or configuration overriding default priority
+	PriorityUserReject = -1
+
+	// PriorityAutoReject informs the backend that a trace should be rejected and not stored.
+	// This is used by the builtin sampler.
+	PriorityAutoReject = 0
+
+	// PriorityAutoKeep informs the backend that a trace should be kept and stored.
+	// This is used by the builtin sampler.
+	PriorityAutoKeep = 1
+
+	// PriorityUserKeep informs the backend that a trace should be kept and stored.
+	// This should be used by user code or configuration overriding default priority
+	PriorityUserKeep = 2
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/rpc.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/rpc.go
new file mode 100644
index 0000000000..e7c4308229
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/rpc.go
@@ -0,0 +1,34 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package ext
+
+const (
+	// RPCSystem identifies the RPC remoting system.
+	RPCSystem = "rpc.system"
+	// RPCService represents the full (logical) name of the service being called, including its package name,
+	// if applicable. Note this is the logical name of the service from the RPC interface perspective,
+	// which can be different from the name of any implementing class.
+	RPCService = "rpc.service"
+	// RPCMethod represents the name of the (logical) method being called. Note this is the logical name of the
+	// method from the RPC interface perspective, which can be different from the name of
+	// any implementing method/function.
+	RPCMethod = "rpc.method"
+)
+
+// Well-known identifiers for rpc.system.
+const (
+	// RPCSystemGRPC identifies gRPC.
+	RPCSystemGRPC = "grpc"
+	// RPCSystemTwirp identifies Twirp.
+	RPCSystemTwirp = "twirp"
+)
+
+// gRPC specific tags.
+const (
+	// GRPCFullMethod represents the full name of the logical method being called following the
+	// format: /$package.$service/$method
+	GRPCFullMethod = "rpc.grpc.full_method"
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/span_kind.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/span_kind.go
new file mode 100644
index 0000000000..71a3ce50b8
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/span_kind.go
@@ -0,0 +1,32 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package ext
+
+// span.kind values are set per span following the OpenTelemetry standard and
+// fall under one of the values client, server, producer, consumer, or internal.
+const (
+
+	// SpanKindServer indicates that the span covers server-side handling of a synchronous RPC or other remote request
+	// This span should not have any local parents but can have other distributed parents
+	SpanKindServer = "server"
+
+	// SpanKindClient indicates that the span describes a request to some remote service.
+	// This span should not have any local children but can have other distributed children
+	SpanKindClient = "client"
+
+	// SpanKindConsumer indicates that the span describes a child of an asynchronous producer request.
+	// This span should not have any local parents but can have other distributed parents
+	SpanKindConsumer = "consumer"
+
+	// SpanKindProducer indicates that the span describes the initiator of an asynchronous request.
+	// This span should not have any local children but can have other distributed children
+	SpanKindProducer = "producer"
+
+	// SpanKindInternal indicates that the span represents an internal operation within an application,
+	// as opposed to operations with remote parents or children.
+	// This is the default value and is not explicitly set, to save memory.
+	SpanKindInternal = "internal"
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/system.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/system.go
new file mode 100644
index 0000000000..163720a4f5
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/system.go
@@ -0,0 +1,12 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package ext
+
+// Standard system metadata names
+const (
+	// The pid of the traced process
+	Pid = "process_id"
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/tags.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/tags.go
new file mode 100644
index 0000000000..375d7df7b5
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/tags.go
@@ -0,0 +1,120 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package ext contains a set of Datadog-specific constants. Most of them are used
+// for setting span metadata.
+package ext
+
+const (
+	// TargetHost sets the target host address.
+	// Deprecated: Use NetworkDestinationName instead for hostname and NetworkDestinationIP for IP addresses
+	TargetHost = "out.host"
+
+	// NetworkDestinationName is the remote hostname or similar where the outbound connection is being made to.
+	NetworkDestinationName = "network.destination.name"
+
+	// NetworkDestinationIP is the remote address where the outbound connection is being made to.
+	NetworkDestinationIP = "network.destination.ip"
+
+	// TargetPort sets the target host port.
+	// Deprecated: Use NetworkDestinationPort instead.
+	TargetPort = "out.port"
+
+	// NetworkDestinationPort is the remote port number of the outbound connection.
+	NetworkDestinationPort = "network.destination.port"
+
+	// SamplingPriority is the tag that marks the sampling priority of a span.
+	// Deprecated in favor of ManualKeep and ManualDrop.
+	SamplingPriority = "sampling.priority"
+
+	// SQLType sets the sql type tag.
+	SQLType = "sql"
+
+	// SQLQuery sets the sql query tag on a span.
+	SQLQuery = "sql.query"
+
+	// HTTPMethod specifies the HTTP method used in a span.
+	HTTPMethod = "http.method"
+
+	// HTTPCode sets the HTTP status code as a tag.
+	HTTPCode = "http.status_code"
+
+	// HTTPRoute is the route value of the HTTP request.
+	HTTPRoute = "http.route"
+
+	// HTTPURL sets the HTTP URL for a span.
+	HTTPURL = "http.url"
+
+	// HTTPUserAgent is the user agent header value of the HTTP request.
+	HTTPUserAgent = "http.useragent"
+
+	// HTTPClientIP sets the HTTP client IP tag.
+	HTTPClientIP = "http.client_ip"
+
+	// HTTPRequestHeaders sets the HTTP request headers partial tag
+	// This tag is meant to be composed, e.g. http.request.headers.headerX, http.request.headers.headerY, etc.
+	// See https://docs.datadoghq.com/tracing/trace_collection/tracing_naming_convention/#http-requests
+	HTTPRequestHeaders = "http.request.headers"
+
+	// SpanName is a pseudo-key for setting a span's operation name by means of
+	// a tag. It is mostly here to facilitate vendor-agnostic frameworks like OpenTracing
+	// and OpenCensus.
+	SpanName = "span.name"
+
+	// SpanType defines the Span type (web, db, cache).
+	SpanType = "span.type"
+
+	// ServiceName defines the Service name for this Span.
+	ServiceName = "service.name"
+
+	// Version is a tag that specifies the current application version.
+	Version = "version"
+
+	// ResourceName defines the Resource name for the Span.
+	ResourceName = "resource.name"
+
+	// Error specifies the error tag. Its value is usually of type "error".
+	Error = "error"
+
+	// ErrorMsg specifies the error message.
+	ErrorMsg = "error.message"
+
+	// ErrorType specifies the error type.
+	ErrorType = "error.type"
+
+	// ErrorStack specifies the stack dump.
+	ErrorStack = "error.stack"
+
+	// ErrorDetails holds details about an error which implements a formatter.
+	ErrorDetails = "error.details"
+
+	// Environment specifies the environment to use with a trace.
+	Environment = "env"
+
+	// EventSampleRate specifies the rate at which this span will be sampled
+	// as an APM event.
+	EventSampleRate = "_dd1.sr.eausr"
+
+	// AnalyticsEvent specifies whether the span should be recorded as a Trace
+	// Search & Analytics event.
+	AnalyticsEvent = "analytics.event"
+
+	// ManualKeep is a tag which specifies that the trace to which this span
+	// belongs should be kept when set to true.
+	ManualKeep = "manual.keep"
+
+	// ManualDrop is a tag which specifies that the trace to which this span
+	// belongs should be dropped when set to true.
+	ManualDrop = "manual.drop"
+
+	// RuntimeID is a tag that contains a unique id for this process.
+	RuntimeID = "runtime-id"
+
+	// Component defines the library integration the span originated from.
+	Component = "component"
+
+	// SpanKind defines the kind of span based on OpenTelemetry requirements (client, server, producer, consumer).
+	SpanKind = "span.kind"
+)
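For orientation, a minimal usage sketch of how these ext constants are typically attached to a span; the service, operation, and resource names here are illustrative only.

	package main

	import (
		"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
		"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
	)

	func main() {
		tracer.Start(tracer.WithService("example-service")) // illustrative service name
		defer tracer.Stop()

		// Attach a few of the well-known tags defined above to a client span.
		span := tracer.StartSpan("http.request", tracer.ResourceName("GET /users"))
		span.SetTag(ext.SpanKind, ext.SpanKindClient)
		span.SetTag(ext.HTTPMethod, "GET")
		span.SetTag(ext.HTTPURL, "http://example.com/users")
		span.Finish()
	}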
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal/globaltracer.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal/globaltracer.go
new file mode 100644
index 0000000000..363d1f9983
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal/globaltracer.go
@@ -0,0 +1,104 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package internal // import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+
+import (
+	"sync/atomic"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+)
+
+var (
+	// globalTracer stores the current tracer as *ddtrace.Tracer (pointer to interface). The
+	// atomic.Value type requires types to be consistent, which requires using *ddtrace.Tracer.
+	globalTracer atomic.Value
+)
+
+func init() {
+	var tracer ddtrace.Tracer = &NoopTracer{}
+	globalTracer.Store(&tracer)
+}
+
+// SetGlobalTracer sets the global tracer to t.
+func SetGlobalTracer(t ddtrace.Tracer) {
+	old := *globalTracer.Swap(&t).(*ddtrace.Tracer)
+	if !Testing {
+		old.Stop()
+	}
+}
+
+// GetGlobalTracer returns the currently active tracer.
+func GetGlobalTracer() ddtrace.Tracer {
+	return *globalTracer.Load().(*ddtrace.Tracer)
+}
+
+// Testing is set to true when the mock tracer is active. It usually signifies that we are in a test
+// environment. This value is used by tracer.Start to prevent overriding the GlobalTracer in tests.
+var Testing = false
+
+var _ ddtrace.Tracer = (*NoopTracer)(nil)
+
+// NoopTracer is an implementation of ddtrace.Tracer that is a no-op.
+type NoopTracer struct{}
+
+// StartSpan implements ddtrace.Tracer.
+func (NoopTracer) StartSpan(_ string, _ ...ddtrace.StartSpanOption) ddtrace.Span {
+	return NoopSpan{}
+}
+
+// SetServiceInfo implements ddtrace.Tracer.
+func (NoopTracer) SetServiceInfo(_, _, _ string) {}
+
+// Extract implements ddtrace.Tracer.
+func (NoopTracer) Extract(_ interface{}) (ddtrace.SpanContext, error) {
+	return NoopSpanContext{}, nil
+}
+
+// Inject implements ddtrace.Tracer.
+func (NoopTracer) Inject(_ ddtrace.SpanContext, _ interface{}) error { return nil }
+
+// Stop implements ddtrace.Tracer.
+func (NoopTracer) Stop() {}
+
+var _ ddtrace.Span = (*NoopSpan)(nil)
+
+// NoopSpan is an implementation of ddtrace.Span that is a no-op.
+type NoopSpan struct{}
+
+// SetTag implements ddtrace.Span.
+func (NoopSpan) SetTag(_ string, _ interface{}) {}
+
+// SetOperationName implements ddtrace.Span.
+func (NoopSpan) SetOperationName(_ string) {}
+
+// BaggageItem implements ddtrace.Span.
+func (NoopSpan) BaggageItem(_ string) string { return "" }
+
+// SetBaggageItem implements ddtrace.Span.
+func (NoopSpan) SetBaggageItem(_, _ string) {}
+
+// Finish implements ddtrace.Span.
+func (NoopSpan) Finish(_ ...ddtrace.FinishOption) {}
+
+// Tracer implements ddtrace.Span.
+func (NoopSpan) Tracer() ddtrace.Tracer { return NoopTracer{} }
+
+// Context implements ddtrace.Span.
+func (NoopSpan) Context() ddtrace.SpanContext { return NoopSpanContext{} }
+
+var _ ddtrace.SpanContext = (*NoopSpanContext)(nil)
+
+// NoopSpanContext is an implementation of ddtrace.SpanContext that is a no-op.
+type NoopSpanContext struct{}
+
+// SpanID implements ddtrace.SpanContext.
+func (NoopSpanContext) SpanID() uint64 { return 0 }
+
+// TraceID implements ddtrace.SpanContext.
+func (NoopSpanContext) TraceID() uint64 { return 0 }
+
+// ForeachBaggageItem implements ddtrace.SpanContext.
+func (NoopSpanContext) ForeachBaggageItem(_ func(k, v string) bool) {}
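A brief sketch of the behaviour this file provides: until tracer.Start swaps in a real tracer via SetGlobalTracer, the global tracer is the NoopTracer above, so creating spans before startup is safe and simply does nothing. The operation name below is illustrative.

	package main

	import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"

	func main() {
		// Before Start is called the global tracer is a no-op,
		// so this span is created and finished without any effect.
		early := tracer.StartSpan("early.operation")
		early.Finish()

		tracer.Start()
		defer tracer.Stop()
	}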
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/context.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/context.go
new file mode 100644
index 0000000000..07b72c1ae8
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/context.go
@@ -0,0 +1,62 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"context"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+)
+
+type contextKey struct{}
+
+var activeSpanKey = contextKey{}
+
+// ContextWithSpan returns a copy of the given context which includes the span s.
+func ContextWithSpan(ctx context.Context, s Span) context.Context {
+	return context.WithValue(ctx, activeSpanKey, s)
+}
+
+// SpanFromContext returns the span contained in the given context. A second return
+// value indicates if a span was found in the context. If no span is found, a no-op
+// span is returned.
+func SpanFromContext(ctx context.Context) (Span, bool) {
+	if ctx == nil {
+		return &internal.NoopSpan{}, false
+	}
+	v := ctx.Value(activeSpanKey)
+	if s, ok := v.(ddtrace.Span); ok {
+		return s, true
+	}
+	return &internal.NoopSpan{}, false
+}
+
+// StartSpanFromContext returns a new span with the given operation name and options. If a span
+// is found in the context, it will be used as the parent of the resulting span. If the ChildOf
+// option is passed, it will only be used as the parent if there is no span found in `ctx`.
+func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+	// copy opts in case the caller reuses the slice in parallel
+	// we will add at least 1, at most 2 items
+	optsLocal := make([]StartSpanOption, len(opts), len(opts)+2)
+	copy(optsLocal, opts)
+
+	if ctx == nil {
+		// default to context.Background() to avoid panics on Go >= 1.15
+		ctx = context.Background()
+	} else if s, ok := SpanFromContext(ctx); ok {
+		optsLocal = append(optsLocal, ChildOf(s.Context()))
+	}
+	optsLocal = append(optsLocal, withContext(ctx))
+	s := StartSpan(operationName, optsLocal...)
+	if span, ok := s.(*span); ok && span.pprofCtxActive != nil {
+		// If pprof labels were applied for this span, use the derived ctx that
+		// includes them. Otherwise a child of this span wouldn't be able to
+		// correctly restore the labels of its parent when it finishes.
+		ctx = span.pprofCtxActive
+	}
+	return s, ContextWithSpan(ctx, s)
+}
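A short sketch of how the helpers above compose; handleRequest and the operation names are hypothetical.

	package main

	import (
		"context"

		"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
	)

	// handleRequest is a hypothetical unit of work that continues the trace
	// carried in ctx by opening a child span with StartSpanFromContext.
	func handleRequest(ctx context.Context) {
		span, ctx := tracer.StartSpanFromContext(ctx, "handle.request")
		defer span.Finish()

		// Code further down the call chain can recover the active span from the context.
		if active, ok := tracer.SpanFromContext(ctx); ok {
			active.SetTag("request.handled", true)
		}
	}

	func main() {
		tracer.Start()
		defer tracer.Stop()

		root, ctx := tracer.StartSpanFromContext(context.Background(), "main.operation")
		handleRequest(ctx)
		root.Finish()
	}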
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/doc.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/doc.go
new file mode 100644
index 0000000000..0880db0d1c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/doc.go
@@ -0,0 +1,110 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package tracer contains Datadog's core tracing client. It is used to trace
+// requests as they flow across web servers, databases and microservices, giving
+// developers visibility into bottlenecks and troublesome requests. To start the
+// tracer, simply call the start method along with an optional set of options.
+// By default, the trace agent is considered to be found at "localhost:8126". In a
+// setup where this would be different (let's say 127.0.0.1:1234), we could do:
+//
+//	tracer.Start(tracer.WithAgentAddr("127.0.0.1:1234"))
+//	defer tracer.Stop()
+//
+// The tracing client can perform trace sampling. While the trace agent
+// already samples traces to reduce bandwidth usage, client sampling reduces
+// performance overhead. To make use of it, the package comes with a ready-to-use
+// rate sampler that can be passed to the tracer. To use it and keep only 30% of the
+// requests, one would do:
+//
+//	s := tracer.NewRateSampler(0.3)
+//	tracer.Start(tracer.WithSampler(s))
+//
+// More precise control of sampling rates can be configured using sampling rules.
+// This can be applied based on span name, service or both, and is used to determine
+// the sampling rate to apply. MaxPerSecond specifies max number of spans per second
+// that can be sampled per the rule and applies only to sampling rules of type
+// tracer.SamplingRuleSpan. If MaxPerSecond is not specified, the default is no limit.
+//
+//	rules := []tracer.SamplingRule{
+//	      // sample 10% of traces with the span name "web.request"
+//	      tracer.NameRule("web.request", 0.1),
+//	      // sample 20% of traces for the service "test-service"
+//	      tracer.ServiceRule("test-service", 0.2),
+//	      // sample 30% of traces when the span name is "db.query" and the service
+//	      // is "postgres.db"
+//	      tracer.NameServiceRule("db.query", "postgres.db", 0.3),
+//	      // sample 100% of traces when service and name match these regular expressions
+//	      {Service: regexp.MustCompile("^test-"), Name: regexp.MustCompile("http\\..*"), Rate: 1.0},
+//	      // sample 50% of spans when service and name match these glob patterns with no limit on the number of spans
+//	      tracer.SpanNameServiceRule("^test-", "http\\..*", 0.5),
+//	      // sample 50% of spans when service and name match these glob patterns up to 100 spans per second
+//	      tracer.SpanNameServiceMPSRule("^test-", "http\\..*", 0.5, 100),
+//	}
+//	tracer.Start(tracer.WithSamplingRules(rules))
+//	defer tracer.Stop()
+//
+// Sampling rules can also be configured at runtime using the DD_TRACE_SAMPLING_RULES and
+// DD_SPAN_SAMPLING_RULES environment variables. When set, it overrides rules set by tracer.WithSamplingRules.
+// The value is a JSON array of objects.
+// For trace sampling rules, the "sample_rate" field is required, the "name" and "service" fields are optional.
+// For span sampling rules, the "name" and "service", if specified, must be a valid glob pattern,
+// i.e. a string where "*" matches any contiguous substring, even an empty string,
+// and "?" character matches exactly one of any character.
+// The "sample_rate" field is optional, and if not specified, defaults to "1.0", sampling 100% of the spans.
+// The "max_per_second" field is optional, and if not specified, defaults to 0, keeping all the previously sampled spans.
+//
+//	export DD_TRACE_SAMPLING_RULES='[{"name": "web.request", "sample_rate": 1.0}]'
+//	export DD_SPAN_SAMPLING_RULES='[{"service":"test.?","name": "web.*", "sample_rate": 1.0, "max_per_second":100}]'
+//
+// To create spans, use the functions StartSpan and StartSpanFromContext. Both accept
+// StartSpanOptions that can be used to configure the span. A span that is started
+// with no parent will begin a new trace. See the function documentation for details
+// on specific usage. Each trace has a hard limit of 100,000 spans, after which the
+// trace will be dropped and a diagnostic log message emitted. In practice users should
+// not approach this limit as traces of this size are not useful and impossible to
+// visualize.
+//
+// See the contrib package ( https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib )
+// for integrating datadog with various libraries, frameworks and clients.
+//
+// All spans created by the tracer contain a context hereby referred to as the span
+// context. Note that this is different from Go's context. The span context is used
+// to package essential information from a span, which is needed when creating child
+// spans that inherit from it. Thus, a child span is created from a span's span context.
+// The span context can originate from within the same process, but also a
+// different process or even a different machine in the case of distributed tracing.
+//
+// To make use of distributed tracing, a span's context may be injected via a carrier
+// into a transport (HTTP, RPC, etc.) to be extracted on the other end and used to
+// create spans that are direct descendants of it. A couple of carrier interfaces
+// which should cover most of the use-case scenarios are readily provided, such as
+// HTTPCarrier and TextMapCarrier. Users are free to create their own, which will work
+// with our propagation algorithm as long as they implement the TextMapReader and TextMapWriter
+// interfaces. An example alternate implementation is the MDCarrier in our gRPC integration.
+//
+// As an example, injecting a span's context into an HTTP request would look like this.
+// (See the net/http contrib package for more examples https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http):
+//
+//	req, err := http.NewRequest("GET", "http://example.com", nil)
+//	// ...
+//	err = tracer.Inject(span.Context(), tracer.HTTPHeadersCarrier(req.Header))
+//	// ...
+//	http.DefaultClient.Do(req)
+//
+// Then, on the server side, to continue the trace one would do:
+//
+//	sctx, err := tracer.Extract(tracer.HTTPHeadersCarrier(req.Header))
+//	// ...
+//	span := tracer.StartSpan("child.span", tracer.ChildOf(sctx))
+//
+// In the same manner, any means can be used as a carrier to inject a context into a transport. Go's
+// context can also be used as a means to transport spans within the same process. The methods
+// StartSpanFromContext, ContextWithSpan and SpanFromContext exist for this reason.
+//
+// Some libraries and frameworks are supported out-of-the-box by using one
+// of our integrations. You can see a list of supported integrations here:
+// https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/contrib
+package tracer // import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/log.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/log.go
new file mode 100644
index 0000000000..84899fbc5e
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/log.go
@@ -0,0 +1,128 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"math"
+	"net/http"
+	"runtime"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/version"
+)
+
+// startupInfo contains various information about the status of the tracer on startup.
+type startupInfo struct {
+	Date                        string            `json:"date"`                           // ISO 8601 date and time of start
+	OSName                      string            `json:"os_name"`                        // Windows, Darwin, Debian, etc.
+	OSVersion                   string            `json:"os_version"`                     // Version of the OS
+	Version                     string            `json:"version"`                        // Tracer version
+	Lang                        string            `json:"lang"`                           // "Go"
+	LangVersion                 string            `json:"lang_version"`                   // Go version, e.g. go1.13
+	Env                         string            `json:"env"`                            // Tracer env
+	Service                     string            `json:"service"`                        // Tracer Service
+	AgentURL                    string            `json:"agent_url"`                      // The address of the agent
+	AgentError                  string            `json:"agent_error"`                    // Any error that occurred trying to connect to agent
+	Debug                       bool              `json:"debug"`                          // Whether debug mode is enabled
+	AnalyticsEnabled            bool              `json:"analytics_enabled"`              // True if there is a global analytics rate set
+	SampleRate                  string            `json:"sample_rate"`                    // The default sampling rate for the rules sampler
+	SampleRateLimit             string            `json:"sample_rate_limit"`              // The rate limit configured with the rules sampler
+	SamplingRules               []SamplingRule    `json:"sampling_rules"`                 // Rules used by the rules sampler
+	SamplingRulesError          string            `json:"sampling_rules_error"`           // Any errors that occurred while parsing sampling rules
+	ServiceMappings             map[string]string `json:"service_mappings"`               // Service Mappings
+	Tags                        map[string]string `json:"tags"`                           // Global tags
+	RuntimeMetricsEnabled       bool              `json:"runtime_metrics_enabled"`        // Whether or not runtime metrics are enabled
+	HealthMetricsEnabled        bool              `json:"health_metrics_enabled"`         // Whether or not health metrics are enabled
+	ProfilerCodeHotspotsEnabled bool              `json:"profiler_code_hotspots_enabled"` // Whether or not profiler code hotspots are enabled
+	ProfilerEndpointsEnabled    bool              `json:"profiler_endpoints_enabled"`     // Whether or not profiler endpoints are enabled
+	ApplicationVersion          string            `json:"dd_version"`                     // Version of the user's application
+	Architecture                string            `json:"architecture"`                   // Architecture of host machine
+	GlobalService               string            `json:"global_service"`                 // Global service string. If not-nil should be same as Service. (#614)
+	LambdaMode                  string            `json:"lambda_mode"`                    // Whether or not the client has enabled lambda mode
+	AppSec                      bool              `json:"appsec"`                         // AppSec status: true when started, false otherwise.
+	AgentFeatures               agentFeatures     `json:"agent_features"`                 // Lists the capabilities of the agent.
+}
+
+// checkEndpoint tries to connect to the URL specified by endpoint.
+// If the endpoint is not reachable, checkEndpoint returns an error
+// explaining why.
+func checkEndpoint(c *http.Client, endpoint string) error {
+	req, err := http.NewRequest("POST", endpoint, bytes.NewReader([]byte{0x90}))
+	if err != nil {
+		return fmt.Errorf("cannot create http request: %v", err)
+	}
+	req.Header.Set(traceCountHeader, "0")
+	req.Header.Set("Content-Type", "application/msgpack")
+	res, err := c.Do(req)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+	return nil
+}
+
+// logStartup generates a startupInfo for a tracer and writes it to the log in
+// JSON format.
+func logStartup(t *tracer) {
+	tags := make(map[string]string)
+	for k, v := range t.config.globalTags {
+		tags[k] = fmt.Sprintf("%v", v)
+	}
+
+	info := startupInfo{
+		Date:                        time.Now().Format(time.RFC3339),
+		OSName:                      osinfo.OSName(),
+		OSVersion:                   osinfo.OSVersion(),
+		Version:                     version.Tag,
+		Lang:                        "Go",
+		LangVersion:                 runtime.Version(),
+		Env:                         t.config.env,
+		Service:                     t.config.serviceName,
+		AgentURL:                    t.config.transport.endpoint(),
+		Debug:                       t.config.debug,
+		AnalyticsEnabled:            !math.IsNaN(globalconfig.AnalyticsRate()),
+		SampleRate:                  fmt.Sprintf("%f", t.rulesSampling.traces.globalRate),
+		SampleRateLimit:             "disabled",
+		SamplingRules:               append(t.config.traceRules, t.config.spanRules...),
+		ServiceMappings:             t.config.serviceMappings,
+		Tags:                        tags,
+		RuntimeMetricsEnabled:       t.config.runtimeMetrics,
+		HealthMetricsEnabled:        t.config.runtimeMetrics,
+		ApplicationVersion:          t.config.version,
+		ProfilerCodeHotspotsEnabled: t.config.profilerHotspots,
+		ProfilerEndpointsEnabled:    t.config.profilerEndpoints,
+		Architecture:                runtime.GOARCH,
+		GlobalService:               globalconfig.ServiceName(),
+		LambdaMode:                  fmt.Sprintf("%t", t.config.logToStdout),
+		AgentFeatures:               t.config.agent,
+		AppSec:                      appsec.Enabled(),
+	}
+	if _, _, err := samplingRulesFromEnv(); err != nil {
+		info.SamplingRulesError = fmt.Sprintf("%s", err)
+	}
+	if limit, ok := t.rulesSampling.TraceRateLimit(); ok {
+		info.SampleRateLimit = fmt.Sprintf("%v", limit)
+	}
+	if !t.config.logToStdout {
+		if err := checkEndpoint(t.config.httpClient, t.config.transport.endpoint()); err != nil {
+			info.AgentError = fmt.Sprintf("%s", err)
+			log.Warn("DIAGNOSTICS Unable to reach agent intake: %s", err)
+		}
+	}
+	bs, err := json.Marshal(info)
+	if err != nil {
+		log.Warn("DIAGNOSTICS Failed to serialize json for startup log (%v) %#v\n", err, info)
+		return
+	}
+	log.Info("DATADOG TRACER CONFIGURATION %s\n", string(bs))
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/metrics.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/metrics.go
new file mode 100644
index 0000000000..a454ca1673
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/metrics.go
@@ -0,0 +1,110 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"runtime"
+	"runtime/debug"
+	"sync/atomic"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// defaultMetricsReportInterval specifies the interval at which runtime metrics will
+// be reported.
+const defaultMetricsReportInterval = 10 * time.Second
+
+type statsdClient interface {
+	Incr(name string, tags []string, rate float64) error
+	Count(name string, value int64, tags []string, rate float64) error
+	Gauge(name string, value float64, tags []string, rate float64) error
+	Timing(name string, value time.Duration, tags []string, rate float64) error
+	Flush() error
+	Close() error
+}
+
+// reportRuntimeMetrics periodically reports go runtime metrics at
+// the given interval.
+func (t *tracer) reportRuntimeMetrics(interval time.Duration) {
+	var ms runtime.MemStats
+	gc := debug.GCStats{
+		// When len(stats.PauseQuantiles) is 5, it will be filled with the
+		// minimum, 25%, 50%, 75%, and maximum pause times. See the documentation
+		// for (runtime/debug).ReadGCStats.
+		PauseQuantiles: make([]time.Duration, 5),
+	}
+
+	tick := time.NewTicker(interval)
+	defer tick.Stop()
+	for {
+		select {
+		case <-tick.C:
+			log.Debug("Reporting runtime metrics...")
+			runtime.ReadMemStats(&ms)
+			debug.ReadGCStats(&gc)
+
+			statsd := t.statsd
+			// CPU statistics
+			statsd.Gauge("runtime.go.num_cpu", float64(runtime.NumCPU()), nil, 1)
+			statsd.Gauge("runtime.go.num_goroutine", float64(runtime.NumGoroutine()), nil, 1)
+			statsd.Gauge("runtime.go.num_cgo_call", float64(runtime.NumCgoCall()), nil, 1)
+			// General statistics
+			statsd.Gauge("runtime.go.mem_stats.alloc", float64(ms.Alloc), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.total_alloc", float64(ms.TotalAlloc), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.sys", float64(ms.Sys), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.lookups", float64(ms.Lookups), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.mallocs", float64(ms.Mallocs), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.frees", float64(ms.Frees), nil, 1)
+			// Heap memory statistics
+			statsd.Gauge("runtime.go.mem_stats.heap_alloc", float64(ms.HeapAlloc), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.heap_sys", float64(ms.HeapSys), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.heap_idle", float64(ms.HeapIdle), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.heap_inuse", float64(ms.HeapInuse), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.heap_released", float64(ms.HeapReleased), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.heap_objects", float64(ms.HeapObjects), nil, 1)
+			// Stack memory statistics
+			statsd.Gauge("runtime.go.mem_stats.stack_inuse", float64(ms.StackInuse), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.stack_sys", float64(ms.StackSys), nil, 1)
+			// Off-heap memory statistics
+			statsd.Gauge("runtime.go.mem_stats.m_span_inuse", float64(ms.MSpanInuse), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.m_span_sys", float64(ms.MSpanSys), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.m_cache_inuse", float64(ms.MCacheInuse), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.m_cache_sys", float64(ms.MCacheSys), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.buck_hash_sys", float64(ms.BuckHashSys), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.gc_sys", float64(ms.GCSys), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.other_sys", float64(ms.OtherSys), nil, 1)
+			// Garbage collector statistics
+			statsd.Gauge("runtime.go.mem_stats.next_gc", float64(ms.NextGC), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.last_gc", float64(ms.LastGC), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.pause_total_ns", float64(ms.PauseTotalNs), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.num_gc", float64(ms.NumGC), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.num_forced_gc", float64(ms.NumForcedGC), nil, 1)
+			statsd.Gauge("runtime.go.mem_stats.gc_cpu_fraction", ms.GCCPUFraction, nil, 1)
+			for i, p := range []string{"min", "25p", "50p", "75p", "max"} {
+				statsd.Gauge("runtime.go.gc_stats.pause_quantiles."+p, float64(gc.PauseQuantiles[i]), nil, 1)
+			}
+
+		case <-t.stop:
+			return
+		}
+	}
+}
+
+func (t *tracer) reportHealthMetrics(interval time.Duration) {
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			t.statsd.Count("datadog.tracer.spans_started", int64(atomic.SwapUint32(&t.spansStarted, 0)), nil, 1)
+			t.statsd.Count("datadog.tracer.spans_finished", int64(atomic.SwapUint32(&t.spansFinished, 0)), nil, 1)
+			t.statsd.Count("datadog.tracer.traces_dropped", int64(atomic.SwapUint32(&t.tracesDropped, 0)), []string{"reason:trace_too_large"}, 1)
+		case <-t.stop:
+			return
+		}
+	}
+}
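A small sketch of how these reporting loops are typically switched on at startup. WithRuntimeMetrics and WithDogstatsdAddress are assumed to be StartOptions from this package (only part of option.go appears below), and the statsd address is illustrative.

	package main

	import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"

	func main() {
		tracer.Start(
			tracer.WithRuntimeMetrics(),                   // enables the runtime/health reporting loops above
			tracer.WithDogstatsdAddress("127.0.0.1:8125"), // illustrative statsd address; defaults apply if omitted
		)
		defer tracer.Stop()

		// ... application work; runtime metrics are flushed every 10 seconds by default.
	}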
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option.go
new file mode 100644
index 0000000000..00ca5ff4ff
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option.go
@@ -0,0 +1,1052 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"math"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/version"
+
+	"github.com/DataDog/datadog-go/v5/statsd"
+)
+
+var (
+	// defaultSocketAPM specifies the socket path to use for connecting to the trace-agent.
+	// Replaced in tests
+	defaultSocketAPM = "/var/run/datadog/apm.socket"
+
+	// defaultSocketDSD specifies the socket path to use for connecting to the statsd server.
+	// Replaced in tests
+	defaultSocketDSD = "/var/run/datadog/dsd.socket"
+
+	// defaultMaxTagsHeaderLen specifies the default maximum length of the X-Datadog-Tags header value.
+	defaultMaxTagsHeaderLen = 128
+)
+
+// config holds the tracer configuration.
+type config struct {
+	// debug, when true, writes details to logs.
+	debug bool
+
+	// agent holds the capabilities of the agent and determines some
+	// of the behaviour of the tracer.
+	agent agentFeatures
+
+	// featureFlags specifies any enabled feature flags.
+	featureFlags map[string]struct{}
+
+	// logToStdout reports whether we should log all traces to the standard
+	// output instead of using the agent. This is used in Lambda environments.
+	logToStdout bool
+
+	// sendRetries is the number of times a trace payload send is retried upon
+	// failure.
+	sendRetries int
+
+	// logStartup, when true, causes various startup info to be written
+	// when the tracer starts.
+	logStartup bool
+
+	// serviceName specifies the name of this application.
+	serviceName string
+
+	// universalVersion reports whether the span service name and the config service name
+	// must match for the application version tag to be set. False by default.
+	universalVersion bool
+
+	// version specifies the version of this application
+	version string
+
+	// env contains the environment that this application will run under.
+	env string
+
+	// sampler specifies the sampler that will be used for sampling traces.
+	sampler Sampler
+
+	// agentURL is the agent URL that receives traces from the tracer.
+	agentURL *url.URL
+
+	// serviceMappings holds a set of service mappings to dynamically rename services
+	serviceMappings map[string]string
+
+	// globalTags holds a set of tags that will be automatically applied to
+	// all spans.
+	globalTags map[string]interface{}
+
+	// transport specifies the Transport interface which will be used to send data to the agent.
+	transport transport
+
+	// propagator propagates span context cross-process
+	propagator Propagator
+
+	// httpClient specifies the HTTP client to be used by the agent's transport.
+	httpClient *http.Client
+
+	// hostname is automatically assigned when the DD_TRACE_REPORT_HOSTNAME is set to true,
+	// and is added as a special tag to the root span of traces.
+	hostname string
+
+	// logger specifies the logger to use when printing errors. If not specified, the "log" package
+	// will be used.
+	logger ddtrace.Logger
+
+	// runtimeMetrics specifies whether collection of runtime metrics is enabled.
+	runtimeMetrics bool
+
+	// dogstatsdAddr specifies the address to connect for sending metrics to the
+	// Datadog Agent. If not set, it defaults to "localhost:8125" or to the
+	// combination of the environment variables DD_AGENT_HOST and DD_DOGSTATSD_PORT.
+	dogstatsdAddr string
+
+	// statsdClient is set when a user provides a custom statsd client for tracking metrics
+	// associated with the runtime and the tracer.
+	statsdClient statsdClient
+
+	// spanRules contains user-defined rules to determine the sampling rate to apply
+	// to a single span without affecting the entire trace
+	spanRules []SamplingRule
+
+	// traceRules contains user-defined rules to determine the sampling rate to apply
+	// to the entire trace if any spans satisfy the criteria
+	traceRules []SamplingRule
+
+	// tickChan specifies a channel which will receive the time every time the tracer must flush.
+	// It defaults to time.Ticker; replaced in tests.
+	tickChan <-chan time.Time
+
+	// noDebugStack disables the collection of debug stack traces globally. No traces reporting
+	// errors will record a stack trace when this option is set.
+	noDebugStack bool
+
+	// profilerHotspots specifies whether profiler Code Hotspots is enabled.
+	profilerHotspots bool
+
+	// profilerEndpoints specifies whether profiler endpoint filtering is enabled.
+	profilerEndpoints bool
+
+	// enabled reports whether tracing is enabled.
+	enabled bool
+
+	// enableHostnameDetection specifies whether the tracer should enable hostname detection.
+	enableHostnameDetection bool
+
+	// spanAttributeSchemaVersion holds the selected DD_TRACE_SPAN_ATTRIBUTE_SCHEMA version.
+	spanAttributeSchemaVersion int
+
+	// peerServiceDefaultsEnabled indicates whether the peer.service tag calculation is enabled or not.
+	peerServiceDefaultsEnabled bool
+
+	// peerServiceMappings holds a set of service mappings to dynamically rename peer.service values.
+	peerServiceMappings map[string]string
+}
+
+// HasFeature reports whether feature f is enabled.
+func (c *config) HasFeature(f string) bool {
+	_, ok := c.featureFlags[strings.TrimSpace(f)]
+	return ok
+}
+
+// StartOption represents a function that can be provided as a parameter to Start.
+type StartOption func(*config)
+
+// maxPropagatedTagsLength limits the size of DD_TRACE_X_DATADOG_TAGS_MAX_LENGTH to prevent HTTP 413 responses.
+const maxPropagatedTagsLength = 512
+
+// newConfig renders the tracer configuration based on defaults, environment variables
+// and passed user opts.
+func newConfig(opts ...StartOption) *config {
+	c := new(config)
+	c.sampler = NewAllSampler()
+
+	if internal.BoolEnv("DD_TRACE_ANALYTICS_ENABLED", false) {
+		globalconfig.SetAnalyticsRate(1.0)
+	}
+	if os.Getenv("DD_TRACE_REPORT_HOSTNAME") == "true" {
+		var err error
+		c.hostname, err = os.Hostname()
+		if err != nil {
+			log.Warn("unable to look up hostname: %v", err)
+		}
+	}
+	if v := os.Getenv("DD_TRACE_SOURCE_HOSTNAME"); v != "" {
+		c.hostname = v
+	}
+	if v := os.Getenv("DD_ENV"); v != "" {
+		c.env = v
+	}
+	if v := os.Getenv("DD_TRACE_FEATURES"); v != "" {
+		WithFeatureFlags(strings.FieldsFunc(v, func(r rune) bool {
+			return r == ',' || r == ' '
+		})...)(c)
+	}
+	if v := os.Getenv("DD_SERVICE"); v != "" {
+		c.serviceName = v
+		globalconfig.SetServiceName(v)
+	}
+	if ver := os.Getenv("DD_VERSION"); ver != "" {
+		c.version = ver
+	}
+	if v := os.Getenv("DD_SERVICE_MAPPING"); v != "" {
+		internal.ForEachStringTag(v, func(key, val string) { WithServiceMapping(key, val)(c) })
+	}
+	if v := os.Getenv("DD_TRACE_HEADER_TAGS"); v != "" {
+		WithHeaderTags(strings.Split(v, ","))(c)
+	}
+	if v := os.Getenv("DD_TAGS"); v != "" {
+		tags := internal.ParseTagString(v)
+		internal.CleanGitMetadataTags(tags)
+		for key, val := range tags {
+			WithGlobalTag(key, val)(c)
+		}
+	}
+	if _, ok := os.LookupEnv("AWS_LAMBDA_FUNCTION_NAME"); ok {
+		// AWS_LAMBDA_FUNCTION_NAME being set indicates that we're running in an AWS Lambda environment.
+		// See: https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html
+		c.logToStdout = true
+	}
+	c.logStartup = internal.BoolEnv("DD_TRACE_STARTUP_LOGS", true)
+	c.runtimeMetrics = internal.BoolEnv("DD_RUNTIME_METRICS_ENABLED", false)
+	c.debug = internal.BoolEnv("DD_TRACE_DEBUG", false)
+	c.enabled = internal.BoolEnv("DD_TRACE_ENABLED", true)
+	c.profilerEndpoints = internal.BoolEnv(traceprof.EndpointEnvVar, true)
+	c.profilerHotspots = internal.BoolEnv(traceprof.CodeHotspotsEnvVar, true)
+	c.enableHostnameDetection = internal.BoolEnv("DD_CLIENT_HOSTNAME_ENABLED", true)
+
+	schemaVersionStr := os.Getenv("DD_TRACE_SPAN_ATTRIBUTE_SCHEMA")
+	if v, ok := namingschema.ParseVersion(schemaVersionStr); ok {
+		namingschema.SetVersion(v)
+		c.spanAttributeSchemaVersion = int(v)
+	} else {
+		v := namingschema.SetDefaultVersion()
+		c.spanAttributeSchemaVersion = int(v)
+		log.Warn("DD_TRACE_SPAN_ATTRIBUTE_SCHEMA=%s is not a valid value, setting to default of v%d", schemaVersionStr, v)
+	}
+	// Allow DD_TRACE_SPAN_ATTRIBUTE_SCHEMA=v0 users to disable default integration (contrib AKA v0) service names.
+	// These default service names are always disabled for v1 onwards.
+	namingschema.SetUseGlobalServiceName(internal.BoolEnv("DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED", false))
+
+	// peer.service tag default calculation is enabled by default if using attribute schema >= 1
+	c.peerServiceDefaultsEnabled = true
+	if c.spanAttributeSchemaVersion == int(namingschema.SchemaV0) {
+		c.peerServiceDefaultsEnabled = internal.BoolEnv("DD_TRACE_PEER_SERVICE_DEFAULTS_ENABLED", false)
+	}
+	c.peerServiceMappings = make(map[string]string)
+	if v := os.Getenv("DD_TRACE_PEER_SERVICE_MAPPING"); v != "" {
+		internal.ForEachStringTag(v, func(key, val string) { c.peerServiceMappings[key] = val })
+	}
+
+	for _, fn := range opts {
+		fn(c)
+	}
+	if c.agentURL == nil {
+		c.agentURL = resolveAgentAddr()
+		if url := internal.AgentURLFromEnv(); url != nil {
+			c.agentURL = url
+		}
+	}
+	if c.agentURL.Scheme == "unix" {
+		// If we're connecting over UDS we can just rely on the agent to provide the hostname
+		log.Debug("connecting to agent over unix, do not set hostname on any traces")
+		c.enableHostnameDetection = false
+		c.httpClient = udsClient(c.agentURL.Path)
+		c.agentURL = &url.URL{
+			Scheme: "http",
+			Host:   fmt.Sprintf("UDS_%s", strings.NewReplacer(":", "_", "/", "_", `\`, "_").Replace(c.agentURL.Path)),
+		}
+	} else if c.httpClient == nil {
+		c.httpClient = defaultClient
+	}
+	WithGlobalTag(ext.RuntimeID, globalconfig.RuntimeID())(c)
+	if c.env == "" {
+		if v, ok := c.globalTags["env"]; ok {
+			if e, ok := v.(string); ok {
+				c.env = e
+			}
+		}
+	}
+	if c.version == "" {
+		if v, ok := c.globalTags["version"]; ok {
+			if ver, ok := v.(string); ok {
+				c.version = ver
+			}
+		}
+	}
+	if c.serviceName == "" {
+		if v, ok := c.globalTags["service"]; ok {
+			if s, ok := v.(string); ok {
+				c.serviceName = s
+				globalconfig.SetServiceName(s)
+			}
+		} else {
+			c.serviceName = filepath.Base(os.Args[0])
+		}
+	}
+	if c.transport == nil {
+		c.transport = newHTTPTransport(c.agentURL.String(), c.httpClient)
+	}
+	if c.propagator == nil {
+		envKey := "DD_TRACE_X_DATADOG_TAGS_MAX_LENGTH"
+		max := internal.IntEnv(envKey, defaultMaxTagsHeaderLen)
+		if max < 0 {
+			log.Warn("Invalid value %d for %s. Setting to 0.", max, envKey)
+			max = 0
+		}
+		if max > maxPropagatedTagsLength {
+			log.Warn("Invalid value %d for %s. Maximum allowed is %d. Setting to %d.", max, envKey, maxPropagatedTagsLength, maxPropagatedTagsLength)
+			max = maxPropagatedTagsLength
+		}
+		c.propagator = NewPropagator(&PropagatorConfig{
+			MaxTagsHeaderLen: max,
+		})
+	}
+	if c.logger != nil {
+		log.UseLogger(c.logger)
+	}
+	if c.debug {
+		log.SetLevel(log.LevelDebug)
+	}
+	c.loadAgentFeatures()
+	if c.statsdClient == nil {
+		// configure statsd client
+		addr := c.dogstatsdAddr
+		if addr == "" {
+			// no config defined address; use defaults
+			addr = defaultDogstatsdAddr()
+		}
+		if agentport := c.agent.StatsdPort; agentport > 0 {
+			// the agent reported a non-standard port
+			host, _, err := net.SplitHostPort(addr)
+			if err == nil {
+				// we have a valid host:port address; replace the port because
+				// the agent knows better
+				if host == "" {
+					host = defaultHostname
+				}
+				addr = net.JoinHostPort(host, strconv.Itoa(agentport))
+			}
+			// not a valid TCP address, leave it as it is (could be a socket connection)
+		}
+		c.dogstatsdAddr = addr
+	}
+
+	return c
+}
+
+func newStatsdClient(c *config) (statsdClient, error) {
+	if c.statsdClient != nil {
+		return c.statsdClient, nil
+	}
+
+	client, err := statsd.New(c.dogstatsdAddr, statsd.WithMaxMessagesPerPayload(40), statsd.WithTags(statsTags(c)))
+	if err != nil {
+		return &statsd.NoOpClient{}, err
+	}
+	return client, nil
+}
+
+// defaultHTTPClient returns the default http.Client to start the tracer with.
+func defaultHTTPClient() *http.Client {
+	if _, err := os.Stat(defaultSocketAPM); err == nil {
+		// we have the UDS socket file, use it
+		return udsClient(defaultSocketAPM)
+	}
+	return defaultClient
+}
+
+// udsClient returns a new http.Client which connects using the given UDS socket path.
+func udsClient(socketPath string) *http.Client {
+	return &http.Client{
+		Transport: &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			DialContext: func(ctx context.Context, network, address string) (net.Conn, error) {
+				return defaultDialer.DialContext(ctx, "unix", (&net.UnixAddr{
+					Name: socketPath,
+					Net:  "unix",
+				}).String())
+			},
+			MaxIdleConns:          100,
+			IdleConnTimeout:       90 * time.Second,
+			TLSHandshakeTimeout:   10 * time.Second,
+			ExpectContinueTimeout: 1 * time.Second,
+		},
+		Timeout: defaultHTTPTimeout,
+	}
+}
+
+// defaultDogstatsdAddr returns the default connection address for Dogstatsd.
+func defaultDogstatsdAddr() string {
+	envHost, envPort := os.Getenv("DD_AGENT_HOST"), os.Getenv("DD_DOGSTATSD_PORT")
+	if _, err := os.Stat(defaultSocketDSD); err == nil && envHost == "" && envPort == "" {
+		// socket exists and user didn't specify otherwise via env vars
+		return "unix://" + defaultSocketDSD
+	}
+	host, port := defaultHostname, "8125"
+	if envHost != "" {
+		host = envHost
+	}
+	if envPort != "" {
+		port = envPort
+	}
+	return net.JoinHostPort(host, port)
+}
+
+// agentFeatures holds information about the trace-agent's capabilities.
+// When running WithLambdaMode, a zero-value of this struct will be used
+// as features.
+type agentFeatures struct {
+	// DropP0s reports whether it's ok for the tracer to not send any
+	// P0 traces to the agent.
+	DropP0s bool
+
+	// Stats reports whether the agent can receive client-computed stats on
+	// the /v0.6/stats endpoint.
+	Stats bool
+
+	// StatsdPort specifies the Dogstatsd port as provided by the agent.
+	// If it's the default, it will be 0, which means 8125.
+	StatsdPort int
+
+	// featureFlags specifies all the feature flags reported by the trace-agent.
+	featureFlags map[string]struct{}
+}
+
+// HasFlag reports whether the agent has set the feat feature flag.
+func (a *agentFeatures) HasFlag(feat string) bool {
+	_, ok := a.featureFlags[feat]
+	return ok
+}
+
+// loadAgentFeatures queries the trace-agent for its capabilities and updates
+// the tracer's behaviour.
+func (c *config) loadAgentFeatures() {
+	c.agent = agentFeatures{}
+	if c.logToStdout {
+		// there is no agent; all features off
+		return
+	}
+	resp, err := c.httpClient.Get(fmt.Sprintf("%s/info", c.agentURL))
+	if err != nil {
+		log.Error("Loading features: %v", err)
+		return
+	}
+	if resp.StatusCode == http.StatusNotFound {
+		// agent is older than 7.28.0, features not discoverable
+		return
+	}
+	defer resp.Body.Close()
+	type infoResponse struct {
+		Endpoints     []string `json:"endpoints"`
+		ClientDropP0s bool     `json:"client_drop_p0s"`
+		StatsdPort    int      `json:"statsd_port"`
+		FeatureFlags  []string `json:"feature_flags"`
+	}
+	var info infoResponse
+	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
+		log.Error("Decoding features: %v", err)
+		return
+	}
+	c.agent.DropP0s = info.ClientDropP0s
+	c.agent.StatsdPort = info.StatsdPort
+	for _, endpoint := range info.Endpoints {
+		switch endpoint {
+		case "/v0.6/stats":
+			c.agent.Stats = true
+		}
+	}
+	c.agent.featureFlags = make(map[string]struct{}, len(info.FeatureFlags))
+	for _, flag := range info.FeatureFlags {
+		c.agent.featureFlags[flag] = struct{}{}
+	}
+}
+
+func (c *config) canComputeStats() bool {
+	return c.agent.Stats && c.HasFeature("discovery")
+}
+
+func (c *config) canDropP0s() bool {
+	return c.canComputeStats() && c.agent.DropP0s
+}
+
+func statsTags(c *config) []string {
+	tags := []string{
+		"lang:go",
+		"version:" + version.Tag,
+		"lang_version:" + runtime.Version(),
+	}
+	if c.serviceName != "" {
+		tags = append(tags, "service:"+c.serviceName)
+	}
+	if c.env != "" {
+		tags = append(tags, "env:"+c.env)
+	}
+	if c.hostname != "" {
+		tags = append(tags, "host:"+c.hostname)
+	}
+	for k, v := range c.globalTags {
+		if vstr, ok := v.(string); ok {
+			tags = append(tags, k+":"+vstr)
+		}
+	}
+	return tags
+}
+
+// withNoopStats is used for testing to disable statsd client
+func withNoopStats() StartOption {
+	return func(c *config) {
+		c.statsdClient = &statsd.NoOpClient{}
+	}
+}
+
+// WithFeatureFlags specifies a set of feature flags to enable. Please take into account
+// that most, if not all, feature flags are considered experimental and may result in
+// unexpected bugs.
+func WithFeatureFlags(feats ...string) StartOption {
+	return func(c *config) {
+		if c.featureFlags == nil {
+			c.featureFlags = make(map[string]struct{}, len(feats))
+		}
+		for _, f := range feats {
+			c.featureFlags[strings.TrimSpace(f)] = struct{}{}
+		}
+		log.Info("FEATURES enabled: %v", feats)
+	}
+}
+
+// WithLogger sets logger as the tracer's error printer.
+func WithLogger(logger ddtrace.Logger) StartOption {
+	return func(c *config) {
+		c.logger = logger
+	}
+}
+
+// WithPrioritySampling is deprecated, and priority sampling is enabled by default.
+// When using distributed tracing, the priority sampling value is propagated in order to
+// get all the parts of a distributed trace sampled.
+// To learn more about priority sampling, please visit:
+// https://docs.datadoghq.com/tracing/getting_further/trace_sampling_and_storage/#priority-sampling-for-distributed-tracing
+func WithPrioritySampling() StartOption {
+	return func(c *config) {
+		// This is now enabled by default.
+	}
+}
+
+// WithDebugStack can be used to globally enable or disable the collection of stack traces when
+// spans finish with errors. It is enabled by default. This is a global version of the NoDebugStack
+// FinishOption.
+func WithDebugStack(enabled bool) StartOption {
+	return func(c *config) {
+		c.noDebugStack = !enabled
+	}
+}
+
+// WithDebugMode enables debug mode on the tracer, resulting in more verbose logging.
+func WithDebugMode(enabled bool) StartOption {
+	return func(c *config) {
+		c.debug = enabled
+	}
+}
+
+// WithLambdaMode enables lambda mode on the tracer, for use with AWS Lambda.
+// This option is only required if the Datadog Lambda Extension is not
+// running.
+func WithLambdaMode(enabled bool) StartOption {
+	return func(c *config) {
+		c.logToStdout = enabled
+	}
+}
+
+// WithSendRetries enables re-sending payloads that are not successfully
+// submitted to the agent.  This will cause the tracer to retry the send at
+// most `retries` times.
+func WithSendRetries(retries int) StartOption {
+	return func(c *config) {
+		c.sendRetries = retries
+	}
+}
+
+// WithPropagator sets an alternative propagator to be used by the tracer.
+func WithPropagator(p Propagator) StartOption {
+	return func(c *config) {
+		c.propagator = p
+	}
+}
+
+// WithServiceName is deprecated. Please use WithService.
+// If you are using an older version and you are upgrading from WithServiceName
+// to WithService, please note that WithService will determine the service name of
+// server and framework integrations.
+func WithServiceName(name string) StartOption {
+	return func(c *config) {
+		c.serviceName = name
+		if globalconfig.ServiceName() != "" {
+			log.Warn("ddtrace/tracer: deprecated config WithServiceName should not be used " +
+				"with `WithService` or `DD_SERVICE`; integration service name will not be set.")
+		}
+		globalconfig.SetServiceName("")
+	}
+}
+
+// WithService sets the default service name for the program.
+func WithService(name string) StartOption {
+	return func(c *config) {
+		c.serviceName = name
+		globalconfig.SetServiceName(c.serviceName)
+	}
+}
+
+// WithGlobalServiceName causes contrib libraries to use the global service name and not any locally defined service name.
+// This is synonymous with `DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED`.
+func WithGlobalServiceName(enabled bool) StartOption {
+	return func(_ *config) {
+		namingschema.SetUseGlobalServiceName(enabled)
+	}
+}
+
+// WithAgentAddr sets the address where the agent is located. The default is
+// localhost:8126. It should contain both host and port.
+func WithAgentAddr(addr string) StartOption {
+	return func(c *config) {
+		c.agentURL = &url.URL{
+			Scheme: "http",
+			Host:   addr,
+		}
+	}
+}
+
+// WithEnv sets the environment to which all traces started by the tracer will be submitted.
+// The default value is the environment variable DD_ENV, if it is set.
+func WithEnv(env string) StartOption {
+	return func(c *config) {
+		c.env = env
+	}
+}
+
+// WithServiceMapping maps the service name "from" to the service name "to".
+// This option is case sensitive and can be used multiple times.
+func WithServiceMapping(from, to string) StartOption {
+	return func(c *config) {
+		if c.serviceMappings == nil {
+			c.serviceMappings = make(map[string]string)
+		}
+		c.serviceMappings[from] = to
+	}
+}
+
+// WithPeerServiceDefaults sets default calculation for peer.service.
+func WithPeerServiceDefaults(enabled bool) StartOption {
+	// TODO: add link to public docs
+	return func(c *config) {
+		c.peerServiceDefaultsEnabled = enabled
+	}
+}
+
+// WithPeerServiceMapping maps the value "from" of the peer.service tag to the value "to".
+func WithPeerServiceMapping(from, to string) StartOption {
+	return func(c *config) {
+		if c.peerServiceMappings == nil {
+			c.peerServiceMappings = make(map[string]string)
+		}
+		c.peerServiceMappings[from] = to
+	}
+}
+
+// WithGlobalTag sets a key/value pair which will be set as a tag on all spans
+// created by tracer. This option may be used multiple times.
+func WithGlobalTag(k string, v interface{}) StartOption {
+	return func(c *config) {
+		if c.globalTags == nil {
+			c.globalTags = make(map[string]interface{})
+		}
+		c.globalTags[k] = v
+	}
+}
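+
+// For illustration only, a minimal sketch of how a caller typically combines
+// these StartOptions when starting the tracer (the service, environment and
+// tag values below are placeholders):
+//
+//	package main
+//
+//	import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+//
+//	func main() {
+//		tracer.Start(
+//			tracer.WithService("checkout"),
+//			tracer.WithEnv("staging"),
+//			tracer.WithGlobalTag("team", "payments"),
+//		)
+//		defer tracer.Stop()
+//	}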
+
+// WithSampler sets the given sampler to be used with the tracer. By default
+// an all-permissive sampler is used.
+func WithSampler(s Sampler) StartOption {
+	return func(c *config) {
+		c.sampler = s
+	}
+}
+
+// WithHTTPRoundTripper is deprecated. Please consider using WithHTTPClient instead.
+// The function allows customizing the underlying HTTP transport for emitting spans.
+func WithHTTPRoundTripper(r http.RoundTripper) StartOption {
+	return WithHTTPClient(&http.Client{
+		Transport: r,
+		Timeout:   defaultHTTPTimeout,
+	})
+}
+
+// WithHTTPClient specifies the HTTP client to use when emitting spans to the agent.
+func WithHTTPClient(client *http.Client) StartOption {
+	return func(c *config) {
+		c.httpClient = client
+	}
+}
+
+// WithUDS configures the HTTP client to dial the Datadog Agent via the specified Unix Domain Socket path.
+func WithUDS(socketPath string) StartOption {
+	return func(c *config) {
+		c.agentURL = &url.URL{
+			Scheme: "unix",
+			Path:   socketPath,
+		}
+	}
+}
+
+// WithAnalytics allows specifying whether Trace Search & Analytics should be enabled
+// for integrations.
+func WithAnalytics(on bool) StartOption {
+	return func(cfg *config) {
+		if on {
+			globalconfig.SetAnalyticsRate(1.0)
+		} else {
+			globalconfig.SetAnalyticsRate(math.NaN())
+		}
+	}
+}
+
+// WithAnalyticsRate sets the global sampling rate for sampling APM events.
+func WithAnalyticsRate(rate float64) StartOption {
+	return func(_ *config) {
+		if rate >= 0.0 && rate <= 1.0 {
+			globalconfig.SetAnalyticsRate(rate)
+		} else {
+			globalconfig.SetAnalyticsRate(math.NaN())
+		}
+	}
+}
+
+// WithRuntimeMetrics enables automatic collection of runtime metrics every 10 seconds.
+func WithRuntimeMetrics() StartOption {
+	return func(cfg *config) {
+		cfg.runtimeMetrics = true
+	}
+}
+
+// WithDogstatsdAddress specifies the address to connect to for sending metrics to the Datadog
+// Agent. It should be a "host:port" string, or the path to a unix domain socket. If not set, it
+// attempts to determine the address of the statsd service according to the following rules:
+//  1. Look for /var/run/datadog/dsd.socket and use it if present. If not, continue to #2.
+//  2. The host is determined by DD_AGENT_HOST, and defaults to "localhost".
+//  3. The port is retrieved from the agent. If not present, it is determined by DD_DOGSTATSD_PORT, and defaults to 8125.
+//
+// This option is in effect when WithRuntimeMetrics is enabled.
+func WithDogstatsdAddress(addr string) StartOption {
+	return func(cfg *config) {
+		cfg.dogstatsdAddr = addr
+	}
+}
+
+// WithSamplingRules specifies the sampling rates to apply to spans based on the
+// provided rules.
+func WithSamplingRules(rules []SamplingRule) StartOption {
+	return func(cfg *config) {
+		for _, rule := range rules {
+			if rule.ruleType == SamplingRuleSpan {
+				cfg.spanRules = append(cfg.spanRules, rule)
+			} else {
+				cfg.traceRules = append(cfg.traceRules, rule)
+			}
+		}
+	}
+}
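+
+// For illustration only, a minimal sketch of passing rules programmatically
+// (the service and operation names are placeholders; ServiceRule and
+// SpanNameServiceMPSRule are defined in rules_sampler.go):
+//
+//	tracer.Start(tracer.WithSamplingRules([]tracer.SamplingRule{
+//		// Keep 20% of traces from the "billing" service.
+//		tracer.ServiceRule("billing", 0.2),
+//		// Keep every "db.query" span from any service, capped at 50 spans/s.
+//		tracer.SpanNameServiceMPSRule("db.query", "*", 1.0, 50),
+//	}))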
+
+// WithServiceVersion specifies the version of the service that is running. This will
+// be included in spans from this service in the "version" tag, provided that
+// span service name and config service name match. Do NOT use with WithUniversalVersion.
+func WithServiceVersion(version string) StartOption {
+	return func(cfg *config) {
+		cfg.version = version
+		cfg.universalVersion = false
+	}
+}
+
+// WithUniversalVersion specifies the version of the service that is running, and will be applied to all spans,
+// regardless of whether span service name and config service name match.
+// See: WithService, WithServiceVersion. Do NOT use with WithServiceVersion.
+func WithUniversalVersion(version string) StartOption {
+	return func(c *config) {
+		c.version = version
+		c.universalVersion = true
+	}
+}
+
+// WithHostname allows specifying the hostname with which to mark outgoing traces.
+func WithHostname(name string) StartOption {
+	return func(c *config) {
+		c.hostname = name
+	}
+}
+
+// WithTraceEnabled allows specifying whether tracing will be enabled
+func WithTraceEnabled(enabled bool) StartOption {
+	return func(c *config) {
+		c.enabled = enabled
+	}
+}
+
+// WithLogStartup allows enabling or disabling the startup log.
+func WithLogStartup(enabled bool) StartOption {
+	return func(c *config) {
+		c.logStartup = enabled
+	}
+}
+
+// WithProfilerCodeHotspots enables the code hotspots integration between the
+// tracer and profiler. This is done by automatically attaching pprof labels
+// called "span id" and "local root span id" when new spans are created. You
+// should not use these label names in your own code when this is enabled. The
+// enabled value defaults to the value of the
+// DD_PROFILING_CODE_HOTSPOTS_COLLECTION_ENABLED env variable or true.
+func WithProfilerCodeHotspots(enabled bool) StartOption {
+	return func(c *config) {
+		c.profilerHotspots = enabled
+	}
+}
+
+// WithProfilerEndpoints enables the endpoints integration between the tracer
+// and profiler. This is done by automatically attaching a pprof label called
+// "trace endpoint" holding the resource name of the top-level service span if
+// its type is "http", "rpc" or "" (default). You should not use this label
+// name in your own code when this is enabled. The enabled value defaults to
+// the value of the DD_PROFILING_ENDPOINT_COLLECTION_ENABLED env variable or
+// true.
+func WithProfilerEndpoints(enabled bool) StartOption {
+	return func(c *config) {
+		c.profilerEndpoints = enabled
+	}
+}
+
+// StartSpanOption is a configuration option for StartSpan. It is aliased in order
+// to help godoc group all the functions returning it together. It is considered
+// more correct to refer to it by its original type, ddtrace.StartSpanOption.
+type StartSpanOption = ddtrace.StartSpanOption
+
+// Tag sets the given key/value pair as a tag on the started Span.
+func Tag(k string, v interface{}) StartSpanOption {
+	return func(cfg *ddtrace.StartSpanConfig) {
+		if cfg.Tags == nil {
+			cfg.Tags = map[string]interface{}{}
+		}
+		cfg.Tags[k] = v
+	}
+}
+
+// ServiceName sets the given service name on the started span. For example "http.server".
+func ServiceName(name string) StartSpanOption {
+	return Tag(ext.ServiceName, name)
+}
+
+// ResourceName sets the given resource name on the started span. A resource could
+// be an SQL query, a URL, an RPC method or something else.
+func ResourceName(name string) StartSpanOption {
+	return Tag(ext.ResourceName, name)
+}
+
+// SpanType sets the given span type on the started span. Some examples in the case of
+// the Datadog APM product could be "web", "db" or "cache".
+func SpanType(name string) StartSpanOption {
+	return Tag(ext.SpanType, name)
+}
+
+var measuredTag = Tag(keyMeasured, 1)
+
+// Measured marks this span to be measured for metrics and stats calculations.
+func Measured() StartSpanOption {
+	// cache a global instance of this tag: saves one alloc/call
+	return measuredTag
+}
+
+// WithSpanID sets the SpanID on the started span, instead of using a random number.
+// If there is no parent Span (e.g. from ChildOf), then the TraceID will also be set to the
+// value given here.
+func WithSpanID(id uint64) StartSpanOption {
+	return func(cfg *ddtrace.StartSpanConfig) {
+		cfg.SpanID = id
+	}
+}
+
+// ChildOf tells StartSpan to use the given span context as a parent for the
+// created span.
+func ChildOf(ctx ddtrace.SpanContext) StartSpanOption {
+	return func(cfg *ddtrace.StartSpanConfig) {
+		cfg.Parent = ctx
+	}
+}
+
+// withContext associates the ctx with the span.
+func withContext(ctx context.Context) StartSpanOption {
+	return func(cfg *ddtrace.StartSpanConfig) {
+		cfg.Context = ctx
+	}
+}
+
+// StartTime sets a custom time as the start time for the created span. By
+// default a span is started using the creation time.
+func StartTime(t time.Time) StartSpanOption {
+	return func(cfg *ddtrace.StartSpanConfig) {
+		cfg.StartTime = t
+	}
+}
+
+// AnalyticsRate sets a custom analytics rate for a span. It decides the percentage
+// of events that will be picked up by the App Analytics product. It represents a
+// float64 between 0 and 1, where 0.5 would represent 50% of events.
+func AnalyticsRate(rate float64) StartSpanOption {
+	if math.IsNaN(rate) {
+		return func(cfg *ddtrace.StartSpanConfig) {}
+	}
+	return Tag(ext.EventSampleRate, rate)
+}
+
+// FinishOption is a configuration option for FinishSpan. It is aliased in order
+// to help godoc group all the functions returning it together. It is considered
+// more correct to refer to it by its original type, ddtrace.FinishOption.
+type FinishOption = ddtrace.FinishOption
+
+// FinishTime sets the given time as the finishing time for the span. By default,
+// the current time is used.
+func FinishTime(t time.Time) FinishOption {
+	return func(cfg *ddtrace.FinishConfig) {
+		cfg.FinishTime = t
+	}
+}
+
+// WithError marks the span as having had an error. It uses the information from
+// err to set tags such as the error message, error type and stack trace. It has
+// no effect if the error is nil.
+func WithError(err error) FinishOption {
+	return func(cfg *ddtrace.FinishConfig) {
+		cfg.Error = err
+	}
+}
+
+// NoDebugStack prevents any error presented using the WithError finishing option
+// from generating a stack trace. This is useful in situations where errors are frequent
+// and performance is critical.
+func NoDebugStack() FinishOption {
+	return func(cfg *ddtrace.FinishConfig) {
+		cfg.NoDebugStack = true
+	}
+}
+
+// StackFrames limits the number of stack frames included into erroneous spans to n, starting from skip.
+func StackFrames(n, skip uint) FinishOption {
+	if n == 0 {
+		return NoDebugStack()
+	}
+	return func(cfg *ddtrace.FinishConfig) {
+		cfg.StackFrames = n
+		cfg.SkipStackFrames = skip
+	}
+}
+
+// WithHeaderTags enables the integration to attach HTTP request headers as span tags.
+// Warning:
+// Using this feature can risk exposing sensitive data such as authorization tokens to Datadog.
+// Special headers cannot be sub-selected. E.g., an entire Cookie header would be transmitted, without the ability to choose specific cookies.
+func WithHeaderTags(headerAsTags []string) StartOption {
+	return func(c *config) {
+		globalconfig.ClearHeaderTags()
+		for _, h := range headerAsTags {
+			if strings.HasPrefix(h, "x-datadog-") {
+				continue
+			}
+			header, tag := normalizer.HeaderTag(h)
+			globalconfig.SetHeaderTag(header, tag)
+		}
+	}
+}
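+
+// For illustration only, a minimal sketch (the header names are placeholders;
+// each listed header, when seen on a request, is recorded as a span tag by the
+// instrumented integrations):
+//
+//	tracer.Start(tracer.WithHeaderTags([]string{"Content-Type", "X-Request-ID"}))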
+
+// UserMonitoringConfig is used to configure what is used to identify a user.
+// This configuration can be set by combining one or several UserMonitoringOption with a call to SetUser().
+type UserMonitoringConfig struct {
+	PropagateID bool
+	Email       string
+	Name        string
+	Role        string
+	SessionID   string
+	Scope       string
+	Metadata    map[string]string
+}
+
+// UserMonitoringOption represents a function that can be provided as a parameter to SetUser.
+type UserMonitoringOption func(*UserMonitoringConfig)
+
+// WithUserMetadata returns the option setting additional metadata of the authenticated user.
+// This can be used multiple times and the given data will be tracked as `usr.{key}=value`.
+func WithUserMetadata(key, value string) UserMonitoringOption {
+	return func(cfg *UserMonitoringConfig) {
+		cfg.Metadata[key] = value
+	}
+}
+
+// WithUserEmail returns the option setting the email of the authenticated user.
+func WithUserEmail(email string) UserMonitoringOption {
+	return func(cfg *UserMonitoringConfig) {
+		cfg.Email = email
+	}
+}
+
+// WithUserName returns the option setting the name of the authenticated user.
+func WithUserName(name string) UserMonitoringOption {
+	return func(cfg *UserMonitoringConfig) {
+		cfg.Name = name
+	}
+}
+
+// WithUserSessionID returns the option setting the session ID of the authenticated user.
+func WithUserSessionID(sessionID string) UserMonitoringOption {
+	return func(cfg *UserMonitoringConfig) {
+		cfg.SessionID = sessionID
+	}
+}
+
+// WithUserRole returns the option setting the role of the authenticated user.
+func WithUserRole(role string) UserMonitoringOption {
+	return func(cfg *UserMonitoringConfig) {
+		cfg.Role = role
+	}
+}
+
+// WithUserScope returns the option setting the scope (authorizations) of the authenticated user.
+func WithUserScope(scope string) UserMonitoringOption {
+	return func(cfg *UserMonitoringConfig) {
+		cfg.Scope = scope
+	}
+}
+
+// WithPropagation returns the option allowing the user id to be propagated through distributed traces.
+// The user id is base64 encoded and added to the datadog propagated tags header.
+// This option should only be used if you are certain that the user id passed to `SetUser()` does not contain any
+// personally identifiable information or any kind of sensitive data, as it will be leaked to other services.
+func WithPropagation() UserMonitoringOption {
+	return func(cfg *UserMonitoringConfig) {
+		cfg.PropagateID = true
+	}
+}
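+
+// For illustration only, a minimal sketch of combining these options with the
+// package-level SetUser helper (identifiers and values are placeholders):
+//
+//	span := tracer.StartSpan("web.request")
+//	tracer.SetUser(span, "usr-123",
+//		tracer.WithUserEmail("jane@example.com"),
+//		tracer.WithUserRole("admin"),
+//	)
+//	span.Finish()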
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/payload.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/payload.go
new file mode 100644
index 0000000000..2d66ac54af
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/payload.go
@@ -0,0 +1,153 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"bytes"
+	"encoding/binary"
+	"io"
+	"sync/atomic"
+
+	"github.com/tinylib/msgp/msgp"
+)
+
+// payload is a wrapper on top of the msgpack encoder which allows constructing an
+// encoded array by pushing its entries sequentially, one at a time. It basically
+// allows us to encode as we would with a stream, except that the contents of the stream
+// can be read as a slice by the msgpack decoder at any time. It follows the guidelines
+// from the msgpack array spec:
+// https://github.com/msgpack/msgpack/blob/master/spec.md#array-format-family
+//
+// payload implements io.Reader and can be used with the decoder directly. To create
+// a new payload use the newPayload method.
+//
+// payload is not safe for concurrent use.
+//
+// payload is meant to be used only once and eventually dismissed with the
+// single exception of retrying failed flush attempts.
+//
+// ⚠️  Warning!
+//
+// The payload should not be reused for multiple sets of traces.  Resetting the
+// payload for re-use requires the transport to wait for the HTTP package to
+// Close the request body before attempting to re-use it again! This requires
+// additional logic to be in place. See:
+//
+// • https://github.com/golang/go/blob/go1.16/src/net/http/client.go#L136-L138
+// • https://github.com/DataDog/dd-trace-go/pull/475
+// • https://github.com/DataDog/dd-trace-go/pull/549
+// • https://github.com/DataDog/dd-trace-go/pull/976
+type payload struct {
+	// header specifies the first few bytes in the msgpack stream
+	// indicating the type of array (fixarray, array16 or array32)
+	// and the number of items contained in the stream.
+	header []byte
+
+	// off specifies the current read position on the header.
+	off int
+
+	// count specifies the number of items in the stream.
+	count uint32
+
+	// buf holds the sequence of msgpack-encoded items.
+	buf bytes.Buffer
+
+	// reader is used for reading the contents of buf.
+	reader *bytes.Reader
+}
+
+var _ io.Reader = (*payload)(nil)
+
+// newPayload returns a ready to use payload.
+func newPayload() *payload {
+	p := &payload{
+		header: make([]byte, 8),
+		off:    8,
+	}
+	return p
+}
+
+// push pushes a new item into the stream.
+func (p *payload) push(t spanList) error {
+	if err := msgp.Encode(&p.buf, t); err != nil {
+		return err
+	}
+	atomic.AddUint32(&p.count, 1)
+	p.updateHeader()
+	return nil
+}
+
+// itemCount returns the number of items available in the stream.
+func (p *payload) itemCount() int {
+	return int(atomic.LoadUint32(&p.count))
+}
+
+// size returns the payload size in bytes. After the first read the value becomes
+// inaccurate by up to 8 bytes.
+func (p *payload) size() int {
+	return p.buf.Len() + len(p.header) - p.off
+}
+
+// reset sets up the payload to be read a second time. It maintains the
+// underlying byte contents of the buffer. reset should not be used in order to
+// reuse the payload for another set of traces.
+func (p *payload) reset() {
+	p.updateHeader()
+	if p.reader != nil {
+		p.reader.Seek(0, 0)
+	}
+}
+
+// clear empties the payload buffers.
+func (p *payload) clear() {
+	p.buf = bytes.Buffer{}
+	p.reader = nil
+}
+
+// https://github.com/msgpack/msgpack/blob/master/spec.md#array-format-family
+const (
+	msgpackArrayFix byte = 144  // up to 15 items
+	msgpackArray16       = 0xdc // up to 2^16-1 items, followed by size in 2 bytes
+	msgpackArray32       = 0xdd // up to 2^32-1 items, followed by size in 4 bytes
+)
+
+// updateHeader updates the payload header based on the number of items currently
+// present in the stream.
+func (p *payload) updateHeader() {
+	n := uint64(atomic.LoadUint32(&p.count))
+	switch {
+	case n <= 15:
+		p.header[7] = msgpackArrayFix + byte(n)
+		p.off = 7
+	case n <= 1<<16-1:
+		binary.BigEndian.PutUint64(p.header, n) // the count ends up in the last 2 bytes of header
+		p.header[5] = msgpackArray16
+		p.off = 5
+	default: // n <= 1<<32-1
+		binary.BigEndian.PutUint64(p.header, n) // the count ends up in the last 4 bytes of header
+		p.header[3] = msgpackArray32
+		p.off = 3
+	}
+}
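+
+// For illustration, with 10 traces pushed the encoded header read by the
+// transport is the single fixarray byte 0x9a (0x90 + 10); with 300 traces it is
+// the three bytes 0xdc 0x01 0x2c (array16 marker followed by 300 in big-endian).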
+
+// Close implements io.Closer
+func (p *payload) Close() error {
+	return nil
+}
+
+// Read implements io.Reader. It reads from the msgpack-encoded stream.
+func (p *payload) Read(b []byte) (n int, err error) {
+	if p.off < len(p.header) {
+		// reading header
+		n = copy(b, p.header[p.off:])
+		p.off += n
+		return n, nil
+	}
+	if p.reader == nil {
+		p.reader = bytes.NewReader(p.buf.Bytes())
+	}
+	return p.reader.Read(b)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/propagating_tags.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/propagating_tags.go
new file mode 100644
index 0000000000..0d5ddde1f5
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/propagating_tags.go
@@ -0,0 +1,68 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+func (t *trace) hasPropagatingTag(k string) bool {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	_, ok := t.propagatingTags[k]
+	return ok
+}
+
+func (t *trace) propagatingTag(k string) string {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	return t.propagatingTags[k]
+}
+
+// setPropagatingTag sets the key/value pair as a trace propagating tag.
+func (t *trace) setPropagatingTag(key, value string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.setPropagatingTagLocked(key, value)
+}
+
+// setPropagatingTagLocked sets the key/value pair as a trace propagating tag.
+// Not safe for concurrent use, setPropagatingTag should be used instead in that case.
+func (t *trace) setPropagatingTagLocked(key, value string) {
+	if t.propagatingTags == nil {
+		t.propagatingTags = make(map[string]string, 1)
+	}
+	t.propagatingTags[key] = value
+}
+
+// unsetPropagatingTag deletes the key/value pair from the trace's propagated tags.
+func (t *trace) unsetPropagatingTag(key string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	delete(t.propagatingTags, key)
+}
+
+// iteratePropagatingTags allows safe iteration through the propagating tags of a trace.
+// The trace must not be modified during this call, as it is locked for reading.
+//
+// f should return whether or not the iteration should continue.
+func (t *trace) iteratePropagatingTags(f func(k, v string) bool) {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	for k, v := range t.propagatingTags {
+		if !f(k, v) {
+			break
+		}
+	}
+}
+
+func (t *trace) replacePropagatingTags(tags map[string]string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.propagatingTags = tags
+}
+
+func (t *trace) propagatingTagsLen() int {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	return len(t.propagatingTags)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/propagator.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/propagator.go
new file mode 100644
index 0000000000..d2ace0d275
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/propagator.go
@@ -0,0 +1,57 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"errors"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+)
+
+// Propagator implementations should be able to inject and extract
+// SpanContexts into an implementation specific carrier.
+type Propagator interface {
+	// Inject takes the SpanContext and injects it into the carrier.
+	Inject(context ddtrace.SpanContext, carrier interface{}) error
+
+	// Extract returns the SpanContext from the given carrier.
+	Extract(carrier interface{}) (ddtrace.SpanContext, error)
+}
+
+// TextMapWriter allows setting key/value pairs of strings on the underlying
+// data structure. Carriers implementing TextMapWriter can be used with
+// Datadog's TextMapPropagator.
+type TextMapWriter interface {
+	// Set sets the given key/value pair.
+	Set(key, val string)
+}
+
+// TextMapReader allows iterating over sets of key/value pairs. Carriers implementing
+// TextMapReader can be used with Datadog's TextMapPropagator.
+type TextMapReader interface {
+	// ForeachKey iterates over all keys that exist in the underlying
+	// carrier. It takes a callback function which will be called
+	// using all key/value pairs as arguments. ForeachKey will return
+	// the first error returned by the handler.
+	ForeachKey(handler func(key, val string) error) error
+}
+
+var (
+	// ErrInvalidCarrier is returned when the carrier provided to the propagator
+	// does not implement the correct interfaces.
+	ErrInvalidCarrier = errors.New("invalid carrier")
+
+	// ErrInvalidSpanContext is returned when the span context found in the
+	// carrier is not of the expected type.
+	ErrInvalidSpanContext = errors.New("invalid span context")
+
+	// ErrSpanContextCorrupted is returned when there was a problem parsing
+	// the information found in the carrier.
+	ErrSpanContextCorrupted = errors.New("span context corrupted")
+
+	// ErrSpanContextNotFound represents missing information in the given carrier.
+	ErrSpanContextNotFound = errors.New("span context not found")
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand.go
new file mode 100644
index 0000000000..ecedc3ed10
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand.go
@@ -0,0 +1,56 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	cryptorand "crypto/rand"
+	"math"
+	"math/big"
+	"math/rand"
+	"sync"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// random holds a thread-safe source of random numbers.
+var random *rand.Rand
+
+func init() {
+	var seed int64
+	n, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64))
+	if err == nil {
+		seed = n.Int64()
+	} else {
+		log.Warn("cannot generate random seed: %v; using current time", err)
+		seed = time.Now().UnixNano()
+	}
+	random = rand.New(&safeSource{
+		source: rand.NewSource(seed),
+	})
+}
+
+// safeSource holds a thread-safe implementation of rand.Source64.
+type safeSource struct {
+	source rand.Source
+	sync.Mutex
+}
+
+func (rs *safeSource) Int63() int64 {
+	rs.Lock()
+	n := rs.source.Int63()
+	rs.Unlock()
+
+	return n
+}
+
+func (rs *safeSource) Uint64() uint64 { return uint64(rs.Int63()) }
+
+func (rs *safeSource) Seed(seed int64) {
+	rs.Lock()
+	rs.source.Seed(seed)
+	rs.Unlock()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rules_sampler.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rules_sampler.go
new file mode 100644
index 0000000000..cf5d200d96
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rules_sampler.go
@@ -0,0 +1,593 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
+
+	"golang.org/x/time/rate"
+)
+
+// rulesSampler holds instances of trace sampler and single span sampler, that are configured with the given set of rules.
+type rulesSampler struct {
+	// traceRulesSampler samples trace spans based on a user-defined set of rules and may impact the sampling decision of the trace.
+	traces *traceRulesSampler
+
+	// singleSpanRulesSampler samples individual spans based on a separate user-defined set of rules and
+	// cannot impact the trace sampling decision.
+	spans *singleSpanRulesSampler
+}
+
+// newRulesSampler configures a *rulesSampler instance using the given set of rules.
+// Rules are split between trace and single span sampling rules according to their type.
+// Such rules are user-defined through environment variable or WithSamplingRules option.
+// Invalid rules or environment variable values are tolerated, by logging warnings and then ignoring them.
+func newRulesSampler(traceRules, spanRules []SamplingRule) *rulesSampler {
+	return &rulesSampler{
+		traces: newTraceRulesSampler(traceRules),
+		spans:  newSingleSpanRulesSampler(spanRules),
+	}
+}
+
+func (r *rulesSampler) SampleTrace(s *span) bool { return r.traces.apply(s) }
+
+func (r *rulesSampler) SampleSpan(s *span) bool { return r.spans.apply(s) }
+
+func (r *rulesSampler) HasSpanRules() bool { return r.spans.enabled() }
+
+func (r *rulesSampler) TraceRateLimit() (float64, bool) { return r.traces.limit() }
+
+// SamplingRule is used for applying sampling rates to spans that match
+// the service name, operation name or both.
+// For basic usage, consider using the helper functions ServiceRule, NameRule, etc.
+type SamplingRule struct {
+	// Service specifies the regex pattern that a span service name must match.
+	Service *regexp.Regexp
+
+	// Name specifies the regex pattern that a span operation name must match.
+	Name *regexp.Regexp
+
+	// Rate specifies the sampling rate that should be applied to spans that match
+	// service and/or name of the rule.
+	Rate float64
+
+	// MaxPerSecond specifies max number of spans per second that can be sampled per the rule.
+	// If not specified, the default is no limit.
+	MaxPerSecond float64
+
+	ruleType     SamplingRuleType
+	exactService string
+	exactName    string
+	limiter      *rateLimiter
+}
+
+// match returns true when the span's details match all the expected values in the rule.
+func (sr *SamplingRule) match(s *span) bool {
+	if sr.Service != nil && !sr.Service.MatchString(s.Service) {
+		return false
+	} else if sr.exactService != "" && sr.exactService != s.Service {
+		return false
+	}
+	if sr.Name != nil && !sr.Name.MatchString(s.Name) {
+		return false
+	} else if sr.exactName != "" && sr.exactName != s.Name {
+		return false
+	}
+	return true
+}
+
+// SamplingRuleType represents a type of sampling rule spans are matched against.
+type SamplingRuleType int
+
+const (
+	// SamplingRuleTrace specifies a sampling rule that applies to the entire trace if any spans satisfy the criteria.
+	// If a sampling rule is of type SamplingRuleTrace, such a rule determines the sampling rate to apply
+	// to trace spans. If a span matches that rule, it will impact the trace sampling decision.
+	SamplingRuleTrace = iota
+
+	// SamplingRuleSpan specifies a sampling rule that applies to a single span without affecting the entire trace.
+	// If a sampling rule is of type SamplingRuleSpan, such a rule determines the sampling rate to apply
+	// to individual spans. If a span matches a rule, it will NOT impact the trace sampling decision.
+	// In the case that a trace is dropped and thus not sent to the Agent, spans kept on account
+	// of matching SamplingRuleSpan rules must be conveyed separately.
+	SamplingRuleSpan
+)
+
+func (sr SamplingRuleType) String() string {
+	switch sr {
+	case SamplingRuleTrace:
+		return "trace"
+	case SamplingRuleSpan:
+		return "span"
+	default:
+		return ""
+	}
+}
+
+// ServiceRule returns a SamplingRule that applies the provided sampling rate
+// to spans that match the service name provided.
+func ServiceRule(service string, rate float64) SamplingRule {
+	return SamplingRule{
+		exactService: service,
+		Rate:         rate,
+	}
+}
+
+// NameRule returns a SamplingRule that applies the provided sampling rate
+// to spans that match the operation name provided.
+func NameRule(name string, rate float64) SamplingRule {
+	return SamplingRule{
+		exactName: name,
+		Rate:      rate,
+	}
+}
+
+// NameServiceRule returns a SamplingRule that applies the provided sampling rate
+// to spans matching both the operation and service names provided.
+func NameServiceRule(name string, service string, rate float64) SamplingRule {
+	return SamplingRule{
+		exactService: service,
+		exactName:    name,
+		Rate:         rate,
+	}
+}
+
+// RateRule returns a SamplingRule that applies the provided sampling rate to all spans.
+func RateRule(rate float64) SamplingRule {
+	return SamplingRule{
+		Rate: rate,
+	}
+}
+
+// SpanNameServiceRule returns a SamplingRule of type SamplingRuleSpan that applies
+// the provided sampling rate to all spans matching the operation and service name glob patterns provided.
+// Operation and service fields must be valid glob patterns.
+func SpanNameServiceRule(name, service string, rate float64) SamplingRule {
+	return SamplingRule{
+		Service:   globMatch(service),
+		Name:      globMatch(name),
+		Rate:      rate,
+		ruleType:  SamplingRuleSpan,
+		exactName: name,
+		limiter:   newSingleSpanRateLimiter(0),
+	}
+}
+
+// SpanNameServiceMPSRule returns a SamplingRule of type SamplingRuleSpan that applies
+// the provided sampling rate to all spans matching the operation and service name glob patterns
+// up to the max number of spans per second that can be sampled.
+// Operation and service fields must be valid glob patterns.
+func SpanNameServiceMPSRule(name, service string, rate, limit float64) SamplingRule {
+	return SamplingRule{
+		Service:      globMatch(service),
+		Name:         globMatch(name),
+		MaxPerSecond: limit,
+		Rate:         rate,
+		ruleType:     SamplingRuleSpan,
+		exactName:    name,
+		limiter:      newSingleSpanRateLimiter(limit),
+	}
+}
+
+// traceRulesSampler allows a user-defined list of rules to apply to traces.
+// These rules can match based on the span's Service, Name or both.
+// When making a sampling decision, the rules are checked in order until
+// a match is found.
+// If a match is found, the rate from that rule is used.
+// If no match is found, and the DD_TRACE_SAMPLE_RATE environment variable
+// was set to a valid rate, that value is used.
+// Otherwise, the rules sampler doesn't apply to the span, and the decision
+// is passed to the priority sampler.
+//
+// The rate is used to determine if the span should be sampled, but an upper
+// limit can be defined using the DD_TRACE_RATE_LIMIT environment variable.
+// Its value is the number of spans to sample per second.
+// Spans that matched the rules but exceeded the rate limit are not sampled.
+type traceRulesSampler struct {
+	rules      []SamplingRule // the rules to match spans with
+	globalRate float64        // a rate to apply when no rules match a span
+	limiter    *rateLimiter   // used to limit the volume of spans sampled
+}
+
+// newTraceRulesSampler configures a *traceRulesSampler instance using the given set of rules.
+// Invalid rules or environment variable values are tolerated, by logging warnings and then ignoring them.
+func newTraceRulesSampler(rules []SamplingRule) *traceRulesSampler {
+	return &traceRulesSampler{
+		rules:      rules,
+		globalRate: globalSampleRate(),
+		limiter:    newRateLimiter(),
+	}
+}
+
+// globalSampleRate returns the sampling rate found in the DD_TRACE_SAMPLE_RATE environment variable.
+// If it is invalid or not within the 0-1 range, NaN is returned.
+func globalSampleRate() float64 {
+	defaultRate := math.NaN()
+	v := os.Getenv("DD_TRACE_SAMPLE_RATE")
+	if v == "" {
+		return defaultRate
+	}
+	r, err := strconv.ParseFloat(v, 64)
+	if err != nil {
+		log.Warn("ignoring DD_TRACE_SAMPLE_RATE: error: %v", err)
+		return defaultRate
+	}
+	if r >= 0.0 && r <= 1.0 {
+		return r
+	}
+	log.Warn("ignoring DD_TRACE_SAMPLE_RATE: out of range %f", r)
+	return defaultRate
+}
+
+func (rs *traceRulesSampler) enabled() bool {
+	return len(rs.rules) > 0 || !math.IsNaN(rs.globalRate)
+}
+
+// apply uses the sampling rules to determine the sampling rate for the
+// provided span. If the rules don't match, and a default rate hasn't been
+// set using DD_TRACE_SAMPLE_RATE, then it returns false and the span is not
+// modified.
+func (rs *traceRulesSampler) apply(span *span) bool {
+	if !rs.enabled() {
+		// short path when disabled
+		return false
+	}
+
+	var matched bool
+	rate := rs.globalRate
+	for _, rule := range rs.rules {
+		if rule.match(span) {
+			matched = true
+			rate = rule.Rate
+			break
+		}
+	}
+	if !matched && math.IsNaN(rate) {
+		// no matching rule or global rate, so we want to fall back
+		// to priority sampling
+		return false
+	}
+
+	rs.applyRule(span, rate, time.Now())
+	return true
+}
+
+func (rs *traceRulesSampler) applyRule(span *span, rate float64, now time.Time) {
+	span.SetTag(keyRulesSamplerAppliedRate, rate)
+	if !sampledByRate(span.TraceID, rate) {
+		span.setSamplingPriority(ext.PriorityUserReject, samplernames.RuleRate)
+		return
+	}
+
+	sampled, rate := rs.limiter.allowOne(now)
+	if sampled {
+		span.setSamplingPriority(ext.PriorityUserKeep, samplernames.RuleRate)
+	} else {
+		span.setSamplingPriority(ext.PriorityUserReject, samplernames.RuleRate)
+	}
+	span.SetTag(keyRulesSamplerLimiterRate, rate)
+}
+
+// limit returns the rate limit set in the rules sampler, controlled by DD_TRACE_RATE_LIMIT, and
+// true if rules sampling is enabled. If not present it returns math.NaN() and false.
+func (rs *traceRulesSampler) limit() (float64, bool) {
+	if rs.enabled() {
+		return float64(rs.limiter.limiter.Limit()), true
+	}
+	return math.NaN(), false
+}
+
+// defaultRateLimit specifies the default trace rate limit used when DD_TRACE_RATE_LIMIT is not set.
+const defaultRateLimit = 100.0
+
+// newRateLimiter returns a rate limiter which restricts the number of traces sampled per second.
+// The limit is DD_TRACE_RATE_LIMIT if set, `defaultRateLimit` otherwise.
+func newRateLimiter() *rateLimiter {
+	limit := defaultRateLimit
+	v := os.Getenv("DD_TRACE_RATE_LIMIT")
+	if v != "" {
+		l, err := strconv.ParseFloat(v, 64)
+		if err != nil {
+			log.Warn("DD_TRACE_RATE_LIMIT invalid, using default value %f: %v", limit, err)
+		} else if l < 0.0 {
+			log.Warn("DD_TRACE_RATE_LIMIT negative, using default value %f", limit)
+		} else {
+			// override the default limit
+			limit = l
+		}
+	}
+	return &rateLimiter{
+		limiter:  rate.NewLimiter(rate.Limit(limit), int(math.Ceil(limit))),
+		prevTime: time.Now(),
+	}
+}
+
+// singleSpanRulesSampler allows a user-defined list of rules to apply to spans
+// to sample single spans.
+// These rules match based on the span's Service and Name. If an empty value is supplied
+// to either the Service or Name field, it will default to "*", allowing all.
+// When making a sampling decision, the rules are checked in order until
+// a match is found.
+// If a match is found, the rate from that rule is used.
+// If no match is found, no changes or further sampling is applied to the spans.
+// The rate is used to determine if the span should be sampled, but an upper
+// limit can be defined using the max_per_second field when supplying the rule.
+// If max_per_second is absent from the rule, the default is to allow all.
+// Its value is the maximum number of spans to sample per second.
+// Spans that matched the rules but exceeded the rate limit are not sampled.
+type singleSpanRulesSampler struct {
+	rules []SamplingRule // the rules to match spans with
+}
+
+// newSingleSpanRulesSampler configures a *singleSpanRulesSampler instance using the given set of rules.
+// Invalid rules or environment variable values are tolerated, by logging warnings and then ignoring them.
+func newSingleSpanRulesSampler(rules []SamplingRule) *singleSpanRulesSampler {
+	return &singleSpanRulesSampler{
+		rules: rules,
+	}
+}
+
+func (rs *singleSpanRulesSampler) enabled() bool {
+	return len(rs.rules) > 0
+}
+
+// apply uses the sampling rules to determine the sampling rate for the
+// provided span. If the rules don't match, then it returns false and the span is not
+// modified.
+func (rs *singleSpanRulesSampler) apply(span *span) bool {
+	for _, rule := range rs.rules {
+		if rule.match(span) {
+			rate := rule.Rate
+			span.setMetric(keyRulesSamplerAppliedRate, rate)
+			if !sampledByRate(span.SpanID, rate) {
+				return false
+			}
+			var sampled bool
+			if rule.limiter != nil {
+				sampled, rate = rule.limiter.allowOne(nowTime())
+				if !sampled {
+					return false
+				}
+			}
+			span.setMetric(keySpanSamplingMechanism, float64(samplernames.SingleSpan))
+			span.setMetric(keySingleSpanSamplingRuleRate, rate)
+			if rule.MaxPerSecond != 0 {
+				span.setMetric(keySingleSpanSamplingMPS, rule.MaxPerSecond)
+			}
+			return true
+		}
+	}
+	return false
+}
+
+// rateLimiter is a wrapper on top of golang.org/x/time/rate which implements a rate limiter but also
+// returns the effective rate of allowance.
+type rateLimiter struct {
+	limiter *rate.Limiter
+
+	mu          sync.Mutex // guards below fields
+	prevTime    time.Time  // time at which prevAllowed and prevSeen were set
+	allowed     float64    // number of spans allowed in the current period
+	seen        float64    // number of spans seen in the current period
+	prevAllowed float64    // number of spans allowed in the previous period
+	prevSeen    float64    // number of spans seen in the previous period
+}
+
+// allowOne returns the rate limiter's decision to allow the span to be sampled, and the
+// effective rate at the time it is called. The effective rate is computed by averaging the rate
+// for the previous second with the current rate
+func (r *rateLimiter) allowOne(now time.Time) (bool, float64) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if d := now.Sub(r.prevTime); d >= time.Second {
+		// enough time has passed to reset the counters
+		if d.Truncate(time.Second) == time.Second && r.seen > 0 {
+			// exactly one second, so update prev
+			r.prevAllowed = r.allowed
+			r.prevSeen = r.seen
+		} else {
+			// more than one second, so reset previous rate
+			r.prevAllowed = 0
+			r.prevSeen = 0
+		}
+		r.prevTime = now
+		r.allowed = 0
+		r.seen = 0
+	}
+
+	r.seen++
+	var sampled bool
+	if r.limiter.AllowN(now, 1) {
+		r.allowed++
+		sampled = true
+	}
+	er := (r.prevAllowed + r.allowed) / (r.prevSeen + r.seen)
+	return sampled, er
+}
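+
+// For illustration: if 5 of 10 spans were allowed during the previous second
+// and 2 of 4 have been allowed so far in the current one, allowOne reports an
+// effective rate of (5+2)/(10+4) = 0.5.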
+
+// newSingleSpanRateLimiter returns a rate limiter which restricts the number of single spans sampled per second.
+// This defaults to infinite, allow all behaviour. The MaxPerSecond value of the rule may override the default.
+func newSingleSpanRateLimiter(mps float64) *rateLimiter {
+	limit := math.MaxFloat64
+	if mps > 0 {
+		limit = mps
+	}
+	return &rateLimiter{
+		limiter:  rate.NewLimiter(rate.Limit(limit), int(math.Ceil(limit))),
+		prevTime: time.Now(),
+	}
+}
+
+// globMatch compiles pattern string into glob format, i.e. regular expressions with only '?'
+// and '*' treated as regex metacharacters.
+func globMatch(pattern string) *regexp.Regexp {
+	if pattern == "" {
+		return regexp.MustCompile("^.*$")
+	}
+	// escaping regex characters
+	pattern = regexp.QuoteMeta(pattern)
+	// replacing '?' and '*' with regex characters
+	pattern = strings.Replace(pattern, "\\?", ".", -1)
+	pattern = strings.Replace(pattern, "\\*", ".*", -1)
+	// pattern must match an entire string
+	return regexp.MustCompile(fmt.Sprintf("^%s$", pattern))
+}
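+
+// For illustration, globMatch("web.*") compiles to the regular expression
+// ^web\..*$, and globMatch("") matches any string via ^.*$.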
+
+// samplingRulesFromEnv parses sampling rules from the DD_TRACE_SAMPLING_RULES,
+// DD_SPAN_SAMPLING_RULES and DD_SPAN_SAMPLING_RULES_FILE environment variables.
+func samplingRulesFromEnv() (trace, span []SamplingRule, err error) {
+	var errs []string
+	defer func() {
+		if len(errs) != 0 {
+			err = fmt.Errorf("\n\t%s", strings.Join(errs, "\n\t"))
+		}
+	}()
+	rulesFromEnv := os.Getenv("DD_TRACE_SAMPLING_RULES")
+	if rulesFromEnv != "" {
+		trace, err = unmarshalSamplingRules([]byte(rulesFromEnv), SamplingRuleTrace)
+		if err != nil {
+			errs = append(errs, err.Error())
+		}
+	}
+	span, err = unmarshalSamplingRules([]byte(os.Getenv("DD_SPAN_SAMPLING_RULES")), SamplingRuleSpan)
+	if err != nil {
+		errs = append(errs, err.Error())
+	}
+	rulesFile := os.Getenv("DD_SPAN_SAMPLING_RULES_FILE")
+	if len(span) != 0 {
+		if rulesFile != "" {
+			log.Warn("DIAGNOSTICS Error(s): DD_SPAN_SAMPLING_RULES is available and will take precedence over DD_SPAN_SAMPLING_RULES_FILE")
+		}
+		return trace, span, err
+	}
+	if rulesFile != "" {
+		rulesFromEnvFile, err := os.ReadFile(rulesFile)
+		if err != nil {
+			errs = append(errs, fmt.Sprintf("Couldn't read file from DD_SPAN_SAMPLING_RULES_FILE: %v", err))
+		}
+		span, err = unmarshalSamplingRules(rulesFromEnvFile, SamplingRuleSpan)
+		if err != nil {
+			errs = append(errs, err.Error())
+		}
+	}
+	return trace, span, err
+}
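+
+// For illustration only, a minimal sketch of the JSON these variables accept
+// (field names follow the struct in unmarshalSamplingRules below; the service
+// and operation names are placeholders):
+//
+//	export DD_TRACE_SAMPLING_RULES='[{"service": "billing", "sample_rate": 0.2}]'
+//	export DD_SPAN_SAMPLING_RULES='[{"service": "billing", "name": "db.query", "sample_rate": 1.0, "max_per_second": 50}]'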
+
+// unmarshalSamplingRules unmarshals JSON from b and returns the sampling rules found, attributing
+// the given spanType to them. If any errors occur, they are returned.
+func unmarshalSamplingRules(b []byte, spanType SamplingRuleType) ([]SamplingRule, error) {
+	if len(b) == 0 {
+		return nil, nil
+	}
+	var jsonRules []struct {
+		Service      string      `json:"service"`
+		Name         string      `json:"name"`
+		Rate         json.Number `json:"sample_rate"`
+		MaxPerSecond float64     `json:"max_per_second"`
+	}
+	err := json.Unmarshal(b, &jsonRules)
+	if err != nil {
+		return nil, fmt.Errorf("error unmarshalling JSON: %v", err)
+	}
+	rules := make([]SamplingRule, 0, len(jsonRules))
+	var errs []string
+	for i, v := range jsonRules {
+		if v.Rate == "" {
+			if spanType == SamplingRuleSpan {
+				v.Rate = "1"
+			} else {
+				errs = append(errs, fmt.Sprintf("at index %d: rate not provided", i))
+				continue
+			}
+		}
+		rate, err := v.Rate.Float64()
+		if err != nil {
+			errs = append(errs, fmt.Sprintf("at index %d: %v", i, err))
+			continue
+		}
+		if rate < 0.0 || rate > 1.0 {
+			errs = append(errs, fmt.Sprintf("at index %d: ignoring rule %+v: rate is out of [0.0, 1.0] range", i, v))
+			continue
+		}
+		switch spanType {
+		case SamplingRuleSpan:
+			rules = append(rules, SamplingRule{
+				Service:      globMatch(v.Service),
+				Name:         globMatch(v.Name),
+				Rate:         rate,
+				MaxPerSecond: v.MaxPerSecond,
+				limiter:      newSingleSpanRateLimiter(v.MaxPerSecond),
+				ruleType:     SamplingRuleSpan,
+			})
+		case SamplingRuleTrace:
+			if v.Rate == "" {
+				errs = append(errs, fmt.Sprintf("at index %d: rate not provided", i))
+				continue
+			}
+			rate, err := v.Rate.Float64()
+			if err != nil {
+				errs = append(errs, fmt.Sprintf("at index %d: %v", i, err))
+				continue
+			}
+			if rate < 0.0 || rate > 1.0 {
+				errs = append(errs, fmt.Sprintf("at index %d: ignoring rule %+v: rate is out of [0.0, 1.0] range", i, v))
+				continue
+			}
+
+			switch {
+			case v.Service != "" && v.Name != "":
+				rules = append(rules, NameServiceRule(v.Name, v.Service, rate))
+			case v.Service != "":
+				rules = append(rules, ServiceRule(v.Service, rate))
+			case v.Name != "":
+				rules = append(rules, NameRule(v.Name, rate))
+			}
+		}
+	}
+	if len(errs) != 0 {
+		return rules, fmt.Errorf("%s", strings.Join(errs, "\n\t"))
+	}
+	return rules, nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (sr *SamplingRule) MarshalJSON() ([]byte, error) {
+	s := struct {
+		Service      string   `json:"service"`
+		Name         string   `json:"name"`
+		Rate         float64  `json:"sample_rate"`
+		Type         string   `json:"type"`
+		MaxPerSecond *float64 `json:"max_per_second,omitempty"`
+	}{}
+	if sr.exactService != "" {
+		s.Service = sr.exactService
+	} else if sr.Service != nil {
+		s.Service = fmt.Sprintf("%s", sr.Service)
+	}
+	if sr.exactName != "" {
+		s.Name = sr.exactName
+	} else if sr.Name != nil {
+		s.Name = fmt.Sprintf("%s", sr.Name)
+	}
+	s.Rate = sr.Rate
+	s.Type = fmt.Sprintf("%v(%d)", sr.ruleType.String(), sr.ruleType)
+	if sr.MaxPerSecond != 0 {
+		s.MaxPerSecond = &sr.MaxPerSecond
+	}
+	return json.Marshal(&s)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sampler.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sampler.go
new file mode 100644
index 0000000000..2c95caf26f
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sampler.go
@@ -0,0 +1,150 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"encoding/json"
+	"io"
+	"math"
+	"sync"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
+)
+
+// Sampler is the generic interface of any sampler. It must be safe for concurrent use.
+type Sampler interface {
+	// Sample returns true if the given span should be sampled.
+	Sample(span Span) bool
+}
+
+// RateSampler is a sampler implementation which randomly selects spans using a
+// provided rate. For example, a rate of 0.75 will permit 75% of the spans.
+// RateSampler implementations should be safe for concurrent use.
+type RateSampler interface {
+	Sampler
+
+	// Rate returns the current sample rate.
+	Rate() float64
+
+	// SetRate sets a new sample rate.
+	SetRate(rate float64)
+}
+
+// rateSampler samples from a sample rate.
+type rateSampler struct {
+	sync.RWMutex
+	rate float64
+}
+
+// NewAllSampler is a short-hand for NewRateSampler(1). It is all-permissive.
+func NewAllSampler() RateSampler { return NewRateSampler(1) }
+
+// NewRateSampler returns an initialized RateSampler with a given sample rate.
+func NewRateSampler(rate float64) RateSampler {
+	return &rateSampler{rate: rate}
+}
+
+// Rate returns the current rate of the sampler.
+func (r *rateSampler) Rate() float64 {
+	r.RLock()
+	defer r.RUnlock()
+	return r.rate
+}
+
+// SetRate sets a new sampling rate.
+func (r *rateSampler) SetRate(rate float64) {
+	r.Lock()
+	r.rate = rate
+	r.Unlock()
+}
+
+// knuthFactor is the constant used for the Knuth hashing, same as the agent.
+const knuthFactor = uint64(1111111111111111111)
+
+// Sample returns true if the given span should be sampled.
+func (r *rateSampler) Sample(spn ddtrace.Span) bool {
+	if r.rate == 1 {
+		// fast path
+		return true
+	}
+	s, ok := spn.(*span)
+	if !ok {
+		return false
+	}
+	r.RLock()
+	defer r.RUnlock()
+	return sampledByRate(s.TraceID, r.rate)
+}
+
+// sampledByRate verifies if the number n should be sampled at the specified
+// rate.
+func sampledByRate(n uint64, rate float64) bool {
+	if rate < 1 {
+		return n*knuthFactor < uint64(rate*math.MaxUint64)
+	}
+	return true
+}
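+
+// For illustration, with rate 0.25 an ID is kept only when (n * knuthFactor)
+// modulo 2^64 falls below 0.25 * 2^64, which keeps roughly a quarter of
+// uniformly distributed IDs while remaining deterministic for a given ID.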
+
+// prioritySampler holds a set of per-service sampling rates and applies
+// them to spans.
+type prioritySampler struct {
+	mu          sync.RWMutex
+	rates       map[string]float64
+	defaultRate float64
+}
+
+func newPrioritySampler() *prioritySampler {
+	return &prioritySampler{
+		rates:       make(map[string]float64),
+		defaultRate: 1.,
+	}
+}
+
+// readRatesJSON will try to read the rates as JSON from the given io.ReadCloser.
+func (ps *prioritySampler) readRatesJSON(rc io.ReadCloser) error {
+	var payload struct {
+		Rates map[string]float64 `json:"rate_by_service"`
+	}
+	if err := json.NewDecoder(rc).Decode(&payload); err != nil {
+		return err
+	}
+	rc.Close()
+	const defaultRateKey = "service:,env:"
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+	ps.rates = payload.Rates
+	if v, ok := ps.rates[defaultRateKey]; ok {
+		ps.defaultRate = v
+		delete(ps.rates, defaultRateKey)
+	}
+	return nil
+}
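+
+// For illustration, the agent's response body has the following shape, where
+// the empty "service:,env:" key carries the default rate (the values are
+// placeholders):
+//
+//	{"rate_by_service": {"service:,env:": 1, "service:web,env:prod": 0.5}}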
+
+// getRate returns the sampling rate to be used for the given span. Callers must
+// guard the span.
+func (ps *prioritySampler) getRate(spn *span) float64 {
+	key := "service:" + spn.Service + ",env:" + spn.Meta[ext.Environment]
+	ps.mu.RLock()
+	defer ps.mu.RUnlock()
+	if rate, ok := ps.rates[key]; ok {
+		return rate
+	}
+	return ps.defaultRate
+}
+
+// apply applies sampling priority to the given span. Caller must ensure it is safe
+// to modify the span.
+func (ps *prioritySampler) apply(spn *span) {
+	rate := ps.getRate(spn)
+	if sampledByRate(spn.TraceID, rate) {
+		spn.setSamplingPriority(ext.PriorityAutoKeep, samplernames.AgentRate)
+	} else {
+		spn.setSamplingPriority(ext.PriorityAutoReject, samplernames.AgentRate)
+	}
+	spn.SetTag(keySamplingPriorityRate, rate)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span.go
new file mode 100644
index 0000000000..1d5e039d70
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span.go
@@ -0,0 +1,724 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+//go:generate msgp -unexported -marshal=false -o=span_msgp.go -tests=false
+
+package tracer
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"os"
+	"reflect"
+	"runtime"
+	"runtime/pprof"
+	rt "runtime/trace"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+	sharedinternal "gopkg.in/DataDog/dd-trace-go.v1/internal"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof"
+
+	"github.com/DataDog/datadog-agent/pkg/obfuscate"
+	"github.com/tinylib/msgp/msgp"
+	"golang.org/x/xerrors"
+)
+
+type (
+	// spanList implements msgp.Encodable on top of a slice of spans.
+	spanList []*span
+
+	// spanLists implements msgp.Decodable on top of a slice of spanList.
+	// This type is only used in tests.
+	spanLists []spanList
+)
+
+var (
+	_ ddtrace.Span   = (*span)(nil)
+	_ msgp.Encodable = (*spanList)(nil)
+	_ msgp.Decodable = (*spanLists)(nil)
+)
+
+// errorConfig holds customization options for setting error tags.
+type errorConfig struct {
+	noDebugStack bool
+	stackFrames  uint
+	stackSkip    uint
+}
+
+// span represents a computation. Callers must call Finish when a span is
+// complete to ensure it's submitted.
+type span struct {
+	sync.RWMutex `msg:"-"` // all fields are protected by this RWMutex
+
+	Name     string             `msg:"name"`              // operation name
+	Service  string             `msg:"service"`           // service name (i.e. "grpc.server", "http.request")
+	Resource string             `msg:"resource"`          // resource name (i.e. "/user?id=123", "SELECT * FROM users")
+	Type     string             `msg:"type"`              // protocol associated with the span (i.e. "web", "db", "cache")
+	Start    int64              `msg:"start"`             // span start time expressed in nanoseconds since epoch
+	Duration int64              `msg:"duration"`          // duration of the span expressed in nanoseconds
+	Meta     map[string]string  `msg:"meta,omitempty"`    // arbitrary map of metadata
+	Metrics  map[string]float64 `msg:"metrics,omitempty"` // arbitrary map of numeric metrics
+	SpanID   uint64             `msg:"span_id"`           // identifier of this span
+	TraceID  uint64             `msg:"trace_id"`          // lower 64-bits of the root span identifier
+	ParentID uint64             `msg:"parent_id"`         // identifier of the span's direct parent
+	Error    int32              `msg:"error"`             // error status of the span; 0 means no errors
+
+	goExecTraced bool         `msg:"-"`
+	noDebugStack bool         `msg:"-"` // disables debug stack traces
+	finished     bool         `msg:"-"` // true if the span has been submitted to a tracer.
+	context      *spanContext `msg:"-"` // span propagation context
+
+	pprofCtxActive  context.Context `msg:"-"` // contains pprof.WithLabel labels to tell the profiler more about this span
+	pprofCtxRestore context.Context `msg:"-"` // contains pprof.WithLabel labels of the parent span (if any) that need to be restored when this span finishes
+
+	taskEnd func() // ends execution tracer (runtime/trace) task, if started
+}
+
+// Context yields the SpanContext for this Span. Note that the return
+// value of Context() is still valid after a call to Finish(). This is
+// called the span context and it is different from Go's context.
+func (s *span) Context() ddtrace.SpanContext { return s.context }
+
+// SetBaggageItem sets a key/value pair as baggage on the span. Baggage items
+// are propagated down to descendant spans and injected cross-process. Use with
+// care as it adds extra load onto your tracing layer.
+func (s *span) SetBaggageItem(key, val string) {
+	s.context.setBaggageItem(key, val)
+}
+
+// BaggageItem gets the value for a baggage item given its key. Returns the
+// empty string if the value isn't found in this Span.
+func (s *span) BaggageItem(key string) string {
+	return s.context.baggageItem(key)
+}
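Illustrative sketch (not part of the vendored patch) of the baggage API defined above, using dd-trace-go's public tracer package; the operation names and baggage key are made-up examples:

package main

import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"

func main() {
	tracer.Start()
	defer tracer.Stop()

	parent := tracer.StartSpan("parent.request")
	parent.SetBaggageItem("request.region", "eu-west-1") // propagated to descendant spans
	child := tracer.StartSpan("child.query", tracer.ChildOf(parent.Context()))
	_ = child.BaggageItem("request.region") // "eu-west-1", inherited from the parent context
	child.Finish()
	parent.Finish()
}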
+
+// SetTag adds a set of key/value metadata to the span.
+func (s *span) SetTag(key string, value interface{}) {
+	s.Lock()
+	defer s.Unlock()
+	// We don't lock spans when flushing, so we could have a data race when
+	// modifying a span as it's being flushed. This protects us against that
+	// race, since spans are marked `finished` before we flush them.
+	if s.finished {
+		return
+	}
+	switch key {
+	case ext.Error:
+		s.setTagError(value, errorConfig{
+			noDebugStack: s.noDebugStack,
+		})
+		return
+	}
+	if v, ok := value.(bool); ok {
+		s.setTagBool(key, v)
+		return
+	}
+	if v, ok := value.(string); ok {
+		if key == ext.ResourceName && s.pprofCtxActive != nil && spanResourcePIISafe(s) {
+			// If the user overrides the resource name for the span,
+			// update the endpoint label for the runtime profilers.
+			//
+			// We don't change s.pprofCtxRestore since that should
+			// stay as the original parent span context regardless
+			// of what we change at a lower level.
+			s.pprofCtxActive = pprof.WithLabels(s.pprofCtxActive, pprof.Labels(traceprof.TraceEndpoint, v))
+			pprof.SetGoroutineLabels(s.pprofCtxActive)
+		}
+		s.setMeta(key, v)
+		return
+	}
+	if v, ok := toFloat64(value); ok {
+		s.setMetric(key, v)
+		return
+	}
+	if v, ok := value.(fmt.Stringer); ok {
+		defer func() {
+			if e := recover(); e != nil {
+				if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() {
+					// If .String() panics due to a nil receiver, we want to catch this
+					// and replace the string value with "<nil>", just as Sprintf does.
+					// Other panics should not be handled.
+					s.setMeta(key, "<nil>")
+					return
+				}
+				panic(e)
+			}
+		}()
+		s.setMeta(key, v.String())
+		return
+	}
+	// not numeric, not a string, not a fmt.Stringer, not a bool, and not an error
+	s.setMeta(key, fmt.Sprint(value))
+}
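Illustrative sketch (not part of the vendored patch) of how SetTag's type switch routes values: strings and bools land in Meta, numeric values in Metrics, and the error key triggers the error tags. The tag keys are arbitrary examples:

package main

import (
	"errors"

	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)

func main() {
	tracer.Start()
	defer tracer.Stop()

	span := tracer.StartSpan("example.operation")
	span.SetTag("customer.tier", "gold")     // string -> Meta["customer.tier"]
	span.SetTag("retry.count", 3)            // numeric -> Metrics["retry.count"] stored as float64
	span.SetTag("cache.hit", true)           // bool -> Meta["cache.hit"] = "true"
	span.SetTag("error", errors.New("boom")) // ext.Error -> Error=1 plus error.msg/type/stack in Meta
	span.Finish()
}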
+
+// setSamplingPriority locks the span, then updates its sampling priority.
+// It also updates the trace's sampling priority.
+func (s *span) setSamplingPriority(priority int, sampler samplernames.SamplerName) {
+	s.Lock()
+	defer s.Unlock()
+	s.setSamplingPriorityLocked(priority, sampler)
+}
+
+// Root returns the root span of the span's trace. The return value shouldn't be
+// nil as long as the root span is valid and not finished.
+func (s *span) Root() Span {
+	return s.root()
+}
+
+// root returns the root span of the span's trace. The return value shouldn't be
+// nil as long as the root span is valid and not finished.
+// As opposed to the public Root method, this one returns the actual span type
+// when internal usage requires it (to avoid type assertions from Root's return
+// value).
+func (s *span) root() *span {
+	if s == nil || s.context == nil {
+		return nil
+	}
+	if s.context.trace == nil {
+		return nil
+	}
+	return s.context.trace.root
+}
+
+// SetUser associates user information with the current trace to which the
+// provided span belongs. The options can be used to tune which bits of user
+// information get monitored. In case of distributed traces,
+// the user id can be propagated across traces using the WithPropagation() option.
+// See https://docs.datadoghq.com/security_platform/application_security/setup_and_configure/?tab=set_user#add-user-information-to-traces
+func (s *span) SetUser(id string, opts ...UserMonitoringOption) {
+	cfg := UserMonitoringConfig{
+		Metadata: make(map[string]string),
+	}
+	for _, fn := range opts {
+		fn(&cfg)
+	}
+	root := s.root()
+	trace := root.context.trace
+	root.Lock()
+	defer root.Unlock()
+	// We don't lock spans when flushing, so we could have a data race when
+	// modifying a span as it's being flushed. This protects us against that
+	// race, since spans are marked `finished` before we flush them.
+	if root.finished {
+		return
+	}
+	if cfg.PropagateID {
+		// Delete usr.id from the tags since _dd.p.usr.id takes precedence
+		delete(root.Meta, keyUserID)
+		idenc := base64.StdEncoding.EncodeToString([]byte(id))
+		trace.setPropagatingTag(keyPropagatedUserID, idenc)
+		s.context.updated = true
+	} else {
+		if trace.hasPropagatingTag(keyPropagatedUserID) {
+			// Unset the propagated user ID so that a propagated user ID coming from upstream won't be propagated anymore.
+			trace.unsetPropagatingTag(keyPropagatedUserID)
+			s.context.updated = true
+		}
+		delete(root.Meta, keyPropagatedUserID)
+	}
+
+	usrData := map[string]string{
+		keyUserID:        id,
+		keyUserEmail:     cfg.Email,
+		keyUserName:      cfg.Name,
+		keyUserScope:     cfg.Scope,
+		keyUserRole:      cfg.Role,
+		keyUserSessionID: cfg.SessionID,
+	}
+	for k, v := range cfg.Metadata {
+		usrData[fmt.Sprintf("usr.%s", k)] = v
+	}
+	for k, v := range usrData {
+		if v != "" {
+			// setMeta is used since the span is already locked
+			root.setMeta(k, v)
+		}
+	}
+}
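Illustrative sketch (not part of the vendored patch) of user monitoring on a trace. It assumes a package-level tracer.SetUser helper plus WithUserEmail/WithPropagation option constructors mirroring the UserMonitoringConfig fields above; treat these names as assumptions rather than as confirmed by this diff:

package main

import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"

func main() {
	tracer.Start()
	defer tracer.Stop()

	span := tracer.StartSpan("web.request")
	// Attaches usr.* tags to the trace's root span; WithPropagation additionally
	// encodes the id into the propagated _dd.p.usr.id tag (helper names assumed).
	tracer.SetUser(span, "user-123",
		tracer.WithUserEmail("user@example.com"),
		tracer.WithPropagation(),
	)
	span.Finish()
}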
+
+// setSamplingPriorityLocked updates the sampling priority.
+// It also updates the trace's sampling priority.
+func (s *span) setSamplingPriorityLocked(priority int, sampler samplernames.SamplerName) {
+	// We don't lock spans when flushing, so we could have a data race when
+	// modifying a span as it's being flushed. This protects us against that
+	// race, since spans are marked `finished` before we flush them.
+	if s.finished {
+		return
+	}
+	s.setMetric(keySamplingPriority, float64(priority))
+	s.context.setSamplingPriority(priority, sampler)
+}
+
+// setTagError sets the error tag. It accounts for various valid scenarios.
+// This method is not safe for concurrent use.
+func (s *span) setTagError(value interface{}, cfg errorConfig) {
+	setError := func(yes bool) {
+		if yes {
+			if s.Error == 0 {
+				// new error
+				atomic.AddInt32(&s.context.errors, 1)
+			}
+			s.Error = 1
+		} else {
+			if s.Error > 0 {
+				// flip from active to inactive
+				atomic.AddInt32(&s.context.errors, -1)
+			}
+			s.Error = 0
+		}
+	}
+	if s.finished {
+		return
+	}
+	switch v := value.(type) {
+	case bool:
+		// bool value as per the OpenTracing spec.
+		setError(v)
+	case error:
+		// if anyone sets an error value as the tag, be nice here
+		// and provide all the benefits.
+		setError(true)
+		s.setMeta(ext.ErrorMsg, v.Error())
+		s.setMeta(ext.ErrorType, reflect.TypeOf(v).String())
+		if !cfg.noDebugStack {
+			s.setMeta(ext.ErrorStack, takeStacktrace(cfg.stackFrames, cfg.stackSkip))
+		}
+		switch v.(type) {
+		case xerrors.Formatter:
+			s.setMeta(ext.ErrorDetails, fmt.Sprintf("%+v", v))
+		case fmt.Formatter:
+			// pkg/errors approach
+			s.setMeta(ext.ErrorDetails, fmt.Sprintf("%+v", v))
+		}
+	case nil:
+		// no error
+		setError(false)
+	default:
+		// in all other cases, let's assume that setting this tag
+		// is the result of an error.
+		setError(true)
+	}
+}
+
+// defaultStackLength specifies the default maximum size of a stack trace.
+const defaultStackLength = 32
+
+// takeStacktrace takes a stack trace of maximum n entries, skipping the first skip entries.
+// If n is 0, up to defaultStackLength (32) entries are retrieved.
+func takeStacktrace(n, skip uint) string {
+	if n == 0 {
+		n = defaultStackLength
+	}
+	var builder strings.Builder
+	pcs := make([]uintptr, n)
+
+	// +2 to exclude runtime.Callers and takeStacktrace
+	numFrames := runtime.Callers(2+int(skip), pcs)
+	if numFrames == 0 {
+		return ""
+	}
+	frames := runtime.CallersFrames(pcs[:numFrames])
+	for i := 0; ; i++ {
+		frame, more := frames.Next()
+		if i != 0 {
+			builder.WriteByte('\n')
+		}
+		builder.WriteString(frame.Function)
+		builder.WriteByte('\n')
+		builder.WriteByte('\t')
+		builder.WriteString(frame.File)
+		builder.WriteByte(':')
+		builder.WriteString(strconv.Itoa(frame.Line))
+		if !more {
+			break
+		}
+	}
+	return builder.String()
+}
+
+// setMeta sets a string tag. This method is not safe for concurrent use.
+func (s *span) setMeta(key, v string) {
+	if s.Meta == nil {
+		s.Meta = make(map[string]string, 1)
+	}
+	delete(s.Metrics, key)
+	switch key {
+	case ext.SpanName:
+		s.Name = v
+	case ext.ServiceName:
+		s.Service = v
+	case ext.ResourceName:
+		s.Resource = v
+	case ext.SpanType:
+		s.Type = v
+	default:
+		s.Meta[key] = v
+	}
+}
+
+// setTagBool sets a boolean tag on the span.
+func (s *span) setTagBool(key string, v bool) {
+	switch key {
+	case ext.AnalyticsEvent:
+		if v {
+			s.setMetric(ext.EventSampleRate, 1.0)
+		} else {
+			s.setMetric(ext.EventSampleRate, 0.0)
+		}
+	case ext.ManualDrop:
+		if v {
+			s.setSamplingPriorityLocked(ext.PriorityUserReject, samplernames.Manual)
+		}
+	case ext.ManualKeep:
+		if v {
+			s.setSamplingPriorityLocked(ext.PriorityUserKeep, samplernames.Manual)
+		}
+	default:
+		if v {
+			s.setMeta(key, "true")
+		} else {
+			s.setMeta(key, "false")
+		}
+	}
+}
+
+// setMetric sets a numeric tag, in our case called a metric. This method
+// is not safe for concurrent use.
+func (s *span) setMetric(key string, v float64) {
+	if s.Metrics == nil {
+		s.Metrics = make(map[string]float64, 1)
+	}
+	delete(s.Meta, key)
+	switch key {
+	case ext.ManualKeep:
+		if v == float64(samplernames.AppSec) {
+			s.setSamplingPriorityLocked(ext.PriorityUserKeep, samplernames.AppSec)
+		}
+	case ext.SamplingPriority:
+		// ext.SamplingPriority is deprecated in favor of ext.ManualKeep and ext.ManualDrop.
+		// We have it here for backward compatibility.
+		s.setSamplingPriorityLocked(int(v), samplernames.Manual)
+	default:
+		s.Metrics[key] = v
+	}
+}
+
+// Finish closes this Span (but not its children) providing the duration
+// of its part of the tracing session.
+func (s *span) Finish(opts ...ddtrace.FinishOption) {
+	t := now()
+	if len(opts) > 0 {
+		cfg := ddtrace.FinishConfig{
+			NoDebugStack: s.noDebugStack,
+		}
+		for _, fn := range opts {
+			fn(&cfg)
+		}
+		if !cfg.FinishTime.IsZero() {
+			t = cfg.FinishTime.UnixNano()
+		}
+		if cfg.Error != nil {
+			s.Lock()
+			s.setTagError(cfg.Error, errorConfig{
+				noDebugStack: cfg.NoDebugStack,
+				stackFrames:  cfg.StackFrames,
+				stackSkip:    cfg.SkipStackFrames,
+			})
+			s.Unlock()
+		}
+	}
+	if s.taskEnd != nil {
+		s.taskEnd()
+	}
+	if s.goExecTraced && rt.IsEnabled() {
+		// Only tag spans as traced if they both started & ended with
+		// execution tracing enabled. This is technically not sufficient
+		// for spans which could straddle the boundary between two
+		// execution traces, but there's really nothing we can do in
+		// those cases since execution tracing tasks aren't recorded in
+		// traces if they started before the trace.
+		s.SetTag("go_execution_traced", "yes")
+	} else if s.goExecTraced {
+		// If the span started with tracing enabled, but tracing wasn't
+		// enabled when the span finished, we still have some data to
+		// show. If tracing wasn't enabled when the span started, we
+		// won't have data in the execution trace to identify it so
+		// there's nothing we can show.
+		s.SetTag("go_execution_traced", "partial")
+	}
+	s.finish(t)
+
+	if s.pprofCtxRestore != nil {
+		// Restore the labels of the parent span so any CPU samples after this
+		// point are attributed correctly.
+		pprof.SetGoroutineLabels(s.pprofCtxRestore)
+	}
+}
+
+// SetOperationName sets or changes the operation name.
+func (s *span) SetOperationName(operationName string) {
+	s.Lock()
+	defer s.Unlock()
+	// We don't lock spans when flushing, so we could have a data race when
+	// modifying a span as it's being flushed. This protects us against that
+	// race, since spans are marked `finished` before we flush them.
+	if s.finished {
+		// already finished
+		return
+	}
+	s.Name = operationName
+}
+
+func (s *span) finish(finishTime int64) {
+	s.Lock()
+	defer s.Unlock()
+	// We don't lock spans when flushing, so we could have a data race when
+	// modifying a span as it's being flushed. This protects us against that
+	// race, since spans are marked `finished` before we flush them.
+	if s.finished {
+		// already finished
+		return
+	}
+	if s.Duration == 0 {
+		s.Duration = finishTime - s.Start
+	}
+	if s.Duration < 0 {
+		s.Duration = 0
+	}
+	s.finished = true
+
+	keep := true
+	if t, ok := internal.GetGlobalTracer().(*tracer); ok {
+		// we have an active tracer
+		if t.config.canComputeStats() && shouldComputeStats(s) {
+			// the agent supports computed stats
+			select {
+			case t.stats.In <- newAggregableSpan(s, t.obfuscator):
+				// ok
+			default:
+				log.Error("Stats channel full, disregarding span.")
+			}
+		}
+		if t.config.canDropP0s() {
+			// the agent supports dropping p0's in the client
+			keep = shouldKeep(s)
+		}
+	}
+	if keep {
+		// a single kept span keeps the whole trace.
+		s.context.trace.keep()
+	}
+	if log.DebugEnabled() {
+		// avoid allocating the ...interface{} argument if debug logging is disabled
+		log.Debug("Finished Span: %v, Operation: %s, Resource: %s, Tags: %v, %v",
+			s, s.Name, s.Resource, s.Meta, s.Metrics)
+	}
+	s.context.finish()
+}
+
+// newAggregableSpan creates a new aggregable summary for the span s, obfuscating
+// its resource with the given obfuscator.
+func newAggregableSpan(s *span, obfuscator *obfuscate.Obfuscator) *aggregableSpan {
+	var statusCode uint32
+	if sc, ok := s.Meta["http.status_code"]; ok && sc != "" {
+		if c, err := strconv.Atoi(sc); err == nil && c > 0 && c <= math.MaxInt32 {
+			statusCode = uint32(c)
+		}
+	}
+	key := aggregation{
+		Name:       s.Name,
+		Resource:   obfuscatedResource(obfuscator, s.Type, s.Resource),
+		Service:    s.Service,
+		Type:       s.Type,
+		Synthetics: strings.HasPrefix(s.Meta[keyOrigin], "synthetics"),
+		StatusCode: statusCode,
+	}
+	return &aggregableSpan{
+		key:      key,
+		Start:    s.Start,
+		Duration: s.Duration,
+		TopLevel: s.Metrics[keyTopLevel] == 1,
+		Error:    s.Error,
+	}
+}
+
+// textNonParsable specifies the text that will be assigned to resources that
+// cannot be parsed due to an obfuscation error.
+const textNonParsable = "Non-parsable SQL query"
+
+// obfuscatedResource returns the obfuscated version of the given resource. It is
+// obfuscated using the given obfuscator for the given span type typ.
+func obfuscatedResource(o *obfuscate.Obfuscator, typ, resource string) string {
+	if o == nil {
+		return resource
+	}
+	switch typ {
+	case "sql", "cassandra":
+		oq, err := o.ObfuscateSQLString(resource)
+		if err != nil {
+			log.Error("Error obfuscating stats group resource %q: %v", resource, err)
+			return textNonParsable
+		}
+		return oq.Query
+	case "redis":
+		return o.QuantizeRedisString(resource)
+	default:
+		return resource
+	}
+}
+
+// shouldKeep reports whether the trace should be kept.
+// A single kept span implies the whole trace is kept.
+func shouldKeep(s *span) bool {
+	if p, ok := s.context.samplingPriority(); ok && p > 0 {
+		// positive sampling priorities stay
+		return true
+	}
+	if atomic.LoadInt32(&s.context.errors) > 0 {
+		// traces with any span containing an error get kept
+		return true
+	}
+	if v, ok := s.Metrics[ext.EventSampleRate]; ok {
+		return sampledByRate(s.TraceID, v)
+	}
+	return false
+}
+
+// shouldComputeStats reports whether stats should be computed for this span.
+// Warning: callers must guard concurrent access to the span themselves.
+func shouldComputeStats(s *span) bool {
+	if v, ok := s.Metrics[keyMeasured]; ok && v == 1 {
+		return true
+	}
+	if v, ok := s.Metrics[keyTopLevel]; ok && v == 1 {
+		return true
+	}
+	return false
+}
+
+// String returns a human readable representation of the span. Not for
+// production, just debugging.
+func (s *span) String() string {
+	s.RLock()
+	defer s.RUnlock()
+	lines := []string{
+		fmt.Sprintf("Name: %s", s.Name),
+		fmt.Sprintf("Service: %s", s.Service),
+		fmt.Sprintf("Resource: %s", s.Resource),
+		fmt.Sprintf("TraceID: %d", s.TraceID),
+		fmt.Sprintf("TraceID128: %s", s.context.TraceID128()),
+		fmt.Sprintf("SpanID: %d", s.SpanID),
+		fmt.Sprintf("ParentID: %d", s.ParentID),
+		fmt.Sprintf("Start: %s", time.Unix(0, s.Start)),
+		fmt.Sprintf("Duration: %s", time.Duration(s.Duration)),
+		fmt.Sprintf("Error: %d", s.Error),
+		fmt.Sprintf("Type: %s", s.Type),
+		"Tags:",
+	}
+	for key, val := range s.Meta {
+		lines = append(lines, fmt.Sprintf("\t%s:%s", key, val))
+	}
+	for key, val := range s.Metrics {
+		lines = append(lines, fmt.Sprintf("\t%s:%f", key, val))
+	}
+	return strings.Join(lines, "\n")
+}
+
+// Format implements fmt.Formatter.
+func (s *span) Format(f fmt.State, c rune) {
+	switch c {
+	case 's':
+		fmt.Fprint(f, s.String())
+	case 'v':
+		if svc := globalconfig.ServiceName(); svc != "" {
+			fmt.Fprintf(f, "dd.service=%s ", svc)
+		}
+		if tr, ok := internal.GetGlobalTracer().(*tracer); ok {
+			if tr.config.env != "" {
+				fmt.Fprintf(f, "dd.env=%s ", tr.config.env)
+			}
+			if tr.config.version != "" {
+				fmt.Fprintf(f, "dd.version=%s ", tr.config.version)
+			}
+		} else {
+			if env := os.Getenv("DD_ENV"); env != "" {
+				fmt.Fprintf(f, "dd.env=%s ", env)
+			}
+			if v := os.Getenv("DD_VERSION"); v != "" {
+				fmt.Fprintf(f, "dd.version=%s ", v)
+			}
+		}
+		var traceID string
+		if sharedinternal.BoolEnv("DD_TRACE_128_BIT_TRACEID_LOGGING_ENABLED", false) && s.context.traceID.HasUpper() {
+			traceID = s.context.TraceID128()
+		} else {
+			traceID = fmt.Sprintf("%d", s.TraceID)
+		}
+		fmt.Fprintf(f, `dd.trace_id=%q `, traceID)
+		fmt.Fprintf(f, `dd.span_id="%d"`, s.SpanID)
+	default:
+		fmt.Fprintf(f, "%%!%c(ddtrace.Span=%v)", c, s)
+	}
+}
+
+const (
+	keySamplingPriority     = "_sampling_priority_v1"
+	keySamplingPriorityRate = "_dd.agent_psr"
+	keyDecisionMaker        = "_dd.p.dm"
+	keyServiceHash          = "_dd.dm.service_hash"
+	keyOrigin               = "_dd.origin"
+	// keyHostname can be used to override the agent's hostname detection when using `WithHostname`. Not to be confused with keyTracerHostname
+	// which is set via auto-detection.
+	keyHostname                = "_dd.hostname"
+	keyRulesSamplerAppliedRate = "_dd.rule_psr"
+	keyRulesSamplerLimiterRate = "_dd.limit_psr"
+	keyMeasured                = "_dd.measured"
+	// keyTopLevel is the key of top level metric indicating if a span is top level.
+	// A top level span is a local root (parent span of the local trace) or the first span of each service.
+	keyTopLevel = "_dd.top_level"
+	// keyPropagationError holds any error from propagated trace tags (if any)
+	keyPropagationError = "_dd.propagation_error"
+	// keySpanSamplingMechanism specifies the sampling mechanism by which an individual span was sampled
+	keySpanSamplingMechanism = "_dd.span_sampling.mechanism"
+	// keySingleSpanSamplingRuleRate specifies the configured sampling probability for the single span sampling rule.
+	keySingleSpanSamplingRuleRate = "_dd.span_sampling.rule_rate"
+	// keySingleSpanSamplingMPS specifies the configured limit for the single span sampling rule
+	// that the span matched. If there is no configured limit, then this tag is omitted.
+	keySingleSpanSamplingMPS = "_dd.span_sampling.max_per_second"
+	// keyPropagatedUserID holds the propagated user identifier, if user id propagation is enabled.
+	keyPropagatedUserID = "_dd.p.usr.id"
+	// keyTracerHostname holds the tracer-detected hostname; only present when not connected to the agent over UDS.
+	keyTracerHostname = "_dd.tracer_hostname"
+	// keyTraceID128 is the lowercase, hex encoded upper 64 bits of a 128-bit trace id, if present.
+	keyTraceID128 = "_dd.p.tid"
+	// keySpanAttributeSchemaVersion holds the selected DD_TRACE_SPAN_ATTRIBUTE_SCHEMA version.
+	keySpanAttributeSchemaVersion = "_dd.trace_span_attribute_schema"
+	// keyPeerServiceSource indicates the precursor tag that was used as the value of peer.service.
+	keyPeerServiceSource = "_dd.peer.service.source"
+	// keyPeerServiceRemappedFrom indicates the previous value for peer.service, in case remapping happened.
+	keyPeerServiceRemappedFrom = "_dd.peer.service.remapped_from"
+)
+
+// The following set of tags is used for user monitoring and set through calls to span.SetUser().
+const (
+	keyUserID        = "usr.id"
+	keyUserEmail     = "usr.email"
+	keyUserName      = "usr.name"
+	keyUserRole      = "usr.role"
+	keyUserScope     = "usr.scope"
+	keyUserSessionID = "usr.session_id"
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span_msgp.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span_msgp.go
new file mode 100644
index 0000000000..16bb758f8c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span_msgp.go
@@ -0,0 +1,453 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+// NOTE: THIS FILE WAS PRODUCED BY THE
+// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
+// DO NOT EDIT
+
+import (
+	"github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *span) DecodeMsg(dc *msgp.Reader) (err error) {
+	var field []byte
+	_ = field
+	var zb0001 uint32
+	zb0001, err = dc.ReadMapHeader()
+	if err != nil {
+		return
+	}
+	for zb0001 > 0 {
+		zb0001--
+		field, err = dc.ReadMapKeyPtr()
+		if err != nil {
+			return
+		}
+		switch msgp.UnsafeString(field) {
+		case "name":
+			z.Name, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "service":
+			z.Service, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "resource":
+			z.Resource, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "type":
+			z.Type, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "start":
+			z.Start, err = dc.ReadInt64()
+			if err != nil {
+				return
+			}
+		case "duration":
+			z.Duration, err = dc.ReadInt64()
+			if err != nil {
+				return
+			}
+		case "meta":
+			var zb0002 uint32
+			zb0002, err = dc.ReadMapHeader()
+			if err != nil {
+				return
+			}
+			if z.Meta == nil && zb0002 > 0 {
+				z.Meta = make(map[string]string, zb0002)
+			} else if len(z.Meta) > 0 {
+				for key := range z.Meta {
+					delete(z.Meta, key)
+				}
+			}
+			for zb0002 > 0 {
+				zb0002--
+				var za0001 string
+				var za0002 string
+				za0001, err = dc.ReadString()
+				if err != nil {
+					return
+				}
+				za0002, err = dc.ReadString()
+				if err != nil {
+					return
+				}
+				z.Meta[za0001] = za0002
+			}
+		case "metrics":
+			var zb0003 uint32
+			zb0003, err = dc.ReadMapHeader()
+			if err != nil {
+				return
+			}
+			if z.Metrics == nil && zb0003 > 0 {
+				z.Metrics = make(map[string]float64, zb0003)
+			} else if len(z.Metrics) > 0 {
+				for key := range z.Metrics {
+					delete(z.Metrics, key)
+				}
+			}
+			for zb0003 > 0 {
+				zb0003--
+				var za0003 string
+				var za0004 float64
+				za0003, err = dc.ReadString()
+				if err != nil {
+					return
+				}
+				za0004, err = dc.ReadFloat64()
+				if err != nil {
+					return
+				}
+				z.Metrics[za0003] = za0004
+			}
+		case "span_id":
+			z.SpanID, err = dc.ReadUint64()
+			if err != nil {
+				return
+			}
+		case "trace_id":
+			z.TraceID, err = dc.ReadUint64()
+			if err != nil {
+				return
+			}
+		case "parent_id":
+			z.ParentID, err = dc.ReadUint64()
+			if err != nil {
+				return
+			}
+		case "error":
+			z.Error, err = dc.ReadInt32()
+			if err != nil {
+				return
+			}
+		default:
+			err = dc.Skip()
+			if err != nil {
+				return
+			}
+		}
+	}
+	return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *span) EncodeMsg(en *msgp.Writer) (err error) {
+	// map header, size 12
+	// write "name"
+	err = en.Append(0x8c, 0xa4, 0x6e, 0x61, 0x6d, 0x65)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.Name)
+	if err != nil {
+		return
+	}
+	// write "service"
+	err = en.Append(0xa7, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.Service)
+	if err != nil {
+		return
+	}
+	// write "resource"
+	err = en.Append(0xa8, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.Resource)
+	if err != nil {
+		return
+	}
+	// write "type"
+	err = en.Append(0xa4, 0x74, 0x79, 0x70, 0x65)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.Type)
+	if err != nil {
+		return
+	}
+	// write "start"
+	err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+	if err != nil {
+		return
+	}
+	err = en.WriteInt64(z.Start)
+	if err != nil {
+		return
+	}
+	// write "duration"
+	err = en.Append(0xa8, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
+	if err != nil {
+		return
+	}
+	err = en.WriteInt64(z.Duration)
+	if err != nil {
+		return
+	}
+	// write "meta"
+	err = en.Append(0xa4, 0x6d, 0x65, 0x74, 0x61)
+	if err != nil {
+		return
+	}
+	err = en.WriteMapHeader(uint32(len(z.Meta)))
+	if err != nil {
+		return
+	}
+	for za0001, za0002 := range z.Meta {
+		err = en.WriteString(za0001)
+		if err != nil {
+			return
+		}
+		err = en.WriteString(za0002)
+		if err != nil {
+			return
+		}
+	}
+	// write "metrics"
+	err = en.Append(0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
+	if err != nil {
+		return
+	}
+	err = en.WriteMapHeader(uint32(len(z.Metrics)))
+	if err != nil {
+		return
+	}
+	for za0003, za0004 := range z.Metrics {
+		err = en.WriteString(za0003)
+		if err != nil {
+			return
+		}
+		err = en.WriteFloat64(za0004)
+		if err != nil {
+			return
+		}
+	}
+	// write "span_id"
+	err = en.Append(0xa7, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.SpanID)
+	if err != nil {
+		return
+	}
+	// write "trace_id"
+	err = en.Append(0xa8, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.TraceID)
+	if err != nil {
+		return
+	}
+	// write "parent_id"
+	err = en.Append(0xa9, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.ParentID)
+	if err != nil {
+		return
+	}
+	// write "error"
+	err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
+	if err != nil {
+		return
+	}
+	err = en.WriteInt32(z.Error)
+	if err != nil {
+		return
+	}
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *span) Msgsize() (s int) {
+	s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.StringPrefixSize + len(z.Service) + 9 + msgp.StringPrefixSize + len(z.Resource) + 5 + msgp.StringPrefixSize + len(z.Type) + 6 + msgp.Int64Size + 9 + msgp.Int64Size + 5 + msgp.MapHeaderSize
+	if z.Meta != nil {
+		for za0001, za0002 := range z.Meta {
+			_ = za0002
+			s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
+		}
+	}
+	s += 8 + msgp.MapHeaderSize
+	if z.Metrics != nil {
+		for za0003, za0004 := range z.Metrics {
+			_ = za0004
+			s += msgp.StringPrefixSize + len(za0003) + msgp.Float64Size
+		}
+	}
+	s += 8 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.Uint64Size + 6 + msgp.Int32Size
+	return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *spanList) DecodeMsg(dc *msgp.Reader) (err error) {
+	var zb0002 uint32
+	zb0002, err = dc.ReadArrayHeader()
+	if err != nil {
+		return
+	}
+	if cap((*z)) >= int(zb0002) {
+		(*z) = (*z)[:zb0002]
+	} else {
+		(*z) = make(spanList, zb0002)
+	}
+	for zb0001 := range *z {
+		if dc.IsNil() {
+			err = dc.ReadNil()
+			if err != nil {
+				return
+			}
+			(*z)[zb0001] = nil
+		} else {
+			if (*z)[zb0001] == nil {
+				(*z)[zb0001] = new(span)
+			}
+			err = (*z)[zb0001].DecodeMsg(dc)
+			if err != nil {
+				return
+			}
+		}
+	}
+	return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z spanList) EncodeMsg(en *msgp.Writer) (err error) {
+	err = en.WriteArrayHeader(uint32(len(z)))
+	if err != nil {
+		return
+	}
+	for zb0003 := range z {
+		if z[zb0003] == nil {
+			err = en.WriteNil()
+			if err != nil {
+				return
+			}
+		} else {
+			err = z[zb0003].EncodeMsg(en)
+			if err != nil {
+				return
+			}
+		}
+	}
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z spanList) Msgsize() (s int) {
+	s = msgp.ArrayHeaderSize
+	for zb0003 := range z {
+		if z[zb0003] == nil {
+			s += msgp.NilSize
+		} else {
+			s += z[zb0003].Msgsize()
+		}
+	}
+	return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *spanLists) DecodeMsg(dc *msgp.Reader) (err error) {
+	var zb0003 uint32
+	zb0003, err = dc.ReadArrayHeader()
+	if err != nil {
+		return
+	}
+	if cap((*z)) >= int(zb0003) {
+		(*z) = (*z)[:zb0003]
+	} else {
+		(*z) = make(spanLists, zb0003)
+	}
+	for zb0001 := range *z {
+		var zb0004 uint32
+		zb0004, err = dc.ReadArrayHeader()
+		if err != nil {
+			return
+		}
+		if cap((*z)[zb0001]) >= int(zb0004) {
+			(*z)[zb0001] = ((*z)[zb0001])[:zb0004]
+		} else {
+			(*z)[zb0001] = make(spanList, zb0004)
+		}
+		for zb0002 := range (*z)[zb0001] {
+			if dc.IsNil() {
+				err = dc.ReadNil()
+				if err != nil {
+					return
+				}
+				(*z)[zb0001][zb0002] = nil
+			} else {
+				if (*z)[zb0001][zb0002] == nil {
+					(*z)[zb0001][zb0002] = new(span)
+				}
+				err = (*z)[zb0001][zb0002].DecodeMsg(dc)
+				if err != nil {
+					return
+				}
+			}
+		}
+	}
+	return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z spanLists) EncodeMsg(en *msgp.Writer) (err error) {
+	err = en.WriteArrayHeader(uint32(len(z)))
+	if err != nil {
+		return
+	}
+	for zb0005 := range z {
+		err = en.WriteArrayHeader(uint32(len(z[zb0005])))
+		if err != nil {
+			return
+		}
+		for zb0006 := range z[zb0005] {
+			if z[zb0005][zb0006] == nil {
+				err = en.WriteNil()
+				if err != nil {
+					return
+				}
+			} else {
+				err = z[zb0005][zb0006].EncodeMsg(en)
+				if err != nil {
+					return
+				}
+			}
+		}
+	}
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z spanLists) Msgsize() (s int) {
+	s = msgp.ArrayHeaderSize
+	for zb0005 := range z {
+		s += msgp.ArrayHeaderSize
+		for zb0006 := range z[zb0005] {
+			if z[zb0005][zb0006] == nil {
+				s += msgp.NilSize
+			} else {
+				s += z[zb0005][zb0006].Msgsize()
+			}
+		}
+	}
+	return
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/spancontext.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/spancontext.go
new file mode 100644
index 0000000000..d418329b42
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/spancontext.go
@@ -0,0 +1,511 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+	ginternal "gopkg.in/DataDog/dd-trace-go.v1/internal"
+	sharedinternal "gopkg.in/DataDog/dd-trace-go.v1/internal"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
+)
+
+var _ ddtrace.SpanContext = (*spanContext)(nil)
+
+type traceID [16]byte // traceID in big endian, i.e. <upper><lower>
+
+var emptyTraceID traceID
+
+func (t *traceID) HexEncoded() string {
+	return hex.EncodeToString(t[:])
+}
+
+func (t *traceID) Lower() uint64 {
+	return binary.BigEndian.Uint64(t[8:])
+}
+
+func (t *traceID) Upper() uint64 {
+	return binary.BigEndian.Uint64(t[:8])
+}
+
+func (t *traceID) SetLower(i uint64) {
+	binary.BigEndian.PutUint64(t[8:], i)
+}
+
+func (t *traceID) SetUpper(i uint64) {
+	binary.BigEndian.PutUint64(t[:8], i)
+}
+
+func (t *traceID) SetUpperFromHex(s string) error {
+	u, err := strconv.ParseUint(s, 16, 64)
+	if err != nil {
+		return fmt.Errorf("malformed %q: %s", s, err)
+	}
+	t.SetUpper(u)
+	return nil
+}
+
+func (t *traceID) Empty() bool {
+	return *t == emptyTraceID
+}
+
+func (t *traceID) HasUpper() bool {
+	// TODO: in Go 1.20 we can simplify this
+	for _, b := range t[:8] {
+		if b != 0 {
+			return true
+		}
+	}
+	return false
+}
+
+func (t *traceID) UpperHex() string {
+	return hex.EncodeToString(t[:8])
+}
+
+// SpanContext represents a span state that can propagate to descendant spans
+// and across process boundaries. It contains all the information needed to
+// spawn a direct descendant of the span that it belongs to. It can be used
+// to create distributed tracing by propagating it using the provided interfaces.
+type spanContext struct {
+	updated bool // updated is tracking changes for priority / origin / x-datadog-tags
+
+	// the below group should propagate only locally
+
+	trace  *trace // reference to the trace that this span belongs to
+	span   *span  // reference to the span that hosts this context
+	errors int32  // number of spans with errors in this trace
+
+	// the below group should propagate cross-process
+
+	traceID traceID
+	spanID  uint64
+
+	mu         sync.RWMutex // guards below fields
+	baggage    map[string]string
+	hasBaggage uint32 // atomic int for quick checking presence of baggage. 0 indicates no baggage, otherwise baggage exists.
+	origin     string // e.g. "synthetics"
+}
+
+// newSpanContext creates a new SpanContext to serve as context for the given
+// span. If the provided parent is not nil, the context will inherit the trace,
+// baggage and other values from it. This method also pushes the span into the
+// new context's trace and as a result, it should not be called multiple times
+// for the same span.
+func newSpanContext(span *span, parent *spanContext) *spanContext {
+	context := &spanContext{
+		spanID: span.SpanID,
+		span:   span,
+	}
+	context.traceID.SetLower(span.TraceID)
+	if parent != nil {
+		context.traceID.SetUpper(parent.traceID.Upper())
+		context.trace = parent.trace
+		context.origin = parent.origin
+		context.errors = parent.errors
+		parent.ForeachBaggageItem(func(k, v string) bool {
+			context.setBaggageItem(k, v)
+			return true
+		})
+	} else if sharedinternal.BoolEnv("DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED", false) {
+		// add 128 bit trace id, if enabled, formatted as big-endian:
+		// <32-bit unix seconds> <32 bits of zero> <64 random bits>
+		id128 := time.Duration(span.Start) / time.Second
+		// casting from int64 -> uint32 should be safe since the start time won't be
+		// negative, and the seconds should fit within 32-bits for the foreseeable future.
+		// (We only want 32 bits of time, then the rest is zero)
+		tUp := uint64(uint32(id128)) << 32 // We need the time at the upper 32 bits of the uint
+		context.traceID.SetUpper(tUp)
+	}
+	if context.trace == nil {
+		context.trace = newTrace()
+	}
+	if context.trace.root == nil {
+		// first span in the trace can safely be assumed to be the root
+		context.trace.root = span
+	}
+	// put span in context's trace
+	context.trace.push(span)
+	// setting context.updated to false here is necessary to distinguish
+	// between initializing properties of the span (priority)
+	// and updating them after extracting context through propagators
+	context.updated = false
+	return context
+}
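Illustrative sketch (not part of the vendored patch) of how newSpanContext composes the upper 64 bits of a 128-bit trace ID when DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED is set: the 32-bit unix-second start time is shifted into the top half, leaving the low 32 bits zero:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Date(2023, 9, 1, 0, 0, 0, 0, time.UTC).UnixNano()
	secs := uint32(time.Duration(start) / time.Second) // 32-bit unix seconds (1693526400)
	upper := uint64(secs) << 32                        // <32-bit seconds><32 zero bits>
	fmt.Printf("%016x\n", upper)                       // 64f1298000000000
}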
+
+// SpanID implements ddtrace.SpanContext.
+func (c *spanContext) SpanID() uint64 { return c.spanID }
+
+// TraceID implements ddtrace.SpanContext.
+func (c *spanContext) TraceID() uint64 { return c.traceID.Lower() }
+
+// TraceID128 implements ddtrace.SpanContextW3C.
+func (c *spanContext) TraceID128() string {
+	return c.traceID.HexEncoded()
+}
+
+// TraceID128Bytes implements ddtrace.SpanContextW3C.
+func (c *spanContext) TraceID128Bytes() [16]byte {
+	return c.traceID
+}
+
+// ForeachBaggageItem implements ddtrace.SpanContext.
+func (c *spanContext) ForeachBaggageItem(handler func(k, v string) bool) {
+	if atomic.LoadUint32(&c.hasBaggage) == 0 {
+		return
+	}
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	for k, v := range c.baggage {
+		if !handler(k, v) {
+			break
+		}
+	}
+}
+
+func (c *spanContext) setSamplingPriority(p int, sampler samplernames.SamplerName) {
+	if c.trace == nil {
+		c.trace = newTrace()
+	}
+	if c.trace.priority != nil && *c.trace.priority != float64(p) {
+		c.updated = true
+	}
+	c.trace.setSamplingPriority(p, sampler)
+}
+
+func (c *spanContext) samplingPriority() (p int, ok bool) {
+	if c.trace == nil {
+		return 0, false
+	}
+	return c.trace.samplingPriority()
+}
+
+func (c *spanContext) setBaggageItem(key, val string) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.baggage == nil {
+		atomic.StoreUint32(&c.hasBaggage, 1)
+		c.baggage = make(map[string]string, 1)
+	}
+	c.baggage[key] = val
+}
+
+func (c *spanContext) baggageItem(key string) string {
+	if atomic.LoadUint32(&c.hasBaggage) == 0 {
+		return ""
+	}
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.baggage[key]
+}
+
+func (c *spanContext) meta(key string) (val string, ok bool) {
+	c.span.RLock()
+	defer c.span.RUnlock()
+	val, ok = c.span.Meta[key]
+	return val, ok
+}
+
+// finish marks this span as finished in the trace.
+func (c *spanContext) finish() { c.trace.finishedOne(c.span) }
+
+// samplingDecision is the decision to send a trace to the agent or not.
+type samplingDecision uint32
+
+const (
+	// decisionNone is the default state of a trace.
+	// If no decision is made about the trace, the trace won't be sent to the agent.
+	decisionNone samplingDecision = iota
+	// decisionDrop prevents the trace from being sent to the agent.
+	decisionDrop
+	// decisionKeep ensures the trace will be sent to the agent.
+	decisionKeep
+)
+
+// trace contains shared context information about a trace, such as sampling
+// priority, the root reference and a buffer of the spans which are part of the
+// trace, if these exist.
+type trace struct {
+	mu               sync.RWMutex      // guards below fields
+	spans            []*span           // all the spans that are part of this trace
+	tags             map[string]string // trace level tags
+	propagatingTags  map[string]string // trace level tags that will be propagated across service boundaries
+	finished         int               // the number of finished spans
+	full             bool              // signifies that the span buffer is full
+	priority         *float64          // sampling priority
+	locked           bool              // specifies if the sampling priority can be altered
+	samplingDecision samplingDecision  // samplingDecision indicates whether to send the trace to the agent.
+
+	// root specifies the root of the trace, if known; it is nil when a span
+	// context is extracted from a carrier, at which point there are no spans in
+	// the trace yet.
+	root *span
+}
+
+var (
+	// traceStartSize is the initial size of our trace buffer. By default we
+	// allocate room for a handful of spans within the trace; this is reasonable
+	// since a span is much bigger, and it avoids re-allocating over and over.
+	// Could be fine-tuned at runtime.
+	traceStartSize = 10
+	// traceMaxSize is the maximum number of spans we keep in memory for a
+	// single trace. This is to avoid memory leaks. If more spans than this
+	// are added to a trace, then the trace is dropped and the spans are
+	// discarded. Adding additional spans after a trace is dropped does
+	// nothing.
+	traceMaxSize = int(1e5)
+)
+
+// newTrace creates a new, empty trace with a pre-allocated span buffer.
+func newTrace() *trace {
+	return &trace{spans: make([]*span, 0, traceStartSize)}
+}
+
+func (t *trace) samplingPriorityLocked() (p int, ok bool) {
+	if t.priority == nil {
+		return 0, false
+	}
+	return int(*t.priority), true
+}
+
+func (t *trace) samplingPriority() (p int, ok bool) {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	return t.samplingPriorityLocked()
+}
+
+func (t *trace) setSamplingPriority(p int, sampler samplernames.SamplerName) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.setSamplingPriorityLocked(p, sampler)
+}
+
+func (t *trace) keep() {
+	atomic.CompareAndSwapUint32((*uint32)(&t.samplingDecision), uint32(decisionNone), uint32(decisionKeep))
+}
+
+func (t *trace) drop() {
+	atomic.CompareAndSwapUint32((*uint32)(&t.samplingDecision), uint32(decisionNone), uint32(decisionDrop))
+}
+
+func (t *trace) setTag(key, value string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.setTagLocked(key, value)
+}
+
+func (t *trace) setTagLocked(key, value string) {
+	if t.tags == nil {
+		t.tags = make(map[string]string, 1)
+	}
+	t.tags[key] = value
+}
+
+func (t *trace) setSamplingPriorityLocked(p int, sampler samplernames.SamplerName) {
+	if t.locked {
+		return
+	}
+	if t.priority == nil {
+		t.priority = new(float64)
+	}
+	*t.priority = float64(p)
+	_, ok := t.propagatingTags[keyDecisionMaker]
+	if p > 0 && !ok && sampler != samplernames.Unknown {
+		// We have a positive priority and the sampling mechanism isn't set.
+		// Send nothing when sampler is `Unknown` for RFC compliance.
+		t.setPropagatingTagLocked(keyDecisionMaker, "-"+strconv.Itoa(int(sampler)))
+	}
+	if p <= 0 && ok {
+		delete(t.propagatingTags, keyDecisionMaker)
+	}
+}
+
+// push pushes a new span into the trace. If the buffer is full, the span is
+// dropped and the trace is marked as full.
+func (t *trace) push(sp *span) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.full {
+		return
+	}
+	tr, haveTracer := internal.GetGlobalTracer().(*tracer)
+	if len(t.spans) >= traceMaxSize {
+		// capacity is reached, we will not be able to complete this trace.
+		t.full = true
+		t.spans = nil // GC
+		log.Error("trace buffer full (%d), dropping trace", traceMaxSize)
+		if haveTracer {
+			atomic.AddUint32(&tr.tracesDropped, 1)
+		}
+		return
+	}
+	if v, ok := sp.Metrics[keySamplingPriority]; ok {
+		t.setSamplingPriorityLocked(int(v), samplernames.Unknown)
+	}
+	t.spans = append(t.spans, sp)
+	if haveTracer {
+		atomic.AddUint32(&tr.spansStarted, 1)
+	}
+}
+
+// finishedOne acknowledges that another span in the trace has finished, and checks
+// whether the trace is complete, in which case it pushes the finished spans to the
+// tracer. When the root span finishes, the trace's sampling priority is written to
+// it and locked.
+func (t *trace) finishedOne(s *span) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.full {
+		// capacity has been reached, the buffer is no longer tracking
+		// all the spans in the trace, so the below conditions will not
+		// be accurate and would trigger a premature flush, exposing us
+		// to a race condition where spans can be modified while flushing.
+		return
+	}
+	t.finished++
+	if s == t.root && t.priority != nil {
+		// after the root has finished we lock down the priority;
+		// we won't be able to make changes to a span after finishing
+		// without causing a race condition.
+		t.root.setMetric(keySamplingPriority, *t.priority)
+		t.locked = true
+	}
+	if len(t.spans) > 0 && s == t.spans[0] {
+		// first span in chunk finished, lock down the tags
+		//
+		// TODO(barbayar): make sure this doesn't happen in vain when switching to
+		// the new wire format. We won't need to set the tags on the first span
+		// in the chunk there.
+		for k, v := range t.tags {
+			s.setMeta(k, v)
+		}
+		for k, v := range t.propagatingTags {
+			s.setMeta(k, v)
+		}
+		for k, v := range ginternal.GetTracerGitMetadataTags() {
+			s.setMeta(k, v)
+		}
+		if s.context != nil && s.context.traceID.HasUpper() {
+			s.setMeta(keyTraceID128, s.context.traceID.UpperHex())
+		}
+	}
+	if len(t.spans) == t.finished {
+		defer func() {
+			t.spans = nil
+			t.finished = 0 // important, because a buffer can be used for several flushes
+		}()
+	}
+	tr, ok := internal.GetGlobalTracer().(*tracer)
+	if !ok {
+		return
+	}
+	setPeerService(s, tr.config)
+	if len(t.spans) != t.finished {
+		return
+	}
+	if hn := tr.hostname(); hn != "" {
+		s.setMeta(keyTracerHostname, hn)
+	}
+	// we have a tracer that can receive completed traces.
+	atomic.AddUint32(&tr.spansFinished, uint32(len(t.spans)))
+	tr.pushTrace(&finishedTrace{
+		spans:    t.spans,
+		willSend: decisionKeep == samplingDecision(atomic.LoadUint32((*uint32)(&t.samplingDecision))),
+	})
+}
+
+// setPeerService sets the peer.service, _dd.peer.service.source, and _dd.peer.service.remapped_from
+// tags as applicable for the given span.
+func setPeerService(s *span, cfg *config) {
+	if _, ok := s.Meta[ext.PeerService]; ok { // peer.service already set on the span
+		s.setMeta(keyPeerServiceSource, ext.PeerService)
+	} else { // no peer.service currently set
+		spanKind := s.Meta[ext.SpanKind]
+		isOutboundRequest := spanKind == ext.SpanKindClient || spanKind == ext.SpanKindProducer
+		shouldSetDefaultPeerService := isOutboundRequest && cfg.peerServiceDefaultsEnabled
+		if !shouldSetDefaultPeerService {
+			return
+		}
+		source := setPeerServiceFromSource(s)
+		if source == "" {
+			log.Debug("No source tag value could be found for span %q, peer.service not set", s.Name)
+			return
+		}
+		s.setMeta(keyPeerServiceSource, source)
+	}
+	// Overwrite existing peer.service value if remapped by the user
+	ps := s.Meta[ext.PeerService]
+	if to, ok := cfg.peerServiceMappings[ps]; ok {
+		s.setMeta(keyPeerServiceRemappedFrom, ps)
+		s.setMeta(ext.PeerService, to)
+	}
+}
+
+// setPeerServiceFromSource sets peer.service from the sources determined
+// by the tags on the span. It returns the source tag name that it used for
+// the peer.service value, or the empty string if no valid source tag was available.
+func setPeerServiceFromSource(s *span) string {
+	has := func(tag string) bool {
+		_, ok := s.Meta[tag]
+		return ok
+	}
+	var sources []string
+	useTargetHost := true
+	switch {
+	// order of the cases and their sources matters here. These are in priority order (highest to lowest)
+	case has("aws_service"):
+		sources = []string{
+			"queuename",
+			"topicname",
+			"streamname",
+			"tablename",
+			"bucketname",
+		}
+	case s.Meta[ext.DBSystem] == ext.DBSystemCassandra:
+		sources = []string{
+			ext.CassandraContactPoints,
+		}
+		useTargetHost = false
+	case has(ext.DBSystem):
+		sources = []string{
+			ext.DBName,
+			ext.DBInstance,
+		}
+	case has(ext.MessagingSystem):
+		sources = []string{
+			ext.KafkaBootstrapServers,
+		}
+	case has(ext.RPCSystem):
+		sources = []string{
+			ext.RPCService,
+		}
+	}
+	// Network destination tags are used as a fallback unless higher-priority sources are already set.
+	if useTargetHost {
+		sources = append(sources, []string{
+			ext.NetworkDestinationName,
+			ext.PeerHostname,
+			ext.TargetHost,
+		}...)
+	}
+	for _, source := range sources {
+		if val, ok := s.Meta[source]; ok {
+			s.setMeta(ext.PeerService, val)
+			return source
+		}
+	}
+	return ""
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sqlcomment.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sqlcomment.go
new file mode 100644
index 0000000000..b220d5c6fa
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sqlcomment.go
@@ -0,0 +1,300 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"strconv"
+	"strings"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
+)
+
+// SQLCommentInjectionMode represents the mode of SQL comment injection.
+//
+// Deprecated: Use DBMPropagationMode instead.
+type SQLCommentInjectionMode DBMPropagationMode
+
+const (
+	// SQLInjectionUndefined represents the comment injection mode is not set. This is the same as SQLInjectionDisabled.
+	SQLInjectionUndefined SQLCommentInjectionMode = SQLCommentInjectionMode(DBMPropagationModeUndefined)
+	// SQLInjectionDisabled represents the comment injection mode where all injection is disabled.
+	SQLInjectionDisabled SQLCommentInjectionMode = SQLCommentInjectionMode(DBMPropagationModeDisabled)
+	// SQLInjectionModeService represents the comment injection mode where only service tags (name, env, version) are injected.
+	SQLInjectionModeService SQLCommentInjectionMode = SQLCommentInjectionMode(DBMPropagationModeService)
+	// SQLInjectionModeFull represents the comment injection mode where both service tags and tracing tags are injected. Tracing tags include span id, trace id and sampling priority.
+	SQLInjectionModeFull SQLCommentInjectionMode = SQLCommentInjectionMode(DBMPropagationModeFull)
+)
+
+// DBMPropagationMode represents the mode of dbm propagation.
+//
+// Note that enabling sql comment propagation results in potentially confidential data (service names)
+// being stored in the databases which can then be accessed by other 3rd parties that have been granted
+// access to the database.
+type DBMPropagationMode string
+
+const (
+	// DBMPropagationModeUndefined represents the dbm propagation mode not being set. This is the same as DBMPropagationModeDisabled.
+	DBMPropagationModeUndefined DBMPropagationMode = ""
+	// DBMPropagationModeDisabled represents the dbm propagation mode where all propagation is disabled.
+	DBMPropagationModeDisabled DBMPropagationMode = "disabled"
+	// DBMPropagationModeService represents the dbm propagation mode where only service tags (name, env, version) are propagated to dbm.
+	DBMPropagationModeService DBMPropagationMode = "service"
+	// DBMPropagationModeFull represents the dbm propagation mode where both service tags and tracing tags are propagated. Tracing tags include span id, trace id and the sampled flag.
+	DBMPropagationModeFull DBMPropagationMode = "full"
+)
+
+// Key names for SQL comment tags.
+const (
+	sqlCommentTraceParent   = "traceparent"
+	sqlCommentParentService = "ddps"
+	sqlCommentDBService     = "dddbs"
+	sqlCommentParentVersion = "ddpv"
+	sqlCommentEnv           = "dde"
+)
+
+// Current trace context version (see https://www.w3.org/TR/trace-context/#version)
+const w3cContextVersion = "00"
+
+// SQLCommentCarrier is a carrier implementation that injects a span context in a SQL query in the form
+// of a sqlcommenter formatted comment prepended to the original query text.
+// See https://google.github.io/sqlcommenter/spec/ for more details.
+type SQLCommentCarrier struct {
+	Query         string
+	Mode          DBMPropagationMode
+	DBServiceName string
+	SpanID        uint64
+}
+
+// Inject injects a span context in the carrier's Query field as a comment.
+func (c *SQLCommentCarrier) Inject(spanCtx ddtrace.SpanContext) error {
+	c.SpanID = generateSpanID(now())
+	tags := make(map[string]string)
+	switch c.Mode {
+	case DBMPropagationModeUndefined:
+		fallthrough
+	case DBMPropagationModeDisabled:
+		return nil
+	case DBMPropagationModeFull:
+		var (
+			sampled int64
+			traceID uint64
+		)
+		if ctx, ok := spanCtx.(*spanContext); ok {
+			if sp, ok := ctx.samplingPriority(); ok && sp > 0 {
+				sampled = 1
+			}
+			traceID = ctx.TraceID()
+		}
+		if traceID == 0 { // check if this is a root span
+			traceID = c.SpanID
+		}
+		tags[sqlCommentTraceParent] = encodeTraceParent(traceID, c.SpanID, sampled)
+		fallthrough
+	case DBMPropagationModeService:
+		if ctx, ok := spanCtx.(*spanContext); ok {
+			if e, ok := ctx.meta(ext.Environment); ok && e != "" {
+				tags[sqlCommentEnv] = e
+			}
+			if v, ok := ctx.meta(ext.Version); ok && v != "" {
+				tags[sqlCommentParentVersion] = v
+			}
+		}
+		if globalconfig.ServiceName() != "" {
+			tags[sqlCommentParentService] = globalconfig.ServiceName()
+		}
+		tags[sqlCommentDBService] = c.DBServiceName
+	}
+	c.Query = commentQuery(c.Query, tags)
+	return nil
+}
+
+// encodeTraceParent encodes a traceparent as per the W3C trace context spec (https://www.w3.org/TR/trace-context/#version).
+func encodeTraceParent(traceID uint64, spanID uint64, sampled int64) string {
+	var b strings.Builder
+	// traceparent has a fixed length of 55:
+	// 2 bytes for the version, 32 for the trace id, 16 for the span id, 2 for the sampled flag and 3 for separators
+	b.Grow(55)
+	b.WriteString(w3cContextVersion)
+	b.WriteRune('-')
+	tid := strconv.FormatUint(traceID, 16)
+	for i := 0; i < 32-len(tid); i++ {
+		b.WriteRune('0')
+	}
+	b.WriteString(tid)
+	b.WriteRune('-')
+	sid := strconv.FormatUint(spanID, 16)
+	for i := 0; i < 16-len(sid); i++ {
+		b.WriteRune('0')
+	}
+	b.WriteString(sid)
+	b.WriteRune('-')
+	b.WriteRune('0')
+	b.WriteString(strconv.FormatInt(sampled, 16))
+	return b.String()
+}
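Illustrative sketch (not part of the vendored patch) of the 55-character traceparent layout that encodeTraceParent builds: version, zero-padded 32-hex-digit trace id, 16-hex-digit span id, and the sampled flag. The IDs are made-up values; a 64-bit trace id only fills the lower half of the 32-digit field:

package main

import "fmt"

func main() {
	traceID, spanID, sampled := uint64(42), uint64(31), int64(1)
	tp := fmt.Sprintf("00-%032x-%016x-0%x", traceID, spanID, sampled)
	fmt.Println(tp)      // 00-0000000000000000000000000000002a-000000000000001f-01
	fmt.Println(len(tp)) // 55
}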
+
+var (
+	keyReplacer   = strings.NewReplacer(" ", "%20", "!", "%21", "#", "%23", "$", "%24", "%", "%25", "&", "%26", "'", "%27", "(", "%28", ")", "%29", "*", "%2A", "+", "%2B", ",", "%2C", "/", "%2F", ":", "%3A", ";", "%3B", "=", "%3D", "?", "%3F", "@", "%40", "[", "%5B", "]", "%5D")
+	valueReplacer = strings.NewReplacer(" ", "%20", "!", "%21", "#", "%23", "$", "%24", "%", "%25", "&", "%26", "'", "%27", "(", "%28", ")", "%29", "*", "%2A", "+", "%2B", ",", "%2C", "/", "%2F", ":", "%3A", ";", "%3B", "=", "%3D", "?", "%3F", "@", "%40", "[", "%5B", "]", "%5D", "'", "\\'")
+)
+
+// commentQuery returns the given query with the tags from the SQLCommentCarrier applied to it as a
+// prepended SQL comment. The format of the comment follows the sqlcommenter spec.
+// See https://google.github.io/sqlcommenter/spec/ for more details.
+func commentQuery(query string, tags map[string]string) string {
+	if len(tags) == 0 {
+		return ""
+	}
+	var b strings.Builder
+	// the sqlcommenter specification dictates that tags should be sorted. Since we know all injected keys,
+	// we skip a sorting operation by specifying the order of keys statically
+	orderedKeys := []string{sqlCommentDBService, sqlCommentEnv, sqlCommentParentService, sqlCommentParentVersion, sqlCommentTraceParent}
+	first := true
+	for _, k := range orderedKeys {
+		if v, ok := tags[k]; ok {
+			// we need to URL-encode both keys and values and escape single quotes in values
+			// https://google.github.io/sqlcommenter/spec/
+			key := keyReplacer.Replace(k)
+			val := valueReplacer.Replace(v)
+			if first {
+				b.WriteString("/*")
+			} else {
+				b.WriteRune(',')
+			}
+			b.WriteString(key)
+			b.WriteRune('=')
+			b.WriteRune('\'')
+			b.WriteString(val)
+			b.WriteRune('\'')
+			first = false
+		}
+	}
+	if b.Len() == 0 {
+		return query
+	}
+	b.WriteString("*/")
+	if query == "" {
+		return b.String()
+	}
+	log.Debug("Injected sql comment: %s", b.String())
+	b.WriteRune(' ')
+	b.WriteString(query)
+	return b.String()
+}
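Illustrative sketch (not part of the vendored patch) of the sqlcommenter-style output produced by commentQuery: keys in the fixed dddbs, dde, ddps, ddpv, traceparent order, values single-quoted and URL-encoded, with the whole comment prepended to the query. The service names and IDs are invented for the example:

package main

import "fmt"

func main() {
	comment := "/*dddbs='orders-db',dde='prod',ddps='orders-api',ddpv='1.2.3'," +
		"traceparent='00-0000000000000000000000000000002a-000000000000001f-01'*/"
	query := "SELECT * FROM orders WHERE id = ?"
	fmt.Println(comment + " " + query)
}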
+
+// Extract parses a SQL query for key/value attributes injected via a trace comment in order to build a span context.
+func (c *SQLCommentCarrier) Extract() (ddtrace.SpanContext, error) {
+	var ctx *spanContext
+	// There may be multiple comments within the sql query, so we must identify which one contains trace information.
+	// We look at each comment until we find one that contains a traceparent
+	if traceComment, found := findTraceComment(c.Query); found {
+		var err error
+		if ctx, err = spanContextFromTraceComment(traceComment); err != nil {
+			return nil, err
+		}
+	} else {
+		return nil, ErrSpanContextNotFound
+	}
+	if ctx.traceID.Empty() || ctx.spanID == 0 {
+		return nil, ErrSpanContextNotFound
+	}
+	return ctx, nil
+}
+
+// spanContextFromTraceComment looks for specific key/value pairs in a comment containing trace information.
+// It returns a span context with the corresponding attributes set.
+func spanContextFromTraceComment(c string) (*spanContext, error) {
+	var ctx spanContext
+	kvs := strings.Split(c, ",")
+	for _, unparsedKV := range kvs {
+		splitKV := strings.Split(unparsedKV, "=")
+		if len(splitKV) != 2 {
+			return nil, ErrSpanContextCorrupted
+		}
+		key := splitKV[0]
+		value := strings.Trim(splitKV[1], "'")
+		switch key {
+		case sqlCommentTraceParent:
+			traceIDLower, traceIDUpper, spanID, sampled, err := decodeTraceParent(value)
+			if err != nil {
+				return nil, err
+			}
+			ctx.traceID.SetLower(traceIDLower)
+			ctx.traceID.SetUpper(traceIDUpper)
+			ctx.spanID = spanID
+			ctx.setSamplingPriority(sampled, samplernames.Unknown)
+		default:
+		}
+	}
+	return &ctx, nil
+}
+
+// decodeTraceParent decodes a traceparent as per the W3C trace context spec (https://www.w3.org/TR/trace-context/#version).
+// It also supports decoding 128-bit traceparents from OpenTelemetry SQL comments.
+func decodeTraceParent(traceParent string) (traceIDLower uint64, traceIDUpper uint64, spanID uint64, sampled int, err error) {
+	if len(traceParent) < 55 {
+		return 0, 0, 0, 0, ErrSpanContextCorrupted
+	}
+	version := traceParent[0:2]
+	switch version {
+	case w3cContextVersion:
+		if traceIDUpper, err = strconv.ParseUint(traceParent[3:19], 16, 64); err != nil {
+			return 0, 0, 0, 0, ErrSpanContextCorrupted
+		}
+		if traceIDLower, err = strconv.ParseUint(traceParent[19:35], 16, 64); err != nil {
+			return 0, 0, 0, 0, ErrSpanContextCorrupted
+		}
+		if spanID, err = strconv.ParseUint(traceParent[36:52], 16, 64); err != nil {
+			return 0, 0, 0, 0, ErrSpanContextCorrupted
+		}
+		if sampled, err = strconv.Atoi(traceParent[53:55]); err != nil {
+			return 0, 0, 0, 0, ErrSpanContextCorrupted
+		}
+	default:
+	}
+	return traceIDLower, traceIDUpper, spanID, sampled, err
+}
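+
+// For reference, the fixed offsets above follow the W3C layout
+// `version(2)-traceID(32)-spanID(16)-flags(2)`. In the example value
+//
+//	00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
+//
+// traceParent[3:19] is the upper half of the trace ID (4bf92f3577b34da6),
+// traceParent[19:35] is the lower half (a3ce929d0e0e4736),
+// traceParent[36:52] is the span ID (00f067aa0ba902b7), and
+// traceParent[53:55] is the sampled flag (01).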
+
+// findTraceComment scans the query for a SQL comment that contains trace information, identified by the keyword traceparent
+func findTraceComment(query string) (traceComment string, found bool) {
+	startIndex := -1
+	containsTrace := false
+	keyLength := len(sqlCommentTraceParent)
+	qLength := len(query)
+	for i := 0; i < qLength-1; {
+		if query[i] == '/' && query[i+1] == '*' {
+			// look for leading /*
+			startIndex = i
+			i += 2
+			containsTrace = false
+		} else if query[i] == '*' && query[i+1] == '/' {
+			// look for closing */
+			if startIndex == -1 {
+				// malformed comment, it did not have a leading /*
+				return "", false
+			}
+			if !containsTrace {
+				// ignore this comment, it was not a trace comment
+				startIndex = -1
+				i += 2
+			} else {
+				// do not return the query with the leading /* or trailing */
+				return query[startIndex+2 : i], true
+			}
+		} else if !containsTrace && i+keyLength < qLength && query[i:i+keyLength] == sqlCommentTraceParent {
+			// look for occurrence of keyword in the query if not yet found and make sure we don't go out of range
+			containsTrace = true
+			i += keyLength
+		} else {
+			i++
+		}
+	}
+	return "", false
+}
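+
+// For example, findTraceComment("/*traceparent='00-<trace-id>-<span-id>-01'*/ SELECT 1")
+// returns "traceparent='00-<trace-id>-<span-id>-01'" and true, while a query whose comments
+// contain no traceparent keyword returns "" and false.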
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats.go
new file mode 100644
index 0000000000..95c5caee11
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats.go
@@ -0,0 +1,350 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+//go:generate msgp -unexported -marshal=false -o=stats_msgp.go -tests=false
+
+package tracer
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+
+	"github.com/DataDog/datadog-go/v5/statsd"
+	"github.com/DataDog/sketches-go/ddsketch"
+	"google.golang.org/protobuf/proto"
+)
+
+// aggregableSpan holds necessary information about a span that can be used to
+// aggregate statistics in a bucket.
+type aggregableSpan struct {
+	// key specifies the aggregation key under which this span is grouped
+	// inside a bucket.
+	key aggregation
+
+	Start, Duration int64
+	Error           int32
+	TopLevel        bool
+}
+
+// defaultStatsBucketSize specifies the default span of time that will be
+// covered in one stats bucket.
+var defaultStatsBucketSize = (10 * time.Second).Nanoseconds()
+
+// concentrator aggregates and stores statistics on incoming spans in time buckets,
+// flushing them occasionally to the underlying transport located in the given
+// tracer config.
+type concentrator struct {
+	// In specifies the channel to be used for feeding data to the concentrator.
+	// In order for In to have a consumer, the concentrator must be started using
+	// a call to Start.
+	In chan *aggregableSpan
+
+	// mu guards below fields
+	mu sync.Mutex
+
+	// buckets maintains a set of buckets, where the map key represents
+	// the starting point in time of that bucket, in nanoseconds.
+	buckets map[int64]*rawBucket
+
+	// stopped reports whether the concentrator is stopped (when non-zero)
+	stopped uint32
+
+	wg           sync.WaitGroup // waits for any active goroutines
+	bucketSize   int64          // the size of a bucket in nanoseconds
+	stop         chan struct{}  // closing this channel triggers shutdown
+	cfg          *config        // tracer startup configuration
+	statsdClient statsdClient   // statsd client for sending metrics.
+}
+
+// newConcentrator creates a new concentrator using the given tracer
+// configuration c. It creates buckets of bucketSize nanoseconds duration.
+func newConcentrator(c *config, bucketSize int64) *concentrator {
+	return &concentrator{
+		In:         make(chan *aggregableSpan, 10000),
+		bucketSize: bucketSize,
+		stopped:    1,
+		buckets:    make(map[int64]*rawBucket),
+		cfg:        c,
+	}
+}
+
+// alignTs returns the provided timestamp truncated to the bucket size.
+// It gives us the start time of the time bucket in which such timestamp falls.
+func alignTs(ts, bucketSize int64) int64 { return ts - ts%bucketSize }
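+
+// For example, with the default 10s bucket size, alignTs(25_000_000_000, defaultStatsBucketSize)
+// returns 20_000_000_000, i.e. the start of the bucket covering [20s, 30s).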
+
+// Start starts the concentrator. A started concentrator needs to be stopped
+// in order to gracefully shut down, using Stop.
+func (c *concentrator) Start() {
+	if atomic.SwapUint32(&c.stopped, 0) == 0 {
+		// already running
+		log.Warn("(*concentrator).Start called more than once. This is likely a programming error.")
+		return
+	}
+	c.stop = make(chan struct{})
+	c.wg.Add(1)
+	go func() {
+		defer c.wg.Done()
+		tick := time.NewTicker(time.Duration(c.bucketSize) * time.Nanosecond)
+		defer tick.Stop()
+		c.runFlusher(tick.C)
+	}()
+	c.wg.Add(1)
+	go func() {
+		defer c.wg.Done()
+		c.runIngester()
+	}()
+}
+
+// runFlusher runs the flushing loop which sends stats to the underlying transport.
+func (c *concentrator) runFlusher(tick <-chan time.Time) {
+	for {
+		select {
+		case now := <-tick:
+			c.flushAndSend(now, withoutCurrentBucket)
+		case <-c.stop:
+			return
+		}
+	}
+}
+
+// statsd returns the statsd client configured on the tracer, or a no-op client if none is set.
+func (c *concentrator) statsd() statsdClient {
+	if c.statsdClient == nil {
+		return &statsd.NoOpClient{}
+	}
+	return c.statsdClient
+}
+
+// runIngester runs the loop which accepts incoming data on the concentrator's In
+// channel.
+func (c *concentrator) runIngester() {
+	for {
+		select {
+		case s := <-c.In:
+			c.statsd().Incr("datadog.tracer.stats.spans_in", nil, 1)
+			c.add(s)
+		case <-c.stop:
+			return
+		}
+	}
+}
+
+// add adds s into the concentrator's internal stats buckets.
+func (c *concentrator) add(s *aggregableSpan) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	btime := alignTs(s.Start+s.Duration, c.bucketSize)
+	b, ok := c.buckets[btime]
+	if !ok {
+		b = newRawBucket(uint64(btime), c.bucketSize)
+		c.buckets[btime] = b
+	}
+	b.handleSpan(s)
+}
+
+// Stop stops the concentrator and blocks until the operation completes.
+func (c *concentrator) Stop() {
+	if atomic.SwapUint32(&c.stopped, 1) > 0 {
+		return
+	}
+	close(c.stop)
+	c.wg.Wait()
+drain:
+	for {
+		select {
+		case s := <-c.In:
+			c.statsd().Incr("datadog.tracer.stats.spans_in", nil, 1)
+			c.add(s)
+		default:
+			break drain
+		}
+	}
+	c.flushAndSend(time.Now(), withCurrentBucket)
+}
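+
+// Lifecycle sketch (illustrative, not part of the upstream source): the tracer builds a
+// concentrator with newConcentrator(cfg, defaultStatsBucketSize), calls Start when client-side
+// stats computation is enabled, feeds aggregable span data through the In channel, and calls
+// Stop on shutdown, which drains In and flushes the current bucket one last time.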
+
+const (
+	withCurrentBucket    = true
+	withoutCurrentBucket = false
+)
+
+// flushAndSend flushes all the stats buckets with the given timestamp and sends them using the transport specified in
+// the concentrator config. The current bucket is only included if includeCurrent is true, such as during shutdown.
+func (c *concentrator) flushAndSend(timenow time.Time, includeCurrent bool) {
+	sp := func() statsPayload {
+		c.mu.Lock()
+		defer c.mu.Unlock()
+		now := timenow.UnixNano()
+		sp := statsPayload{
+			Hostname: c.cfg.hostname,
+			Env:      c.cfg.env,
+			Version:  c.cfg.version,
+			Stats:    make([]statsBucket, 0, len(c.buckets)),
+		}
+		for ts, srb := range c.buckets {
+			if !includeCurrent && ts > now-c.bucketSize {
+				// do not flush the current bucket
+				continue
+			}
+			log.Debug("Flushing bucket %d", ts)
+			sp.Stats = append(sp.Stats, srb.Export())
+			delete(c.buckets, ts)
+		}
+		return sp
+	}()
+
+	if len(sp.Stats) == 0 {
+		// nothing to flush
+		return
+	}
+	c.statsd().Incr("datadog.tracer.stats.flush_payloads", nil, 1)
+	c.statsd().Incr("datadog.tracer.stats.flush_buckets", nil, float64(len(sp.Stats)))
+	if err := c.cfg.transport.sendStats(&sp); err != nil {
+		c.statsd().Incr("datadog.tracer.stats.flush_errors", nil, 1)
+		log.Error("Error sending stats payload: %v", err)
+	}
+}
+
+// aggregation specifies a uniquely identifiable key under which a certain set
+// of stats are grouped inside a bucket.
+type aggregation struct {
+	Name       string
+	Type       string
+	Resource   string
+	Service    string
+	StatusCode uint32
+	Synthetics bool
+}
+
+type rawBucket struct {
+	start, duration uint64
+	data            map[aggregation]*rawGroupedStats
+}
+
+func newRawBucket(btime uint64, bsize int64) *rawBucket {
+	return &rawBucket{
+		start:    btime,
+		duration: uint64(bsize),
+		data:     make(map[aggregation]*rawGroupedStats),
+	}
+}
+
+func (sb *rawBucket) handleSpan(s *aggregableSpan) {
+	gs, ok := sb.data[s.key]
+	if !ok {
+		gs = newRawGroupedStats()
+		sb.data[s.key] = gs
+	}
+	if s.TopLevel {
+		gs.topLevelHits++
+	}
+	gs.hits++
+	if s.Error != 0 {
+		gs.errors++
+	}
+	gs.duration += uint64(s.Duration)
+	// alter resolution of duration distro
+	trundur := nsTimestampToFloat(s.Duration)
+	if s.Error != 0 {
+		gs.errDistribution.Add(trundur)
+	} else {
+		gs.okDistribution.Add(trundur)
+	}
+}
+
+// Export transforms a RawBucket into a statsBucket, typically used
+// before communicating data to the API, as RawBucket is the internal
+// type while statsBucket is the public, shared one.
+func (sb *rawBucket) Export() statsBucket {
+	csb := statsBucket{
+		Start:    sb.start,
+		Duration: sb.duration,
+		Stats:    make([]groupedStats, 0, len(sb.data)),
+	}
+	for k, v := range sb.data {
+		b, err := v.export(k)
+		if err != nil {
+			log.Error("Could not export stats bucket: %v.", err)
+			continue
+		}
+		csb.Stats = append(csb.Stats, b)
+	}
+	return csb
+}
+
+type rawGroupedStats struct {
+	hits            uint64
+	topLevelHits    uint64
+	errors          uint64
+	duration        uint64
+	okDistribution  *ddsketch.DDSketch
+	errDistribution *ddsketch.DDSketch
+}
+
+func newRawGroupedStats() *rawGroupedStats {
+	const (
+		// relativeAccuracy is the value accuracy we have on the percentiles. For example, we can
+		// say that p99 is 100ms +- 1ms
+		relativeAccuracy = 0.01
+		// maxNumBins is the maximum number of bins of the ddSketch we use to store percentiles.
+		// It can affect relative accuracy, but in practice, 2048 bins is enough to have 1% relative accuracy from
+		// 80 microseconds to 1 year: http://www.vldb.org/pvldb/vol12/p2195-masson.pdf
+		maxNumBins = 2048
+	)
+	okSketch, err := ddsketch.LogCollapsingLowestDenseDDSketch(relativeAccuracy, maxNumBins)
+	if err != nil {
+		log.Error("Error when creating ddsketch: %v", err)
+	}
+	errSketch, err := ddsketch.LogCollapsingLowestDenseDDSketch(relativeAccuracy, maxNumBins)
+	if err != nil {
+		log.Error("Error when creating ddsketch: %v", err)
+	}
+	return &rawGroupedStats{
+		okDistribution:  okSketch,
+		errDistribution: errSketch,
+	}
+}
+
+func (s *rawGroupedStats) export(k aggregation) (groupedStats, error) {
+	msg := s.okDistribution.ToProto()
+	okSummary, err := proto.Marshal(msg)
+	if err != nil {
+		return groupedStats{}, err
+	}
+	msg = s.errDistribution.ToProto()
+	errSummary, err := proto.Marshal(msg)
+	if err != nil {
+		return groupedStats{}, err
+	}
+	return groupedStats{
+		Service:        k.Service,
+		Name:           k.Name,
+		Resource:       k.Resource,
+		HTTPStatusCode: k.StatusCode,
+		Type:           k.Type,
+		Hits:           s.hits,
+		Errors:         s.errors,
+		Duration:       s.duration,
+		TopLevelHits:   s.topLevelHits,
+		OkSummary:      okSummary,
+		ErrorSummary:   errSummary,
+		Synthetics:     k.Synthetics,
+	}, nil
+}
+
+// nsTimestampToFloat converts a nanosec timestamp into a float nanosecond timestamp truncated to a fixed precision
+func nsTimestampToFloat(ns int64) float64 {
+	// 10 bits precision (any value will be +/- 1/1024)
+	const roundMask int64 = 1 << 10
+	var shift uint
+	for ns > roundMask {
+		ns = ns >> 1
+		shift++
+	}
+	return float64(ns << shift)
+}
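+
+// For example, nsTimestampToFloat(1234567) returns 1232896: the value is shifted right 11 times
+// to fall under the 1<<10 mask (yielding 602) and then shifted back, discarding the low-order bits.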
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload.go
new file mode 100644
index 0000000000..35a68b46b9
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload.go
@@ -0,0 +1,56 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+//go:generate msgp -unexported -marshal=false -o=stats_payload_msgp.go -tests=false
+
+package tracer
+
+// statsPayload specifies information about client computed stats and is encoded
+// to be sent to the agent.
+type statsPayload struct {
+	// Hostname specifies the hostname of the application.
+	Hostname string
+
+	// Env specifies the env. of the application, as defined by the user.
+	Env string
+
+	// Version specifies the application version.
+	Version string
+
+	// Stats holds all stats buckets computed within this payload.
+	Stats []statsBucket
+}
+
+// statsBucket specifies a set of stats computed over a duration.
+type statsBucket struct {
+	// Start specifies the beginning of this bucket.
+	Start uint64
+
+	// Duration specifies the duration of this bucket.
+	Duration uint64
+
+	// Stats contains a set of statistics computed for the duration of this bucket.
+	Stats []groupedStats
+}
+
+// groupedStats contains a set of statistics grouped under various aggregation keys.
+type groupedStats struct {
+	// These fields indicate the properties under which the stats were aggregated.
+	Service        string `json:"service,omitempty"`
+	Name           string `json:"name,omitempty"`
+	Resource       string `json:"resource,omitempty"`
+	HTTPStatusCode uint32 `json:"HTTP_status_code,omitempty"`
+	Type           string `json:"type,omitempty"`
+	DBType         string `json:"DB_type,omitempty"`
+
+	// These fields specify the stats for the above aggregation.
+	Hits         uint64 `json:"hits,omitempty"`
+	Errors       uint64 `json:"errors,omitempty"`
+	Duration     uint64 `json:"duration,omitempty"`
+	OkSummary    []byte `json:"okSummary,omitempty"`
+	ErrorSummary []byte `json:"errorSummary,omitempty"`
+	Synthetics   bool   `json:"synthetics,omitempty"`
+	TopLevelHits uint64 `json:"topLevelHits,omitempty"`
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload_msgp.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload_msgp.go
new file mode 100644
index 0000000000..7d15d036e7
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload_msgp.go
@@ -0,0 +1,450 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+// NOTE: THIS FILE WAS PRODUCED BY THE
+// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
+// DO NOT EDIT
+
+import (
+	"github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *groupedStats) DecodeMsg(dc *msgp.Reader) (err error) {
+	var field []byte
+	_ = field
+	var zb0001 uint32
+	zb0001, err = dc.ReadMapHeader()
+	if err != nil {
+		return
+	}
+	for zb0001 > 0 {
+		zb0001--
+		field, err = dc.ReadMapKeyPtr()
+		if err != nil {
+			return
+		}
+		switch msgp.UnsafeString(field) {
+		case "Service":
+			z.Service, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "Name":
+			z.Name, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "Resource":
+			z.Resource, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "HTTPStatusCode":
+			z.HTTPStatusCode, err = dc.ReadUint32()
+			if err != nil {
+				return
+			}
+		case "Type":
+			z.Type, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "DBType":
+			z.DBType, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "Hits":
+			z.Hits, err = dc.ReadUint64()
+			if err != nil {
+				return
+			}
+		case "Errors":
+			z.Errors, err = dc.ReadUint64()
+			if err != nil {
+				return
+			}
+		case "Duration":
+			z.Duration, err = dc.ReadUint64()
+			if err != nil {
+				return
+			}
+		case "OkSummary":
+			z.OkSummary, err = dc.ReadBytes(z.OkSummary)
+			if err != nil {
+				return
+			}
+		case "ErrorSummary":
+			z.ErrorSummary, err = dc.ReadBytes(z.ErrorSummary)
+			if err != nil {
+				return
+			}
+		case "Synthetics":
+			z.Synthetics, err = dc.ReadBool()
+			if err != nil {
+				return
+			}
+		case "TopLevelHits":
+			z.TopLevelHits, err = dc.ReadUint64()
+			if err != nil {
+				return
+			}
+		default:
+			err = dc.Skip()
+			if err != nil {
+				return
+			}
+		}
+	}
+	return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
+	// map header, size 13
+	// write "Service"
+	err = en.Append(0x8d, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.Service)
+	if err != nil {
+		return
+	}
+	// write "Name"
+	err = en.Append(0xa4, 0x4e, 0x61, 0x6d, 0x65)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.Name)
+	if err != nil {
+		return
+	}
+	// write "Resource"
+	err = en.Append(0xa8, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.Resource)
+	if err != nil {
+		return
+	}
+	// write "HTTPStatusCode"
+	err = en.Append(0xae, 0x48, 0x54, 0x54, 0x50, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint32(z.HTTPStatusCode)
+	if err != nil {
+		return
+	}
+	// write "Type"
+	err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.Type)
+	if err != nil {
+		return
+	}
+	// write "DBType"
+	err = en.Append(0xa6, 0x44, 0x42, 0x54, 0x79, 0x70, 0x65)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.DBType)
+	if err != nil {
+		return
+	}
+	// write "Hits"
+	err = en.Append(0xa4, 0x48, 0x69, 0x74, 0x73)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.Hits)
+	if err != nil {
+		return
+	}
+	// write "Errors"
+	err = en.Append(0xa6, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.Errors)
+	if err != nil {
+		return
+	}
+	// write "Duration"
+	err = en.Append(0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.Duration)
+	if err != nil {
+		return
+	}
+	// write "OkSummary"
+	err = en.Append(0xa9, 0x4f, 0x6b, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79)
+	if err != nil {
+		return
+	}
+	err = en.WriteBytes(z.OkSummary)
+	if err != nil {
+		return
+	}
+	// write "ErrorSummary"
+	err = en.Append(0xac, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79)
+	if err != nil {
+		return
+	}
+	err = en.WriteBytes(z.ErrorSummary)
+	if err != nil {
+		return
+	}
+	// write "Synthetics"
+	err = en.Append(0xaa, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x73)
+	if err != nil {
+		return
+	}
+	err = en.WriteBool(z.Synthetics)
+	if err != nil {
+		return
+	}
+	// write "TopLevelHits"
+	err = en.Append(0xac, 0x54, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.TopLevelHits)
+	if err != nil {
+		return
+	}
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *groupedStats) Msgsize() (s int) {
+	s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 15 + msgp.Uint32Size + 5 + msgp.StringPrefixSize + len(z.Type) + 7 + msgp.StringPrefixSize + len(z.DBType) + 5 + msgp.Uint64Size + 7 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.BytesPrefixSize + len(z.OkSummary) + 13 + msgp.BytesPrefixSize + len(z.ErrorSummary) + 11 + msgp.BoolSize + 13 + msgp.Uint64Size
+	return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *statsBucket) DecodeMsg(dc *msgp.Reader) (err error) {
+	var field []byte
+	_ = field
+	var zb0001 uint32
+	zb0001, err = dc.ReadMapHeader()
+	if err != nil {
+		return
+	}
+	for zb0001 > 0 {
+		zb0001--
+		field, err = dc.ReadMapKeyPtr()
+		if err != nil {
+			return
+		}
+		switch msgp.UnsafeString(field) {
+		case "Start":
+			z.Start, err = dc.ReadUint64()
+			if err != nil {
+				return
+			}
+		case "Duration":
+			z.Duration, err = dc.ReadUint64()
+			if err != nil {
+				return
+			}
+		case "Stats":
+			var zb0002 uint32
+			zb0002, err = dc.ReadArrayHeader()
+			if err != nil {
+				return
+			}
+			if cap(z.Stats) >= int(zb0002) {
+				z.Stats = (z.Stats)[:zb0002]
+			} else {
+				z.Stats = make([]groupedStats, zb0002)
+			}
+			for za0001 := range z.Stats {
+				err = z.Stats[za0001].DecodeMsg(dc)
+				if err != nil {
+					return
+				}
+			}
+		default:
+			err = dc.Skip()
+			if err != nil {
+				return
+			}
+		}
+	}
+	return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *statsBucket) EncodeMsg(en *msgp.Writer) (err error) {
+	// map header, size 3
+	// write "Start"
+	err = en.Append(0x83, 0xa5, 0x53, 0x74, 0x61, 0x72, 0x74)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.Start)
+	if err != nil {
+		return
+	}
+	// write "Duration"
+	err = en.Append(0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.Duration)
+	if err != nil {
+		return
+	}
+	// write "Stats"
+	err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73)
+	if err != nil {
+		return
+	}
+	err = en.WriteArrayHeader(uint32(len(z.Stats)))
+	if err != nil {
+		return
+	}
+	for za0001 := range z.Stats {
+		err = z.Stats[za0001].EncodeMsg(en)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *statsBucket) Msgsize() (s int) {
+	s = 1 + 6 + msgp.Uint64Size + 9 + msgp.Uint64Size + 6 + msgp.ArrayHeaderSize
+	for za0001 := range z.Stats {
+		s += z.Stats[za0001].Msgsize()
+	}
+	return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *statsPayload) DecodeMsg(dc *msgp.Reader) (err error) {
+	var field []byte
+	_ = field
+	var zb0001 uint32
+	zb0001, err = dc.ReadMapHeader()
+	if err != nil {
+		return
+	}
+	for zb0001 > 0 {
+		zb0001--
+		field, err = dc.ReadMapKeyPtr()
+		if err != nil {
+			return
+		}
+		switch msgp.UnsafeString(field) {
+		case "Hostname":
+			z.Hostname, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "Env":
+			z.Env, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "Version":
+			z.Version, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "Stats":
+			var zb0002 uint32
+			zb0002, err = dc.ReadArrayHeader()
+			if err != nil {
+				return
+			}
+			if cap(z.Stats) >= int(zb0002) {
+				z.Stats = (z.Stats)[:zb0002]
+			} else {
+				z.Stats = make([]statsBucket, zb0002)
+			}
+			for za0001 := range z.Stats {
+				err = z.Stats[za0001].DecodeMsg(dc)
+				if err != nil {
+					return
+				}
+			}
+		default:
+			err = dc.Skip()
+			if err != nil {
+				return
+			}
+		}
+	}
+	return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *statsPayload) EncodeMsg(en *msgp.Writer) (err error) {
+	// map header, size 4
+	// write "Hostname"
+	err = en.Append(0x84, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.Hostname)
+	if err != nil {
+		return
+	}
+	// write "Env"
+	err = en.Append(0xa3, 0x45, 0x6e, 0x76)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.Env)
+	if err != nil {
+		return
+	}
+	// write "Version"
+	err = en.Append(0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.Version)
+	if err != nil {
+		return
+	}
+	// write "Stats"
+	err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73)
+	if err != nil {
+		return
+	}
+	err = en.WriteArrayHeader(uint32(len(z.Stats)))
+	if err != nil {
+		return
+	}
+	for za0001 := range z.Stats {
+		err = z.Stats[za0001].EncodeMsg(en)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *statsPayload) Msgsize() (s int) {
+	s = 1 + 9 + msgp.StringPrefixSize + len(z.Hostname) + 4 + msgp.StringPrefixSize + len(z.Env) + 8 + msgp.StringPrefixSize + len(z.Version) + 6 + msgp.ArrayHeaderSize
+	for za0001 := range z.Stats {
+		s += z.Stats[za0001].Msgsize()
+	}
+	return
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/telemetry.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/telemetry.go
new file mode 100644
index 0000000000..ae6ffdb4d8
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/telemetry.go
@@ -0,0 +1,79 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"fmt"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
+)
+
+// startTelemetry starts the global instrumentation telemetry client with tracer data
+// unless instrumentation telemetry is disabled via the DD_INSTRUMENTATION_TELEMETRY_ENABLED
+// env var.
+// If the telemetry client has already been started by the profiler, then
+// an app-product-change event is sent with appsec information and an app-client-configuration-change
+// event is sent with tracer config data.
+// Note that the tracer is not considered a standalone product by telemetry, so we cannot send
+// an app-product-change event for the tracer.
+func startTelemetry(c *config) {
+	if telemetry.Disabled() {
+		// Do not do extra work populating config data if instrumentation telemetry is disabled.
+		return
+	}
+	telemetry.GlobalClient.ApplyOps(
+		telemetry.WithService(c.serviceName),
+		telemetry.WithEnv(c.env),
+		telemetry.WithHTTPClient(c.httpClient),
+		// c.logToStdout is true if serverless is turned on
+		telemetry.WithURL(c.logToStdout, c.agentURL.String()),
+		telemetry.WithVersion(c.version),
+	)
+	telemetryConfigs := []telemetry.Configuration{
+		{Name: "trace_debug_enabled", Value: c.debug},
+		{Name: "agent_feature_drop_p0s", Value: c.agent.DropP0s},
+		{Name: "stats_computation_enabled", Value: c.agent.Stats},
+		{Name: "dogstatsd_port", Value: c.agent.StatsdPort},
+		{Name: "lambda_mode", Value: c.logToStdout},
+		{Name: "send_retries", Value: c.sendRetries},
+		{Name: "trace_startup_logs_enabled", Value: c.logStartup},
+		{Name: "service", Value: c.serviceName},
+		{Name: "universal_version", Value: c.universalVersion},
+		{Name: "env", Value: c.env},
+		{Name: "agent_url", Value: c.agentURL.String()},
+		{Name: "agent_hostname", Value: c.hostname},
+		{Name: "runtime_metrics_enabled", Value: c.runtimeMetrics},
+		{Name: "dogstatsd_addr", Value: c.dogstatsdAddr},
+		{Name: "debug_stack_enabled", Value: !c.noDebugStack},
+		{Name: "profiling_hotspots_enabled", Value: c.profilerHotspots},
+		{Name: "profiling_endpoints_enabled", Value: c.profilerEndpoints},
+		{Name: "trace_enabled", Value: c.enabled},
+	}
+	for k, v := range c.featureFlags {
+		telemetryConfigs = append(telemetryConfigs, telemetry.Configuration{Name: k, Value: v})
+	}
+	for k, v := range c.serviceMappings {
+		telemetryConfigs = append(telemetryConfigs, telemetry.Configuration{Name: "service_mapping_" + k, Value: v})
+	}
+	for k, v := range c.globalTags {
+		telemetryConfigs = append(telemetryConfigs, telemetry.Configuration{Name: "global_tag_" + k, Value: v})
+	}
+	rules := append(c.spanRules, c.traceRules...)
+	for _, rule := range rules {
+		var service string
+		var name string
+		if rule.Service != nil {
+			service = rule.Service.String()
+		}
+		if rule.Name != nil {
+			name = rule.Name.String()
+		}
+		telemetryConfigs = append(telemetryConfigs,
+			telemetry.Configuration{Name: fmt.Sprintf("sr_%s_(%s)_(%s)", rule.ruleType.String(), service, name),
+				Value: fmt.Sprintf("rate:%f_maxPerSecond:%f", rule.Rate, rule.MaxPerSecond)})
+	}
+	telemetry.GlobalClient.ProductStart(telemetry.NamespaceTracers, telemetryConfigs)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap.go
new file mode 100644
index 0000000000..9a6dfb8eac
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap.go
@@ -0,0 +1,998 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"fmt"
+	"net/http"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
+)
+
+// HTTPHeadersCarrier wraps an http.Header as a TextMapWriter and TextMapReader, allowing
+// it to be used with the provided Propagator implementation.
+type HTTPHeadersCarrier http.Header
+
+var _ TextMapWriter = (*HTTPHeadersCarrier)(nil)
+var _ TextMapReader = (*HTTPHeadersCarrier)(nil)
+
+// Set implements TextMapWriter.
+func (c HTTPHeadersCarrier) Set(key, val string) {
+	http.Header(c).Set(key, val)
+}
+
+// ForeachKey implements TextMapReader.
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
+	for k, vals := range c {
+		for _, v := range vals {
+			if err := handler(k, v); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// TextMapCarrier allows the use of a regular map[string]string as both TextMapWriter
+// and TextMapReader, making it compatible with the provided Propagator.
+type TextMapCarrier map[string]string
+
+var _ TextMapWriter = (*TextMapCarrier)(nil)
+var _ TextMapReader = (*TextMapCarrier)(nil)
+
+// Set implements TextMapWriter.
+func (c TextMapCarrier) Set(key, val string) {
+	c[key] = val
+}
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
+	for k, v := range c {
+		if err := handler(k, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
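+
+// A usage sketch (illustrative only, names are examples): both carrier types are passed to the
+// package-level Inject and Extract helpers, e.g.
+//
+//	// client side, before sending the request
+//	_ = tracer.Inject(span.Context(), tracer.HTTPHeadersCarrier(req.Header))
+//
+//	// server side, when starting the handler span
+//	if sctx, err := tracer.Extract(tracer.HTTPHeadersCarrier(r.Header)); err == nil {
+//		span := tracer.StartSpan("web.request", tracer.ChildOf(sctx))
+//		defer span.Finish()
+//	}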
+
+const (
+	headerPropagationStyleInject  = "DD_TRACE_PROPAGATION_STYLE_INJECT"
+	headerPropagationStyleExtract = "DD_TRACE_PROPAGATION_STYLE_EXTRACT"
+	headerPropagationStyle        = "DD_TRACE_PROPAGATION_STYLE"
+
+	headerPropagationStyleInjectDeprecated  = "DD_PROPAGATION_STYLE_INJECT"  // deprecated
+	headerPropagationStyleExtractDeprecated = "DD_PROPAGATION_STYLE_EXTRACT" // deprecated
+)
+
+const (
+	// DefaultBaggageHeaderPrefix specifies the prefix that will be used in
+	// HTTP headers or text maps to prefix baggage keys.
+	DefaultBaggageHeaderPrefix = "ot-baggage-"
+
+	// DefaultTraceIDHeader specifies the key that will be used in HTTP headers
+	// or text maps to store the trace ID.
+	DefaultTraceIDHeader = "x-datadog-trace-id"
+
+	// DefaultParentIDHeader specifies the key that will be used in HTTP headers
+	// or text maps to store the parent ID.
+	DefaultParentIDHeader = "x-datadog-parent-id"
+
+	// DefaultPriorityHeader specifies the key that will be used in HTTP headers
+	// or text maps to store the sampling priority value.
+	DefaultPriorityHeader = "x-datadog-sampling-priority"
+)
+
+// originHeader specifies the name of the header indicating the origin of the trace.
+// It is used with the Synthetics product and usually has the value "synthetics".
+const originHeader = "x-datadog-origin"
+
+// traceTagsHeader holds the propagated trace tags
+const traceTagsHeader = "x-datadog-tags"
+
+// propagationExtractMaxSize limits the total size of incoming propagated tags to parse
+const propagationExtractMaxSize = 512
+
+// PropagatorConfig defines the configuration for initializing a propagator.
+type PropagatorConfig struct {
+	// BaggagePrefix specifies the prefix that will be used to store baggage
+	// items in a map. It defaults to DefaultBaggageHeaderPrefix.
+	BaggagePrefix string
+
+	// TraceHeader specifies the map key that will be used to store the trace ID.
+	// It defaults to DefaultTraceIDHeader.
+	TraceHeader string
+
+	// ParentHeader specifies the map key that will be used to store the parent ID.
+	// It defaults to DefaultParentIDHeader.
+	ParentHeader string
+
+	// PriorityHeader specifies the map key that will be used to store the sampling priority.
+	// It defaults to DefaultPriorityHeader.
+	PriorityHeader string
+
+	// MaxTagsHeaderLen specifies the maximum length of trace tags header value.
+	// It defaults to defaultMaxTagsHeaderLen, a value of 0 disables propagation of tags.
+	MaxTagsHeaderLen int
+
+	// B3 specifies if B3 headers should be added for trace propagation.
+	// See https://github.com/openzipkin/b3-propagation
+	B3 bool
+}
+
+// NewPropagator returns a new propagator which uses TextMap to inject
+// and extract values. It propagates trace and span IDs and baggage.
+// To use the defaults, nil may be provided in place of the config.
+//
+// The inject and extract propagators are determined using environment variables
+// with the following order of precedence:
+//  1. DD_TRACE_PROPAGATION_STYLE_INJECT
+//  2. DD_PROPAGATION_STYLE_INJECT (deprecated)
+//  3. DD_TRACE_PROPAGATION_STYLE (applies to both inject and extract)
+//  4. If none of the above, use default values
+func NewPropagator(cfg *PropagatorConfig, propagators ...Propagator) Propagator {
+	if cfg == nil {
+		cfg = new(PropagatorConfig)
+	}
+	if cfg.BaggagePrefix == "" {
+		cfg.BaggagePrefix = DefaultBaggageHeaderPrefix
+	}
+	if cfg.TraceHeader == "" {
+		cfg.TraceHeader = DefaultTraceIDHeader
+	}
+	if cfg.ParentHeader == "" {
+		cfg.ParentHeader = DefaultParentIDHeader
+	}
+	if cfg.PriorityHeader == "" {
+		cfg.PriorityHeader = DefaultPriorityHeader
+	}
+	if len(propagators) > 0 {
+		return &chainedPropagator{
+			injectors:  propagators,
+			extractors: propagators,
+		}
+	}
+	injectorsPs := os.Getenv(headerPropagationStyleInject)
+	if injectorsPs == "" {
+		if injectorsPs = os.Getenv(headerPropagationStyleInjectDeprecated); injectorsPs != "" {
+			log.Warn("%v is deprecated. Please use %v or %v instead.\n", headerPropagationStyleInjectDeprecated, headerPropagationStyleInject, headerPropagationStyle)
+		}
+	}
+	extractorsPs := os.Getenv(headerPropagationStyleExtract)
+	if extractorsPs == "" {
+		if extractorsPs = os.Getenv(headerPropagationStyleExtractDeprecated); extractorsPs != "" {
+			log.Warn("%v is deprecated. Please use %v or %v instead.\n", headerPropagationStyleExtractDeprecated, headerPropagationStyleExtract, headerPropagationStyle)
+		}
+	}
+	return &chainedPropagator{
+		injectors:  getPropagators(cfg, injectorsPs),
+		extractors: getPropagators(cfg, extractorsPs),
+	}
+}
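+
+// A configuration sketch (illustrative only): a custom propagator built here is typically
+// installed on the tracer at startup, e.g.
+//
+//	tracer.Start(tracer.WithPropagator(tracer.NewPropagator(&tracer.PropagatorConfig{
+//		B3: true, // also emit/accept x-b3-* headers
+//	})))
+//
+// When no propagator is set explicitly, the environment variables listed above select the styles.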
+
+// chainedPropagator implements Propagator and applies a list of injectors and extractors.
+// When injecting, all injectors are called to propagate the span context.
+// When extracting, it tries each extractor, selecting the first successful one.
+type chainedPropagator struct {
+	injectors  []Propagator
+	extractors []Propagator
+}
+
+// getPropagators returns a list of propagators based on ps, which is a comma separated
+// list of propagators. If the list doesn't contain any valid values, the
+// default propagator will be returned. Any invalid values in the list will log
+// a warning and be ignored.
+func getPropagators(cfg *PropagatorConfig, ps string) []Propagator {
+	dd := &propagator{cfg}
+	defaultPs := []Propagator{&propagatorW3c{}, dd}
+	if cfg.B3 {
+		defaultPs = append(defaultPs, &propagatorB3{})
+	}
+	if ps == "" {
+		if prop := os.Getenv(headerPropagationStyle); prop != "" {
+			ps = prop // use the generic DD_TRACE_PROPAGATION_STYLE if set
+		} else {
+			return defaultPs // no env set, so use default from configuration
+		}
+	}
+	ps = strings.ToLower(ps)
+	if ps == "none" {
+		return nil
+	}
+	var list []Propagator
+	if cfg.B3 {
+		list = append(list, &propagatorB3{})
+	}
+	for _, v := range strings.Split(ps, ",") {
+		switch strings.ToLower(v) {
+		case "datadog":
+			list = append(list, dd)
+		case "tracecontext":
+			list = append([]Propagator{&propagatorW3c{}}, list...)
+		case "b3", "b3multi":
+			if !cfg.B3 {
+				// propagatorB3 hasn't already been added, add a new one.
+				list = append(list, &propagatorB3{})
+			}
+		case "b3 single header":
+			list = append(list, &propagatorB3SingleHeader{})
+		case "none":
+			log.Warn("Propagator \"none\" has no effect when combined with other propagators. " +
+				"To disable the propagator, set to `none`")
+		default:
+			log.Warn("unrecognized propagator: %s\n", v)
+		}
+	}
+	if len(list) == 0 {
+		return defaultPs // no valid propagators, so return default
+	}
+	return list
+}
+
+// Inject defines the Propagator to propagate SpanContext data
+// out of the current process. The implementation propagates the
+// TraceID and the current active SpanID, as well as the Span baggage.
+func (p *chainedPropagator) Inject(spanCtx ddtrace.SpanContext, carrier interface{}) error {
+	for _, v := range p.injectors {
+		err := v.Inject(spanCtx, carrier)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Extract implements Propagator.
+func (p *chainedPropagator) Extract(carrier interface{}) (ddtrace.SpanContext, error) {
+	for _, v := range p.extractors {
+		ctx, err := v.Extract(carrier)
+		if ctx != nil {
+			// first extractor returns
+			log.Debug("Extracted span context: %#v", ctx)
+			return ctx, nil
+		}
+		if err == ErrSpanContextNotFound {
+			continue
+		}
+		return nil, err
+	}
+	return nil, ErrSpanContextNotFound
+}
+
+// propagator implements Propagator and injects/extracts span contexts
+// using datadog headers. Only TextMap carriers are supported.
+type propagator struct {
+	cfg *PropagatorConfig
+}
+
+func (p *propagator) Inject(spanCtx ddtrace.SpanContext, carrier interface{}) error {
+	switch c := carrier.(type) {
+	case TextMapWriter:
+		return p.injectTextMap(spanCtx, c)
+	default:
+		return ErrInvalidCarrier
+	}
+}
+
+func (p *propagator) injectTextMap(spanCtx ddtrace.SpanContext, writer TextMapWriter) error {
+	ctx, ok := spanCtx.(*spanContext)
+	if !ok || ctx.traceID.Empty() || ctx.spanID == 0 {
+		return ErrInvalidSpanContext
+	}
+	// propagate the TraceID and the current active SpanID
+	if ctx.traceID.HasUpper() {
+		setPropagatingTag(ctx, keyTraceID128, ctx.traceID.UpperHex())
+	} else if ctx.trace != nil {
+		ctx.trace.unsetPropagatingTag(keyTraceID128)
+	}
+	writer.Set(p.cfg.TraceHeader, strconv.FormatUint(ctx.traceID.Lower(), 10))
+	writer.Set(p.cfg.ParentHeader, strconv.FormatUint(ctx.spanID, 10))
+	if sp, ok := ctx.samplingPriority(); ok {
+		writer.Set(p.cfg.PriorityHeader, strconv.Itoa(sp))
+	}
+	if ctx.origin != "" {
+		writer.Set(originHeader, ctx.origin)
+	}
+	// propagate OpenTracing baggage
+	for k, v := range ctx.baggage {
+		writer.Set(p.cfg.BaggagePrefix+k, v)
+	}
+	if p.cfg.MaxTagsHeaderLen <= 0 {
+		return nil
+	}
+	if s := p.marshalPropagatingTags(ctx); len(s) > 0 {
+		writer.Set(traceTagsHeader, s)
+	}
+	return nil
+}
+
+// marshalPropagatingTags marshals all propagating tags included in ctx to a comma separated string
+func (p *propagator) marshalPropagatingTags(ctx *spanContext) string {
+	var sb strings.Builder
+	if ctx.trace == nil {
+		return ""
+	}
+
+	var properr string
+	ctx.trace.iteratePropagatingTags(func(k, v string) bool {
+		if err := isValidPropagatableTag(k, v); err != nil {
+			log.Warn("Won't propagate tag '%s': %v", k, err.Error())
+			properr = "encoding_error"
+			return true
+		}
+		if sb.Len()+len(k)+len(v) > p.cfg.MaxTagsHeaderLen {
+			sb.Reset()
+			log.Warn("Won't propagate tag: maximum trace tags header len (%d) reached.", p.cfg.MaxTagsHeaderLen)
+			properr = "inject_max_size"
+			return false
+		}
+		if sb.Len() > 0 {
+			sb.WriteByte(',')
+		}
+		sb.WriteString(k)
+		sb.WriteByte('=')
+		sb.WriteString(v)
+		return true
+	})
+	if properr != "" {
+		ctx.trace.setTag(keyPropagationError, properr)
+	}
+	return sb.String()
+}
+
+func (p *propagator) Extract(carrier interface{}) (ddtrace.SpanContext, error) {
+	switch c := carrier.(type) {
+	case TextMapReader:
+		return p.extractTextMap(c)
+	default:
+		return nil, ErrInvalidCarrier
+	}
+}
+
+func (p *propagator) extractTextMap(reader TextMapReader) (ddtrace.SpanContext, error) {
+	var ctx spanContext
+	err := reader.ForeachKey(func(k, v string) error {
+		var err error
+		key := strings.ToLower(k)
+		switch key {
+		case p.cfg.TraceHeader:
+			var lowerTid uint64
+			lowerTid, err = parseUint64(v)
+			if err != nil {
+				return ErrSpanContextCorrupted
+			}
+			ctx.traceID.SetLower(lowerTid)
+		case p.cfg.ParentHeader:
+			ctx.spanID, err = parseUint64(v)
+			if err != nil {
+				return ErrSpanContextCorrupted
+			}
+		case p.cfg.PriorityHeader:
+			priority, err := strconv.Atoi(v)
+			if err != nil {
+				return ErrSpanContextCorrupted
+			}
+			ctx.setSamplingPriority(priority, samplernames.Unknown)
+		case originHeader:
+			ctx.origin = v
+		case traceTagsHeader:
+			unmarshalPropagatingTags(&ctx, v)
+		default:
+			if strings.HasPrefix(key, p.cfg.BaggagePrefix) {
+				ctx.setBaggageItem(strings.TrimPrefix(key, p.cfg.BaggagePrefix), v)
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	if ctx.trace != nil {
+		tid := ctx.trace.propagatingTag(keyTraceID128)
+		if err := validateTID(tid); err != nil {
+			log.Debug("Invalid hex traceID: %s", err)
+			ctx.trace.unsetPropagatingTag(keyTraceID128)
+		} else if err := ctx.traceID.SetUpperFromHex(tid); err != nil {
+			log.Debug("Attempted to set an invalid hex traceID: %s", err)
+			ctx.trace.unsetPropagatingTag(keyTraceID128)
+		}
+	}
+	if ctx.traceID.Empty() || (ctx.spanID == 0 && ctx.origin != "synthetics") {
+		return nil, ErrSpanContextNotFound
+	}
+	return &ctx, nil
+}
+
+func validateTID(tid string) error {
+	if len(tid) != 16 {
+		return fmt.Errorf("invalid length: %q", tid)
+	}
+	if !validIDRgx.MatchString(tid) {
+		return fmt.Errorf("malformed: %q", tid)
+	}
+	return nil
+}
+
+// unmarshalPropagatingTags unmarshals tags from v into ctx
+func unmarshalPropagatingTags(ctx *spanContext, v string) {
+	if ctx.trace == nil {
+		ctx.trace = newTrace()
+	}
+	if len(v) > propagationExtractMaxSize {
+		log.Warn("Did not extract %s, size limit exceeded: %d. Incoming tags will not be propagated further.", traceTagsHeader, propagationExtractMaxSize)
+		ctx.trace.setTag(keyPropagationError, "extract_max_size")
+		return
+	}
+	tags, err := parsePropagatableTraceTags(v)
+	if err != nil {
+		log.Warn("Did not extract %s: %v. Incoming tags will not be propagated further.", traceTagsHeader, err.Error())
+		ctx.trace.setTag(keyPropagationError, "decoding_error")
+	}
+	ctx.trace.replacePropagatingTags(tags)
+}
+
+// setPropagatingTag adds the key value pair to the map of propagating tags on the trace,
+// creating the map if one is not initialized.
+func setPropagatingTag(ctx *spanContext, k, v string) {
+	if ctx.trace == nil {
+		// extractors initialize a new spanContext, so the trace might be nil
+		ctx.trace = newTrace()
+	}
+	ctx.trace.setPropagatingTag(k, v)
+}
+
+const (
+	b3TraceIDHeader = "x-b3-traceid"
+	b3SpanIDHeader  = "x-b3-spanid"
+	b3SampledHeader = "x-b3-sampled"
+	b3SingleHeader  = "b3"
+)
+
+// propagatorB3 implements Propagator and injects/extracts span contexts
+// using B3 headers. Only TextMap carriers are supported.
+type propagatorB3 struct{}
+
+func (p *propagatorB3) Inject(spanCtx ddtrace.SpanContext, carrier interface{}) error {
+	switch c := carrier.(type) {
+	case TextMapWriter:
+		return p.injectTextMap(spanCtx, c)
+	default:
+		return ErrInvalidCarrier
+	}
+}
+
+func (*propagatorB3) injectTextMap(spanCtx ddtrace.SpanContext, writer TextMapWriter) error {
+	ctx, ok := spanCtx.(*spanContext)
+	if !ok || ctx.traceID.Empty() || ctx.spanID == 0 {
+		return ErrInvalidSpanContext
+	}
+	if !ctx.traceID.HasUpper() { // 64-bit trace id
+		writer.Set(b3TraceIDHeader, fmt.Sprintf("%016x", ctx.traceID.Lower()))
+	} else { // 128-bit trace id
+		var w3Cctx ddtrace.SpanContextW3C
+		if w3Cctx, ok = spanCtx.(ddtrace.SpanContextW3C); !ok {
+			return ErrInvalidSpanContext
+		}
+		writer.Set(b3TraceIDHeader, w3Cctx.TraceID128())
+	}
+	writer.Set(b3SpanIDHeader, fmt.Sprintf("%016x", ctx.spanID))
+	if p, ok := ctx.samplingPriority(); ok {
+		if p >= ext.PriorityAutoKeep {
+			writer.Set(b3SampledHeader, "1")
+		} else {
+			writer.Set(b3SampledHeader, "0")
+		}
+	}
+	return nil
+}
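+
+// For example (illustrative values), a 64-bit trace context with sampling priority 1 is
+// injected as:
+//
+//	x-b3-traceid: 00000000075bcd15
+//	x-b3-spanid:  000000003ade68b1
+//	x-b3-sampled: 1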
+
+func (p *propagatorB3) Extract(carrier interface{}) (ddtrace.SpanContext, error) {
+	switch c := carrier.(type) {
+	case TextMapReader:
+		return p.extractTextMap(c)
+	default:
+		return nil, ErrInvalidCarrier
+	}
+}
+
+func (*propagatorB3) extractTextMap(reader TextMapReader) (ddtrace.SpanContext, error) {
+	var ctx spanContext
+	err := reader.ForeachKey(func(k, v string) error {
+		var err error
+		key := strings.ToLower(k)
+		switch key {
+		case b3TraceIDHeader:
+			if err := extractTraceID128(&ctx, v); err != nil {
+				return nil
+			}
+		case b3SpanIDHeader:
+			ctx.spanID, err = strconv.ParseUint(v, 16, 64)
+			if err != nil {
+				return ErrSpanContextCorrupted
+			}
+		case b3SampledHeader:
+			priority, err := strconv.Atoi(v)
+			if err != nil {
+				return ErrSpanContextCorrupted
+			}
+			ctx.setSamplingPriority(priority, samplernames.Unknown)
+		default:
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	if ctx.traceID.Empty() || ctx.spanID == 0 {
+		return nil, ErrSpanContextNotFound
+	}
+	return &ctx, nil
+}
+
+// propagatorB3SingleHeader implements Propagator and injects/extracts span contexts
+// using the single "b3" header. Only TextMap carriers are supported.
+type propagatorB3SingleHeader struct{}
+
+func (p *propagatorB3SingleHeader) Inject(spanCtx ddtrace.SpanContext, carrier interface{}) error {
+	switch c := carrier.(type) {
+	case TextMapWriter:
+		return p.injectTextMap(spanCtx, c)
+	default:
+		return ErrInvalidCarrier
+	}
+}
+
+func (*propagatorB3SingleHeader) injectTextMap(spanCtx ddtrace.SpanContext, writer TextMapWriter) error {
+	ctx, ok := spanCtx.(*spanContext)
+	if !ok || ctx.traceID.Empty() || ctx.spanID == 0 {
+		return ErrInvalidSpanContext
+	}
+	sb := strings.Builder{}
+	var traceID string
+	if !ctx.traceID.HasUpper() { // 64-bit trace id
+		traceID = fmt.Sprintf("%016x", ctx.traceID.Lower())
+	} else { // 128-bit trace id
+		var w3Cctx ddtrace.SpanContextW3C
+		if w3Cctx, ok = spanCtx.(ddtrace.SpanContextW3C); !ok {
+			return ErrInvalidSpanContext
+		}
+		traceID = w3Cctx.TraceID128()
+	}
+	sb.WriteString(fmt.Sprintf("%s-%016x", traceID, ctx.spanID))
+	if p, ok := ctx.samplingPriority(); ok {
+		if p >= ext.PriorityAutoKeep {
+			sb.WriteString("-1")
+		} else {
+			sb.WriteString("-0")
+		}
+	}
+	writer.Set(b3SingleHeader, sb.String())
+	return nil
+}
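+
+// For example (illustrative values), the same 64-bit trace context with sampling priority 1
+// is injected as a single header:
+//
+//	b3: 00000000075bcd15-000000003ade68b1-1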
+
+func (p *propagatorB3SingleHeader) Extract(carrier interface{}) (ddtrace.SpanContext, error) {
+	switch c := carrier.(type) {
+	case TextMapReader:
+		return p.extractTextMap(c)
+	default:
+		return nil, ErrInvalidCarrier
+	}
+}
+
+func (*propagatorB3SingleHeader) extractTextMap(reader TextMapReader) (ddtrace.SpanContext, error) {
+	var ctx spanContext
+	err := reader.ForeachKey(func(k, v string) error {
+		var err error
+		key := strings.ToLower(k)
+		switch key {
+		case b3SingleHeader:
+			b3Parts := strings.Split(v, "-")
+			if len(b3Parts) >= 2 {
+				if err = extractTraceID128(&ctx, b3Parts[0]); err != nil {
+					return err
+				}
+				ctx.spanID, err = strconv.ParseUint(b3Parts[1], 16, 64)
+				if err != nil {
+					return ErrSpanContextCorrupted
+				}
+				if len(b3Parts) >= 3 {
+					switch b3Parts[2] {
+					case "":
+						break
+					case "1", "d": // Treat 'debug' traces as priority 1
+						ctx.setSamplingPriority(1, samplernames.Unknown)
+					case "0":
+						ctx.setSamplingPriority(0, samplernames.Unknown)
+					default:
+						return ErrSpanContextCorrupted
+					}
+				}
+			} else {
+				return ErrSpanContextCorrupted
+			}
+		default:
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	if ctx.traceID.Empty() || ctx.spanID == 0 {
+		return nil, ErrSpanContextNotFound
+	}
+	return &ctx, nil
+}
+
+const (
+	traceparentHeader = "traceparent"
+	tracestateHeader  = "tracestate"
+)
+
+// propagatorW3c implements Propagator and injects/extracts span contexts
+// using W3C tracecontext/traceparent headers. Only TextMap carriers are supported.
+type propagatorW3c struct{}
+
+func (p *propagatorW3c) Inject(spanCtx ddtrace.SpanContext, carrier interface{}) error {
+	switch c := carrier.(type) {
+	case TextMapWriter:
+		return p.injectTextMap(spanCtx, c)
+	default:
+		return ErrInvalidCarrier
+	}
+}
+
+// injectTextMap propagates span context attributes into the writer,
+// in the format of the traceparentHeader and tracestateHeader.
+// traceparentHeader encodes W3C Trace Propagation version, 128-bit traceID,
+// spanID, and a flags field, which supports 8 unique flags.
+// The current specification only supports a single flag called sampled,
+// which is equal to 00000001 when no other flag is present.
+// tracestateHeader is a comma-separated list of list-members with a <key>=<value> format,
+// where each list-member is managed by a vendor or instrumentation library.
+func (*propagatorW3c) injectTextMap(spanCtx ddtrace.SpanContext, writer TextMapWriter) error {
+	ctx, ok := spanCtx.(*spanContext)
+	if !ok || ctx.traceID.Empty() || ctx.spanID == 0 {
+		return ErrInvalidSpanContext
+	}
+	flags := ""
+	p, ok := ctx.samplingPriority()
+	if ok && p >= ext.PriorityAutoKeep {
+		flags = "01"
+	} else {
+		flags = "00"
+	}
+
+	var traceID string
+	if ctx.traceID.HasUpper() {
+		setPropagatingTag(ctx, keyTraceID128, ctx.traceID.UpperHex())
+		if w3Cctx, ok := spanCtx.(ddtrace.SpanContextW3C); ok {
+			traceID = w3Cctx.TraceID128()
+		}
+	} else {
+		traceID = fmt.Sprintf("%032x", ctx.traceID)
+		if ctx.trace != nil {
+			ctx.trace.unsetPropagatingTag(keyTraceID128)
+		}
+	}
+	writer.Set(traceparentHeader, fmt.Sprintf("00-%s-%016x-%v", traceID, ctx.spanID, flags))
+	// if context priority / origin / tags were updated after extraction,
+	// or the tracestateHeader doesn't start with `dd=`
+	// we need to recreate tracestate
+	if ctx.updated ||
+		(ctx.trace != nil && !strings.HasPrefix(ctx.trace.propagatingTag(tracestateHeader), "dd=")) ||
+		ctx.trace.propagatingTagsLen() == 0 {
+		writer.Set(tracestateHeader, composeTracestate(ctx, p, ctx.trace.propagatingTag(tracestateHeader)))
+	} else {
+		writer.Set(tracestateHeader, ctx.trace.propagatingTag(tracestateHeader))
+	}
+	return nil
+}
+
+var (
+	// keyRgx is used to sanitize the keys of the datadog propagating tags.
+	// Disallowed characters are comma (reserved as a list-member separator),
+	// equals (reserved for list-member key-value separator),
+	// space and characters outside the ASCII range 0x20 to 0x7E.
+	// Disallowed characters must be replaced with the underscore.
+	keyRgx = regexp.MustCompile(",|=|[^\\x20-\\x7E]+")
+
+	// valueRgx is used to sanitize the values of the datadog propagating tags.
+	// Disallowed characters are comma (reserved as a list-member separator),
+	// semi-colon (reserved for separator between entries in the dd list-member),
+	// tilde (reserved, will represent 0x3D (equals) in the encoded tag value,
+	// and characters outside the ASCII range 0x20 to 0x7E.
+	// Equals character must be encoded with a tilde.
+	// Other disallowed characters must be replaced with the underscore.
+	valueRgx = regexp.MustCompile(",|;|~|[^\\x20-\\x7E]+")
+
+	// originRgx is used to sanitize the value of the datadog origin tag.
+	// Disallowed characters are comma (reserved as a list-member separator),
+	// semi-colon (reserved for separator between entries in the dd list-member),
+	// equals (reserved for list-member key-value separator),
+	// and characters outside the ASCII range 0x21 to 0x7E.
+	// Equals character must be encoded with a tilde.
+	// Other disallowed characters must be replaced with the underscore.
+	originRgx = regexp.MustCompile(",|~|;|[^\\x21-\\x7E]+")
+
+	// validIDRgx is used to verify that the input is a valid hex string.
+	// The input must match the pattern from start to end.
+	// validIDRgx is applicable for both trace and span IDs.
+	validIDRgx = regexp.MustCompile("^[a-f0-9]+$")
+)
+
+// composeTracestate creates a tracestateHeader from the spancontext.
+// The Datadog tracing library is only responsible for managing the list member with key dd,
+// which holds the values of the sampling decision(`s:<value>`), origin(`o:<origin>`),
+// and propagated tags prefixed with `t.`(e.g. _dd.p.usr.id:usr_id tag will become `t.usr.id:usr_id`).
+func composeTracestate(ctx *spanContext, priority int, oldState string) string {
+	var b strings.Builder
+	b.Grow(128)
+	b.WriteString(fmt.Sprintf("dd=s:%d", priority))
+	listLength := 1
+
+	if ctx.origin != "" {
+		oWithSub := originRgx.ReplaceAllString(ctx.origin, "_")
+		b.WriteString(fmt.Sprintf(";o:%s",
+			strings.ReplaceAll(oWithSub, "=", "~")))
+	}
+
+	ctx.trace.iteratePropagatingTags(func(k, v string) bool {
+		if !strings.HasPrefix(k, "_dd.p.") {
+			return true
+		}
+		// Datadog propagating tags must be appended to the tracestateHeader
+		// with the `t.` prefix. Tag value must have all `=` signs replaced with a tilde (`~`).
+		tag := fmt.Sprintf("t.%s:%s",
+			keyRgx.ReplaceAllString(k[len("_dd.p."):], "_"),
+			strings.ReplaceAll(valueRgx.ReplaceAllString(v, "_"), "=", "~"))
+		if b.Len()+len(tag) > 256 {
+			return false
+		}
+		b.WriteString(";")
+		b.WriteString(tag)
+		return true
+	})
+	// the old state is split by vendors, must be concatenated with a `,`
+	if len(oldState) == 0 {
+		return b.String()
+	}
+	for _, s := range strings.Split(strings.Trim(oldState, " \t"), ",") {
+		if strings.HasPrefix(s, "dd=") {
+			continue
+		}
+		listLength++
+		// if the resulting tracestateHeader exceeds 32 list-members,
+		// remove the rightmost list-member(s)
+		if listLength > 32 {
+			break
+		}
+		b.WriteString("," + strings.Trim(s, " \t"))
+	}
+	return b.String()
+}
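+
+// For example (illustrative values), a context with sampling priority 2, origin "rum" and the
+// propagating tag _dd.p.usr.id=baz64== merged with an incoming state "congo=t61rcWkgMzE" yields:
+//
+//	dd=s:2;o:rum;t.usr.id:baz64~~,congo=t61rcWkgMzE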
+
+func (p *propagatorW3c) Extract(carrier interface{}) (ddtrace.SpanContext, error) {
+	switch c := carrier.(type) {
+	case TextMapReader:
+		return p.extractTextMap(c)
+	default:
+		return nil, ErrInvalidCarrier
+	}
+}
+
+func (*propagatorW3c) extractTextMap(reader TextMapReader) (ddtrace.SpanContext, error) {
+	var parentHeader string
+	var stateHeader string
+	var ctx spanContext
+	// to avoid parsing tracestate header(s) if traceparent is invalid
+	if err := reader.ForeachKey(func(k, v string) error {
+		key := strings.ToLower(k)
+		switch key {
+		case traceparentHeader:
+			if parentHeader != "" {
+				return ErrSpanContextCorrupted
+			}
+			parentHeader = v
+		case tracestateHeader:
+			stateHeader = v
+		default:
+			if strings.HasPrefix(key, DefaultBaggageHeaderPrefix) {
+				ctx.setBaggageItem(strings.TrimPrefix(key, DefaultBaggageHeaderPrefix), v)
+			}
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	if err := parseTraceparent(&ctx, parentHeader); err != nil {
+		return nil, err
+	}
+	parseTracestate(&ctx, stateHeader)
+	return &ctx, nil
+}
+
+// parseTraceparent attempts to parse traceparentHeader which describes the position
+// of the incoming request in its trace graph in a portable, fixed-length format.
+// The format of the traceparentHeader is a `-` separated string in the
+// following format: `version-traceId-spanID-flags`, with an optional `-<prefix>` if version > 0.
+// where:
+// - version - represents the version of the W3C Tracecontext Propagation format in hex format.
+// - traceId - represents the propagated traceID in the format of 32 hex-encoded digits.
+// - spanID - represents the propagated spanID (parentID) in the format of 16 hex-encoded digits.
+// - flags - represents the propagated flags in the format of 2 hex-encoded digits, and supports 8 unique flags.
+// Example value of HTTP `traceparent` header: `00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01`,
+// The full traceID (32 hex-encoded digits) is parsed and stored on the span's context: the least significant
+// 16 hex-encoded digits populate the lower 64 bits of the trace ID and, when present, the most significant
+// 16 hex-encoded digits populate the upper 64 bits.
+func parseTraceparent(ctx *spanContext, header string) error {
+	nonWordCutset := "_-\t \n"
+	header = strings.ToLower(strings.Trim(header, "\t -"))
+	headerLen := len(header)
+	if headerLen == 0 {
+		return ErrSpanContextNotFound
+	}
+	if headerLen < 55 {
+		return ErrSpanContextCorrupted
+	}
+	parts := strings.SplitN(header, "-", 5) // 5 because we expect 4 required + 1 optional substrings
+	if len(parts) < 4 {
+		return ErrSpanContextCorrupted
+	}
+	version := strings.Trim(parts[0], nonWordCutset)
+	if len(version) != 2 {
+		return ErrSpanContextCorrupted
+	}
+	v, err := strconv.ParseUint(version, 16, 64)
+	if err != nil || v == 255 {
+		// version 255 (0xff) is invalid
+		return ErrSpanContextCorrupted
+	}
+	if v == 0 && headerLen != 55 {
+		// The header length in v0 has to be 55.
+		// It's allowed to be longer in other versions.
+		return ErrSpanContextCorrupted
+	}
+	// parsing traceID
+	fullTraceID := strings.Trim(parts[1], nonWordCutset)
+	if len(fullTraceID) != 32 {
+		return ErrSpanContextCorrupted
+	}
+	// checking that the entire TraceID is a valid hex string
+	if ok := validIDRgx.MatchString(fullTraceID); !ok {
+		return ErrSpanContextCorrupted
+	}
+	if ctx.trace != nil {
+		// Ensure that the 128-bit trace id tag doesn't propagate
+		ctx.trace.unsetPropagatingTag(keyTraceID128)
+	}
+	if err := extractTraceID128(ctx, fullTraceID); err != nil {
+		return err
+	}
+	// parsing spanID
+	spanID := strings.Trim(parts[2], nonWordCutset)
+	if len(spanID) != 16 {
+		return ErrSpanContextCorrupted
+	}
+	if ok := validIDRgx.MatchString(spanID); !ok {
+		return ErrSpanContextCorrupted
+	}
+	if ctx.spanID, err = strconv.ParseUint(spanID, 16, 64); err != nil {
+		return ErrSpanContextCorrupted
+	}
+	if ctx.spanID == 0 {
+		return ErrSpanContextNotFound
+	}
+	// parsing flags
+	flags := parts[3]
+	f, err := strconv.ParseInt(flags, 16, 8)
+	if err != nil {
+		return ErrSpanContextCorrupted
+	}
+	ctx.setSamplingPriority(int(f)&0x1, samplernames.Unknown)
+	return nil
+}
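+
+// As a worked example, parsing the header
+// "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01" would set the
+// lower 64 bits of the trace id from "a3ce929d0e0e4736", the upper 64 bits
+// from "4bf92f3577b34da6", the span id to 0x00f067aa0ba902b7, and a sampling
+// priority of 1, since the sampled flag (0x01) is set.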
+
+// parseTracestate attempts to parse tracestateHeader which is a list
+// with up to 32 comma-separated (,) list-members.
+// An example value would be: `vendorname1=opaqueValue1,vendorname2=opaqueValue2,dd=s:1;o:synthetics`,
+// where the `dd` list-member contains values that would be in x-datadog-tags as well as those needed for propagation information.
+// The keys of the `dd` values have been shortened as follows to save space:
+// `sampling_priority` = `s`
+// `origin` = `o`
+// `_dd.p.` prefix = `t.`
+func parseTracestate(ctx *spanContext, header string) {
+	if header == "" {
+		// The W3C spec says tracestate can be empty but should avoid sending it.
+		// https://www.w3.org/TR/trace-context-1/#tracestate-header-field-values
+		return
+	}
+	// if multiple headers are present, they must be combined and stored
+	setPropagatingTag(ctx, tracestateHeader, header)
+	combined := strings.Split(strings.Trim(header, "\t "), ",")
+	for _, group := range combined {
+		if !strings.HasPrefix(group, "dd=") {
+			continue
+		}
+		ddMembers := strings.Split(group[len("dd="):], ";")
+		dropDM := false
+		for _, member := range ddMembers {
+			keyVal := strings.SplitN(member, ":", 2)
+			if len(keyVal) != 2 {
+				continue
+			}
+			key, val := keyVal[0], keyVal[1]
+			if key == "o" {
+				ctx.origin = strings.ReplaceAll(val, "~", "=")
+			} else if key == "s" {
+				stateP, err := strconv.Atoi(val)
+				if err != nil {
+					// If the tracestate priority is absent,
+					// we rely on the traceparent sampled flag
+					// set in the parseTraceparent function.
+					continue
+				}
+				// The sampling priority and decision maker values are set based on
+				// the specification in the internal W3C context propagation RFC.
+				// See the document for more details.
+				parentP, _ := ctx.samplingPriority()
+				if (parentP == 1 && stateP > 0) || (parentP == 0 && stateP <= 0) {
+					// As extracted from tracestate
+					ctx.setSamplingPriority(stateP, samplernames.Unknown)
+				}
+				if parentP == 1 && stateP <= 0 {
+					// Auto keep (1) and set the decision maker to default
+					ctx.setSamplingPriority(1, samplernames.Default)
+				}
+				if parentP == 0 && stateP > 0 {
+					// Auto drop (0) and drop the decision maker
+					ctx.setSamplingPriority(0, samplernames.Unknown)
+					dropDM = true
+				}
+			} else if strings.HasPrefix(key, "t.dm") {
+				if ctx.trace.hasPropagatingTag(keyDecisionMaker) || dropDM {
+					continue
+				}
+				setPropagatingTag(ctx, keyDecisionMaker, val)
+			} else if strings.HasPrefix(key, "t.") {
+				keySuffix := key[len("t."):]
+				val = strings.ReplaceAll(val, "~", "=")
+				setPropagatingTag(ctx, "_dd.p."+keySuffix, val)
+			}
+		}
+	}
+}
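+
+// For example, if the traceparent carried the sampled flag (priority 1) and the
+// incoming tracestate is "dd=s:2;o:rum;t.dm:-4,othervendor=value", parsing would
+// set the origin to "rum", raise the sampling priority to 2 (the positive
+// tracestate priority agrees with the sampled flag), and store the propagating
+// tag "_dd.p.dm" with the value "-4".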
+
+// extractTraceID128 extracts the trace id from v and populates the traceID
+// field, and the traceID128 field (if applicable) of the provided ctx,
+// returning an error if v is invalid.
+func extractTraceID128(ctx *spanContext, v string) error {
+	if len(v) > 32 {
+		v = v[len(v)-32:]
+	}
+	v = strings.TrimLeft(v, "0")
+	var err error
+	if len(v) <= 16 { // 64-bit trace id
+		var tid uint64
+		tid, err = strconv.ParseUint(v, 16, 64)
+		ctx.traceID.SetLower(tid)
+	} else { // 128-bit trace id
+		idUpper := v[:len(v)-16]
+		ctx.traceID.SetUpperFromHex(idUpper)
+		var l uint64
+		l, err = strconv.ParseUint(v[len(idUpper):], 16, 64)
+		ctx.traceID.SetLower(l)
+	}
+	if err != nil {
+		return ErrSpanContextCorrupted
+	}
+	return nil
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time.go
new file mode 100644
index 0000000000..3afe8fb18c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time.go
@@ -0,0 +1,17 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+//go:build !windows
+// +build !windows
+
+package tracer
+
+import "time"
+
+// nowTime returns the current time, as computed by time.Now().
+var nowTime = func() time.Time { return time.Now() }
+
+// now returns the current UNIX time in nanoseconds, as computed by time.Now().UnixNano().
+var now = func() int64 { return time.Now().UnixNano() }
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows.go
new file mode 100644
index 0000000000..f1ecd4f903
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows.go
@@ -0,0 +1,48 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"time"
+
+	"golang.org/x/sys/windows"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// This method is more precise than the go1.8 time.Now on Windows
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/hh706895(v=vs.85).aspx
+// It is however ~10x slower and requires Windows 8+.
+func highPrecisionNow() int64 {
+	var ft windows.Filetime
+	windows.GetSystemTimePreciseAsFileTime(&ft)
+	return ft.Nanoseconds()
+}
+
+func lowPrecisionNow() int64 {
+	return time.Now().UnixNano()
+}
+
+// We use this method of initializing now over an init function due to dependency issues. The init
+// function may run after other declarations, such as that in payload_test:19, which results in a
+// nil dereference panic.
+var now func() int64 = func() func() int64 {
+	if err := windows.LoadGetSystemTimePreciseAsFileTime(); err != nil {
+		log.Warn("Unable to load high precison timer, defaulting to time.Now()")
+		return lowPrecisionNow
+	} else {
+		return highPrecisionNow
+	}
+}()
+
+var nowTime func() time.Time = func() func() time.Time {
+	if err := windows.LoadGetSystemTimePreciseAsFileTime(); err != nil {
+		log.Warn("Unable to load high precison timer, defaulting to time.Now()")
+		return func() time.Time { return time.Unix(0, lowPrecisionNow()) }
+	} else {
+		return func() time.Time { return time.Unix(0, highPrecisionNow()) }
+	}
+}()
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer.go
new file mode 100644
index 0000000000..7efcdaea01
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer.go
@@ -0,0 +1,668 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	gocontext "context"
+	"os"
+	"runtime/pprof"
+	rt "runtime/trace"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+	globalinternal "gopkg.in/DataDog/dd-trace-go.v1/internal"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof"
+
+	"github.com/DataDog/datadog-agent/pkg/obfuscate"
+)
+
+var _ ddtrace.Tracer = (*tracer)(nil)
+
+// tracer creates, buffers and submits Spans which are used to time blocks of
+// computation. They are accumulated and streamed into an internal payload,
+// which is flushed to the agent whenever its size exceeds a specific threshold
+// or when a certain interval of time has passed, whichever happens first.
+//
+// tracer operates based on a worker loop which responds to various request
+// channels. It additionally holds two buffers which accumulate error and trace
+// queues to be processed by the payload encoder.
+type tracer struct {
+	config *config
+
+	// stats specifies the concentrator used to compute statistics, when client-side
+	// stats are enabled.
+	stats *concentrator
+
+	// traceWriter is responsible for sending finished traces to their
+	// destination, such as the Trace Agent or Datadog Forwarder.
+	traceWriter traceWriter
+
+	// out receives finished traces with spans to be added to the payload.
+	out chan *finishedTrace
+
+	// flush receives a channel onto which it will confirm after a flush has been
+	// triggered and completed.
+	flush chan chan<- struct{}
+
+	// stop causes the tracer to shut down when closed.
+	stop chan struct{}
+
+	// stopOnce ensures the tracer is stopped exactly once.
+	stopOnce sync.Once
+
+	// wg waits for all goroutines to exit when stopping.
+	wg sync.WaitGroup
+
+	// prioritySampling holds an instance of the priority sampler.
+	prioritySampling *prioritySampler
+
+	// pid of the process
+	pid int
+
+	// These integers track metrics about spans and traces as they are started,
+	// finished, and dropped
+	spansStarted, spansFinished, tracesDropped uint32
+
+	// Records the number of dropped P0 traces and spans.
+	droppedP0Traces, droppedP0Spans uint32
+
+	// partialTraces tracks the number of partially dropped traces.
+	partialTraces uint32
+
+	// rulesSampling holds an instance of the rules sampler used to apply either trace sampling,
+	// or single span sampling rules on spans. These are user-defined
+	// rules for applying a sampling rate to spans that match the designated service
+	// or operation name.
+	rulesSampling *rulesSampler
+
+	// obfuscator holds the obfuscator used to obfuscate resources in aggregated stats.
+	// obfuscator may be nil if disabled.
+	obfuscator *obfuscate.Obfuscator
+
+	// statsd is used for tracking metrics associated with the runtime and the tracer.
+	statsd statsdClient
+}
+
+const (
+	// flushInterval is the interval at which the payload contents will be flushed
+	// to the transport.
+	flushInterval = 2 * time.Second
+
+	// payloadMaxLimit is the maximum payload size allowed and should indicate the
+	// maximum size of the package that the agent can receive.
+	payloadMaxLimit = 9.5 * 1024 * 1024 // 9.5 MB
+
+	// payloadSizeLimit specifies the maximum allowed size of the payload before
+	// it will trigger a flush to the transport.
+	payloadSizeLimit = payloadMaxLimit / 2
+
+	// concurrentConnectionLimit specifies the maximum number of concurrent outgoing
+	// connections allowed.
+	concurrentConnectionLimit = 100
+)
+
+// statsInterval is the interval at which health metrics will be sent with the
+// statsd client; replaced in tests.
+var statsInterval = 10 * time.Second
+
+// Start starts the tracer with the given set of options. It will stop and replace
+// any running tracer, meaning that calling it several times will result in a restart
+// of the tracer by replacing the current instance with a new one.
+func Start(opts ...StartOption) {
+	if internal.Testing {
+		return // mock tracer active
+	}
+	defer telemetry.Time(telemetry.NamespaceGeneral, "init_time", nil, true)()
+	t := newTracer(opts...)
+	if !t.config.enabled {
+		// TODO: instrumentation telemetry client won't get started
+		// if tracing is disabled, but we still want to capture this
+		// telemetry information. Will be fixed when the tracer and profiler
+		// share control of the global telemetry client.
+		return
+	}
+	internal.SetGlobalTracer(t)
+	if t.config.logStartup {
+		logStartup(t)
+	}
+	// Start AppSec with remote configuration
+	cfg := remoteconfig.DefaultClientConfig()
+	cfg.AgentURL = t.config.agentURL.String()
+	cfg.AppVersion = t.config.version
+	cfg.Env = t.config.env
+	cfg.HTTP = t.config.httpClient
+	cfg.ServiceName = t.config.serviceName
+	appsec.Start(appsec.WithRCConfig(cfg))
+	// start instrumentation telemetry unless it is disabled through the
+	// DD_INSTRUMENTATION_TELEMETRY_ENABLED env var
+	startTelemetry(t.config)
+	_ = t.hostname() // Prime the hostname cache
+}
+
+// Stop stops the started tracer. Subsequent calls are valid but become a no-op.
+func Stop() {
+	internal.SetGlobalTracer(&internal.NoopTracer{})
+	log.Flush()
+}
+
+// Span is an alias for ddtrace.Span. It is here to allow godoc to group methods returning
+// ddtrace.Span. It is recommended and is considered more correct to refer to this type as
+// ddtrace.Span instead.
+type Span = ddtrace.Span
+
+// StartSpan starts a new span with the given operation name and set of options.
+// If the tracer is not started, calling this function is a no-op.
+func StartSpan(operationName string, opts ...StartSpanOption) Span {
+	return internal.GetGlobalTracer().StartSpan(operationName, opts...)
+}
+
+// Extract extracts a SpanContext from the carrier. The carrier is expected
+// to implement TextMapReader, otherwise an error is returned.
+// If the tracer is not started, calling this function is a no-op.
+func Extract(carrier interface{}) (ddtrace.SpanContext, error) {
+	return internal.GetGlobalTracer().Extract(carrier)
+}
+
+// Inject injects the given SpanContext into the carrier. The carrier is
+// expected to implement TextMapWriter, otherwise an error is returned.
+// If the tracer is not started, calling this function is a no-op.
+func Inject(ctx ddtrace.SpanContext, carrier interface{}) error {
+	return internal.GetGlobalTracer().Inject(ctx, carrier)
+}
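+
+// A minimal usage sketch of the package-level API above, assuming the
+// WithService, ResourceName and HTTPHeadersCarrier helpers exported by this
+// package:
+//
+//	tracer.Start(tracer.WithService("my-service"))
+//	defer tracer.Stop()
+//	span := tracer.StartSpan("web.request", tracer.ResourceName("/hello"))
+//	defer span.Finish()
+//	headers := tracer.HTTPHeadersCarrier(http.Header{})
+//	_ = tracer.Inject(span.Context(), headers)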
+
+// SetUser associates user information with the trace to which the
+// provided span belongs. The options can be used to tune which bits
+// of user information get monitored. In case of distributed traces,
+// the user id can be propagated across traces using the WithPropagation() option.
+// See https://docs.datadoghq.com/security_platform/application_security/setup_and_configure/?tab=set_user#add-user-information-to-traces
+func SetUser(s Span, id string, opts ...UserMonitoringOption) {
+	if s == nil {
+		return
+	}
+	sp, ok := s.(interface {
+		SetUser(string, ...UserMonitoringOption)
+	})
+	if !ok {
+		return
+	}
+	sp.SetUser(id, opts...)
+}
+
+// payloadQueueSize is the buffer size of the trace channel.
+const payloadQueueSize = 1000
+
+func newUnstartedTracer(opts ...StartOption) *tracer {
+	c := newConfig(opts...)
+	sampler := newPrioritySampler()
+	statsd, err := newStatsdClient(c)
+	if err != nil {
+		log.Warn("Runtime and health metrics disabled: %v", err)
+	}
+	var writer traceWriter
+	if c.logToStdout {
+		writer = newLogTraceWriter(c, statsd)
+	} else {
+		writer = newAgentTraceWriter(c, sampler, statsd)
+	}
+	traces, spans, err := samplingRulesFromEnv()
+	if err != nil {
+		log.Warn("DIAGNOSTICS Error(s) parsing sampling rules: found errors:%s", err)
+	}
+	if traces != nil {
+		c.traceRules = traces
+	}
+	if spans != nil {
+		c.spanRules = spans
+	}
+	t := &tracer{
+		config:           c,
+		traceWriter:      writer,
+		out:              make(chan *finishedTrace, payloadQueueSize),
+		stop:             make(chan struct{}),
+		flush:            make(chan chan<- struct{}),
+		rulesSampling:    newRulesSampler(c.traceRules, c.spanRules),
+		prioritySampling: sampler,
+		pid:              os.Getpid(),
+		stats:            newConcentrator(c, defaultStatsBucketSize),
+		obfuscator: obfuscate.NewObfuscator(obfuscate.Config{
+			SQL: obfuscate.SQLConfig{
+				TableNames:       c.agent.HasFlag("table_names"),
+				ReplaceDigits:    c.agent.HasFlag("quantize_sql_tables") || c.agent.HasFlag("replace_sql_digits"),
+				KeepSQLAlias:     c.agent.HasFlag("keep_sql_alias"),
+				DollarQuotedFunc: c.agent.HasFlag("dollar_quoted_func"),
+				Cache:            c.agent.HasFlag("sql_cache"),
+			},
+		}),
+		statsd: statsd,
+	}
+	return t
+}
+
+func newTracer(opts ...StartOption) *tracer {
+	t := newUnstartedTracer(opts...)
+	c := t.config
+	t.statsd.Incr("datadog.tracer.started", nil, 1)
+	if c.runtimeMetrics {
+		log.Debug("Runtime metrics enabled.")
+		t.wg.Add(1)
+		go func() {
+			defer t.wg.Done()
+			t.reportRuntimeMetrics(defaultMetricsReportInterval)
+		}()
+	}
+	t.wg.Add(1)
+	go func() {
+		defer t.wg.Done()
+		tick := t.config.tickChan
+		if tick == nil {
+			ticker := time.NewTicker(flushInterval)
+			defer ticker.Stop()
+			tick = ticker.C
+		}
+		t.worker(tick)
+	}()
+	t.wg.Add(1)
+	go func() {
+		defer t.wg.Done()
+		t.reportHealthMetrics(statsInterval)
+	}()
+	t.stats.Start()
+	return t
+}
+
+// Flush flushes any buffered traces. Flush is in effect only if a tracer
+// is started. Users do not have to call Flush in order to ensure that
+// traces reach Datadog. It is a convenience method dedicated to a specific
+// use case described below.
+//
+// Flush is of use in Lambda environments, where starting and stopping
+// the tracer on each invocation may create too much latency. In this
+// scenario, a tracer may be started and stopped by the parent process
+// whereas the invocation can make use of Flush to ensure any created spans
+// reach the agent.
+func Flush() {
+	if t, ok := internal.GetGlobalTracer().(*tracer); ok {
+		t.flushSync()
+	}
+}
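+
+// For the Lambda use case described above, a handler might look roughly like
+// the following sketch (names are illustrative only):
+//
+//	func handler(ctx context.Context) error {
+//		defer tracer.Flush()
+//		span := tracer.StartSpan("lambda.invocation")
+//		defer span.Finish()
+//		// ... invocation work ...
+//		return nil
+//	}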
+
+// flushSync triggers a flush and waits for it to complete.
+func (t *tracer) flushSync() {
+	done := make(chan struct{})
+	t.flush <- done
+	<-done
+}
+
+// worker receives finished traces to be added into the payload and
+// periodically flushes traces to the transport.
+func (t *tracer) worker(tick <-chan time.Time) {
+	for {
+		select {
+		case trace := <-t.out:
+			t.sampleFinishedTrace(trace)
+			if len(trace.spans) != 0 {
+				t.traceWriter.add(trace.spans)
+			}
+		case <-tick:
+			t.statsd.Incr("datadog.tracer.flush_triggered", []string{"reason:scheduled"}, 1)
+			t.traceWriter.flush()
+
+		case done := <-t.flush:
+			t.statsd.Incr("datadog.tracer.flush_triggered", []string{"reason:invoked"}, 1)
+			t.traceWriter.flush()
+			t.statsd.Flush()
+			t.stats.flushAndSend(time.Now(), withCurrentBucket)
+			// TODO(x): In reality, the traceWriter.flush() call is not synchronous
+			// when using the agent traceWriter. However, this functionality is used
+			// in Lambda so for that purpose this mechanism should suffice.
+			done <- struct{}{}
+
+		case <-t.stop:
+		loop:
+			// the loop ensures that the payload channel is fully drained
+			// before the final flush to ensure no traces are lost (see #526)
+			for {
+				select {
+				case trace := <-t.out:
+					t.sampleFinishedTrace(trace)
+					if len(trace.spans) != 0 {
+						t.traceWriter.add(trace.spans)
+					}
+				default:
+					break loop
+				}
+			}
+			return
+		}
+	}
+}
+
+// finishedTrace holds information about a trace that has finished, including its spans.
+type finishedTrace struct {
+	spans    []*span
+	willSend bool // willSend indicates whether the trace will be sent to the agent.
+}
+
+// sampleFinishedTrace applies single-span sampling to the provided trace, which is considered to be finished.
+func (t *tracer) sampleFinishedTrace(info *finishedTrace) {
+	if len(info.spans) > 0 {
+		if p, ok := info.spans[0].context.samplingPriority(); ok && p > 0 {
+			// The trace is kept, no need to run single span sampling rules.
+			return
+		}
+	}
+	var kept []*span
+	if t.rulesSampling.HasSpanRules() {
+		// Apply sampling rules to individual spans in the trace.
+		for _, span := range info.spans {
+			if t.rulesSampling.SampleSpan(span) {
+				kept = append(kept, span)
+			}
+		}
+		if len(kept) > 0 && len(kept) < len(info.spans) {
+			// Some spans in the trace were kept, so a partial trace will be sent.
+			atomic.AddUint32(&t.partialTraces, 1)
+		}
+	}
+	if len(kept) == 0 {
+		atomic.AddUint32(&t.droppedP0Traces, 1)
+	}
+	atomic.AddUint32(&t.droppedP0Spans, uint32(len(info.spans)-len(kept)))
+	if !info.willSend {
+		info.spans = kept
+	}
+}
+
+func (t *tracer) pushTrace(trace *finishedTrace) {
+	select {
+	case <-t.stop:
+		return
+	default:
+	}
+	select {
+	case t.out <- trace:
+	default:
+		log.Error("payload queue full, dropping %d traces", len(trace.spans))
+	}
+}
+
+// StartSpan creates, starts, and returns a new Span with the given `operationName`.
+func (t *tracer) StartSpan(operationName string, options ...ddtrace.StartSpanOption) ddtrace.Span {
+	var opts ddtrace.StartSpanConfig
+	for _, fn := range options {
+		fn(&opts)
+	}
+	var startTime int64
+	if opts.StartTime.IsZero() {
+		startTime = now()
+	} else {
+		startTime = opts.StartTime.UnixNano()
+	}
+	var context *spanContext
+	// The default pprof context is taken from the start options and is
+	// not nil when using StartSpanFromContext()
+	pprofContext := opts.Context
+	if opts.Parent != nil {
+		if ctx, ok := opts.Parent.(*spanContext); ok {
+			context = ctx
+			if pprofContext == nil && ctx.span != nil {
+				// Inherit the context.Context from parent span if it was propagated
+				// using ChildOf() rather than StartSpanFromContext(), see
+				// applyPPROFLabels() below.
+				pprofContext = ctx.span.pprofCtxActive
+			}
+		} else if p, ok := opts.Parent.(ddtrace.SpanContextW3C); ok {
+			context = &spanContext{
+				traceID: p.TraceID128Bytes(),
+				spanID:  p.SpanID(),
+			}
+		}
+	}
+	if pprofContext == nil {
+		// For root spans without a context, there is no pprofContext, but we need
+		// one to avoid a panic() in pprof.WithLabels(). Using context.Background()
+		// is not ideal here, as it will cause us to remove all labels from the
+		// goroutine when the span finishes. However, the alternatives of not
+		// applying labels for such spans or to leave the endpoint/hotspot labels
+		// on the goroutine after it finishes are even less appealing. We'll have
+		// to properly document this for users.
+		pprofContext = gocontext.Background()
+	}
+	id := opts.SpanID
+	if id == 0 {
+		id = generateSpanID(startTime)
+	}
+	// span defaults
+	span := &span{
+		Name:         operationName,
+		Service:      t.config.serviceName,
+		Resource:     operationName,
+		SpanID:       id,
+		TraceID:      id,
+		Start:        startTime,
+		noDebugStack: t.config.noDebugStack,
+	}
+	if t.config.hostname != "" {
+		span.setMeta(keyHostname, t.config.hostname)
+	}
+	if context != nil {
+		// this is a child span
+		span.TraceID = context.traceID.Lower()
+		span.ParentID = context.spanID
+		if p, ok := context.samplingPriority(); ok {
+			span.setMetric(keySamplingPriority, float64(p))
+		}
+		if context.span != nil {
+			// local parent, inherit service
+			context.span.RLock()
+			span.Service = context.span.Service
+			context.span.RUnlock()
+		} else {
+			// remote parent
+			if context.origin != "" {
+				// mark origin
+				span.setMeta(keyOrigin, context.origin)
+			}
+		}
+	}
+	span.context = newSpanContext(span, context)
+	span.setMetric(ext.Pid, float64(t.pid))
+	span.setMeta("language", "go")
+
+	// add tags from options
+	for k, v := range opts.Tags {
+		span.SetTag(k, v)
+	}
+	// add global tags
+	for k, v := range t.config.globalTags {
+		span.SetTag(k, v)
+	}
+	if t.config.serviceMappings != nil {
+		if newSvc, ok := t.config.serviceMappings[span.Service]; ok {
+			span.Service = newSvc
+		}
+	}
+	isRootSpan := context == nil || context.span == nil
+	if isRootSpan {
+		traceprof.SetProfilerRootTags(span)
+		span.setMetric(keySpanAttributeSchemaVersion, float64(t.config.spanAttributeSchemaVersion))
+	}
+	if isRootSpan || context.span.Service != span.Service {
+		span.setMetric(keyTopLevel, 1)
+		// all top level spans are measured. So the measured tag is redundant.
+		delete(span.Metrics, keyMeasured)
+	}
+	if t.config.version != "" {
+		if t.config.universalVersion || (!t.config.universalVersion && span.Service == t.config.serviceName) {
+			span.setMeta(ext.Version, t.config.version)
+		}
+	}
+	if t.config.env != "" {
+		span.setMeta(ext.Environment, t.config.env)
+	}
+	if _, ok := span.context.samplingPriority(); !ok {
+		// if not already sampled or a brand new trace, sample it
+		t.sample(span)
+	}
+	pprofContext, span.taskEnd = startExecutionTracerTask(pprofContext, span)
+	if t.config.profilerHotspots || t.config.profilerEndpoints {
+		t.applyPPROFLabels(pprofContext, span)
+	}
+	if t.config.serviceMappings != nil {
+		if newSvc, ok := t.config.serviceMappings[span.Service]; ok {
+			span.Service = newSvc
+		}
+	}
+	if log.DebugEnabled() {
+		// avoid allocating the ...interface{} argument if debug logging is disabled
+		log.Debug("Started Span: %v, Operation: %s, Resource: %s, Tags: %v, %v",
+			span, span.Name, span.Resource, span.Meta, span.Metrics)
+	}
+	return span
+}
+
+// generateSpanID returns a random uint64 that has been XORd with the startTime.
+// This is done to get around the 32-bit random seed limitation that may create collisions if there is a large number
+// of go services all generating spans.
+func generateSpanID(startTime int64) uint64 {
+	return random.Uint64() ^ uint64(startTime)
+}
+
+// applyPPROFLabels applies pprof labels for the profiler's code hotspots and
+// endpoint filtering feature to span. When span finishes, any pprof labels
+// found in ctx are restored. Additionally, this func informs the profiler how
+// many times each endpoint is called.
+func (t *tracer) applyPPROFLabels(ctx gocontext.Context, span *span) {
+	var labels []string
+	if t.config.profilerHotspots {
+		// allocate the max-length slice to avoid growing it later
+		labels = make([]string, 0, 6)
+		labels = append(labels, traceprof.SpanID, strconv.FormatUint(span.SpanID, 10))
+	}
+	// nil checks might not be needed, but better be safe than sorry
+	if localRootSpan := span.root(); localRootSpan != nil {
+		if t.config.profilerHotspots {
+			labels = append(labels, traceprof.LocalRootSpanID, strconv.FormatUint(localRootSpan.SpanID, 10))
+		}
+		if t.config.profilerEndpoints && spanResourcePIISafe(localRootSpan) {
+			labels = append(labels, traceprof.TraceEndpoint, localRootSpan.Resource)
+			if span == localRootSpan {
+				// Inform the profiler of endpoint hits. This is used for the unit of
+				// work feature. We can't use APM stats for this since the stats don't
+				// have enough cardinality (e.g. runtime-id tags are missing).
+				traceprof.GlobalEndpointCounter().Inc(localRootSpan.Resource)
+			}
+		}
+	}
+	if len(labels) > 0 {
+		span.pprofCtxRestore = ctx
+		span.pprofCtxActive = pprof.WithLabels(ctx, pprof.Labels(labels...))
+		pprof.SetGoroutineLabels(span.pprofCtxActive)
+	}
+}
+
+// spanResourcePIISafe returns true if s.Resource can be considered to not
+// include PII with reasonable confidence. E.g. SQL queries may contain PII,
+// but http, rpc or custom (s.Type == "") span resource names generally do not.
+func spanResourcePIISafe(s *span) bool {
+	return s.Type == ext.SpanTypeWeb || s.Type == ext.AppTypeRPC || s.Type == ""
+}
+
+// Stop stops the tracer.
+func (t *tracer) Stop() {
+	t.stopOnce.Do(func() {
+		close(t.stop)
+		t.statsd.Incr("datadog.tracer.stopped", nil, 1)
+	})
+	t.stats.Stop()
+	t.wg.Wait()
+	t.traceWriter.stop()
+	t.statsd.Close()
+	appsec.Stop()
+}
+
+// Inject uses the configured or default TextMap Propagator.
+func (t *tracer) Inject(ctx ddtrace.SpanContext, carrier interface{}) error {
+	return t.config.propagator.Inject(ctx, carrier)
+}
+
+// Extract uses the configured or default TextMap Propagator.
+func (t *tracer) Extract(carrier interface{}) (ddtrace.SpanContext, error) {
+	return t.config.propagator.Extract(carrier)
+}
+
+// sampleRateMetricKey is the metric key holding the applied sample rate. It has to match the key used by the Agent.
+const sampleRateMetricKey = "_sample_rate"
+
+// sample samples a span with the internal sampler.
+func (t *tracer) sample(span *span) {
+	if _, ok := span.context.samplingPriority(); ok {
+		// sampling decision was already made
+		return
+	}
+	sampler := t.config.sampler
+	if !sampler.Sample(span) {
+		span.context.trace.drop()
+		return
+	}
+	if rs, ok := sampler.(RateSampler); ok && rs.Rate() < 1 {
+		span.setMetric(sampleRateMetricKey, rs.Rate())
+	}
+	if t.rulesSampling.SampleTrace(span) {
+		return
+	}
+	t.prioritySampling.apply(span)
+}
+
+func startExecutionTracerTask(ctx gocontext.Context, span *span) (gocontext.Context, func()) {
+	if !rt.IsEnabled() {
+		return ctx, func() {}
+	}
+	span.goExecTraced = true
+	// Task name is the resource (operationName) of the span, e.g.
+	// "POST /foo/bar" (http) or "/foo/pkg.Method" (grpc).
+	taskName := span.Resource
+	// If the resource could contain PII (e.g. SQL query that's not using bind
+	// arguments), play it safe and just use the span type as the taskName,
+	// e.g. "sql".
+	if !spanResourcePIISafe(span) {
+		taskName = span.Type
+	}
+	end := noopTaskEnd
+	if !globalinternal.IsExecutionTraced(ctx) {
+		var task *rt.Task
+		ctx, task = rt.NewTask(ctx, taskName)
+		end = task.End
+	} else {
+		// We only want to skip task creation for this particular span,
+		// not necessarily for child spans which can come from different
+		// integrations. So update this context to be "not" execution
+		// traced so that derived contexts used by child spans don't get
+		// skipped.
+		ctx = globalinternal.WithExecutionNotTraced(ctx)
+	}
+	rt.Log(ctx, "span id", strconv.FormatUint(span.SpanID, 10))
+	return ctx, end
+}
+
+func noopTaskEnd() {}
+
+func (t *tracer) hostname() string {
+	if !t.config.enableHostnameDetection {
+		return ""
+	}
+	return hostname.Get()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/transport.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/transport.go
new file mode 100644
index 0000000000..7a6e80a56a
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/transport.go
@@ -0,0 +1,214 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	traceinternal "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/version"
+
+	"github.com/tinylib/msgp/msgp"
+)
+
+const (
+	// headerComputedTopLevel specifies that the client has marked top-level spans, when set.
+	// Any non-empty value will mean 'yes'.
+	headerComputedTopLevel = "Datadog-Client-Computed-Top-Level"
+)
+
+var defaultDialer = &net.Dialer{
+	Timeout:   30 * time.Second,
+	KeepAlive: 30 * time.Second,
+	DualStack: true,
+}
+
+var defaultClient = &http.Client{
+	// We copy the transport to avoid using the default one, as it might be
+	// augmented with tracing and we don't want these calls to be recorded.
+	// See https://golang.org/pkg/net/http/#DefaultTransport .
+	Transport: &http.Transport{
+		Proxy:                 http.ProxyFromEnvironment,
+		DialContext:           defaultDialer.DialContext,
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+	},
+	Timeout: defaultHTTPTimeout,
+}
+
+const (
+	defaultHostname    = "localhost"
+	defaultPort        = "8126"
+	defaultAddress     = defaultHostname + ":" + defaultPort
+	defaultURL         = "http://" + defaultAddress
+	defaultHTTPTimeout = 2 * time.Second         // defines the current timeout before giving up with the send process
+	traceCountHeader   = "X-Datadog-Trace-Count" // header containing the number of traces in the payload
+)
+
+// transport is an interface for communicating data to the agent.
+type transport interface {
+	// send sends the payload p to the agent using the transport set up.
+	// It returns a non-nil response body when no error occurred.
+	send(p *payload) (body io.ReadCloser, err error)
+	// sendStats sends the given stats payload to the agent.
+	sendStats(s *statsPayload) error
+	// endpoint returns the URL to which the transport will send traces.
+	endpoint() string
+}
+
+type httpTransport struct {
+	traceURL string            // the delivery URL for traces
+	statsURL string            // the delivery URL for stats
+	client   *http.Client      // the HTTP client used in the POST
+	headers  map[string]string // the Transport headers
+}
+
+// newHTTPTransport returns a new transport implementation that sends traces to a
+// trace agent at the given url, using a given *http.Client.
+//
+// In general, using this method is only necessary if you have a trace agent
+// running on a non-default port, if it's located on another machine, or when
+// otherwise needing to customize the transport layer, for instance when using
+// a unix domain socket.
+func newHTTPTransport(url string, client *http.Client) *httpTransport {
+	// initialize the default EncoderPool with Encoder headers
+	defaultHeaders := map[string]string{
+		"Datadog-Meta-Lang":             "go",
+		"Datadog-Meta-Lang-Version":     strings.TrimPrefix(runtime.Version(), "go"),
+		"Datadog-Meta-Lang-Interpreter": runtime.Compiler + "-" + runtime.GOARCH + "-" + runtime.GOOS,
+		"Datadog-Meta-Tracer-Version":   version.Tag,
+		"Content-Type":                  "application/msgpack",
+	}
+	if cid := internal.ContainerID(); cid != "" {
+		defaultHeaders["Datadog-Container-ID"] = cid
+	}
+	return &httpTransport{
+		traceURL: fmt.Sprintf("%s/v0.4/traces", url),
+		statsURL: fmt.Sprintf("%s/v0.6/stats", url),
+		client:   client,
+		headers:  defaultHeaders,
+	}
+}
+
+func (t *httpTransport) sendStats(p *statsPayload) error {
+	var buf bytes.Buffer
+	if err := msgp.Encode(&buf, p); err != nil {
+		return err
+	}
+	req, err := http.NewRequest("POST", t.statsURL, &buf)
+	if err != nil {
+		return err
+	}
+	resp, err := t.client.Do(req)
+	if err != nil {
+		return err
+	}
+	if code := resp.StatusCode; code >= 400 {
+		// error, check the body for context information and
+		// return a nice error.
+		msg := make([]byte, 1000)
+		n, _ := resp.Body.Read(msg)
+		resp.Body.Close()
+		txt := http.StatusText(code)
+		if n > 0 {
+			return fmt.Errorf("%s (Status: %s)", msg[:n], txt)
+		}
+		return fmt.Errorf("%s", txt)
+	}
+	return nil
+}
+
+func (t *httpTransport) send(p *payload) (body io.ReadCloser, err error) {
+	req, err := http.NewRequest("POST", t.traceURL, p)
+	if err != nil {
+		return nil, fmt.Errorf("cannot create http request: %v", err)
+	}
+	for header, value := range t.headers {
+		req.Header.Set(header, value)
+	}
+	req.Header.Set(traceCountHeader, strconv.Itoa(p.itemCount()))
+	req.Header.Set("Content-Length", strconv.Itoa(p.size()))
+	req.Header.Set(headerComputedTopLevel, "yes")
+	if t, ok := traceinternal.GetGlobalTracer().(*tracer); ok {
+		if t.config.canComputeStats() {
+			req.Header.Set("Datadog-Client-Computed-Stats", "yes")
+		}
+		droppedTraces := int(atomic.SwapUint32(&t.droppedP0Traces, 0))
+		partialTraces := int(atomic.SwapUint32(&t.partialTraces, 0))
+		droppedSpans := int(atomic.SwapUint32(&t.droppedP0Spans, 0))
+		if stats := t.statsd; stats != nil {
+			stats.Count("datadog.tracer.dropped_p0_traces", int64(droppedTraces),
+				[]string{fmt.Sprintf("partial:%s", strconv.FormatBool(partialTraces > 0))}, 1)
+			stats.Count("datadog.tracer.dropped_p0_spans", int64(droppedSpans), nil, 1)
+		}
+		req.Header.Set("Datadog-Client-Dropped-P0-Traces", strconv.Itoa(droppedTraces))
+		req.Header.Set("Datadog-Client-Dropped-P0-Spans", strconv.Itoa(droppedSpans))
+	}
+	response, err := t.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if code := response.StatusCode; code >= 400 {
+		// error, check the body for context information and
+		// return a nice error.
+		msg := make([]byte, 1000)
+		n, _ := response.Body.Read(msg)
+		response.Body.Close()
+		txt := http.StatusText(code)
+		if n > 0 {
+			return nil, fmt.Errorf("%s (Status: %s)", msg[:n], txt)
+		}
+		return nil, fmt.Errorf("%s", txt)
+	}
+	return response.Body, nil
+}
+
+func (t *httpTransport) endpoint() string {
+	return t.traceURL
+}
+
+// resolveAgentAddr resolves the trace agent address, filling in any missing host
+// and port using the defaults. Environment variable settings
+// take precedence over configuration.
+func resolveAgentAddr() *url.URL {
+	var host, port string
+	if v := os.Getenv("DD_AGENT_HOST"); v != "" {
+		host = v
+	}
+	if v := os.Getenv("DD_TRACE_AGENT_PORT"); v != "" {
+		port = v
+	}
+	if _, err := os.Stat(defaultSocketAPM); host == "" && port == "" && err == nil {
+		return &url.URL{
+			Scheme: "unix",
+			Path:   defaultSocketAPM,
+		}
+	}
+	if host == "" {
+		host = defaultHostname
+	}
+	if port == "" {
+		port = defaultPort
+	}
+	return &url.URL{
+		Scheme: "http",
+		Host:   fmt.Sprintf("%s:%s", host, port),
+	}
+}
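+
+// For example, with DD_AGENT_HOST=trace-agent.internal and no port override the
+// resolved address would be http://trace-agent.internal:8126, while with neither
+// variable set and the default APM socket present on disk the unix socket URL is
+// returned instead.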
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util.go
new file mode 100644
index 0000000000..67ee16149f
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util.go
@@ -0,0 +1,124 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
+)
+
+// toFloat64 attempts to convert value into a float64. If the value is an integer
+// greater or equal to 2^53 or less than or equal to -2^53, it will not be converted
+// into a float64 to avoid losing precision. If it succeeds in converting, toFloat64
+// returns the value and true, otherwise 0 and false.
+func toFloat64(value interface{}) (f float64, ok bool) {
+	const max = (int64(1) << 53) - 1
+	const min = -max
+	switch i := value.(type) {
+	case byte:
+		return float64(i), true
+	case float32:
+		return float64(i), true
+	case float64:
+		return i, true
+	case int:
+		return float64(i), true
+	case int8:
+		return float64(i), true
+	case int16:
+		return float64(i), true
+	case int32:
+		return float64(i), true
+	case int64:
+		if i > max || i < min {
+			return 0, false
+		}
+		return float64(i), true
+	case uint:
+		return float64(i), true
+	case uint16:
+		return float64(i), true
+	case uint32:
+		return float64(i), true
+	case uint64:
+		if i > uint64(max) {
+			return 0, false
+		}
+		return float64(i), true
+	case samplernames.SamplerName:
+		return float64(i), true
+	default:
+		return 0, false
+	}
+}
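+
+// For example, toFloat64(uint32(7)) would return (7, true), while
+// toFloat64(int64(1)<<53) would return (0, false), since integers of that
+// magnitude may lose precision when converted to a float64.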
+
+// parseUint64 parses a uint64 from either an unsigned 64 bit base-10 string
+// or a signed 64 bit base-10 string representing an unsigned integer
+func parseUint64(str string) (uint64, error) {
+	if strings.HasPrefix(str, "-") {
+		id, err := strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		return uint64(id), nil
+	}
+	return strconv.ParseUint(str, 10, 64)
+}
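+
+// For example, parseUint64("123") would return 123, while parseUint64("-1")
+// would return 18446744073709551615, the unsigned reinterpretation of the
+// signed value.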
+
+func isValidPropagatableTag(k, v string) error {
+	if len(k) == 0 {
+		return fmt.Errorf("key length must be greater than zero")
+	}
+	for _, ch := range k {
+		if ch < 32 || ch > 126 || ch == ' ' || ch == '=' || ch == ',' {
+			return fmt.Errorf("key contains an invalid character %d", ch)
+		}
+	}
+	if len(v) == 0 {
+		return fmt.Errorf("value length must be greater than zero")
+	}
+	for _, ch := range v {
+		if ch < 32 || ch > 126 || ch == ',' {
+			return fmt.Errorf("value contains an invalid character %d", ch)
+		}
+	}
+	return nil
+}
+
+func parsePropagatableTraceTags(s string) (map[string]string, error) {
+	if len(s) == 0 {
+		return nil, nil
+	}
+	tags := make(map[string]string)
+	searchingKey, start := true, 0
+	var key string
+	for i, ch := range s {
+		switch ch {
+		case '=':
+			if searchingKey {
+				if i-start == 0 {
+					return nil, fmt.Errorf("invalid format")
+				}
+				key = s[start:i]
+				searchingKey, start = false, i+1
+			}
+		case ',':
+			if searchingKey || i-start == 0 {
+				return nil, fmt.Errorf("invalid format")
+			}
+			tags[key] = s[start:i]
+			searchingKey, start = true, i+1
+		}
+	}
+	if searchingKey || len(s)-start == 0 {
+		return nil, fmt.Errorf("invalid format")
+	}
+	tags[key] = s[start:]
+	return tags, nil
+}
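+
+// For example, parsePropagatableTraceTags("_dd.p.dm=-4,_dd.p.usr.id=baz") would
+// return {"_dd.p.dm": "-4", "_dd.p.usr.id": "baz"}, while an input without an
+// "=" separator, such as "keyonly", would yield an "invalid format" error.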
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/writer.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/writer.go
new file mode 100644
index 0000000000..8027defa79
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/writer.go
@@ -0,0 +1,339 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"io"
+	"math"
+	"os"
+	"strconv"
+	"sync"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+type traceWriter interface {
+	// add adds traces to be sent by the writer.
+	add([]*span)
+
+	// flush causes the writer to send any buffered traces.
+	flush()
+
+	// stop gracefully shuts down the writer.
+	stop()
+}
+
+type agentTraceWriter struct {
+	// config holds the tracer configuration
+	config *config
+
+	// payload encodes and buffers traces in msgpack format
+	payload *payload
+
+	// climit limits the number of concurrent outgoing connections
+	climit chan struct{}
+
+	// wg waits for all uploads to finish
+	wg sync.WaitGroup
+
+	// prioritySampling is the prioritySampler into which agentTraceWriter will
+	// read sampling rates sent by the agent
+	prioritySampling *prioritySampler
+
+	// statsd is used to send metrics
+	statsd statsdClient
+}
+
+func newAgentTraceWriter(c *config, s *prioritySampler, statsdClient statsdClient) *agentTraceWriter {
+	return &agentTraceWriter{
+		config:           c,
+		payload:          newPayload(),
+		climit:           make(chan struct{}, concurrentConnectionLimit),
+		prioritySampling: s,
+		statsd:           statsdClient,
+	}
+}
+
+func (h *agentTraceWriter) add(trace []*span) {
+	if err := h.payload.push(trace); err != nil {
+		h.statsd.Incr("datadog.tracer.traces_dropped", []string{"reason:encoding_error"}, 1)
+		log.Error("Error encoding msgpack: %v", err)
+	}
+	if h.payload.size() > payloadSizeLimit {
+		h.statsd.Incr("datadog.tracer.flush_triggered", []string{"reason:size"}, 1)
+		h.flush()
+	}
+}
+
+func (h *agentTraceWriter) stop() {
+	h.statsd.Incr("datadog.tracer.flush_triggered", []string{"reason:shutdown"}, 1)
+	h.flush()
+	h.wg.Wait()
+}
+
+// flush will push any currently buffered traces to the server.
+func (h *agentTraceWriter) flush() {
+	if h.payload.itemCount() == 0 {
+		return
+	}
+	h.wg.Add(1)
+	h.climit <- struct{}{}
+	oldp := h.payload
+	h.payload = newPayload()
+	go func(p *payload) {
+		defer func(start time.Time) {
+			// Once the payload has been used, clear the buffer for garbage
+			// collection to avoid a memory leak when references to this object
+			// may still be kept by faulty transport implementations or the
+			// standard library. See dd-trace-go#976
+			p.clear()
+
+			<-h.climit
+			h.wg.Done()
+			h.statsd.Timing("datadog.tracer.flush_duration", time.Since(start), nil, 1)
+		}(time.Now())
+
+		var count, size int
+		var err error
+		for attempt := 0; attempt <= h.config.sendRetries; attempt++ {
+			size, count = p.size(), p.itemCount()
+			log.Debug("Sending payload: size: %d traces: %d\n", size, count)
+			rc, err := h.config.transport.send(p)
+			if err == nil {
+				log.Debug("sent traces after %d attempts", attempt+1)
+				h.statsd.Count("datadog.tracer.flush_bytes", int64(size), nil, 1)
+				h.statsd.Count("datadog.tracer.flush_traces", int64(count), nil, 1)
+				if err := h.prioritySampling.readRatesJSON(rc); err != nil {
+					h.statsd.Incr("datadog.tracer.decode_error", nil, 1)
+				}
+				return
+			}
+			log.Error("failure sending traces (attempt %d), will retry: %v", attempt+1, err)
+			p.reset()
+			time.Sleep(time.Millisecond)
+		}
+		h.statsd.Count("datadog.tracer.traces_dropped", int64(count), []string{"reason:send_failed"}, 1)
+		log.Error("lost %d traces: %v", count, err)
+	}(oldp)
+}
+
+// logWriter specifies the output target of the logTraceWriter; replaced in tests.
+var logWriter io.Writer = os.Stdout
+
+// logTraceWriter encodes traces into a format understood by the Datadog Forwarder
+// (https://github.com/DataDog/datadog-serverless-functions/tree/master/aws/logs_monitoring)
+// and writes them to os.Stdout. This is used to send traces from an AWS Lambda environment.
+type logTraceWriter struct {
+	config    *config
+	buf       bytes.Buffer
+	hasTraces bool
+	w         io.Writer
+	statsd    statsdClient
+}
+
+func newLogTraceWriter(c *config, statsdClient statsdClient) *logTraceWriter {
+	w := &logTraceWriter{
+		config: c,
+		w:      logWriter,
+		statsd: statsdClient,
+	}
+	w.resetBuffer()
+	return w
+}
+
+const (
+	// maxFloatLength is the maximum length that a string encoded by encodeFloat will be.
+	maxFloatLength = 24
+
+	// logBufferSuffix is the final string that the trace writer has to append to a buffer to close
+	// the JSON.
+	logBufferSuffix = "]}\n"
+
+	// logBufferLimit is the maximum size log line allowed by cloudwatch
+	logBufferLimit = 256 * 1024
+)
+
+func (h *logTraceWriter) resetBuffer() {
+	h.buf.Reset()
+	h.buf.WriteString(`{"traces": [`)
+	h.hasTraces = false
+}
+
+// encodeFloat correctly encodes float64 into the JSON format followed by ES6.
+// This code is reworked from Go's encoding/json package
+// (https://github.com/golang/go/blob/go1.15/src/encoding/json/encode.go#L573)
+//
+// One important departure from encoding/json is that infinities and nans are encoded
+// as null rather than signalling an error.
+func encodeFloat(p []byte, f float64) []byte {
+	if math.IsInf(f, 0) || math.IsNaN(f) {
+		return append(p, "null"...)
+	}
+	abs := math.Abs(f)
+	if abs != 0 && (abs < 1e-6 || abs >= 1e21) {
+		p = strconv.AppendFloat(p, f, 'e', -1, 64)
+		// clean up e-09 to e-9
+		n := len(p)
+		if n >= 4 && p[n-4] == 'e' && p[n-3] == '-' && p[n-2] == '0' {
+			p[n-2] = p[n-1]
+			p = p[:n-1]
+		}
+	} else {
+		p = strconv.AppendFloat(p, f, 'f', -1, 64)
+	}
+	return p
+}
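+
+// For example, encodeFloat(nil, 1e-7) would append "1e-7" (not "1e-07"),
+// encodeFloat(nil, 0.25) would append "0.25", and NaN or an infinity would be
+// encoded as "null".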
+
+func (h *logTraceWriter) encodeSpan(s *span) {
+	var scratch [maxFloatLength]byte
+	h.buf.WriteString(`{"trace_id":"`)
+	h.buf.Write(strconv.AppendUint(scratch[:0], uint64(s.TraceID), 16))
+	h.buf.WriteString(`","span_id":"`)
+	h.buf.Write(strconv.AppendUint(scratch[:0], uint64(s.SpanID), 16))
+	h.buf.WriteString(`","parent_id":"`)
+	h.buf.Write(strconv.AppendUint(scratch[:0], uint64(s.ParentID), 16))
+	h.buf.WriteString(`","name":`)
+	h.marshalString(s.Name)
+	h.buf.WriteString(`,"resource":`)
+	h.marshalString(s.Resource)
+	h.buf.WriteString(`,"error":`)
+	h.buf.Write(strconv.AppendInt(scratch[:0], int64(s.Error), 10))
+	h.buf.WriteString(`,"meta":{`)
+	first := true
+	for k, v := range s.Meta {
+		if first {
+			first = false
+		} else {
+			h.buf.WriteString(`,`)
+		}
+		h.marshalString(k)
+		h.buf.WriteString(":")
+		h.marshalString(v)
+	}
+	h.buf.WriteString(`},"metrics":{`)
+	first = true
+	for k, v := range s.Metrics {
+		if math.IsNaN(v) || math.IsInf(v, 0) {
+			// The trace forwarder does not support infinity or nan, so we do not send metrics with those values.
+			continue
+		}
+		if first {
+			first = false
+		} else {
+			h.buf.WriteString(`,`)
+		}
+		h.marshalString(k)
+		h.buf.WriteString(`:`)
+		h.buf.Write(encodeFloat(scratch[:0], v))
+	}
+	h.buf.WriteString(`},"start":`)
+	h.buf.Write(strconv.AppendInt(scratch[:0], s.Start, 10))
+	h.buf.WriteString(`,"duration":`)
+	h.buf.Write(strconv.AppendInt(scratch[:0], s.Duration, 10))
+	h.buf.WriteString(`,"service":`)
+	h.marshalString(s.Service)
+	h.buf.WriteString(`}`)
+}
+
+// marshalString marshals the string str as JSON into the writer's buffer.
+// Should be used whenever writing non-constant string data to ensure correct sanitization.
+func (h *logTraceWriter) marshalString(str string) {
+	m, err := json.Marshal(str)
+	if err != nil {
+		log.Error("Error marshaling value %q: %v", str, err)
+	} else {
+		h.buf.Write(m)
+	}
+}
+
+type encodingError struct {
+	cause      error
+	dropReason string
+}
+
+// writeTrace makes an effort to write the trace into the current buffer. It returns
+// the number of spans (n) that it wrote and an error (err), if one occurred.
+// n may be less than len(trace), meaning that only the first n spans of the trace
+// fit into the current buffer. Once the buffer is flushed, the remaining spans
+// from the trace can be retried.
+// An error, if one is returned, indicates that a span in the trace is too large
+// to fit in one buffer, and the trace cannot be written.
+func (h *logTraceWriter) writeTrace(trace []*span) (n int, err *encodingError) {
+	startn := h.buf.Len()
+	if !h.hasTraces {
+		h.buf.WriteByte('[')
+	} else {
+		h.buf.WriteString(", [")
+	}
+	written := 0
+	for i, s := range trace {
+		n := h.buf.Len()
+		if i > 0 {
+			h.buf.WriteByte(',')
+		}
+		h.encodeSpan(s)
+		if h.buf.Len() > logBufferLimit-len(logBufferSuffix) {
+			// This span is too big to fit in the current buffer.
+			if i == 0 {
+				// This was the first span in this trace. This means we should truncate
+				// everything we wrote in writeTrace
+				h.buf.Truncate(startn)
+				if !h.hasTraces {
+					// This is the first span of the first trace in the buffer and it's too big.
+					// We will never be able to send this trace, so we will drop it.
+					return 0, &encodingError{cause: errors.New("span too large for buffer"), dropReason: "trace_too_large"}
+				}
+				return 0, nil
+			}
+			// This span was too big, but it might fit in the next buffer.
+			// We can finish this trace and try again with an empty buffer (see *logTraceWriter.add)
+			h.buf.Truncate(n)
+			break
+		}
+		written++
+	}
+	h.buf.WriteByte(']')
+	h.hasTraces = true
+	return written, nil
+}
+
+// add adds a trace to the writer's buffer.
+func (h *logTraceWriter) add(trace []*span) {
+	// Try adding traces to the buffer until we flush them all or encounter an error.
+	for len(trace) > 0 {
+		n, err := h.writeTrace(trace)
+		if err != nil {
+			log.Error("Lost a trace: %s", err.cause)
+			h.statsd.Count("datadog.tracer.traces_dropped", 1, []string{"reason:" + err.dropReason}, 1)
+			return
+		}
+		trace = trace[n:]
+		// If there are traces left that didn't fit into the buffer, flush the buffer and loop to
+		// write the remaining spans.
+		if len(trace) > 0 {
+			h.flush()
+		}
+	}
+}
+
+func (h *logTraceWriter) stop() {
+	h.statsd.Incr("datadog.tracer.flush_triggered", []string{"reason:shutdown"}, 1)
+	h.flush()
+}
+
+// flush will write any buffered traces to standard output.
+func (h *logTraceWriter) flush() {
+	if !h.hasTraces {
+		return
+	}
+	h.buf.WriteString(logBufferSuffix)
+	h.w.Write(h.buf.Bytes())
+	h.resetBuffer()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/agent.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/agent.go
new file mode 100644
index 0000000000..c8f835166c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/agent.go
@@ -0,0 +1,36 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+package internal
+
+import (
+	"net/url"
+	"os"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// AgentURLFromEnv determines the trace agent URL from environment variable
+// DD_TRACE_AGENT_URL. If the determined value is valid and the scheme is
+// supported (unix, http or https), it will return an *url.URL. Otherwise,
+// it returns nil.
+func AgentURLFromEnv() *url.URL {
+	agentURL := os.Getenv("DD_TRACE_AGENT_URL")
+	if agentURL == "" {
+		return nil
+	}
+	u, err := url.Parse(agentURL)
+	if err != nil {
+		log.Warn("Failed to parse DD_TRACE_AGENT_URL: %v", err)
+		return nil
+	}
+	switch u.Scheme {
+	case "unix", "http", "https":
+		return u
+	default:
+		log.Warn("Unsupported protocol %q in Agent URL %q. Must be one of: http, https, unix.", u.Scheme, agentURL)
+		return nil
+	}
+}
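+
+// For example, DD_TRACE_AGENT_URL=unix:///var/run/datadog/apm.socket would be
+// returned as a unix-scheme *url.URL, while an unsupported scheme such as
+// "ftp://localhost:8126" would be rejected with a warning and nil returned.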
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/appsec.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/appsec.go
new file mode 100644
index 0000000000..98be47e8df
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/appsec.go
@@ -0,0 +1,183 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+//go:build appsec
+// +build appsec
+
+package appsec
+
+import (
+	"fmt"
+	"sync"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig"
+
+	"github.com/DataDog/go-libddwaf"
+)
+
+// Enabled returns true when AppSec is up and running, meaning that the appsec build tag is enabled, the env var
+// DD_APPSEC_ENABLED is set to true, and the tracer is started.
+func Enabled() bool {
+	mu.RLock()
+	defer mu.RUnlock()
+	return activeAppSec != nil && activeAppSec.started
+}
+
+// Start starts AppSec when enabled by both using the appsec build tag and
+// setting the environment variable DD_APPSEC_ENABLED to true.
+func Start(opts ...StartOption) {
+	// AppSec can start either:
+	// 1. Manually thanks to DD_APPSEC_ENABLED
+	// 2. Remotely when DD_APPSEC_ENABLED is undefined
+	// Note: DD_APPSEC_ENABLED=false takes precedence over remote configuration
+	// and forces AppSec to be disabled.
+	enabled, set, err := isEnabled()
+	if err != nil {
+		logUnexpectedStartError(err)
+		return
+	}
+
+	// Check if AppSec is explicitly disabled
+	if set && !enabled {
+		log.Debug("appsec: disabled by the configuration: set the environment variable DD_APPSEC_ENABLED to true to enable it")
+		return
+	}
+
+	// Check whether libddwaf - required for Threats Detection - is supported or not
+	if supported, err := waf.SupportsTarget(); !supported {
+		log.Error("appsec: threats detection is not supported: %v\nNo security activities will be collected. Please contact support at https://docs.datadoghq.com/help/ for help.", err)
+		return
+	}
+
+	// From this point we know that AppSec is either enabled or can be enabled through remote config
+	cfg, err := newConfig()
+	if err != nil {
+		logUnexpectedStartError(err)
+		return
+	}
+	for _, opt := range opts {
+		opt(cfg)
+	}
+	appsec := newAppSec(cfg)
+
+	// Start the remote configuration client
+	log.Debug("appsec: starting the remote configuration client")
+	appsec.startRC()
+
+	if !set {
+		// AppSec is not enforced by the env var and can be enabled through remote config
+		log.Debug("appsec: %s is not set, appsec won't start until activated through remote configuration", enabledEnvVar)
+		if err := appsec.enableRemoteActivation(); err != nil {
+			// ASM is not enabled and can't be enabled through remote configuration. Nothing more can be done.
+			logUnexpectedStartError(err)
+			appsec.stopRC()
+			return
+		}
+		log.Debug("appsec: awaiting for possible remote activation")
+	} else if err := appsec.start(); err != nil { // AppSec is specifically enabled
+		logUnexpectedStartError(err)
+		appsec.stopRC()
+		return
+	}
+	setActiveAppSec(appsec)
+}
+
+// Implement the AppSec log message C1
+func logUnexpectedStartError(err error) {
+	log.Error("appsec: could not start because of an unexpected error: %v\nNo security activities will be collected. Please contact support at https://docs.datadoghq.com/help/ for help.", err)
+}
+
+// Stop AppSec.
+func Stop() {
+	setActiveAppSec(nil)
+}
+
+var (
+	activeAppSec *appsec
+	mu           sync.RWMutex
+)
+
+func setActiveAppSec(a *appsec) {
+	mu.Lock()
+	defer mu.Unlock()
+	if activeAppSec != nil {
+		activeAppSec.stopRC()
+		activeAppSec.stop()
+	}
+	activeAppSec = a
+}
+
+type appsec struct {
+	cfg       *Config
+	limiter   *TokenTicker
+	rc        *remoteconfig.Client
+	wafHandle *wafHandle
+	started   bool
+}
+
+func newAppSec(cfg *Config) *appsec {
+	var client *remoteconfig.Client
+	var err error
+	if cfg.rc != nil {
+		client, err = remoteconfig.NewClient(*cfg.rc)
+	}
+	if err != nil {
+		log.Error("appsec: Remote config: disabled due to a client creation error: %v", err)
+	}
+	return &appsec{
+		cfg: cfg,
+		rc:  client,
+	}
+}
+
+// Start AppSec by registering its security protections according to the configured security rules.
+func (a *appsec) start() error {
+	// Load the waf to catch early errors if any
+	if ok, err := waf.Load(); err != nil {
+		// 1. If there is an error and the loading is not ok: log as an unexpected error case and quit appsec
+		// Note that we assume here that the test for the unsupported target has been done before calling
+		// this method, so it is now considered an error for this method
+		if !ok {
+			return fmt.Errorf("error while loading libddwaf: %w", err)
+		}
+		// 2. If there is an error but the loading succeeded: log it as a non-critical error and keep appsec usable
+		log.Error("appsec: non-critical error while loading libddwaf: %v", err)
+	}
+
+	a.limiter = NewTokenTicker(int64(a.cfg.traceRateLimit), int64(a.cfg.traceRateLimit))
+	a.limiter.Start()
+	// Register the WAF operation event listener
+	if err := a.swapWAF(a.cfg.rulesManager.latest); err != nil {
+		return err
+	}
+	a.enableRCBlocking()
+	a.started = true
+	log.Info("appsec: up and running")
+	// TODO: log the config like the APM tracer does but we first need to define
+	//   a user-friendly string representation of our config and its sources
+	return nil
+}
+
+// Stop AppSec by unregistering the security protections.
+func (a *appsec) stop() {
+	if !a.started {
+		return
+	}
+	a.started = false
+	// Disable RC blocking first so that the following is guaranteed not to be concurrent anymore.
+	a.disableRCBlocking()
+
+	// Disable the currently applied instrumentation
+	dyngo.SwapRootOperation(nil)
+	if a.wafHandle != nil {
+		a.wafHandle.Close()
+		a.wafHandle = nil
+	}
+	// TODO: block until no more requests are using dyngo operations
+
+	a.limiter.Stop()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/appsec_disabled.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/appsec_disabled.go
new file mode 100644
index 0000000000..09f262175f
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/appsec_disabled.go
@@ -0,0 +1,38 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+//go:build !appsec
+// +build !appsec
+
+package appsec
+
+import "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+
+// Enabled returns true when AppSec is up and running, meaning that the appsec build tag is enabled, the env var
+// DD_APPSEC_ENABLED is set to true, and the tracer is started. With the appsec
+// build tag disabled, this implementation always returns false.
+func Enabled() bool {
+	return false
+}
+
+// Start AppSec when enabled by both using the appsec build tag and
+// setting the environment variable DD_APPSEC_ENABLED to true.
+func Start(...StartOption) {
+	if enabled, _, err := isEnabled(); err != nil {
+		// Something went wrong while checking the DD_APPSEC_ENABLED configuration
+		log.Error("appsec: error while checking if appsec is enabled: %v", err)
+	} else if enabled {
+		// The user is willing to enable appsec but didn't use the build tag
+		log.Info("appsec: enabled by the configuration but has not been activated during the compilation: please add the go build tag `appsec` to your build options to enable it")
+	} else {
+		// The user is not willing to start appsec, a simple debug log is enough
+		log.Debug("appsec: not been not enabled during the compilation: please add the go build tag `appsec` to your build options to enable it")
+	}
+}
+
+// Stop AppSec.
+func Stop() {}
+
+// staticRecommendedRules is an empty stub of the recommended security rules,
+// used when the appsec build tag is disabled.
+const staticRecommendedRules = ""
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config.go
new file mode 100644
index 0000000000..1aaf419c9b
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config.go
@@ -0,0 +1,189 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package appsec
+
+import (
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+	"time"
+	"unicode"
+	"unicode/utf8"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig"
+)
+
+const (
+	enabledEnvVar         = "DD_APPSEC_ENABLED"
+	rulesEnvVar           = "DD_APPSEC_RULES"
+	wafTimeoutEnvVar      = "DD_APPSEC_WAF_TIMEOUT"
+	traceRateLimitEnvVar  = "DD_APPSEC_TRACE_RATE_LIMIT"
+	obfuscatorKeyEnvVar   = "DD_APPSEC_OBFUSCATION_PARAMETER_KEY_REGEXP"
+	obfuscatorValueEnvVar = "DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP"
+)
+
+const (
+	defaultWAFTimeout           = 4 * time.Millisecond
+	defaultTraceRate            = 100 // up to 100 appsec traces/s
+	defaultObfuscatorKeyRegex   = `(?i)(?:p(?:ass)?w(?:or)?d|pass(?:_?phrase)?|secret|(?:api_?|private_?|public_?)key)|token|consumer_?(?:id|key|secret)|sign(?:ed|ature)|bearer|authorization`
+	defaultObfuscatorValueRegex = `(?i)(?:p(?:ass)?w(?:or)?d|pass(?:_?phrase)?|secret|(?:api_?|private_?|public_?|access_?|secret_?)key(?:_?id)?|token|consumer_?(?:id|key|secret)|sign(?:ed|ature)?|auth(?:entication|orization)?)(?:\s*=[^;]|"\s*:\s*"[^"]+")|bearer\s+[a-z0-9\._\-]+|token:[a-z0-9]{13}|gh[opsu]_[0-9a-zA-Z]{36}|ey[I-L][\w=-]+\.ey[I-L][\w=-]+(?:\.[\w.+\/=-]+)?|[\-]{5}BEGIN[a-z\s]+PRIVATE\sKEY[\-]{5}[^\-]+[\-]{5}END[a-z\s]+PRIVATE\sKEY|ssh-rsa\s*[a-z0-9\/\.+]{100,}`
+)
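+
+// The two obfuscator regular expressions above are the defaults passed to the WAF
+// when the corresponding DD_APPSEC_OBFUSCATION_PARAMETER_*_REGEXP env vars are not
+// set, so that matching parameter keys and values can be obfuscated in the
+// collected security events.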
+
+// StartOption is used to customize the AppSec configuration when invoked with appsec.Start()
+type StartOption func(c *Config)
+
+// Config is the AppSec configuration.
+type Config struct {
+	// rules loaded via the env var DD_APPSEC_RULES. When not set, the builtin rules will be used
+	// and live-updated with remote configuration.
+	rulesManager *rulesManager
+	// Maximum WAF execution time
+	wafTimeout time.Duration
+	// AppSec trace rate limit (traces per second).
+	traceRateLimit uint
+	// Obfuscator configuration parameters
+	obfuscator ObfuscatorConfig
+	// rc is the remote configuration client used to receive product configuration updates. Nil if rc is disabled (default)
+	rc *remoteconfig.ClientConfig
+}
+
+// WithRCConfig sets the AppSec remote config client configuration to the specified cfg
+func WithRCConfig(cfg remoteconfig.ClientConfig) StartOption {
+	return func(c *Config) {
+		c.rc = &cfg
+	}
+}
+
+// ObfuscatorConfig wraps the key and value regexp to be passed to the WAF to perform obfuscation.
+type ObfuscatorConfig struct {
+	KeyRegex   string
+	ValueRegex string
+}
+
+// isEnabled returns true when appsec is enabled, that is when the environment
+// variable DD_APPSEC_ENABLED is set to true.
+// It also reports whether the env var is actually set in the environment.
+func isEnabled() (enabled bool, set bool, err error) {
+	enabledStr, set := os.LookupEnv(enabledEnvVar)
+	if enabledStr == "" {
+		return false, set, nil
+	} else if enabled, err = strconv.ParseBool(enabledStr); err != nil {
+		return false, set, fmt.Errorf("could not parse %s value `%s` as a boolean value", enabledEnvVar, enabledStr)
+	} else {
+		return enabled, set, nil
+	}
+}
+
+func newConfig() (*Config, error) {
+	rules, err := readRulesConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	r, err := newRulesManager(rules)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Config{
+		rulesManager:   r,
+		wafTimeout:     readWAFTimeoutConfig(),
+		traceRateLimit: readRateLimitConfig(),
+		obfuscator:     readObfuscatorConfig(),
+	}, nil
+}
+
+func readWAFTimeoutConfig() (timeout time.Duration) {
+	timeout = defaultWAFTimeout
+	value := os.Getenv(wafTimeoutEnvVar)
+	if value == "" {
+		return
+	}
+
+	// Check if the value ends with a letter, which means the user has
+	// specified their own time duration unit(s) such as 1s200ms.
+	// Otherwise, default to microseconds.
+	if lastRune, _ := utf8.DecodeLastRuneInString(value); !unicode.IsLetter(lastRune) {
+		value += "us" // Add the default microsecond time-duration suffix
+	}
+
+	parsed, err := time.ParseDuration(value)
+	if err != nil {
+		logEnvVarParsingError(wafTimeoutEnvVar, value, err, timeout)
+		return
+	}
+	if parsed <= 0 {
+		logUnexpectedEnvVarValue(wafTimeoutEnvVar, parsed, "expecting a strictly positive duration", timeout)
+		return
+	}
+	return parsed
+}
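+
+// For example, DD_APPSEC_WAF_TIMEOUT=2ms is parsed as-is, while a bare number such
+// as DD_APPSEC_WAF_TIMEOUT=4000 is interpreted as 4000 microseconds by the
+// defaulting logic above.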
+
+func readRateLimitConfig() (rate uint) {
+	rate = defaultTraceRate
+	value := os.Getenv(traceRateLimitEnvVar)
+	if value == "" {
+		return rate
+	}
+	parsed, err := strconv.ParseUint(value, 10, 0)
+	if err != nil {
+		logEnvVarParsingError(traceRateLimitEnvVar, value, err, rate)
+		return
+	}
+	if parsed == 0 {
+		logUnexpectedEnvVarValue(traceRateLimitEnvVar, parsed, "expecting a value strictly greater than 0", rate)
+		return
+	}
+	return uint(parsed)
+}
+
+func readObfuscatorConfig() ObfuscatorConfig {
+	keyRE := readObfuscatorConfigRegexp(obfuscatorKeyEnvVar, defaultObfuscatorKeyRegex)
+	valueRE := readObfuscatorConfigRegexp(obfuscatorValueEnvVar, defaultObfuscatorValueRegex)
+	return ObfuscatorConfig{KeyRegex: keyRE, ValueRegex: valueRE}
+}
+
+func readObfuscatorConfigRegexp(name, defaultValue string) string {
+	val, present := os.LookupEnv(name)
+	if !present {
+		log.Debug("appsec: %s not defined, starting with the default obfuscator regular expression", name)
+		return defaultValue
+	}
+	if _, err := regexp.Compile(val); err != nil {
+		log.Error("appsec: could not compile the configured obfuscator regular expression `%s=%s`. Using the default value instead", name, val)
+		return defaultValue
+	}
+	log.Debug("appsec: starting with the configured obfuscator regular expression %s", name)
+	return val
+}
+
+func readRulesConfig() (rules []byte, err error) {
+	rules = []byte(staticRecommendedRules)
+	filepath := os.Getenv(rulesEnvVar)
+	if filepath == "" {
+		log.Debug("appsec: using the default built-in recommended security rules")
+		return rules, nil
+	}
+	buf, err := os.ReadFile(filepath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			log.Error("appsec: could not find the rules file in path %s: %v.", filepath, err)
+		}
+		return nil, err
+	}
+	log.Debug("appsec: using the security rules from file %s", filepath)
+	return buf, nil
+}
+
+func logEnvVarParsingError(name, value string, err error, defaultValue interface{}) {
+	log.Error("appsec: could not parse the env var %s=%s as a duration: %v. Using default value %v.", name, value, err, defaultValue)
+}
+
+func logUnexpectedEnvVarValue(name string, value interface{}, reason string, defaultValue interface{}) {
+	log.Error("appsec: unexpected configuration value of %s=%v: %s. Using default value %v.", name, value, reason, defaultValue)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/common.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/common.go
new file mode 100644
index 0000000000..6784deff57
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/common.go
@@ -0,0 +1,166 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+// Package instrumentation holds code commonly used between all instrumentation declinations (currently httpsec/grpcsec).
+package instrumentation
+
+import (
+	"encoding/json"
+	"fmt"
+	"sync"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
+)
+
+// BlockedRequestTag used to convey whether a request is blocked
+const BlockedRequestTag = "appsec.blocked"
+
+type (
+	// TagSetter is the interface needed to set a span tag.
+	TagSetter interface {
+		SetTag(string, interface{})
+	}
+	// TagsHolder wraps a map holding tags. The purpose of this struct is to be used by composition in an Operation
+	// to allow said operation to handle tags addition/retrieval. See httpsec/http.go and grpcsec/grpc.go.
+	TagsHolder struct {
+		tags map[string]interface{}
+		mu   sync.Mutex
+	}
+	// SecurityEventsHolder is a wrapper around a thread safe security events slice. The purpose of this struct is to be
+	// used by composition in an Operation to allow said operation to handle security events addition/retrieval.
+	// See httpsec/http.go and grpcsec/grpc.go.
+	SecurityEventsHolder struct {
+		events []json.RawMessage
+		mu     sync.RWMutex
+	}
+	// ContextKey is used as a key to store operations in the request's context (gRPC/HTTP)
+	ContextKey struct{}
+)
+
+// NewTagsHolder returns a new instance of a TagsHolder struct.
+func NewTagsHolder() TagsHolder {
+	return TagsHolder{tags: map[string]interface{}{}}
+}
+
+// AddTag adds the key/value pair to the tags map
+func (m *TagsHolder) AddTag(k string, v interface{}) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.tags[k] = v
+}
+
+// Tags returns the tags map
+func (m *TagsHolder) Tags() map[string]interface{} {
+	return m.tags
+}
+
+// AddSecurityEvents adds the security events to the collected events list.
+// Thread safe.
+func (s *SecurityEventsHolder) AddSecurityEvents(events ...json.RawMessage) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.events = append(s.events, events...)
+}
+
+// Events returns the list of stored events.
+func (s *SecurityEventsHolder) Events() []json.RawMessage {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.events
+}
+
+// ClearEvents clears the list of stored events
+func (s *SecurityEventsHolder) ClearEvents() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.events = s.events[0:0]
+}
+
+// SetTags fills the span tags using the key/value pairs found in `tags`
+func SetTags(span TagSetter, tags map[string]interface{}) {
+	for k, v := range tags {
+		span.SetTag(k, v)
+	}
+}
+
+// SetStringTags fills the span tags using the key/value pairs of strings found
+// in `tags`
+func SetStringTags(span TagSetter, tags map[string]string) {
+	for k, v := range tags {
+		span.SetTag(k, v)
+	}
+}
+
+// SetAppSecEnabledTags sets the AppSec-specific span tags that are expected to be in
+// the web service entry span (span of type `web`) when AppSec is enabled.
+func SetAppSecEnabledTags(span TagSetter) {
+	span.SetTag("_dd.appsec.enabled", 1)
+	span.SetTag("_dd.runtime_family", "go")
+}
+
+// SetEventSpanTags sets the security event span tags into the service entry span.
+func SetEventSpanTags(span TagSetter, events []json.RawMessage) error {
+	// Set the appsec event span tag
+	val, err := makeEventTagValue(events)
+	if err != nil {
+		return err
+	}
+	span.SetTag("_dd.appsec.json", string(val))
+	// Keep this span due to the security event
+	//
+	// This is a workaround to tell the tracer that the trace was kept by AppSec.
+	// Passing any other value than `appsec.SamplerAppSec` has no effect.
+	// Customers should use `span.SetTag(ext.ManualKeep, true)` pattern
+	// to keep the trace, manually.
+	span.SetTag(ext.ManualKeep, samplernames.AppSec)
+	span.SetTag("_dd.origin", "appsec")
+	// Set the appsec.event tag needed by the appsec backend
+	span.SetTag("appsec.event", true)
+	return nil
+}
+
+// Create the value of the security event tag.
+// TODO(Julio-Guerra): a future libddwaf version should return something
+//
+//	avoiding us the following events concatenation logic which currently
+//	involves unserializing the top-level JSON arrays to concatenate them
+//	together.
+//
+// TODO(Julio-Guerra): avoid serializing the json in the request hot path
+func makeEventTagValue(events []json.RawMessage) (json.RawMessage, error) {
+	var v interface{}
+	if l := len(events); l == 1 {
+		// eventTag is the structure to use in the `_dd.appsec.json` span tag.
+		// In this case of 1 event, it already is an array as expected.
+		type eventTag struct {
+			Triggers json.RawMessage `json:"triggers"`
+		}
+		v = eventTag{Triggers: events[0]}
+	} else {
+		// eventTag is the structure to use in the `_dd.appsec.json` span tag.
+		// With more than one event, we need to concatenate the arrays together
+		// (ie. convert [][]json.RawMessage into []json.RawMessage).
+		type eventTag struct {
+			Triggers []json.RawMessage `json:"triggers"`
+		}
+		concatenated := make([]json.RawMessage, 0, l) // at least len(events)
+		for _, event := range events {
+			// Unmarshal the top level array
+			var tmp []json.RawMessage
+			if err := json.Unmarshal(event, &tmp); err != nil {
+				return nil, fmt.Errorf("unexpected error while unserializing the appsec event `%s`: %v", string(event), err)
+			}
+			concatenated = append(concatenated, tmp...)
+		}
+		v = eventTag{Triggers: concatenated}
+	}
+
+	tag, err := json.Marshal(v)
+	if err != nil {
+		return nil, fmt.Errorf("unexpected error while serializing the appsec event span tag: %v", err)
+	}
+	return tag, nil
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/grpcsec/grpc.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/grpcsec/grpc.go
new file mode 100644
index 0000000000..f95e9418ec
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/grpcsec/grpc.go
@@ -0,0 +1,209 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package grpcsec is the gRPC instrumentation API and contract for AppSec,
+// defining an abstract run-time representation of gRPC handlers.
+// gRPC integrations must use this package to enable AppSec features for gRPC;
+// AppSec listens to this package's operation events.
+package grpcsec
+
+import (
+	"context"
+	"encoding/json"
+	"reflect"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation"
+
+	"github.com/DataDog/appsec-internal-go/netip"
+)
+
+// Abstract gRPC server handler operation definitions. It is based on two
+// operations allowing to describe every type of RPC: the HandlerOperation type
+// which represents the RPC handler, and the ReceiveOperation type which
+// represents the messages the RPC handler receives during its lifetime.
+// This means that the ReceiveOperation(s) will happen within the
+// HandlerOperation.
+// Every type of RPC (unary, client streaming, server streaming, and
+// bidirectional streaming) can be represented with a HandlerOperation
+// having one or several ReceiveOperations.
+// The send operation is not required for now and therefore not defined, which
+// means that server and bidirectional streaming RPCs currently have the same
+// run-time representation as unary and client streaming RPCs.
+type (
+	// HandlerOperation represents a gRPC server handler operation.
+	// It must be created with StartHandlerOperation() and finished with its
+	// Finish() method.
+	// Security events observed during the operation lifetime should be added
+	// to the operation using its AddSecurityEvent() method.
+	HandlerOperation struct {
+		dyngo.Operation
+		instrumentation.TagsHolder
+		instrumentation.SecurityEventsHolder
+		Error error
+	}
+	// HandlerOperationArgs is the gRPC handler operation arguments.
+	HandlerOperationArgs struct {
+		// Metadata is the gRPC metadata received by the handler.
+		// Corresponds to the address `grpc.server.request.metadata`.
+		Metadata map[string][]string
+		ClientIP netip.Addr
+	}
+	// HandlerOperationRes is the gRPC handler operation results. Empty as of today.
+	HandlerOperationRes struct{}
+
+	// ReceiveOperation type representing a gRPC handler receive operation. It must
+	// be created with StartReceiveOperation() and finished with its Finish().
+	ReceiveOperation struct {
+		dyngo.Operation
+	}
+	// ReceiveOperationArgs is the gRPC handler receive operation arguments
+	// Empty as of today.
+	ReceiveOperationArgs struct{}
+	// ReceiveOperationRes is the gRPC handler receive operation results which
+	// contains the message the gRPC handler received.
+	ReceiveOperationRes struct {
+		// Message received by the gRPC handler.
+		// Corresponds to the address `grpc.server.request.message`.
+		Message interface{}
+	}
+
+	// MonitoringError is used to convey a gRPC error that also embeds a request status code
+	MonitoringError struct {
+		msg    string
+		status uint32
+	}
+)
+
+// NewMonitoringError creates and returns a new gRPC monitoring error wrapping
+// the given message and gRPC status code.
+func NewMonitoringError(msg string, code uint32) error {
+	return &MonitoringError{
+		msg:    msg,
+		status: code,
+	}
+}
+
+// GRPCStatus returns the gRPC status code embedded in the error
+func (e *MonitoringError) GRPCStatus() uint32 {
+	return e.status
+}
+
+// Error implements the error interface
+func (e *MonitoringError) Error() string {
+	return e.msg
+}
+
+// TODO(Julio-Guerra): create a go-generate tool to generate the types, vars and methods below
+
+// StartHandlerOperation starts a gRPC server handler operation, along with the
+// given arguments and parent operation, and emits a start event up in the
+// operation stack. When parent is nil, the operation is linked to the global
+// root operation.
+func StartHandlerOperation(ctx context.Context, args HandlerOperationArgs, parent dyngo.Operation, listeners ...dyngo.DataListener) (context.Context, *HandlerOperation) {
+	op := &HandlerOperation{
+		Operation:  dyngo.NewOperation(parent),
+		TagsHolder: instrumentation.NewTagsHolder(),
+	}
+	for _, l := range listeners {
+		op.OnData(l)
+	}
+	newCtx := context.WithValue(ctx, instrumentation.ContextKey{}, op)
+	dyngo.StartOperation(op, args)
+	return newCtx, op
+}
+
+// Finish the gRPC handler operation, along with the given results, and emit a
+// finish event up in the operation stack.
+func (op *HandlerOperation) Finish(res HandlerOperationRes) []json.RawMessage {
+	dyngo.FinishOperation(op, res)
+	return op.Events()
+}
+
+// gRPC handler operation's start and finish event callback function types.
+type (
+	// OnHandlerOperationStart function type, called when a gRPC handler
+	// operation starts.
+	OnHandlerOperationStart func(*HandlerOperation, HandlerOperationArgs)
+	// OnHandlerOperationFinish function type, called when a gRPC handler
+	// operation finishes.
+	OnHandlerOperationFinish func(*HandlerOperation, HandlerOperationRes)
+)
+
+var (
+	handlerOperationArgsType = reflect.TypeOf((*HandlerOperationArgs)(nil)).Elem()
+	handlerOperationResType  = reflect.TypeOf((*HandlerOperationRes)(nil)).Elem()
+)
+
+// ListenedType returns the type a OnHandlerOperationStart event listener
+// listens to, which is the HandlerOperationArgs type.
+func (OnHandlerOperationStart) ListenedType() reflect.Type { return handlerOperationArgsType }
+
+// Call the underlying event listener function by performing the type-assertion
+// on v whose type is the one returned by ListenedType().
+func (f OnHandlerOperationStart) Call(op dyngo.Operation, v interface{}) {
+	f(op.(*HandlerOperation), v.(HandlerOperationArgs))
+}
+
+// ListenedType returns the type a OnHandlerOperationFinish event listener
+// listens to, which is the HandlerOperationRes type.
+func (OnHandlerOperationFinish) ListenedType() reflect.Type { return handlerOperationResType }
+
+// Call the underlying event listener function by performing the type-assertion
+// on v whose type is the one returned by ListenedType().
+func (f OnHandlerOperationFinish) Call(op dyngo.Operation, v interface{}) {
+	f(op.(*HandlerOperation), v.(HandlerOperationRes))
+}
+
+// StartReceiveOperation starts a receive operation of a gRPC handler, along
+// with the given arguments and parent operation, and emits a start event up in
+// the operation stack. When parent is nil, the operation is linked to the
+// global root operation.
+func StartReceiveOperation(args ReceiveOperationArgs, parent dyngo.Operation) ReceiveOperation {
+	op := ReceiveOperation{Operation: dyngo.NewOperation(parent)}
+	dyngo.StartOperation(op, args)
+	return op
+}
+
+// Finish the gRPC receive operation, along with the given results, and emit a
+// finish event up in the operation stack.
+func (op ReceiveOperation) Finish(res ReceiveOperationRes) {
+	dyngo.FinishOperation(op, res)
+}
+
+// gRPC receive operation's start and finish event callback function types.
+type (
+	// OnReceiveOperationStart function type, called when a gRPC receive
+	// operation starts.
+	OnReceiveOperationStart func(ReceiveOperation, ReceiveOperationArgs)
+	// OnReceiveOperationFinish function type, called when a gRPC receive
+	// operation finishes.
+	OnReceiveOperationFinish func(ReceiveOperation, ReceiveOperationRes)
+)
+
+var (
+	receiveOperationArgsType = reflect.TypeOf((*ReceiveOperationArgs)(nil)).Elem()
+	receiveOperationResType  = reflect.TypeOf((*ReceiveOperationRes)(nil)).Elem()
+)
+
+// ListenedType returns the type an OnReceiveOperationStart event listener
+// listens to, which is the ReceiveOperationArgs type.
+func (OnReceiveOperationStart) ListenedType() reflect.Type { return receiveOperationArgsType }
+
+// Call the underlying event listener function by performing the type-assertion
+// on v whose type is the one returned by ListenedType().
+func (f OnReceiveOperationStart) Call(op dyngo.Operation, v interface{}) {
+	f(op.(ReceiveOperation), v.(ReceiveOperationArgs))
+}
+
+// ListenedType returns the type an OnReceiveOperationFinish event listener
+// listens to, which is the ReceiveOperationRes type.
+func (OnReceiveOperationFinish) ListenedType() reflect.Type { return receiveOperationResType }
+
+// Call the underlying event listener function by performing the type-assertion
+// on v whose type is the one returned by ListenedType().
+func (f OnReceiveOperationFinish) Call(op dyngo.Operation, v interface{}) {
+	f(op.(ReceiveOperation), v.(ReceiveOperationRes))
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/grpcsec/tags.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/grpcsec/tags.go
new file mode 100644
index 0000000000..871e81c266
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/grpcsec/tags.go
@@ -0,0 +1,35 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package grpcsec
+
+import (
+	"encoding/json"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/httpsec"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// SetSecurityEventTags sets the AppSec-specific span tags into the service entry
+// span when a security event occurred.
+func SetSecurityEventTags(span ddtrace.Span, events []json.RawMessage, md map[string][]string) {
+	if err := setSecurityEventTags(span, events, md); err != nil {
+		log.Error("appsec: %v", err)
+	}
+}
+
+func setSecurityEventTags(span ddtrace.Span, events []json.RawMessage, md map[string][]string) error {
+	if err := instrumentation.SetEventSpanTags(span, events); err != nil {
+		return err
+	}
+
+	for h, v := range httpsec.NormalizeHTTPHeaders(md) {
+		span.SetTag("grpc.metadata."+h, v)
+	}
+
+	return nil
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/httpsec/http.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/httpsec/http.go
new file mode 100644
index 0000000000..f77348cb32
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/httpsec/http.go
@@ -0,0 +1,323 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package httpsec is the HTTP instrumentation API and contract for
+// AppSec. It defines an abstract representation of HTTP handlers, along with
+// helper functions to wrap (aka. instrument) standard net/http handlers.
+// HTTP integrations must use this package to enable AppSec features for HTTP,
+// which listens to this package's operation events.
+package httpsec
+
+import (
+	"context"
+	// Blank import needed to use embed for the default blocked response payloads
+	_ "embed"
+	"encoding/json"
+	"net/http"
+	"reflect"
+	"strings"
+	"sync"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+
+	"github.com/DataDog/appsec-internal-go/netip"
+)
+
+// Abstract HTTP handler operation definition.
+type (
+	// HandlerOperationArgs is the HTTP handler operation arguments.
+	HandlerOperationArgs struct {
+		// Method is the http method verb of the request, address is `server.request.method`
+		Method string
+		// RequestURI corresponds to the address `server.request.uri.raw`
+		RequestURI string
+		// Headers corresponds to the address `server.request.headers.no_cookies`
+		Headers map[string][]string
+		// Cookies corresponds to the address `server.request.cookies`
+		Cookies map[string][]string
+		// Query corresponds to the address `server.request.query`
+		Query map[string][]string
+		// PathParams corresponds to the address `server.request.path_params`
+		PathParams map[string]string
+		// ClientIP corresponds to the address `http.client_ip`
+		ClientIP netip.Addr
+	}
+
+	// HandlerOperationRes is the HTTP handler operation results.
+	HandlerOperationRes struct {
+		// Status corresponds to the address `server.response.status`.
+		Status int
+	}
+
+	// SDKBodyOperationArgs is the SDK body operation arguments.
+	SDKBodyOperationArgs struct {
+		// Body corresponds to the address `server.request.body`.
+		Body interface{}
+	}
+
+	// SDKBodyOperationRes is the SDK body operation results.
+	SDKBodyOperationRes struct{}
+
+	// MonitoringError is used to convey an HTTP error, usually resurfaced through AppSec SDKs.
+	MonitoringError struct {
+		msg string
+	}
+)
+
+// Error implements the error interface
+func (e *MonitoringError) Error() string {
+	return e.msg
+}
+
+// NewMonitoringError creates and returns a new HTTP monitoring error wrapping
+// the given message.
+func NewMonitoringError(msg string) error {
+	return &MonitoringError{
+		msg: msg,
+	}
+}
+
+// MonitorParsedBody starts and finishes the SDK body operation.
+// This function should not be called when AppSec is disabled; checking
+// beforehand lets callers emit more precise error logs.
+func MonitorParsedBody(ctx context.Context, body interface{}) error {
+	parent := fromContext(ctx)
+	if parent == nil {
+		log.Error("appsec: parsed http body monitoring ignored: could not find the http handler instrumentation metadata in the request context: the request handler is not being monitored by a middleware function or the provided context is not the expected request context")
+		return nil
+	}
+
+	return ExecuteSDKBodyOperation(parent, SDKBodyOperationArgs{Body: body})
+}
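+
+// A minimal usage sketch from an instrumented request handler; parsedBody is a
+// placeholder for whatever structure the caller decoded the request body into:
+//
+//	if err := httpsec.MonitorParsedBody(r.Context(), parsedBody); err != nil {
+//		// The WAF asked to block the request: stop processing the body.
+//		return
+//	}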
+
+// ExecuteSDKBodyOperation starts and finishes the SDK Body operation by emitting dyngo start and finish events.
+// An error is returned if the body associated with that operation must be blocked.
+func ExecuteSDKBodyOperation(parent dyngo.Operation, args SDKBodyOperationArgs) error {
+	var err error
+	op := &SDKBodyOperation{Operation: dyngo.NewOperation(parent)}
+	sharedsec.OnErrorData(op, func(e error) {
+		err = e
+	})
+	dyngo.StartOperation(op, args)
+	dyngo.FinishOperation(op, SDKBodyOperationRes{})
+	return err
+}
+
+// WrapHandler wraps the given HTTP handler with the abstract HTTP operation defined by HandlerOperationArgs and
+// HandlerOperationRes.
+// The onBlock params are used to clean up the context when needed.
+// It is a specific patch meant for Gin, for which we must abort the
+// context since it uses a queue of handlers and it's the only way to make
+// sure other queued handlers don't get executed.
+// TODO: this patch must be removed/improved when we rework our actions/operations system
+func WrapHandler(handler http.Handler, span ddtrace.Span, pathParams map[string]string, onBlock ...func()) http.Handler {
+	instrumentation.SetAppSecEnabledTags(span)
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ipTags, clientIP := ClientIPTags(r.Header, true, r.RemoteAddr)
+		instrumentation.SetStringTags(span, ipTags)
+
+		var bypassHandler http.Handler
+		var blocking bool
+		args := MakeHandlerOperationArgs(r, clientIP, pathParams)
+		ctx, op := StartOperation(r.Context(), args, dyngo.NewDataListener(func(a *sharedsec.Action) {
+			bypassHandler = a.HTTP()
+			blocking = a.Blocking()
+		}))
+		r = r.WithContext(ctx)
+
+		defer func() {
+			var status int
+			if mw, ok := w.(interface{ Status() int }); ok {
+				status = mw.Status()
+			}
+
+			events := op.Finish(HandlerOperationRes{Status: status})
+			// Execute the onBlock functions to make sure blocking works properly
+			// in case we are instrumenting the Gin framework
+			if blocking {
+				op.AddTag(instrumentation.BlockedRequestTag, true)
+				for _, f := range onBlock {
+					f()
+				}
+			}
+			if bypassHandler != nil {
+				bypassHandler.ServeHTTP(w, r)
+			}
+			instrumentation.SetTags(span, op.Tags())
+			if len(events) == 0 {
+				return
+			}
+			SetSecurityEventTags(span, events, args.Headers, w.Header())
+		}()
+
+		if bypassHandler != nil {
+			handler = bypassHandler
+			bypassHandler = nil
+		}
+		handler.ServeHTTP(w, r)
+
+	})
+}
+
+// MakeHandlerOperationArgs creates the HandlerOperationArgs from a standard
+// http.Request, the resolved client IP address, and the given path parameters.
+func MakeHandlerOperationArgs(r *http.Request, clientIP netip.Addr, pathParams map[string]string) HandlerOperationArgs {
+	headers := make(http.Header, len(r.Header))
+	for k, v := range r.Header {
+		k := strings.ToLower(k)
+		if k == "cookie" {
+			// Do not include cookies in the request headers
+			continue
+		}
+		headers[k] = v
+	}
+	cookies := makeCookies(r) // TODO(Julio-Guerra): avoid actively parsing the cookies thanks to dynamic instrumentation
+	headers["host"] = []string{r.Host}
+	return HandlerOperationArgs{
+		Method:     r.Method,
+		RequestURI: r.RequestURI,
+		Headers:    headers,
+		Cookies:    cookies,
+		Query:      r.URL.Query(), // TODO(Julio-Guerra): avoid actively parsing the query values thanks to dynamic instrumentation
+		PathParams: pathParams,
+		ClientIP:   clientIP,
+	}
+}
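+
+// Note that the resulting Headers map built above is lower-cased, has the `cookie`
+// header stripped out (cookies are reported separately in Cookies), and carries an
+// extra `host` entry holding r.Host.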
+
+// makeCookies returns the map of parsed cookies, if any, following the
+// specification of the rule address `server.request.cookies`.
+func makeCookies(r *http.Request) map[string][]string {
+	parsed := r.Cookies()
+	if len(parsed) == 0 {
+		return nil
+	}
+	cookies := make(map[string][]string, len(parsed))
+	for _, c := range parsed {
+		cookies[c.Name] = append(cookies[c.Name], c.Value)
+	}
+	return cookies
+}
+
+// TODO(Julio-Guerra): create a go-generate tool to generate the types, vars and methods below
+
+// Operation type representing an HTTP operation. It must be created with
+// StartOperation() and finished with its Finish().
+type (
+	Operation struct {
+		dyngo.Operation
+		instrumentation.TagsHolder
+		instrumentation.SecurityEventsHolder
+		mu sync.RWMutex
+	}
+
+	// SDKBodyOperation type representing an SDK body operation.
+	SDKBodyOperation struct {
+		dyngo.Operation
+	}
+)
+
+// StartOperation starts an HTTP handler operation, along with the given
+// context and arguments and emits a start event up in the operation stack.
+// The operation is linked to the global root operation since an HTTP operation
+// is always expected to be first in the operation stack.
+func StartOperation(ctx context.Context, args HandlerOperationArgs, listeners ...dyngo.DataListener) (context.Context, *Operation) {
+	op := &Operation{
+		Operation:  dyngo.NewOperation(nil),
+		TagsHolder: instrumentation.NewTagsHolder(),
+	}
+	for _, l := range listeners {
+		op.OnData(l)
+	}
+	newCtx := context.WithValue(ctx, instrumentation.ContextKey{}, op)
+	dyngo.StartOperation(op, args)
+	return newCtx, op
+}
+
+// fromContext returns the Operation object stored in the context, if any
+func fromContext(ctx context.Context) *Operation {
+	// Avoid a runtime panic in case of type-assertion error by collecting the 2 return values
+	op, _ := ctx.Value(instrumentation.ContextKey{}).(*Operation)
+	return op
+}
+
+// Finish the HTTP handler operation, along with the given results, and emit a
+// finish event up in the operation stack.
+func (op *Operation) Finish(res HandlerOperationRes) []json.RawMessage {
+	dyngo.FinishOperation(op, res)
+	return op.Events()
+}
+
+// Finish finishes the SDKBody operation and emits a finish event
+func (op *SDKBodyOperation) Finish() {
+	dyngo.FinishOperation(op, SDKBodyOperationRes{})
+}
+
+// HTTP handler operation's start and finish event callback function types.
+type (
+	// OnHandlerOperationStart function type, called when an HTTP handler
+	// operation starts.
+	OnHandlerOperationStart func(*Operation, HandlerOperationArgs)
+	// OnHandlerOperationFinish function type, called when an HTTP handler
+	// operation finishes.
+	OnHandlerOperationFinish func(*Operation, HandlerOperationRes)
+	// OnSDKBodyOperationStart function type, called when an SDK body
+	// operation starts.
+	OnSDKBodyOperationStart func(*SDKBodyOperation, SDKBodyOperationArgs)
+	// OnSDKBodyOperationFinish function type, called when an SDK body
+	// operation finishes.
+	OnSDKBodyOperationFinish func(*SDKBodyOperation, SDKBodyOperationRes)
+)
+
+var (
+	handlerOperationArgsType = reflect.TypeOf((*HandlerOperationArgs)(nil)).Elem()
+	handlerOperationResType  = reflect.TypeOf((*HandlerOperationRes)(nil)).Elem()
+	sdkBodyOperationArgsType = reflect.TypeOf((*SDKBodyOperationArgs)(nil)).Elem()
+	sdkBodyOperationResType  = reflect.TypeOf((*SDKBodyOperationRes)(nil)).Elem()
+)
+
+// ListenedType returns the type a OnHandlerOperationStart event listener
+// listens to, which is the HandlerOperationArgs type.
+func (OnHandlerOperationStart) ListenedType() reflect.Type { return handlerOperationArgsType }
+
+// Call calls the underlying event listener function by performing the
+// type-assertion on v whose type is the one returned by ListenedType().
+func (f OnHandlerOperationStart) Call(op dyngo.Operation, v interface{}) {
+	f(op.(*Operation), v.(HandlerOperationArgs))
+}
+
+// ListenedType returns the type a OnHandlerOperationFinish event listener
+// listens to, which is the HandlerOperationRes type.
+func (OnHandlerOperationFinish) ListenedType() reflect.Type { return handlerOperationResType }
+
+// Call calls the underlying event listener function by performing the
+// type-assertion on v whose type is the one returned by ListenedType().
+func (f OnHandlerOperationFinish) Call(op dyngo.Operation, v interface{}) {
+	f(op.(*Operation), v.(HandlerOperationRes))
+}
+
+// ListenedType returns the type a OnSDKBodyOperationStart event listener
+// listens to, which is the SDKBodyOperationArgs type.
+func (OnSDKBodyOperationStart) ListenedType() reflect.Type { return sdkBodyOperationArgsType }
+
+// Call calls the underlying event listener function by performing the
+// type-assertion on v whose type is the one returned by ListenedType().
+func (f OnSDKBodyOperationStart) Call(op dyngo.Operation, v interface{}) {
+	f(op.(*SDKBodyOperation), v.(SDKBodyOperationArgs))
+}
+
+// ListenedType returns the type a OnSDKBodyOperationFinish event listener
+// listens to, which is the SDKBodyOperationRes type.
+func (OnSDKBodyOperationFinish) ListenedType() reflect.Type { return sdkBodyOperationResType }
+
+// Call calls the underlying event listener function by performing the
+// type-assertion on v whose type is the one returned by ListenedType().
+func (f OnSDKBodyOperationFinish) Call(op dyngo.Operation, v interface{}) {
+	f(op.(*SDKBodyOperation), v.(SDKBodyOperationRes))
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/httpsec/tags.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/httpsec/tags.go
new file mode 100644
index 0000000000..8f2982dea4
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/httpsec/tags.go
@@ -0,0 +1,115 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package httpsec
+
+import (
+	"encoding/json"
+	"os"
+	"sort"
+	"strings"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+
+	"github.com/DataDog/appsec-internal-go/httpsec"
+	"github.com/DataDog/appsec-internal-go/netip"
+)
+
+const (
+	// envClientIPHeader is the name of the env var used to specify the IP header to be used for client IP collection.
+	envClientIPHeader = "DD_TRACE_CLIENT_IP_HEADER"
+)
+
+var (
+	// Default list of IP-related headers leveraged to retrieve the public
+	// client IP address.
+	defaultIPHeaders = []string{
+		"x-forwarded-for",
+		"x-real-ip",
+		"true-client-ip",
+		"x-client-ip",
+		"x-forwarded",
+		"forwarded-for",
+		"x-cluster-client-ip",
+		"fastly-client-ip",
+		"cf-connecting-ip",
+		"cf-connecting-ip6",
+	}
+
+	// Configured list of IP-related headers leveraged to retrieve the public
+	// client IP address. Defined at init-time in the init() function below.
+	monitoredClientIPHeadersCfg []string
+
+	// List of HTTP headers we collect and send.
+	collectedHTTPHeaders = append(defaultIPHeaders,
+		"host",
+		"content-length",
+		"content-type",
+		"content-encoding",
+		"content-language",
+		"forwarded",
+		"via",
+		"user-agent",
+		"accept",
+		"accept-encoding",
+		"accept-language")
+)
+
+func init() {
+	if cfg := os.Getenv(envClientIPHeader); cfg != "" {
+		// Collect this header value too
+		collectedHTTPHeaders = append(collectedHTTPHeaders, cfg)
+		// Set this IP header as the only one to consider for ClientIP()
+		monitoredClientIPHeadersCfg = []string{cfg}
+	} else {
+		monitoredClientIPHeadersCfg = defaultIPHeaders
+	}
+
+	// Ensure the list of headers is sorted for sort.SearchStrings()
+	sort.Strings(collectedHTTPHeaders[:])
+}
+
+// SetSecurityEventTags sets the AppSec-specific span tags into the service entry span when a security event occurred.
+func SetSecurityEventTags(span instrumentation.TagSetter, events []json.RawMessage, headers, respHeaders map[string][]string) {
+	if err := instrumentation.SetEventSpanTags(span, events); err != nil {
+		log.Error("appsec: unexpected error while creating the appsec event tags: %v", err)
+	}
+	for h, v := range NormalizeHTTPHeaders(headers) {
+		span.SetTag("http.request.headers."+h, v)
+	}
+	for h, v := range NormalizeHTTPHeaders(respHeaders) {
+		span.SetTag("http.response.headers."+h, v)
+	}
+}
+
+// NormalizeHTTPHeaders returns the HTTP headers following Datadog's
+// normalization format.
+func NormalizeHTTPHeaders(headers map[string][]string) (normalized map[string]string) {
+	if len(headers) == 0 {
+		return nil
+	}
+	normalized = make(map[string]string)
+	for k, v := range headers {
+		k = strings.ToLower(k)
+		if i := sort.SearchStrings(collectedHTTPHeaders[:], k); i < len(collectedHTTPHeaders) && collectedHTTPHeaders[i] == k {
+			normalized[k] = strings.Join(v, ",")
+		}
+	}
+	if len(normalized) == 0 {
+		return nil
+	}
+	return normalized
+}
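+
+// For example, {"User-Agent": ["curl/8.0"], "X-Internal": ["1"]} normalizes to
+// {"user-agent": "curl/8.0"}: header names are lower-cased, multiple values are
+// joined with commas, and headers outside collectedHTTPHeaders are dropped.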
+
+// ClientIPTags returns the resulting Datadog span tags `http.client_ip`
+// containing the client IP and `network.client.ip` containing the remote IP.
+// The tags are present only if a valid IP address has been returned by
+// ClientIP().
+func ClientIPTags(headers map[string][]string, hasCanonicalHeaders bool, remoteAddr string) (tags map[string]string, clientIP netip.Addr) {
+	remoteIP, clientIP := httpsec.ClientIP(headers, hasCanonicalHeaders, remoteAddr, monitoredClientIPHeadersCfg)
+	tags = httpsec.ClientIPTags(remoteIP, clientIP)
+	return tags, clientIP
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec/actions.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec/actions.go
new file mode 100644
index 0000000000..09d84828ea
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec/actions.go
@@ -0,0 +1,135 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+package sharedsec
+
+import (
+	_ "embed" // Blank import
+	"errors"
+	"net/http"
+	"os"
+	"strings"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// blockedTemplateJSON is the default JSON template used to write responses for blocked requests
+//
+//go:embed blocked-template.json
+var blockedTemplateJSON []byte
+
+// blockedTemplateHTML is the default HTML template used to write responses for blocked requests
+//
+//go:embed blocked-template.html
+var blockedTemplateHTML []byte
+
+const (
+	envBlockedTemplateHTML = "DD_APPSEC_HTTP_BLOCKED_TEMPLATE_HTML"
+	envBlockedTemplateJSON = "DD_APPSEC_HTTP_BLOCKED_TEMPLATE_JSON"
+)
+
+func init() {
+	for env, template := range map[string]*[]byte{envBlockedTemplateJSON: &blockedTemplateJSON, envBlockedTemplateHTML: &blockedTemplateHTML} {
+		if path, ok := os.LookupEnv(env); ok {
+			if t, err := os.ReadFile(path); err != nil {
+				log.Error("Could not read template at %s: %v", path, err)
+			} else {
+				*template = t
+			}
+		}
+
+	}
+}
+
+type (
+	// Action represents a WAF action.
+	// It holds the HTTP and gRPC handlers to be used instead of the regular
+	// request handler when said action is executed.
+	Action struct {
+		http     http.Handler
+		grpc     GRPCWrapper
+		blocking bool
+	}
+
+	// GRPCWrapper is an opaque prototype abstraction for a gRPC handler (to avoid importing grpc)
+	// that takes metadata as input and returns a status code and an error
+	// TODO: rely on strongly typed actions (with the actual grpc types) by introducing WAF constructors
+	//     living in the contrib packages, along with their dependencies - something like `appsec.RegisterWAFConstructor(newGRPCWAF)`
+	//    Such constructors would receive the full appsec config and rules, so that they would be able to build
+	//    specific blocking actions.
+	GRPCWrapper func(map[string][]string) (uint32, error)
+)
+
+// Blocking returns true if the action object represents a request blocking action
+func (a *Action) Blocking() bool {
+	return a.blocking
+}
+
+// NewBlockHandler creates, initializes and returns a new http.Handler that writes
+// the blocked response using the given status code and template format.
+func NewBlockHandler(status int, template string) http.Handler {
+	htmlHandler := newBlockRequestHandler(status, "text/html", blockedTemplateHTML)
+	jsonHandler := newBlockRequestHandler(status, "application/json", blockedTemplateJSON)
+	switch template {
+	case "json":
+		return jsonHandler
+	case "html":
+		return htmlHandler
+	default:
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			h := jsonHandler
+			hdr := r.Header.Get("Accept")
+			htmlIdx := strings.Index(hdr, "text/html")
+			jsonIdx := strings.Index(hdr, "application/json")
+			// Switch to html handler if text/html comes before application/json in the Accept header
+			if htmlIdx != -1 && (jsonIdx == -1 || htmlIdx < jsonIdx) {
+				h = htmlHandler
+			}
+			h.ServeHTTP(w, r)
+		})
+	}
+}
+
+func newBlockRequestHandler(status int, ct string, payload []byte) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", ct)
+		w.WriteHeader(status)
+		w.Write(payload)
+	})
+}
+
+func newGRPCBlockHandler(status int) GRPCWrapper {
+	return func(_ map[string][]string) (uint32, error) {
+		return uint32(status), errors.New("Request blocked")
+	}
+}
+
+// NewBlockRequestAction creates an action for the "block" action type
+func NewBlockRequestAction(httpStatus, grpcStatus int, template string) *Action {
+	return &Action{
+		http:     NewBlockHandler(httpStatus, template),
+		grpc:     newGRPCBlockHandler(grpcStatus),
+		blocking: true,
+	}
+}
+
+// NewRedirectRequestAction creates an action for the "redirect" action type
+func NewRedirectRequestAction(status int, loc string) *Action {
+	return &Action{
+		http: http.RedirectHandler(loc, status),
+		// gRPC is not handled by our SRB RFCs so far
+		// Use the default block handler for now
+		grpc: newGRPCBlockHandler(10),
+	}
+}
+
+// HTTP returns the HTTP handler linked to the action object
+func (a *Action) HTTP() http.Handler {
+	return a.http
+}
+
+// GRPC returns the gRPC handler linked to the action object
+func (a *Action) GRPC() GRPCWrapper {
+	return a.grpc
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec/blocked-template.html b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec/blocked-template.html
new file mode 100644
index 0000000000..b43edd96dd
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec/blocked-template.html
@@ -0,0 +1 @@
+<!DOCTYPE html><html lang="en"><head><meta charset="UTF-8"><meta name="viewport" content="width=device-width,initial-scale=1"><title>You've been blocked</title><style>a,body,div,html,span{margin:0;padding:0;border:0;font-size:100%;font:inherit;vertical-align:baseline}body{background:-webkit-radial-gradient(26% 19%,circle,#fff,#f4f7f9);background:radial-gradient(circle at 26% 19%,#fff,#f4f7f9);display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-ms-flex-line-pack:center;align-content:center;width:100%;min-height:100vh;line-height:1;flex-direction:column}p{display:block}main{text-align:center;flex:1;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-ms-flex-line-pack:center;align-content:center;flex-direction:column}p{font-size:18px;line-height:normal;color:#646464;font-family:sans-serif;font-weight:400}a{color:#4842b7}footer{width:100%;text-align:center}footer p{font-size:16px}</style></head><body><main><p>Sorry, you cannot access this page. Please contact the customer service team.</p></main><footer><p>Security provided by <a href="https://www.datadoghq.com/product/security-platform/application-security-monitoring/" target="_blank">Datadog</a></p></footer></body></html>
\ No newline at end of file
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec/blocked-template.json b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec/blocked-template.json
new file mode 100644
index 0000000000..885d766c18
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec/blocked-template.json
@@ -0,0 +1 @@
+{"errors":[{"title":"You've been blocked","detail":"Sorry, you cannot access this page. Please contact the customer service team. Security provided by Datadog."}]}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec/shared.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec/shared.go
new file mode 100644
index 0000000000..0a0966a130
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec/shared.go
@@ -0,0 +1,80 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package sharedsec
+
+import (
+	"context"
+	"reflect"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+type (
+	// UserIDOperation type representing a call to appsec.SetUser(). It gets both created and destroyed in a single
+	// call to ExecuteUserIDOperation
+	UserIDOperation struct {
+		dyngo.Operation
+	}
+	// UserIDOperationArgs is the user ID operation arguments.
+	UserIDOperationArgs struct {
+		UserID string
+	}
+	// UserIDOperationRes is the user ID operation results.
+	UserIDOperationRes struct{}
+
+	// OnUserIDOperationStart function type, called when a user ID
+	// operation starts.
+	OnUserIDOperationStart func(operation *UserIDOperation, args UserIDOperationArgs)
+)
+
+var userIDOperationArgsType = reflect.TypeOf((*UserIDOperationArgs)(nil)).Elem()
+
+// ExecuteUserIDOperation starts and finishes the UserID operation by emitting dyngo start and finish events.
+// An error is returned if the user associated with that operation must be blocked.
+func ExecuteUserIDOperation(parent dyngo.Operation, args UserIDOperationArgs) error {
+	var err error
+	op := &UserIDOperation{Operation: dyngo.NewOperation(parent)}
+	OnErrorData(op, func(e error) {
+		err = e
+	})
+	dyngo.StartOperation(op, args)
+	dyngo.FinishOperation(op, UserIDOperationRes{})
+	return err
+}
+
+// ListenedType returns the type an OnUserIDOperationStart event listener
+// listens to, which is the UserIDOperationArgs type.
+func (OnUserIDOperationStart) ListenedType() reflect.Type { return userIDOperationArgsType }
+
+// Call the underlying event listener function by performing the type-assertion
+// on v whose type is the one returned by ListenedType().
+func (f OnUserIDOperationStart) Call(op dyngo.Operation, v interface{}) {
+	f(op.(*UserIDOperation), v.(UserIDOperationArgs))
+}
+
+// MonitorUser starts and finishes a UserID operation.
+// A call to the WAF is made to check the user ID and an error is returned if the
+// user should be blocked. The return value is nil otherwise.
+func MonitorUser(ctx context.Context, userID string) error {
+	if parent, ok := ctx.Value(instrumentation.ContextKey{}).(dyngo.Operation); ok {
+		return ExecuteUserIDOperation(parent, UserIDOperationArgs{UserID: userID})
+	}
+	log.Error("appsec: user ID monitoring ignored: could not find the http handler instrumentation metadata in the request context: the request handler is not being monitored by a middleware function or the provided context is not the expected request context")
+	return nil
+
+}
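+
+// A minimal usage sketch, assuming ctx is a request context produced by an
+// AppSec-instrumented handler and userID identifies the authenticated user:
+//
+//	if err := sharedsec.MonitorUser(ctx, userID); err != nil {
+//		// The user is blocked: abort the request handling.
+//		return err
+//	}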
+
+// OnData is a facilitator that wraps a dyngo.Operation.OnData() call
+func OnData[T any](op dyngo.Operation, f func(T)) {
+	op.OnData(dyngo.NewDataListener(f))
+}
+
+// OnErrorData is a facilitator that wraps a dyngo.Operation.OnData() call with an error type constraint
+func OnErrorData[T error](op dyngo.Operation, f func(T)) {
+	op.OnData(dyngo.NewDataListener(f))
+}
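To see how the pieces above fit together: a parent operation (in practice the HTTP handler operation created by the middleware) registers an OnUserIDOperationStart event listener; ExecuteUserIDOperation then creates a child UserIDOperation, and if that listener emits an error through EmitData, the OnErrorData callback picks it up and the error is returned to the caller (and ultimately to the SDK via MonitorUser). A minimal, illustrative sketch of that wiring follows; it is not part of the vendored code, it assumes it runs inside the dd-trace-go module (these packages are internal), and the "blocked-user" value is made up:

	package main

	import (
		"errors"
		"fmt"

		"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
		"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec"
	)

	func main() {
		parent := dyngo.NewRootOperation()
		// WAF-style listener: observe UserIDOperation start events bubbling up the
		// operation stack and emit an error when the user must be blocked.
		parent.On(sharedsec.OnUserIDOperationStart(func(op *sharedsec.UserIDOperation, args sharedsec.UserIDOperationArgs) {
			if args.UserID == "blocked-user" {
				op.EmitData(errors.New("user blocked"))
			}
		}))
		// The emitted error is captured by the OnErrorData listener registered in
		// ExecuteUserIDOperation and returned to the caller.
		err := sharedsec.ExecuteUserIDOperation(parent, sharedsec.UserIDOperationArgs{UserID: "blocked-user"})
		fmt.Println(err) // user blocked
	}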
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/operation.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/operation.go
new file mode 100644
index 0000000000..d45492d22f
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/operation.go
@@ -0,0 +1,353 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package dyngo is the Go implementation of Datadog's Instrumentation Gateway
+// which provides an event-based instrumentation API based on a stack
+// representation of instrumented functions along with nested event listeners.
+// It makes it possible to correlate both past and future function calls in
+// order to react to and monitor specific function call scenarios, while
+// keeping the monitoring state local to the monitoring logic thanks to nested
+// Go function closures.
+// dyngo is not intended to be used directly and should instead be wrapped
+// behind statically and strongly typed wrapper types. Indeed, dyngo is a
+// generic implementation relying on empty interface values (values of type
+// `interface{}`), and using it directly can be error-prone due to the lack of
+// compile-time type checking. For example, AppSec provides the package
+// `httpsec`, built on top of dyngo, as its HTTP instrumentation API, which
+// defines the abstract HTTP operation representation expected by the AppSec
+// monitoring.
+package dyngo
+
+import (
+	"reflect"
+	"sync"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+
+	"go.uber.org/atomic"
+)
+
+// Operation is the interface type allowing event listeners to be registered
+// on an operation. The event listeners are automatically removed from the
+// operation once it finishes, so that they can no longer be called on
+// finished operations.
+type Operation interface {
+	// On registers an event listener on the operation. The event listener
+	// will be removed from the operation once it finishes.
+	On(EventListener)
+
+	// OnData registers a data listener on the operation.
+	OnData(DataListener)
+
+	// EmitData sends data to the data listeners of the operation
+	EmitData(any)
+
+	// Parent returns the parent operation. It returns nil for the root
+	// operation.
+	Parent() Operation
+
+	// emitEvent emits the event to listeners of the given argsType and calls
+	// them with the given op and v values.
+	// emitEvent is a private method implemented by the operation struct type so
+	// that no other package can define it.
+	emitEvent(argsType reflect.Type, op Operation, v interface{})
+
+	emitData(argsType reflect.Type, v any)
+
+	// add the given event listeners to the operation.
+	// add is a private method implemented by the operation struct type so
+	// that no other package can define it.
+	add(...EventListener)
+
+	// finish the operation. This method takes the operation value to use when
+	// emitting the finish event.
+	// finish is a private method implemented by the operation struct type so
+	// that no other package can define it.
+	finish(op Operation, results interface{})
+}
+
+// EventListener is the interface used to identify the Go type listened to
+// and to dispatch calls to the underlying event listener function.
+type EventListener interface {
+	// ListenedType returns the Go type the event listener listens to.
+	ListenedType() reflect.Type
+	// Call the underlying event listener function. The type of the value v
+	// is the type the event listener listens to, according to the type
+	// returned by ListenedType().
+	Call(op Operation, v interface{})
+}
+
+// Atomic *Operation so we can atomically read or swap it.
+var rootOperation atomic.Pointer[Operation]
+
+// SwapRootOperation atomically swaps the current root operation with the
+// given new one. Concurrent uses of the old root operation on already
+// existing and running operations remain valid.
+func SwapRootOperation(new Operation) {
+	rootOperation.Swap(&new)
+	// Note: calling FinishOperation(old) could result in memory leaks because
+	// some finish event listeners, possibly releasing memory and resources,
+	// wouldn't be called anymore (because finish() disables the operation and
+	// removes the event listeners).
+}
+
+// operation is the structure used to subscribe to operation events and to
+// navigate the operation stack. Events bubble up the operation stack, which
+// allows listening for future events that might happen during the operation's
+// lifetime.
+type operation struct {
+	parent Operation
+	eventRegister
+	dataBroadcaster
+
+	disabled bool
+	mu       sync.RWMutex
+}
+
+// NewRootOperation creates and returns a new root operation, with no parent
+// operation. Root operations are meant to be the top-level operation of an
+// operation stack, therefore receiving all the operation events. It allows a
+// new set of event listeners to be prepared and then atomically swapped with
+// the current one.
+func NewRootOperation() Operation {
+	return newOperation(nil)
+}
+
+// NewOperation creates and returns a new operation. It must be started by calling
+// StartOperation, and finished by calling FinishOperation. The returned
+// operation should be used in wrapper types to provide statically typed start
+// and finish functions. The following example shows how to wrap an operation
+// so that its functions are statically typed (instead of dyngo's interface{}
+// values):
+//
+//	package mypackage
+//	import "dyngo"
+//	type (
+//	  MyOperation struct {
+//	    dyngo.Operation
+//	  }
+//	  MyOperationArgs { /* ... */ }
+//	  MyOperationRes { /* ... */ }
+//	)
+//	func StartOperation(args MyOperationArgs, parent dyngo.Operation) MyOperation {
+//	  op := MyOperation{Operation: dyngo.NewOperation(parent)}
+//	  dyngo.StartOperation(op, args)
+//	  return op
+//	}
+//	func (op MyOperation) Finish(res MyOperationRes) {
+//	    dyngo.FinishOperation(op, res)
+//	  }
+func NewOperation(parent Operation) Operation {
+	if parent == nil {
+		if root := rootOperation.Load(); root != nil {
+			parent = *root
+		}
+	}
+	return newOperation(parent)
+}
+
+// StartOperation starts a new operation along with its arguments and emits a
+// start event with the operation arguments.
+func StartOperation(op Operation, args interface{}) {
+	argsType := reflect.TypeOf(args)
+	// Bubble-up the start event starting from the parent operation as you can't
+	// listen for your own start event
+	for current := op.Parent(); current != nil; current = current.Parent() {
+		current.emitEvent(argsType, op, args)
+	}
+}
+
+func newOperation(parent Operation) *operation {
+	return &operation{parent: parent}
+}
+
+// Parent returns the parent operation. It returns nil for the root operation.
+func (o *operation) Parent() Operation {
+	return o.parent
+}
+
+// FinishOperation finishes the operation along with its results and emits a
+// finish event with the operation results.
+// The operation is then disabled and its event listeners removed.
+func FinishOperation(op Operation, results interface{}) {
+	op.finish(op, results)
+}
+
+func (o *operation) finish(op Operation, results interface{}) {
+	// Defer the call to o.disable() first so that the RWMutex gets unlocked first
+	defer o.disable()
+	o.mu.RLock()
+	defer o.mu.RUnlock() // Deferred and stacked on top of the previously deferred call to o.disable()
+	if o.disabled {
+		return
+	}
+	resType := reflect.TypeOf(results)
+	for current := op; current != nil; current = current.Parent() {
+		current.emitEvent(resType, op, results)
+	}
+}
+
+// Disable the operation and remove all its event listeners.
+func (o *operation) disable() {
+	o.mu.Lock()
+	defer o.mu.Unlock()
+	if o.disabled {
+		return
+	}
+	o.disabled = true
+	o.eventRegister.clear()
+}
+
+// Add the given event listeners to the operation.
+func (o *operation) add(l ...EventListener) {
+	o.mu.RLock()
+	defer o.mu.RUnlock()
+	if o.disabled {
+		return
+	}
+	for _, l := range l {
+		if l == nil {
+			continue
+		}
+		key := l.ListenedType()
+		o.eventRegister.add(key, l)
+	}
+}
+
+// On registers the event listener. The difference with Register() is that it
+// doesn't return a function closure, which avoids unnecessary allocations.
+// For example:
+//
+//	op.On(MyOperationStart(func (op MyOperation, args MyOperationArgs) {
+//	    // ...
+//	}))
+func (o *operation) On(l EventListener) {
+	o.mu.RLock()
+	defer o.mu.RUnlock()
+	if o.disabled {
+		return
+	}
+	o.eventRegister.add(l.ListenedType(), l)
+}
+
+func (o *operation) OnData(l DataListener) {
+	o.mu.RLock()
+	defer o.mu.RUnlock()
+	if o.disabled {
+		return
+	}
+	o.dataBroadcaster.add(l.ListenedType(), l)
+}
+
+func (o *operation) EmitData(data any) {
+	o.mu.RLock()
+	defer o.mu.RUnlock()
+	if o.disabled {
+		return
+	}
+	// Bubble up the data through the stack of operations. Unlike events, we
+	// also send the data to ourselves since SDK operations are leaf operations
+	// that both emit and listen for data (errors).
+	for current := Operation(o); current != nil; current = current.Parent() {
+		current.emitData(reflect.TypeOf(data), data)
+	}
+}
+
+type (
+	// eventRegister implements a thread-safe list of event listeners.
+	eventRegister struct {
+		mu        sync.RWMutex
+		listeners eventListenerMap
+	}
+
+	// eventListenerMap is the map of event listeners. The lists of listeners
+	// are indexed by the operation argument or result type the event listeners
+	// expect.
+	eventListenerMap map[reflect.Type][]EventListener
+
+	dataBroadcaster struct {
+		mu        sync.RWMutex
+		listeners dataListenerMap
+	}
+
+	dataListenerSpec[T any] func(data T)
+	DataListener            EventListener
+	dataListenerMap         map[reflect.Type][]DataListener
+)
+
+func (l dataListenerSpec[T]) Call(_ Operation, v interface{}) {
+	l(v.(T))
+}
+
+func (l dataListenerSpec[T]) ListenedType() reflect.Type {
+	return reflect.TypeOf((*T)(nil)).Elem()
+}
+
+// NewDataListener creates a specialized generic data listener, wrapped under a DataListener interface
+func NewDataListener[T any](f func(data T)) DataListener {
+	return dataListenerSpec[T](f)
+}
+
+func (b *dataBroadcaster) add(key reflect.Type, l DataListener) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if b.listeners == nil {
+		b.listeners = make(dataListenerMap)
+	}
+	b.listeners[key] = append(b.listeners[key], l)
+
+}
+
+func (b *dataBroadcaster) clear() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.listeners = nil
+}
+
+func (b *dataBroadcaster) emitData(key reflect.Type, v any) {
+	defer func() {
+		if r := recover(); r != nil {
+			log.Error("appsec: recovered from an unexpected panic from an event listener: %+v", r)
+		}
+	}()
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	for t := range b.listeners {
+		if key == t || key.Implements(t) {
+			for _, listener := range b.listeners[t] {
+				listener.Call(nil, v)
+			}
+		}
+	}
+}
+
+func (r *eventRegister) add(key reflect.Type, l EventListener) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.listeners == nil {
+		r.listeners = make(eventListenerMap)
+	}
+	r.listeners[key] = append(r.listeners[key], l)
+}
+
+func (r *eventRegister) clear() {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.listeners = nil
+}
+
+func (r *eventRegister) emitEvent(key reflect.Type, op Operation, v interface{}) {
+	defer func() {
+		if r := recover(); r != nil {
+			log.Error("appsec: recovered from an unexpected panic from an event listener: %+v", r)
+		}
+	}()
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	for _, listener := range r.listeners[key] {
+		listener.Call(op, v)
+	}
+}
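The data-listener path is the easiest way to see the bubbling behaviour above end to end, since it needs no custom EventListener wrapper. Below is a hedged sketch, written as if from inside the dyngo package; auditEvent and exampleDataBubbling are made-up names used only for illustration:

	// auditEvent is a hypothetical payload type used only for this illustration.
	type auditEvent struct{ Msg string }

	func exampleDataBubbling() {
		root := NewRootOperation()
		// Data listeners receive every value sent with EmitData by this operation
		// or by any descendant operation.
		root.OnData(NewDataListener(func(e auditEvent) {
			println("audit:", e.Msg)
		}))

		child := NewOperation(root)
		StartOperation(child, struct{}{})     // no event listener registered for struct{}; nothing to call
		child.EmitData(auditEvent{Msg: "hi"}) // bubbles up to root's data listener
		FinishOperation(child, struct{}{})    // disables child and clears its event listeners
	}

Note that EmitData, unlike start and finish events, is also delivered to the emitting operation itself, which is what lets ExecuteUserIDOperation in sharedsec collect the error it returns.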
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/limiter.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/limiter.go
new file mode 100644
index 0000000000..c022ecfc63
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/limiter.go
@@ -0,0 +1,143 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+//go:build appsec
+// +build appsec
+
+package appsec
+
+import (
+	"sync/atomic"
+	"time"
+)
+
+// Limiter is used to abstract the rate limiter implementation to only expose the needed function for rate limiting.
+// This is for example useful for testing, allowing us to use a modified rate limiter tuned for testing through the same
+// interface.
+type Limiter interface {
+	Allow() bool
+}
+
+// TokenTicker is a thread-safe and lock-free rate limiter based on a token bucket.
+// The idea is to have a goroutine that updates the bucket with fresh tokens at regular intervals using a time.Ticker.
+// The advantage of using a goroutine here is that the implementation becomes easily thread-safe using a few
+// atomic operations with little overhead overall. TokenTicker.Start() *should* be called before the first call to
+// TokenTicker.Allow() and TokenTicker.Stop() *must* be called once the limiter is no longer needed. Note that calling
+// TokenTicker.Allow() before TokenTicker.Start() is valid, but the bucket won't be refilled until TokenTicker.Start() is called.
+type TokenTicker struct {
+	tokens    int64
+	maxTokens int64
+	ticker    *time.Ticker
+	stopChan  chan struct{}
+}
+
+// NewTokenTicker is a utility function that allocates a token ticker, initializes necessary fields and returns it
+func NewTokenTicker(tokens, maxTokens int64) *TokenTicker {
+	return &TokenTicker{
+		tokens:    tokens,
+		maxTokens: maxTokens,
+	}
+}
+
+// updateBucket performs a select loop to update the token amount in the bucket.
+// Used in a goroutine by the rate limiter.
+func (t *TokenTicker) updateBucket(ticksChan <-chan time.Time, startTime time.Time, syncChan chan struct{}) {
+	nsPerToken := time.Second.Nanoseconds() / t.maxTokens
+	elapsedNs := int64(0)
+	prevStamp := startTime
+
+	for {
+		select {
+		case <-t.stopChan:
+			if syncChan != nil {
+				close(syncChan)
+			}
+			return
+		case stamp := <-ticksChan:
+			// Compute the time in nanoseconds that passed between the previous timestamp and this one
+			// This will be used to know how many tokens can be added into the bucket depending on the limiter rate
+			elapsedNs += stamp.Sub(prevStamp).Nanoseconds()
+			if elapsedNs > t.maxTokens*nsPerToken {
+				elapsedNs = t.maxTokens * nsPerToken
+			}
+			prevStamp = stamp
+			// Update the number of tokens in the bucket if enough nanoseconds have passed
+			if elapsedNs >= nsPerToken {
+				// Atomic spin lock to make sure we don't race for `t.tokens`
+				for {
+					tokens := atomic.LoadInt64(&t.tokens)
+					if tokens == t.maxTokens {
+						break // Bucket is already full, nothing to do
+					}
+					inc := elapsedNs / nsPerToken
+					// Make sure not to add more tokens than we are allowed to into the bucket
+					if tokens+inc > t.maxTokens {
+						inc -= (tokens + inc) % t.maxTokens
+					}
+					if atomic.CompareAndSwapInt64(&t.tokens, tokens, tokens+inc) {
+						// Keep track of remaining elapsed ns that were not taken into account for this computation,
+						// so that increment computation remains precise over time
+						elapsedNs = elapsedNs % nsPerToken
+						break
+					}
+				}
+			}
+			// Sync channel used to signify that the goroutine is done updating the bucket. Used for tests to guarantee
+			// that the goroutine ticked at least once.
+			if syncChan != nil {
+				syncChan <- struct{}{}
+			}
+		}
+	}
+}
+
+// Start starts the ticker and launches the goroutine responsible for updating the token bucket.
+// The ticker is set to tick at a fixed rate of 500us.
+func (t *TokenTicker) Start() {
+	timeNow := time.Now()
+	t.ticker = time.NewTicker(500 * time.Microsecond)
+	t.start(t.ticker.C, timeNow, false)
+}
+
+// start is used for internal testing. Controlling the ticker means being able to test per-tick
+// rather than per-duration, which is more reliable if the app is under a lot of stress.
+// sync is used to decide whether the limiter should create a channel for synchronization with the testing app after a
+// bucket update. The limiter is in charge of closing the channel in this case.
+func (t *TokenTicker) start(ticksChan <-chan time.Time, startTime time.Time, sync bool) <-chan struct{} {
+	t.stopChan = make(chan struct{})
+	var syncChan chan struct{}
+
+	if sync {
+		syncChan = make(chan struct{})
+	}
+	go t.updateBucket(ticksChan, startTime, syncChan)
+	return syncChan
+}
+
+// Stop shuts down the rate limiter, taking care of stopping the ticker and closing all channels.
+func (t *TokenTicker) Stop() {
+	// Stop the ticker only if it has been instantiated (not the case when testing by calling start() directly)
+	if t.ticker != nil {
+		t.ticker.Stop()
+	}
+	// Close the stop channel only if it has been created. This covers the case where Stop() is called without any prior
+	// call to Start()
+	if t.stopChan != nil {
+		close(t.stopChan)
+	}
+}
+
+// Allow checks and returns whether a token can be retrieved from the bucket and consumed.
+// Thread-safe.
+func (t *TokenTicker) Allow() bool {
+	for {
+		tokens := atomic.LoadInt64(&t.tokens)
+		if tokens == 0 {
+			return false
+		} else if atomic.CompareAndSwapInt64(&t.tokens, tokens, tokens-1) {
+			return true
+		}
+	}
+}
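For reference, the limiter above is used like any token bucket. A hedged usage sketch, written as if from inside the appsec package (the 100/100 figures are arbitrary):

	// Allow roughly 100 events per second: start with a full bucket of 100 tokens,
	// capped at 100, refilled by the 500µs ticker started by Start().
	limiter := NewTokenTicker(100, 100)
	limiter.Start()
	defer limiter.Stop()

	if limiter.Allow() {
		// a token was consumed atomically; perform the rate-limited work
	} else {
		// bucket empty; skip or drop the work
	}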
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/remoteconfig.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/remoteconfig.go
new file mode 100644
index 0000000000..23a116299f
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/remoteconfig.go
@@ -0,0 +1,381 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+//go:build appsec
+// +build appsec
+
+package appsec
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig"
+
+	rc "github.com/DataDog/datadog-agent/pkg/remoteconfig/state"
+)
+
+func genApplyStatus(ack bool, err error) rc.ApplyStatus {
+	status := rc.ApplyStatus{
+		State: rc.ApplyStateUnacknowledged,
+	}
+	if err != nil {
+		status.State = rc.ApplyStateError
+		status.Error = err.Error()
+	} else if ack {
+		status.State = rc.ApplyStateAcknowledged
+	}
+
+	return status
+}
+
+func statusesFromUpdate(u remoteconfig.ProductUpdate, ack bool, err error) map[string]rc.ApplyStatus {
+	statuses := make(map[string]rc.ApplyStatus, len(u))
+	for path := range u {
+		statuses[path] = genApplyStatus(ack, err)
+	}
+	return statuses
+}
+
+func mergeMaps[K comparable, V any](m1 map[K]V, m2 map[K]V) map[K]V {
+	for key, value := range m2 {
+		m1[key] = value
+	}
+	return m1
+}
+
+// combineRCRulesUpdates updates the state of the given rulesManager with the combination of all the provided rules updates
+func combineRCRulesUpdates(r *rulesManager, updates map[string]remoteconfig.ProductUpdate) (map[string]rc.ApplyStatus, error) {
+	statuses := map[string]rc.ApplyStatus{}
+	// Set the default statuses for all updates to unacknowledged
+	for _, u := range updates {
+		statuses = mergeMaps(statuses, statusesFromUpdate(u, false, nil))
+	}
+	var err error
+updateLoop:
+	// Process rules related updates
+	for p, u := range updates {
+		if u != nil && len(u) == 0 {
+			continue
+		}
+		switch p {
+		case rc.ProductASMData:
+			// Merge all rules data entries together and store them as a rulesManager edit entry
+			rulesData, status := mergeRulesData(u)
+			statuses = mergeMaps(statuses, status)
+			r.addEdit("asmdata", rulesFragment{RulesData: rulesData})
+		case rc.ProductASMDD:
+			// Switch the base rules of the rulesManager if the config received through ASM_DD is valid
+			// If the config was removed, switch back to the static recommended rules
+			if len(u) > 1 { // Don't process configs if more than one is received for ASM_DD
+				log.Debug("appsec: Remote config: more than one config received for ASM_DD. Updates won't be applied")
+				err = errors.New("More than one config received for ASM_DD")
+				statuses = mergeMaps(statuses, statusesFromUpdate(u, true, err))
+				break updateLoop
+			}
+			for path, data := range u {
+				if data == nil {
+					log.Debug("appsec: Remote config: ASM_DD config removed. Switching back to default rules")
+					r.changeBase(defaultRulesFragment(), "")
+					break
+				}
+				var newBase rulesFragment
+				if err = json.Unmarshal(data, &newBase); err != nil {
+					log.Debug("appsec: Remote config: could not unmarshall ASM_DD rules: %v", err)
+					statuses[path] = genApplyStatus(true, err)
+					break updateLoop
+				}
+				log.Debug("appsec: Remote config: switching to %s as the base rules file", path)
+				r.changeBase(newBase, path)
+			}
+		case rc.ProductASM:
+			// Store each config received through ASM as an edit entry in the rulesManager
+			// Those entries will get merged together when the final rules are compiled
+			// If a config gets removed, the rulesManager edit entry gets removed as well
+			for path, data := range u {
+				log.Debug("appsec: Remote config: processing the %s ASM config", path)
+				if data == nil {
+					log.Debug("appsec: Remote config: ASM config %s was removed", path)
+					r.removeEdit(path)
+					continue
+				}
+				var f rulesFragment
+				if err = json.Unmarshal(data, &f); err != nil {
+					log.Debug("appsec: Remote config: error processing ASM config %s: %v", path, err)
+					statuses[path] = genApplyStatus(true, err)
+					break updateLoop
+				}
+				r.addEdit(path, f)
+			}
+		default:
+			log.Debug("appsec: Remote config: ignoring unsubscribed product %s", p)
+		}
+	}
+
+	// Set all statuses to ack if no error occurred
+	if err == nil {
+		for _, u := range updates {
+			statuses = mergeMaps(statuses, statusesFromUpdate(u, true, nil))
+		}
+	}
+
+	return statuses, err
+
+}
+
+// onRemoteActivation is the RC callback called when an update is received for ASM_FEATURES
+func (a *appsec) onRemoteActivation(updates map[string]remoteconfig.ProductUpdate) map[string]rc.ApplyStatus {
+	statuses := map[string]rc.ApplyStatus{}
+	if u, ok := updates[rc.ProductASMFeatures]; ok {
+		statuses = a.handleASMFeatures(u)
+	}
+	return statuses
+
+}
+
+// onRCRulesUpdate is the RC callback called when security rules related RC updates are available
+func (a *appsec) onRCRulesUpdate(updates map[string]remoteconfig.ProductUpdate) map[string]rc.ApplyStatus {
+	// If appsec was deactivated through RC, stop here
+	if !a.started {
+		return map[string]rc.ApplyStatus{}
+	}
+
+	// Create a new local rulesManager
+	r := a.cfg.rulesManager.clone()
+	statuses, err := combineRCRulesUpdates(r, updates)
+	if err != nil {
+		log.Debug("appsec: Remote config: not applying any updates because of error: %v", err)
+		return statuses
+	}
+
+	// Compile the final rules once all updates have been processed and no error occurred
+	r.compile()
+	log.Debug("appsec: Remote config: final compiled rules: %s", r)
+
+	// If an error occurs while updating the WAF handle, don't swap the rulesManager and propagate the error
+	// to all config statuses since we can't know which config is the faulty one
+	if err = a.swapWAF(r.latest); err != nil {
+		log.Error("appsec: Remote config: could not apply the new security rules: %v", err)
+		for k := range statuses {
+			statuses[k] = genApplyStatus(true, err)
+		}
+	} else {
+		// Replace the rulesManager with the new one holding the new state
+		a.cfg.rulesManager = r
+	}
+	return statuses
+}
+
+// handleASMFeatures deserializes an ASM_FEATURES configuration received through remote config
+// and starts/stops appsec accordingly.
+func (a *appsec) handleASMFeatures(u remoteconfig.ProductUpdate) map[string]rc.ApplyStatus {
+	statuses := statusesFromUpdate(u, false, nil)
+	if l := len(u); l > 1 {
+		log.Error("appsec: Remote config: %d configs received for ASM_FEATURES. Expected one at most, returning early", l)
+		return statuses
+	}
+	for path, raw := range u {
+		var data rc.ASMFeaturesData
+		status := rc.ApplyStatus{State: rc.ApplyStateAcknowledged}
+		var err error
+		log.Debug("appsec: Remote config: processing %s", path)
+
+		// A nil config means ASM was disabled, and we stopped receiving the config file
+		// Don't ack the config in this case and return early
+		if raw == nil {
+			log.Debug("appsec: Remote config: Stopping AppSec")
+			a.stop()
+			return statuses
+		}
+		if err = json.Unmarshal(raw, &data); err != nil {
+			log.Error("appsec: Remote config: error while unmarshalling %s: %v. Configuration won't be applied.", path, err)
+		} else if data.ASM.Enabled && !a.started {
+			log.Debug("appsec: Remote config: Starting AppSec")
+			if err = a.start(); err != nil {
+				log.Error("appsec: Remote config: error while processing %s. Configuration won't be applied: %v", path, err)
+			}
+		} else if !data.ASM.Enabled && a.started {
+			log.Debug("appsec: Remote config: Stopping AppSec")
+			a.stop()
+		}
+		if err != nil {
+			status = genApplyStatus(false, err)
+		}
+		statuses[path] = status
+	}
+
+	return statuses
+}
+
+func mergeRulesData(u remoteconfig.ProductUpdate) ([]ruleDataEntry, map[string]rc.ApplyStatus) {
+	// Following the RFC, merging should only happen when two rules data with the same ID and same Type are received
+	// allRulesData[ID][Type] will return the rules data of said id and type, if it exists
+	allRulesData := make(map[string]map[string]ruleDataEntry)
+	statuses := statusesFromUpdate(u, true, nil)
+
+	for path, raw := range u {
+		log.Debug("appsec: Remote config: processing %s", path)
+
+		// A nil config means ASM_DATA was disabled, and we stopped receiving the config file
+		// Don't ack the config in this case
+		if raw == nil {
+			log.Debug("appsec: remote config: %s disabled", path)
+			statuses[path] = genApplyStatus(false, nil)
+			continue
+		}
+
+		var rulesData rulesData
+		if err := json.Unmarshal(raw, &rulesData); err != nil {
+			log.Debug("appsec: Remote config: error while unmarshalling payload for %s: %v. Configuration won't be applied.", path, err)
+			statuses[path] = genApplyStatus(false, err)
+			continue
+		}
+
+		// Check each entry against allRulesData to see if merging is necessary
+		for _, ruleData := range rulesData.RulesData {
+			if allRulesData[ruleData.ID] == nil {
+				allRulesData[ruleData.ID] = make(map[string]ruleDataEntry)
+			}
+			if data, ok := allRulesData[ruleData.ID][ruleData.Type]; ok {
+				// Merge rules data entries with the same ID and Type
+				data.Data = mergeRulesDataEntries(data.Data, ruleData.Data)
+				allRulesData[ruleData.ID][ruleData.Type] = data
+			} else {
+				allRulesData[ruleData.ID][ruleData.Type] = ruleData
+			}
+		}
+	}
+
+	// Aggregate all the rules data before passing it over to the WAF
+	var rulesData []ruleDataEntry
+	for _, m := range allRulesData {
+		for _, data := range m {
+			rulesData = append(rulesData, data)
+		}
+	}
+	return rulesData, statuses
+}
+
+// mergeRulesDataEntries merges two slices of rules data entries together, removing duplicates and
+// only keeping the longest expiration values for similar entries.
+func mergeRulesDataEntries(entries1, entries2 []rc.ASMDataRuleDataEntry) []rc.ASMDataRuleDataEntry {
+	mergeMap := map[string]int64{}
+
+	for _, entry := range entries1 {
+		mergeMap[entry.Value] = entry.Expiration
+	}
+	// Replace the entry only if the new expiration timestamp is later than the current one.
+	// If no expiration timestamp was provided (defaults to 0), then the data doesn't expire.
+	for _, entry := range entries2 {
+		if exp, ok := mergeMap[entry.Value]; !ok || entry.Expiration == 0 || entry.Expiration > exp {
+			mergeMap[entry.Value] = entry.Expiration
+		}
+	}
+	// Create the final slice and return it
+	entries := make([]rc.ASMDataRuleDataEntry, 0, len(mergeMap))
+	for val, exp := range mergeMap {
+		entries = append(entries, rc.ASMDataRuleDataEntry{
+			Value:      val,
+			Expiration: exp,
+		})
+	}
+	return entries
+}
+
+func (a *appsec) startRC() {
+	if a.rc != nil {
+		a.rc.Start()
+	}
+}
+
+func (a *appsec) stopRC() {
+	if a.rc != nil {
+		a.rc.Stop()
+	}
+}
+
+func (a *appsec) registerRCProduct(p string) error {
+	if a.rc == nil {
+		return fmt.Errorf("no valid remote configuration client")
+	}
+	a.cfg.rc.Products[p] = struct{}{}
+	a.rc.RegisterProduct(p)
+	return nil
+}
+
+func (a *appsec) unregisterRCProduct(p string) error {
+	if a.rc == nil {
+		return fmt.Errorf("no valid remote configuration client")
+	}
+	delete(a.cfg.rc.Products, p)
+	a.rc.UnregisterProduct(p)
+	return nil
+}
+
+func (a *appsec) registerRCCapability(c remoteconfig.Capability) error {
+	a.cfg.rc.Capabilities[c] = struct{}{}
+	if a.rc == nil {
+		return fmt.Errorf("no valid remote configuration client")
+	}
+	a.rc.RegisterCapability(c)
+	return nil
+}
+
+func (a *appsec) unregisterRCCapability(c remoteconfig.Capability) {
+	if a.rc == nil {
+		log.Debug("appsec: Remote config: no valid remote configuration client")
+		return
+	}
+	delete(a.cfg.rc.Capabilities, c)
+	a.rc.UnregisterCapability(c)
+}
+
+func (a *appsec) enableRemoteActivation() error {
+	if a.rc == nil {
+		return fmt.Errorf("no valid remote configuration client")
+	}
+	a.registerRCProduct(rc.ProductASMFeatures)
+	a.registerRCCapability(remoteconfig.ASMActivation)
+	a.rc.RegisterCallback(a.onRemoteActivation)
+	return nil
+}
+
+func (a *appsec) enableRCBlocking() {
+	if a.rc == nil {
+		log.Debug("appsec: Remote config: no valid remote configuration client")
+		return
+	}
+
+	a.registerRCProduct(rc.ProductASM)
+	a.registerRCProduct(rc.ProductASMDD)
+	a.registerRCProduct(rc.ProductASMData)
+	a.rc.RegisterCallback(a.onRCRulesUpdate)
+
+	if _, isSet := os.LookupEnv(rulesEnvVar); !isSet {
+		a.registerRCCapability(remoteconfig.ASMUserBlocking)
+		a.registerRCCapability(remoteconfig.ASMRequestBlocking)
+		a.registerRCCapability(remoteconfig.ASMIPBlocking)
+		a.registerRCCapability(remoteconfig.ASMDDRules)
+		a.registerRCCapability(remoteconfig.ASMExclusions)
+		a.registerRCCapability(remoteconfig.ASMCustomRules)
+		a.registerRCCapability(remoteconfig.ASMCustomBlockingResponse)
+	}
+}
+
+func (a *appsec) disableRCBlocking() {
+	if a.rc == nil {
+		return
+	}
+	a.unregisterRCCapability(remoteconfig.ASMDDRules)
+	a.unregisterRCCapability(remoteconfig.ASMExclusions)
+	a.unregisterRCCapability(remoteconfig.ASMIPBlocking)
+	a.unregisterRCCapability(remoteconfig.ASMRequestBlocking)
+	a.unregisterRCCapability(remoteconfig.ASMUserBlocking)
+	a.unregisterRCCapability(remoteconfig.ASMCustomRules)
+	a.rc.UnregisterCallback(a.onRCRulesUpdate)
+}
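The subtle part of the ASM_DATA handling above is mergeRulesDataEntries: entries are de-duplicated by Value and never lose time to live, with an Expiration of 0 meaning the entry never expires. A hedged, illustrative example of that behaviour (the IP values are made up; rc is the datadog-agent remote config state package imported above):

	entries1 := []rc.ASMDataRuleDataEntry{
		{Value: "1.2.3.4", Expiration: 100},
		{Value: "5.6.7.8", Expiration: 200},
	}
	entries2 := []rc.ASMDataRuleDataEntry{
		{Value: "1.2.3.4", Expiration: 300}, // later expiration wins
		{Value: "5.6.7.8", Expiration: 0},   // 0 means "never expires" and always wins
	}
	merged := mergeRulesDataEntries(entries1, entries2)
	// merged contains, in unspecified (map-iteration) order:
	//   {Value: "1.2.3.4", Expiration: 300}
	//   {Value: "5.6.7.8", Expiration: 0}
	_ = merged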
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/rules.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/rules.go
new file mode 100644
index 0000000000..f34e2a49a1
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/rules.go
@@ -0,0 +1,17 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+//go:build appsec
+// +build appsec
+
+package appsec
+
+import _ "embed"
+
+// Static recommended AppSec rules 1.7.1
+// Source: https://github.com/DataDog/appsec-event-rules/blob/1.7.1/build/recommended.json
+//
+//go:embed rules.json
+var staticRecommendedRules string
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/rules.json b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/rules.json
new file mode 100644
index 0000000000..ba65c5cf5c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/rules.json
@@ -0,0 +1,7079 @@
+{
+  "version": "2.2",
+  "metadata": {
+    "rules_version": "1.7.1"
+  },
+  "rules": [
+    {
+      "id": "blk-001-001",
+      "name": "Block IP Addresses",
+      "tags": {
+        "type": "block_ip",
+        "category": "security_response"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "http.client_ip"
+              }
+            ],
+            "data": "blocked_ips"
+          },
+          "operator": "ip_match"
+        }
+      ],
+      "transformers": [],
+      "on_match": [
+        "block"
+      ]
+    },
+    {
+      "id": "blk-001-002",
+      "name": "Block User Addresses",
+      "tags": {
+        "type": "block_user",
+        "category": "security_response"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "usr.id"
+              }
+            ],
+            "data": "blocked_users"
+          },
+          "operator": "exact_match"
+        }
+      ],
+      "transformers": [],
+      "on_match": [
+        "block"
+      ]
+    },
+    {
+      "id": "crs-913-110",
+      "name": "Acunetix",
+      "tags": {
+        "type": "commercial_scanner",
+        "crs_id": "913110",
+        "category": "attack_attempt",
+        "tool_name": "Acunetix",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies"
+              }
+            ],
+            "list": [
+              "acunetix-product",
+              "(acunetix web vulnerability scanner",
+              "acunetix-scanning-agreement",
+              "acunetix-user-agreement",
+              "md5(acunetix_wvs_security_test)"
+            ]
+          },
+          "operator": "phrase_match"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-913-120",
+      "name": "Known security scanner filename/argument",
+      "tags": {
+        "type": "security_scanner",
+        "crs_id": "913120",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              }
+            ],
+            "list": [
+              "/.adsensepostnottherenonobook",
+              "/<invalid>hello.html",
+              "/actsensepostnottherenonotive",
+              "/acunetix-wvs-test-for-some-inexistent-file",
+              "/antidisestablishmentarianism",
+              "/appscan_fingerprint/mac_address",
+              "/arachni-",
+              "/cybercop",
+              "/nessus_is_probing_you_",
+              "/nessustest",
+              "/netsparker-",
+              "/rfiinc.txt",
+              "/thereisnowaythat-you-canbethere",
+              "/w3af/remotefileinclude.html",
+              "appscan_fingerprint",
+              "w00tw00t.at.isc.sans.dfind",
+              "w00tw00t.at.blackhats.romanian.anti-sec"
+            ]
+          },
+          "operator": "phrase_match"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-920-260",
+      "name": "Unicode Full/Half Width Abuse Attack Attempt",
+      "tags": {
+        "type": "http_protocol_violation",
+        "crs_id": "920260",
+        "category": "attack_attempt",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              }
+            ],
+            "regex": "\\%u[fF]{2}[0-9a-fA-F]{2}",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 6
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-921-110",
+      "name": "HTTP Request Smuggling Attack",
+      "tags": {
+        "type": "http_protocol_violation",
+        "crs_id": "921110",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              }
+            ],
+            "regex": "(?:get|post|head|options|connect|put|delete|trace|track|patch|propfind|propatch|mkcol|copy|move|lock|unlock)\\s+[^\\s]+\\s+http/\\d",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 12
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-921-160",
+      "name": "HTTP Header Injection Attack via payload (CR/LF and header-name detected)",
+      "tags": {
+        "type": "http_protocol_violation",
+        "crs_id": "921160",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.path_params"
+              }
+            ],
+            "regex": "[\\n\\r]+(?:refresh|(?:set-)?cookie|(?:x-)?(?:forwarded-(?:for|host|server)|via|remote-ip|remote-addr|originating-IP))\\s*:",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 3
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-930-100",
+      "name": "Obfuscated Path Traversal Attack (/../)",
+      "tags": {
+        "type": "lfi",
+        "crs_id": "930100",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              }
+            ],
+            "regex": "(?:%(?:c(?:0%(?:[2aq]f|5c|9v)|1%(?:[19p]c|8s|af))|2(?:5(?:c(?:0%25af|1%259c)|2f|5c)|%46|f)|(?:(?:f(?:8%8)?0%8|e)0%80%a|bg%q)f|%3(?:2(?:%(?:%6|4)6|F)|5%%63)|u(?:221[56]|002f|EFC8|F025)|1u|5c)|0x(?:2f|5c)|\\/|\\x5c)(?:%(?:(?:f(?:(?:c%80|8)%8)?0%8|e)0%80%ae|2(?:(?:5(?:c0%25a|2))?e|%45)|u(?:(?:002|ff0)e|2024)|%32(?:%(?:%6|4)5|E)|c0(?:%[256aef]e|\\.))|\\.(?:%0[01])?|0x2e){2,3}(?:%(?:c(?:0%(?:[2aq]f|5c|9v)|1%(?:[19p]c|8s|af))|2(?:5(?:c(?:0%25af|1%259c)|2f|5c)|%46|f)|(?:(?:f(?:8%8)?0%8|e)0%80%a|bg%q)f|%3(?:2(?:%(?:%6|4)6|F)|5%%63)|u(?:221[56]|002f|EFC8|F025)|1u|5c)|0x(?:2f|5c)|\\/|\\x5c)",
+            "options": {
+              "min_length": 4
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "normalizePath"
+      ]
+    },
+    {
+      "id": "crs-930-110",
+      "name": "Simple Path Traversal Attack (/../)",
+      "tags": {
+        "type": "lfi",
+        "crs_id": "930110",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              }
+            ],
+            "regex": "(?:(?:^|[\\x5c/])\\.{2,3}[\\x5c/]|[\\x5c/]\\.{2,3}(?:[\\x5c/]|$))",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 3
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "crs-930-120",
+      "name": "OS File Access Attempt",
+      "tags": {
+        "type": "lfi",
+        "crs_id": "930120",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "list": [
+              "/.htaccess",
+              "/.htdigest",
+              "/.htpasswd",
+              "/.addressbook",
+              "/.aptitude/config",
+              ".aws/config",
+              ".aws/credentials",
+              "/.bash_config",
+              "/.bash_history",
+              "/.bash_logout",
+              "/.bash_profile",
+              "/.bashrc",
+              ".cache/notify-osd.log",
+              ".config/odesk/odesk team.conf",
+              "/.cshrc",
+              "/.dockerignore",
+              ".drush/",
+              "/.eslintignore",
+              "/.fbcindex",
+              "/.forward",
+              "/.git",
+              ".git/",
+              "/.gitattributes",
+              "/.gitconfig",
+              ".gnupg/",
+              ".hplip/hplip.conf",
+              "/.ksh_history",
+              "/.lesshst",
+              ".lftp/",
+              "/.lhistory",
+              "/.lldb-history",
+              ".local/share/mc/",
+              "/.lynx_cookies",
+              "/.my.cnf",
+              "/.mysql_history",
+              "/.nano_history",
+              "/.node_repl_history",
+              "/.pearrc",
+              "/.pgpass",
+              "/.php_history",
+              "/.pinerc",
+              ".pki/",
+              "/.proclog",
+              "/.procmailrc",
+              "/.psql_history",
+              "/.python_history",
+              "/.rediscli_history",
+              "/.rhistory",
+              "/.rhosts",
+              "/.sh_history",
+              "/.sqlite_history",
+              ".ssh/authorized_keys",
+              ".ssh/config",
+              ".ssh/id_dsa",
+              ".ssh/id_dsa.pub",
+              ".ssh/id_rsa",
+              ".ssh/id_rsa.pub",
+              ".ssh/identity",
+              ".ssh/identity.pub",
+              ".ssh/id_ecdsa",
+              ".ssh/id_ecdsa.pub",
+              ".ssh/known_hosts",
+              ".subversion/auth",
+              ".subversion/config",
+              ".subversion/servers",
+              ".tconn/tconn.conf",
+              "/.tcshrc",
+              ".vidalia/vidalia.conf",
+              "/.viminfo",
+              "/.vimrc",
+              "/.www_acl",
+              "/.wwwacl",
+              "/.xauthority",
+              "/.zhistory",
+              "/.zshrc",
+              "/.zsh_history",
+              "/.nsconfig",
+              "data/elasticsearch",
+              "data/kafka",
+              "etc/ansible",
+              "etc/bind",
+              "etc/centos-release",
+              "etc/centos-release-upstream",
+              "etc/clam.d",
+              "etc/elasticsearch",
+              "etc/freshclam.conf",
+              "etc/gshadow",
+              "etc/gshadow-",
+              "etc/httpd",
+              "etc/kafka",
+              "etc/kibana",
+              "etc/logstash",
+              "etc/lvm",
+              "etc/mongod.conf",
+              "etc/my.cnf",
+              "etc/nuxeo.conf",
+              "etc/pki",
+              "etc/postfix",
+              "etc/scw-release",
+              "etc/subgid",
+              "etc/subgid-",
+              "etc/sudoers.d",
+              "etc/sysconfig",
+              "etc/system-release-cpe",
+              "opt/nuxeo",
+              "opt/tomcat",
+              "tmp/kafka-logs",
+              "usr/lib/rpm/rpm.log",
+              "var/data/elasticsearch",
+              "var/lib/elasticsearch",
+              "etc/.java",
+              "etc/acpi",
+              "etc/alsa",
+              "etc/alternatives",
+              "etc/apache2",
+              "etc/apm",
+              "etc/apparmor",
+              "etc/apparmor.d",
+              "etc/apport",
+              "etc/apt",
+              "etc/asciidoc",
+              "etc/avahi",
+              "etc/bash_completion.d",
+              "etc/binfmt.d",
+              "etc/bluetooth",
+              "etc/bonobo-activation",
+              "etc/brltty",
+              "etc/ca-certificates",
+              "etc/calendar",
+              "etc/chatscripts",
+              "etc/chromium-browser",
+              "etc/clamav",
+              "etc/cni",
+              "etc/console-setup",
+              "etc/coraza-waf",
+              "etc/cracklib",
+              "etc/cron.d",
+              "etc/cron.daily",
+              "etc/cron.hourly",
+              "etc/cron.monthly",
+              "etc/cron.weekly",
+              "etc/cups",
+              "etc/cups.save",
+              "etc/cupshelpers",
+              "etc/dbus-1",
+              "etc/dconf",
+              "etc/default",
+              "etc/depmod.d",
+              "etc/dhcp",
+              "etc/dictionaries-common",
+              "etc/dkms",
+              "etc/dnsmasq.d",
+              "etc/dockeretc/dpkg",
+              "etc/emacs",
+              "etc/environment.d",
+              "etc/fail2ban",
+              "etc/firebird",
+              "etc/firefox",
+              "etc/fonts",
+              "etc/fwupd",
+              "etc/gconf",
+              "etc/gdb",
+              "etc/gdm3",
+              "etc/geoclue",
+              "etc/ghostscript",
+              "etc/gimp",
+              "etc/glvnd",
+              "etc/gnome",
+              "etc/gnome-vfs-2.0",
+              "etc/gnucash",
+              "etc/gnustep",
+              "etc/groff",
+              "etc/grub.d",
+              "etc/gss",
+              "etc/gtk-2.0",
+              "etc/gtk-3.0",
+              "etc/hp",
+              "etc/ifplugd",
+              "etc/imagemagick-6",
+              "etc/init",
+              "etc/init.d",
+              "etc/initramfs-tools",
+              "etc/insserv.conf.d",
+              "etc/iproute2",
+              "etc/iptables",
+              "etc/java",
+              "etc/java-11-openjdk",
+              "etc/java-17-oracle",
+              "etc/java-8-openjdk",
+              "etc/kernel",
+              "etc/ld.so.conf.d",
+              "etc/ldap",
+              "etc/libblockdev",
+              "etc/libibverbs.d",
+              "etc/libnl-3",
+              "etc/libpaper.d",
+              "etc/libreoffice",
+              "etc/lighttpd",
+              "etc/logcheck",
+              "etc/logrotate.d",
+              "etc/lynx",
+              "etc/mail",
+              "etc/mc",
+              "etc/menu",
+              "etc/menu-methods",
+              "etc/modprobe.d",
+              "etc/modsecurity",
+              "etc/modules-load.d",
+              "etc/monit",
+              "etc/mono",
+              "etc/mplayer",
+              "etc/mpv",
+              "etc/muttrc.d",
+              "etc/mysql",
+              "etc/netplan",
+              "etc/network",
+              "etc/networkd-dispatcher",
+              "etc/networkmanager",
+              "etc/newt",
+              "etc/nghttpx",
+              "etc/nikto",
+              "etc/odbcdatasources",
+              "etc/openal",
+              "etc/openmpi",
+              "etc/opt",
+              "etc/osync",
+              "etc/packagekit",
+              "etc/pam.d",
+              "etc/pcmcia",
+              "etc/perl",
+              "etc/php",
+              "etc/pki",
+              "etc/pm",
+              "etc/polkit-1",
+              "etc/postfix",
+              "etc/ppp",
+              "etc/profile.d",
+              "etc/proftpd",
+              "etc/pulse",
+              "etc/python",
+              "etc/rc0.d",
+              "etc/rc1.d",
+              "etc/rc2.d",
+              "etc/rc3.d",
+              "etc/rc4.d",
+              "etc/rc5.d",
+              "etc/rc6.d",
+              "etc/rcs.d",
+              "etc/resolvconf",
+              "etc/rsyslog.d",
+              "etc/samba",
+              "etc/sane.d",
+              "etc/security",
+              "etc/selinux",
+              "etc/sensors.d",
+              "etc/sgml",
+              "etc/signon-ui",
+              "etc/skel",
+              "etc/snmp",
+              "etc/sound",
+              "etc/spamassassin",
+              "etc/speech-dispatcher",
+              "etc/ssh",
+              "etc/ssl",
+              "etc/sudoers.d",
+              "etc/sysctl.d",
+              "etc/sysstat",
+              "etc/systemd",
+              "etc/terminfo",
+              "etc/texmf",
+              "etc/thermald",
+              "etc/thnuclnt",
+              "etc/thunderbird",
+              "etc/timidity",
+              "etc/tmpfiles.d",
+              "etc/ubuntu-advantage",
+              "etc/udev",
+              "etc/udisks2",
+              "etc/ufw",
+              "etc/update-manager",
+              "etc/update-motd.d",
+              "etc/update-notifier",
+              "etc/upower",
+              "etc/urlview",
+              "etc/usb_modeswitch.d",
+              "etc/vim",
+              "etc/vmware",
+              "etc/vmware-installer",
+              "etc/vmware-vix",
+              "etc/vulkan",
+              "etc/w3m",
+              "etc/wireshark",
+              "etc/wpa_supplicant",
+              "etc/x11",
+              "etc/xdg",
+              "etc/xml",
+              "etc/redis.conf",
+              "etc/redis-sentinel.conf",
+              "etc/php.ini",
+              "bin/php.ini",
+              "etc/httpd/php.ini",
+              "usr/lib/php.ini",
+              "usr/lib/php/php.ini",
+              "usr/local/etc/php.ini",
+              "usr/local/lib/php.ini",
+              "usr/local/php/lib/php.ini",
+              "usr/local/php4/lib/php.ini",
+              "usr/local/php5/lib/php.ini",
+              "usr/local/apache/conf/php.ini",
+              "etc/php4.4/fcgi/php.ini",
+              "etc/php4/apache/php.ini",
+              "etc/php4/apache2/php.ini",
+              "etc/php5/apache/php.ini",
+              "etc/php5/apache2/php.ini",
+              "etc/php/php.ini",
+              "etc/php/php4/php.ini",
+              "etc/php/apache/php.ini",
+              "etc/php/apache2/php.ini",
+              "web/conf/php.ini",
+              "usr/local/zend/etc/php.ini",
+              "opt/xampp/etc/php.ini",
+              "var/local/www/conf/php.ini",
+              "etc/php/cgi/php.ini",
+              "etc/php4/cgi/php.ini",
+              "etc/php5/cgi/php.ini",
+              "home2/bin/stable/apache/php.ini",
+              "home/bin/stable/apache/php.ini",
+              "etc/httpd/conf.d/php.conf",
+              "php5/php.ini",
+              "php4/php.ini",
+              "php/php.ini",
+              "windows/php.ini",
+              "winnt/php.ini",
+              "apache/php/php.ini",
+              "xampp/apache/bin/php.ini",
+              "netserver/bin/stable/apache/php.ini",
+              "volumes/macintosh_hd1/usr/local/php/lib/php.ini",
+              "etc/mono/1.0/machine.config",
+              "etc/mono/2.0/machine.config",
+              "etc/mono/2.0/web.config",
+              "etc/mono/config",
+              "usr/local/cpanel/logs/stats_log",
+              "usr/local/cpanel/logs/access_log",
+              "usr/local/cpanel/logs/error_log",
+              "usr/local/cpanel/logs/license_log",
+              "usr/local/cpanel/logs/login_log",
+              "var/cpanel/cpanel.config",
+              "usr/local/psa/admin/logs/httpsd_access_log",
+              "usr/local/psa/admin/logs/panel.log",
+              "usr/local/psa/admin/conf/php.ini",
+              "etc/sw-cp-server/applications.d/plesk.conf",
+              "usr/local/psa/admin/conf/site_isolation_settings.ini",
+              "usr/local/sb/config",
+              "etc/sw-cp-server/applications.d/00-sso-cpserver.conf",
+              "etc/sso/sso_config.ini",
+              "etc/mysql/conf.d/old_passwords.cnf",
+              "var/mysql.log",
+              "var/mysql-bin.index",
+              "var/data/mysql-bin.index",
+              "program files/mysql/mysql server 5.0/data/{host}.err",
+              "program files/mysql/mysql server 5.0/data/mysql.log",
+              "program files/mysql/mysql server 5.0/data/mysql.err",
+              "program files/mysql/mysql server 5.0/data/mysql-bin.log",
+              "program files/mysql/mysql server 5.0/data/mysql-bin.index",
+              "program files/mysql/data/{host}.err",
+              "program files/mysql/data/mysql.log",
+              "program files/mysql/data/mysql.err",
+              "program files/mysql/data/mysql-bin.log",
+              "program files/mysql/data/mysql-bin.index",
+              "mysql/data/{host}.err",
+              "mysql/data/mysql.log",
+              "mysql/data/mysql.err",
+              "mysql/data/mysql-bin.log",
+              "mysql/data/mysql-bin.index",
+              "usr/local/mysql/data/mysql.log",
+              "usr/local/mysql/data/mysql.err",
+              "usr/local/mysql/data/mysql-bin.log",
+              "usr/local/mysql/data/mysql-slow.log",
+              "usr/local/mysql/data/mysqlderror.log",
+              "usr/local/mysql/data/{host}.err",
+              "usr/local/mysql/data/mysql-bin.index",
+              "var/lib/mysql/my.cnf",
+              "etc/mysql/my.cnf",
+              "etc/my.cnf",
+              "program files/mysql/mysql server 5.0/my.ini",
+              "program files/mysql/mysql server 5.0/my.cnf",
+              "program files/mysql/my.ini",
+              "program files/mysql/my.cnf",
+              "mysql/my.ini",
+              "mysql/my.cnf",
+              "mysql/bin/my.ini",
+              "var/postgresql/log/postgresql.log",
+              "usr/internet/pgsql/data/postmaster.log",
+              "usr/local/pgsql/data/postgresql.log",
+              "usr/local/pgsql/data/pg_log",
+              "postgresql/log/pgadmin.log",
+              "var/lib/pgsql/data/postgresql.conf",
+              "var/postgresql/db/postgresql.conf",
+              "var/nm2/postgresql.conf",
+              "usr/local/pgsql/data/postgresql.conf",
+              "usr/local/pgsql/data/pg_hba.conf",
+              "usr/internet/pgsql/data/pg_hba.conf",
+              "usr/local/pgsql/data/passwd",
+              "usr/local/pgsql/bin/pg_passwd",
+              "etc/postgresql/postgresql.conf",
+              "etc/postgresql/pg_hba.conf",
+              "home/postgres/data/postgresql.conf",
+              "home/postgres/data/pg_version",
+              "home/postgres/data/pg_ident.conf",
+              "home/postgres/data/pg_hba.conf",
+              "program files/postgresql/8.3/data/pg_hba.conf",
+              "program files/postgresql/8.3/data/pg_ident.conf",
+              "program files/postgresql/8.3/data/postgresql.conf",
+              "program files/postgresql/8.4/data/pg_hba.conf",
+              "program files/postgresql/8.4/data/pg_ident.conf",
+              "program files/postgresql/8.4/data/postgresql.conf",
+              "program files/postgresql/9.0/data/pg_hba.conf",
+              "program files/postgresql/9.0/data/pg_ident.conf",
+              "program files/postgresql/9.0/data/postgresql.conf",
+              "program files/postgresql/9.1/data/pg_hba.conf",
+              "program files/postgresql/9.1/data/pg_ident.conf",
+              "program files/postgresql/9.1/data/postgresql.conf",
+              "wamp/logs/access.log",
+              "wamp/logs/apache_error.log",
+              "wamp/logs/genquery.log",
+              "wamp/logs/mysql.log",
+              "wamp/logs/slowquery.log",
+              "wamp/bin/apache/apache2.2.22/logs/access.log",
+              "wamp/bin/apache/apache2.2.22/logs/error.log",
+              "wamp/bin/apache/apache2.2.21/logs/access.log",
+              "wamp/bin/apache/apache2.2.21/logs/error.log",
+              "wamp/bin/mysql/mysql5.5.24/data/mysql-bin.index",
+              "wamp/bin/mysql/mysql5.5.16/data/mysql-bin.index",
+              "wamp/bin/apache/apache2.2.21/conf/httpd.conf",
+              "wamp/bin/apache/apache2.2.22/conf/httpd.conf",
+              "wamp/bin/apache/apache2.2.21/wampserver.conf",
+              "wamp/bin/apache/apache2.2.22/wampserver.conf",
+              "wamp/bin/apache/apache2.2.22/conf/wampserver.conf",
+              "wamp/bin/mysql/mysql5.5.24/my.ini",
+              "wamp/bin/mysql/mysql5.5.24/wampserver.conf",
+              "wamp/bin/mysql/mysql5.5.16/my.ini",
+              "wamp/bin/mysql/mysql5.5.16/wampserver.conf",
+              "wamp/bin/php/php5.3.8/php.ini",
+              "wamp/bin/php/php5.4.3/php.ini",
+              "xampp/apache/logs/access.log",
+              "xampp/apache/logs/error.log",
+              "xampp/mysql/data/mysql-bin.index",
+              "xampp/mysql/data/mysql.err",
+              "xampp/mysql/data/{host}.err",
+              "xampp/sendmail/sendmail.log",
+              "xampp/apache/conf/httpd.conf",
+              "xampp/filezillaftp/filezilla server.xml",
+              "xampp/mercurymail/mercury.ini",
+              "xampp/php/php.ini",
+              "xampp/phpmyadmin/config.inc.php",
+              "xampp/sendmail/sendmail.ini",
+              "xampp/webalizer/webalizer.conf",
+              "opt/lampp/etc/httpd.conf",
+              "xampp/htdocs/aca.txt",
+              "xampp/htdocs/admin.php",
+              "xampp/htdocs/leer.txt",
+              "usr/local/apache/logs/audit_log",
+              "usr/local/apache2/logs/audit_log",
+              "logs/security_debug_log",
+              "logs/security_log",
+              "usr/local/apache/conf/modsec.conf",
+              "usr/local/apache2/conf/modsec.conf",
+              "winnt/system32/logfiles/msftpsvc",
+              "winnt/system32/logfiles/msftpsvc1",
+              "winnt/system32/logfiles/msftpsvc2",
+              "windows/system32/logfiles/msftpsvc",
+              "windows/system32/logfiles/msftpsvc1",
+              "windows/system32/logfiles/msftpsvc2",
+              "etc/logrotate.d/proftpd",
+              "www/logs/proftpd.system.log",
+              "etc/pam.d/proftpd",
+              "etc/proftp.conf",
+              "etc/protpd/proftpd.conf",
+              "etc/vhcs2/proftpd/proftpd.conf",
+              "etc/proftpd/modules.conf",
+              "etc/vsftpd.chroot_list",
+              "etc/logrotate.d/vsftpd.log",
+              "etc/vsftpd/vsftpd.conf",
+              "etc/vsftpd.conf",
+              "etc/chrootusers",
+              "var/adm/log/xferlog",
+              "etc/wu-ftpd/ftpaccess",
+              "etc/wu-ftpd/ftphosts",
+              "etc/wu-ftpd/ftpusers",
+              "logs/pure-ftpd.log",
+              "usr/sbin/pure-config.pl",
+              "usr/etc/pure-ftpd.conf",
+              "etc/pure-ftpd/pure-ftpd.conf",
+              "usr/local/etc/pure-ftpd.conf",
+              "usr/local/etc/pureftpd.pdb",
+              "usr/local/pureftpd/etc/pureftpd.pdb",
+              "usr/local/pureftpd/sbin/pure-config.pl",
+              "usr/local/pureftpd/etc/pure-ftpd.conf",
+              "etc/pure-ftpd.conf",
+              "etc/pure-ftpd/pure-ftpd.pdb",
+              "etc/pureftpd.pdb",
+              "etc/pureftpd.passwd",
+              "etc/pure-ftpd/pureftpd.pdb",
+              "usr/ports/ftp/pure-ftpd/pure-ftpd.conf",
+              "usr/ports/ftp/pure-ftpd/pureftpd.pdb",
+              "usr/ports/ftp/pure-ftpd/pureftpd.passwd",
+              "usr/ports/net/pure-ftpd/pure-ftpd.conf",
+              "usr/ports/net/pure-ftpd/pureftpd.pdb",
+              "usr/ports/net/pure-ftpd/pureftpd.passwd",
+              "usr/pkgsrc/net/pureftpd/pure-ftpd.conf",
+              "usr/pkgsrc/net/pureftpd/pureftpd.pdb",
+              "usr/pkgsrc/net/pureftpd/pureftpd.passwd",
+              "usr/ports/contrib/pure-ftpd/pure-ftpd.conf",
+              "usr/ports/contrib/pure-ftpd/pureftpd.pdb",
+              "usr/ports/contrib/pure-ftpd/pureftpd.passwd",
+              "usr/sbin/mudlogd",
+              "etc/muddleftpd/mudlog",
+              "etc/muddleftpd.com",
+              "etc/muddleftpd/mudlogd.conf",
+              "etc/muddleftpd/muddleftpd.conf",
+              "usr/sbin/mudpasswd",
+              "etc/muddleftpd/muddleftpd.passwd",
+              "etc/muddleftpd/passwd",
+              "etc/logrotate.d/ftp",
+              "etc/ftpchroot",
+              "etc/ftphosts",
+              "etc/ftpusers",
+              "winnt/system32/logfiles/smtpsvc",
+              "winnt/system32/logfiles/smtpsvc1",
+              "winnt/system32/logfiles/smtpsvc2",
+              "winnt/system32/logfiles/smtpsvc3",
+              "winnt/system32/logfiles/smtpsvc4",
+              "winnt/system32/logfiles/smtpsvc5",
+              "windows/system32/logfiles/smtpsvc",
+              "windows/system32/logfiles/smtpsvc1",
+              "windows/system32/logfiles/smtpsvc2",
+              "windows/system32/logfiles/smtpsvc3",
+              "windows/system32/logfiles/smtpsvc4",
+              "windows/system32/logfiles/smtpsvc5",
+              "etc/osxhttpd/osxhttpd.conf",
+              "system/library/webobjects/adaptors/apache2.2/apache.conf",
+              "etc/apache2/sites-available/default",
+              "etc/apache2/sites-available/default-ssl",
+              "etc/apache2/sites-enabled/000-default",
+              "etc/apache2/sites-enabled/default",
+              "etc/apache2/apache2.conf",
+              "etc/apache2/ports.conf",
+              "usr/local/etc/apache/httpd.conf",
+              "usr/pkg/etc/httpd/httpd.conf",
+              "usr/pkg/etc/httpd/httpd-default.conf",
+              "usr/pkg/etc/httpd/httpd-vhosts.conf",
+              "etc/httpd/mod_php.conf",
+              "etc/httpd/extra/httpd-ssl.conf",
+              "etc/rc.d/rc.httpd",
+              "usr/local/apache/conf/httpd.conf.default",
+              "usr/local/apache/conf/access.conf",
+              "usr/local/apache22/conf/httpd.conf",
+              "usr/local/apache22/httpd.conf",
+              "usr/local/etc/apache22/conf/httpd.conf",
+              "usr/local/apps/apache22/conf/httpd.conf",
+              "etc/apache22/conf/httpd.conf",
+              "etc/apache22/httpd.conf",
+              "opt/apache22/conf/httpd.conf",
+              "usr/local/etc/apache2/vhosts.conf",
+              "usr/local/apache/conf/vhosts.conf",
+              "usr/local/apache2/conf/vhosts.conf",
+              "usr/local/apache/conf/vhosts-custom.conf",
+              "usr/local/apache2/conf/vhosts-custom.conf",
+              "etc/apache/default-server.conf",
+              "etc/apache2/default-server.conf",
+              "usr/local/apache2/conf/extra/httpd-ssl.conf",
+              "usr/local/apache2/conf/ssl.conf",
+              "etc/httpd/conf.d",
+              "usr/local/etc/apache22/httpd.conf",
+              "usr/local/etc/apache2/httpd.conf",
+              "etc/apache2/httpd2.conf",
+              "etc/apache2/ssl-global.conf",
+              "etc/apache2/vhosts.d/00_default_vhost.conf",
+              "apache/conf/httpd.conf",
+              "etc/apache/httpd.conf",
+              "etc/httpd/conf",
+              "http/httpd.conf",
+              "usr/local/apache1.3/conf/httpd.conf",
+              "usr/local/etc/httpd/conf",
+              "var/apache/conf/httpd.conf",
+              "var/www/conf",
+              "www/apache/conf/httpd.conf",
+              "www/conf/httpd.conf",
+              "etc/init.d",
+              "etc/apache/access.conf",
+              "etc/rc.conf",
+              "www/logs/freebsddiary-error.log",
+              "www/logs/freebsddiary-access_log",
+              "library/webserver/documents/index.html",
+              "library/webserver/documents/index.htm",
+              "library/webserver/documents/default.html",
+              "library/webserver/documents/default.htm",
+              "library/webserver/documents/index.php",
+              "library/webserver/documents/default.php",
+              "usr/local/etc/webmin/miniserv.conf",
+              "etc/webmin/miniserv.conf",
+              "usr/local/etc/webmin/miniserv.users",
+              "etc/webmin/miniserv.users",
+              "winnt/system32/logfiles/w3svc/inetsvn1.log",
+              "winnt/system32/logfiles/w3svc1/inetsvn1.log",
+              "winnt/system32/logfiles/w3svc2/inetsvn1.log",
+              "winnt/system32/logfiles/w3svc3/inetsvn1.log",
+              "windows/system32/logfiles/w3svc/inetsvn1.log",
+              "windows/system32/logfiles/w3svc1/inetsvn1.log",
+              "windows/system32/logfiles/w3svc2/inetsvn1.log",
+              "windows/system32/logfiles/w3svc3/inetsvn1.log",
+              "apache/logs/error.log",
+              "apache/logs/access.log",
+              "apache2/logs/error.log",
+              "apache2/logs/access.log",
+              "logs/error.log",
+              "logs/access.log",
+              "etc/httpd/logs/access_log",
+              "etc/httpd/logs/access.log",
+              "etc/httpd/logs/error_log",
+              "etc/httpd/logs/error.log",
+              "usr/local/apache/logs/access_log",
+              "usr/local/apache/logs/access.log",
+              "usr/local/apache/logs/error_log",
+              "usr/local/apache/logs/error.log",
+              "usr/local/apache2/logs/access_log",
+              "usr/local/apache2/logs/access.log",
+              "usr/local/apache2/logs/error_log",
+              "usr/local/apache2/logs/error.log",
+              "var/www/logs/access_log",
+              "var/www/logs/access.log",
+              "var/www/logs/error_log",
+              "var/www/logs/error.log",
+              "opt/lampp/logs/access_log",
+              "opt/lampp/logs/error_log",
+              "opt/xampp/logs/access_log",
+              "opt/xampp/logs/error_log",
+              "opt/lampp/logs/access.log",
+              "opt/lampp/logs/error.log",
+              "opt/xampp/logs/access.log",
+              "opt/xampp/logs/error.log",
+              "program files/apache group/apache/logs/access.log",
+              "program files/apache group/apache/logs/error.log",
+              "program files/apache software foundation/apache2.2/logs/error.log",
+              "program files/apache software foundation/apache2.2/logs/access.log",
+              "opt/apache/apache.conf",
+              "opt/apache/conf/apache.conf",
+              "opt/apache2/apache.conf",
+              "opt/apache2/conf/apache.conf",
+              "opt/httpd/apache.conf",
+              "opt/httpd/conf/apache.conf",
+              "etc/httpd/apache.conf",
+              "etc/apache2/apache.conf",
+              "etc/httpd/conf/apache.conf",
+              "usr/local/apache/apache.conf",
+              "usr/local/apache/conf/apache.conf",
+              "usr/local/apache2/apache.conf",
+              "usr/local/apache2/conf/apache.conf",
+              "usr/local/php/apache.conf.php",
+              "usr/local/php4/apache.conf.php",
+              "usr/local/php5/apache.conf.php",
+              "usr/local/php/apache.conf",
+              "usr/local/php4/apache.conf",
+              "usr/local/php5/apache.conf",
+              "private/etc/httpd/apache.conf",
+              "opt/apache/apache2.conf",
+              "opt/apache/conf/apache2.conf",
+              "opt/apache2/apache2.conf",
+              "opt/apache2/conf/apache2.conf",
+              "opt/httpd/apache2.conf",
+              "opt/httpd/conf/apache2.conf",
+              "etc/httpd/apache2.conf",
+              "etc/httpd/conf/apache2.conf",
+              "usr/local/apache/apache2.conf",
+              "usr/local/apache/conf/apache2.conf",
+              "usr/local/apache2/apache2.conf",
+              "usr/local/apache2/conf/apache2.conf",
+              "usr/local/php/apache2.conf.php",
+              "usr/local/php4/apache2.conf.php",
+              "usr/local/php5/apache2.conf.php",
+              "usr/local/php/apache2.conf",
+              "usr/local/php4/apache2.conf",
+              "usr/local/php5/apache2.conf",
+              "private/etc/httpd/apache2.conf",
+              "usr/local/apache/conf/httpd.conf",
+              "usr/local/apache2/conf/httpd.conf",
+              "etc/httpd/conf/httpd.conf",
+              "etc/apache/apache.conf",
+              "etc/apache/conf/httpd.conf",
+              "etc/apache2/httpd.conf",
+              "usr/apache2/conf/httpd.conf",
+              "usr/apache/conf/httpd.conf",
+              "usr/local/etc/apache/conf/httpd.conf",
+              "usr/local/apache/httpd.conf",
+              "usr/local/apache2/httpd.conf",
+              "usr/local/httpd/conf/httpd.conf",
+              "usr/local/etc/apache2/conf/httpd.conf",
+              "usr/local/etc/httpd/conf/httpd.conf",
+              "usr/local/apps/apache2/conf/httpd.conf",
+              "usr/local/apps/apache/conf/httpd.conf",
+              "usr/local/php/httpd.conf.php",
+              "usr/local/php4/httpd.conf.php",
+              "usr/local/php5/httpd.conf.php",
+              "usr/local/php/httpd.conf",
+              "usr/local/php4/httpd.conf",
+              "usr/local/php5/httpd.conf",
+              "etc/apache2/conf/httpd.conf",
+              "etc/http/conf/httpd.conf",
+              "etc/httpd/httpd.conf",
+              "etc/http/httpd.conf",
+              "etc/httpd.conf",
+              "opt/apache/conf/httpd.conf",
+              "opt/apache2/conf/httpd.conf",
+              "var/www/conf/httpd.conf",
+              "private/etc/httpd/httpd.conf",
+              "private/etc/httpd/httpd.conf.default",
+              "etc/apache2/vhosts.d/default_vhost.include",
+              "etc/apache2/conf.d/charset",
+              "etc/apache2/conf.d/security",
+              "etc/apache2/envvars",
+              "etc/apache2/mods-available/autoindex.conf",
+              "etc/apache2/mods-available/deflate.conf",
+              "etc/apache2/mods-available/dir.conf",
+              "etc/apache2/mods-available/mem_cache.conf",
+              "etc/apache2/mods-available/mime.conf",
+              "etc/apache2/mods-available/proxy.conf",
+              "etc/apache2/mods-available/setenvif.conf",
+              "etc/apache2/mods-available/ssl.conf",
+              "etc/apache2/mods-enabled/alias.conf",
+              "etc/apache2/mods-enabled/deflate.conf",
+              "etc/apache2/mods-enabled/dir.conf",
+              "etc/apache2/mods-enabled/mime.conf",
+              "etc/apache2/mods-enabled/negotiation.conf",
+              "etc/apache2/mods-enabled/php5.conf",
+              "etc/apache2/mods-enabled/status.conf",
+              "program files/apache group/apache/conf/httpd.conf",
+              "program files/apache group/apache2/conf/httpd.conf",
+              "program files/xampp/apache/conf/apache.conf",
+              "program files/xampp/apache/conf/apache2.conf",
+              "program files/xampp/apache/conf/httpd.conf",
+              "program files/apache group/apache/apache.conf",
+              "program files/apache group/apache/conf/apache.conf",
+              "program files/apache group/apache2/conf/apache.conf",
+              "program files/apache group/apache/apache2.conf",
+              "program files/apache group/apache/conf/apache2.conf",
+              "program files/apache group/apache2/conf/apache2.conf",
+              "program files/apache software foundation/apache2.2/conf/httpd.conf",
+              "volumes/macintosh_hd1/opt/httpd/conf/httpd.conf",
+              "volumes/macintosh_hd1/opt/apache/conf/httpd.conf",
+              "volumes/macintosh_hd1/opt/apache2/conf/httpd.conf",
+              "volumes/macintosh_hd1/usr/local/php/httpd.conf.php",
+              "volumes/macintosh_hd1/usr/local/php4/httpd.conf.php",
+              "volumes/macintosh_hd1/usr/local/php5/httpd.conf.php",
+              "volumes/webbackup/opt/apache2/conf/httpd.conf",
+              "volumes/webbackup/private/etc/httpd/httpd.conf",
+              "volumes/webbackup/private/etc/httpd/httpd.conf.default",
+              "usr/local/etc/apache/vhosts.conf",
+              "usr/local/jakarta/tomcat/conf/jakarta.conf",
+              "usr/local/jakarta/tomcat/conf/server.xml",
+              "usr/local/jakarta/tomcat/conf/context.xml",
+              "usr/local/jakarta/tomcat/conf/workers.properties",
+              "usr/local/jakarta/tomcat/conf/logging.properties",
+              "usr/local/jakarta/dist/tomcat/conf/jakarta.conf",
+              "usr/local/jakarta/dist/tomcat/conf/server.xml",
+              "usr/local/jakarta/dist/tomcat/conf/context.xml",
+              "usr/local/jakarta/dist/tomcat/conf/workers.properties",
+              "usr/local/jakarta/dist/tomcat/conf/logging.properties",
+              "usr/share/tomcat6/conf/server.xml",
+              "usr/share/tomcat6/conf/context.xml",
+              "usr/share/tomcat6/conf/workers.properties",
+              "usr/share/tomcat6/conf/logging.properties",
+              "var/cpanel/tomcat.options",
+              "usr/local/jakarta/tomcat/logs/catalina.out",
+              "usr/local/jakarta/tomcat/logs/catalina.err",
+              "opt/tomcat/logs/catalina.out",
+              "opt/tomcat/logs/catalina.err",
+              "usr/share/logs/catalina.out",
+              "usr/share/logs/catalina.err",
+              "usr/share/tomcat/logs/catalina.out",
+              "usr/share/tomcat/logs/catalina.err",
+              "usr/share/tomcat6/logs/catalina.out",
+              "usr/share/tomcat6/logs/catalina.err",
+              "usr/local/apache/logs/mod_jk.log",
+              "usr/local/jakarta/tomcat/logs/mod_jk.log",
+              "usr/local/jakarta/dist/tomcat/logs/mod_jk.log",
+              "opt/[jboss]/server/default/conf/jboss-minimal.xml",
+              "opt/[jboss]/server/default/conf/jboss-service.xml",
+              "opt/[jboss]/server/default/conf/jndi.properties",
+              "opt/[jboss]/server/default/conf/log4j.xml",
+              "opt/[jboss]/server/default/conf/login-config.xml",
+              "opt/[jboss]/server/default/conf/standardjaws.xml",
+              "opt/[jboss]/server/default/conf/standardjboss.xml",
+              "opt/[jboss]/server/default/conf/server.log.properties",
+              "opt/[jboss]/server/default/deploy/jboss-logging.xml",
+              "usr/local/[jboss]/server/default/conf/jboss-minimal.xml",
+              "usr/local/[jboss]/server/default/conf/jboss-service.xml",
+              "usr/local/[jboss]/server/default/conf/jndi.properties",
+              "usr/local/[jboss]/server/default/conf/log4j.xml",
+              "usr/local/[jboss]/server/default/conf/login-config.xml",
+              "usr/local/[jboss]/server/default/conf/standardjaws.xml",
+              "usr/local/[jboss]/server/default/conf/standardjboss.xml",
+              "usr/local/[jboss]/server/default/conf/server.log.properties",
+              "usr/local/[jboss]/server/default/deploy/jboss-logging.xml",
+              "private/tmp/[jboss]/server/default/conf/jboss-minimal.xml",
+              "private/tmp/[jboss]/server/default/conf/jboss-service.xml",
+              "private/tmp/[jboss]/server/default/conf/jndi.properties",
+              "private/tmp/[jboss]/server/default/conf/log4j.xml",
+              "private/tmp/[jboss]/server/default/conf/login-config.xml",
+              "private/tmp/[jboss]/server/default/conf/standardjaws.xml",
+              "private/tmp/[jboss]/server/default/conf/standardjboss.xml",
+              "private/tmp/[jboss]/server/default/conf/server.log.properties",
+              "private/tmp/[jboss]/server/default/deploy/jboss-logging.xml",
+              "tmp/[jboss]/server/default/conf/jboss-minimal.xml",
+              "tmp/[jboss]/server/default/conf/jboss-service.xml",
+              "tmp/[jboss]/server/default/conf/jndi.properties",
+              "tmp/[jboss]/server/default/conf/log4j.xml",
+              "tmp/[jboss]/server/default/conf/login-config.xml",
+              "tmp/[jboss]/server/default/conf/standardjaws.xml",
+              "tmp/[jboss]/server/default/conf/standardjboss.xml",
+              "tmp/[jboss]/server/default/conf/server.log.properties",
+              "tmp/[jboss]/server/default/deploy/jboss-logging.xml",
+              "program files/[jboss]/server/default/conf/jboss-minimal.xml",
+              "program files/[jboss]/server/default/conf/jboss-service.xml",
+              "program files/[jboss]/server/default/conf/jndi.properties",
+              "program files/[jboss]/server/default/conf/log4j.xml",
+              "program files/[jboss]/server/default/conf/login-config.xml",
+              "program files/[jboss]/server/default/conf/standardjaws.xml",
+              "program files/[jboss]/server/default/conf/standardjboss.xml",
+              "program files/[jboss]/server/default/conf/server.log.properties",
+              "program files/[jboss]/server/default/deploy/jboss-logging.xml",
+              "[jboss]/server/default/conf/jboss-minimal.xml",
+              "[jboss]/server/default/conf/jboss-service.xml",
+              "[jboss]/server/default/conf/jndi.properties",
+              "[jboss]/server/default/conf/log4j.xml",
+              "[jboss]/server/default/conf/login-config.xml",
+              "[jboss]/server/default/conf/standardjaws.xml",
+              "[jboss]/server/default/conf/standardjboss.xml",
+              "[jboss]/server/default/conf/server.log.properties",
+              "[jboss]/server/default/deploy/jboss-logging.xml",
+              "opt/[jboss]/server/default/log/server.log",
+              "opt/[jboss]/server/default/log/boot.log",
+              "usr/local/[jboss]/server/default/log/server.log",
+              "usr/local/[jboss]/server/default/log/boot.log",
+              "private/tmp/[jboss]/server/default/log/server.log",
+              "private/tmp/[jboss]/server/default/log/boot.log",
+              "tmp/[jboss]/server/default/log/server.log",
+              "tmp/[jboss]/server/default/log/boot.log",
+              "program files/[jboss]/server/default/log/server.log",
+              "program files/[jboss]/server/default/log/boot.log",
+              "[jboss]/server/default/log/server.log",
+              "[jboss]/server/default/log/boot.log",
+              "var/lighttpd.log",
+              "var/logs/access.log",
+              "usr/local/apache2/logs/lighttpd.error.log",
+              "usr/local/apache2/logs/lighttpd.log",
+              "usr/local/apache/logs/lighttpd.error.log",
+              "usr/local/apache/logs/lighttpd.log",
+              "usr/local/lighttpd/log/lighttpd.error.log",
+              "usr/local/lighttpd/log/access.log",
+              "usr/home/user/var/log/lighttpd.error.log",
+              "usr/home/user/var/log/apache.log",
+              "home/user/lighttpd/lighttpd.conf",
+              "usr/home/user/lighttpd/lighttpd.conf",
+              "etc/lighttpd/lighthttpd.conf",
+              "usr/local/etc/lighttpd.conf",
+              "usr/local/lighttpd/conf/lighttpd.conf",
+              "usr/local/etc/lighttpd.conf.new",
+              "var/www/.lighttpdpassword",
+              "logs/access_log",
+              "logs/error_log",
+              "etc/nginx/nginx.conf",
+              "usr/local/etc/nginx/nginx.conf",
+              "usr/local/nginx/conf/nginx.conf",
+              "usr/local/zeus/web/global.cfg",
+              "usr/local/zeus/web/log/errors",
+              "opt/lsws/conf/httpd_conf.xml",
+              "usr/local/lsws/conf/httpd_conf.xml",
+              "opt/lsws/logs/error.log",
+              "opt/lsws/logs/access.log",
+              "usr/local/lsws/logs/error.log",
+              "usr/local/logs/access.log",
+              "usr/local/samba/lib/log.user",
+              "usr/local/logs/samba.log",
+              "etc/samba/netlogon",
+              "etc/smbpasswd",
+              "etc/smb.conf",
+              "etc/samba/dhcp.conf",
+              "etc/samba/smb.conf",
+              "etc/samba/samba.conf",
+              "etc/samba/smb.conf.user",
+              "etc/samba/smbpasswd",
+              "etc/samba/smbusers",
+              "etc/samba/private/smbpasswd",
+              "usr/local/etc/smb.conf",
+              "usr/local/samba/lib/smb.conf.user",
+              "etc/dhcp3/dhclient.conf",
+              "etc/dhcp3/dhcpd.conf",
+              "etc/dhcp/dhclient.conf",
+              "program files/vidalia bundle/polipo/polipo.conf",
+              "etc/tor/tor-tsocks.conf",
+              "etc/stunnel/stunnel.conf",
+              "etc/tsocks.conf",
+              "etc/tinyproxy/tinyproxy.conf",
+              "etc/miredo-server.conf",
+              "etc/miredo.conf",
+              "etc/miredo/miredo-server.conf",
+              "etc/miredo/miredo.conf",
+              "etc/wicd/dhclient.conf.template.default",
+              "etc/wicd/manager-settings.conf",
+              "etc/wicd/wired-settings.conf",
+              "etc/wicd/wireless-settings.conf",
+              "etc/ipfw.rules",
+              "etc/ipfw.conf",
+              "etc/firewall.rules",
+              "winnt/system32/logfiles/firewall/pfirewall.log",
+              "winnt/system32/logfiles/firewall/pfirewall.log.old",
+              "windows/system32/logfiles/firewall/pfirewall.log",
+              "windows/system32/logfiles/firewall/pfirewall.log.old",
+              "etc/clamav/clamd.conf",
+              "etc/clamav/freshclam.conf",
+              "etc/x11/xorg.conf",
+              "etc/x11/xorg.conf-vesa",
+              "etc/x11/xorg.conf-vmware",
+              "etc/x11/xorg.conf.beforevmwaretoolsinstall",
+              "etc/x11/xorg.conf.orig",
+              "etc/bluetooth/input.conf",
+              "etc/bluetooth/main.conf",
+              "etc/bluetooth/network.conf",
+              "etc/bluetooth/rfcomm.conf",
+              "etc/bash_completion.d/debconf",
+              "root/.bash_logout",
+              "root/.bash_history",
+              "root/.bash_config",
+              "root/.bashrc",
+              "etc/bash.bashrc",
+              "var/adm/syslog",
+              "var/adm/sulog",
+              "var/adm/utmp",
+              "var/adm/utmpx",
+              "var/adm/wtmp",
+              "var/adm/wtmpx",
+              "var/adm/lastlog/username",
+              "usr/spool/lp/log",
+              "var/adm/lp/lpd-errs",
+              "usr/lib/cron/log",
+              "var/adm/loginlog",
+              "var/adm/pacct",
+              "var/adm/dtmp",
+              "var/adm/acct/sum/loginlog",
+              "var/adm/x0msgs",
+              "var/adm/crash/vmcore",
+              "var/adm/crash/unix",
+              "etc/newsyslog.conf",
+              "var/adm/qacct",
+              "var/adm/ras/errlog",
+              "var/adm/ras/bootlog",
+              "var/adm/cron/log",
+              "etc/utmp",
+              "etc/security/lastlog",
+              "etc/security/failedlogin",
+              "usr/spool/mqueue/syslog",
+              "var/adm/messages",
+              "var/adm/aculogs",
+              "var/adm/aculog",
+              "var/adm/vold.log",
+              "var/adm/log/asppp.log",
+              "var/lp/logs/lpsched",
+              "var/lp/logs/lpnet",
+              "var/lp/logs/requests",
+              "var/cron/log",
+              "var/saf/_log",
+              "var/saf/port/log",
+              "tmp/access.log",
+              "etc/sensors.conf",
+              "etc/sensors3.conf",
+              "etc/host.conf",
+              "etc/pam.conf",
+              "etc/resolv.conf",
+              "etc/apt/apt.conf",
+              "etc/inetd.conf",
+              "etc/syslog.conf",
+              "etc/sysctl.conf",
+              "etc/sysctl.d/10-console-messages.conf",
+              "etc/sysctl.d/10-network-security.conf",
+              "etc/sysctl.d/10-process-security.conf",
+              "etc/sysctl.d/wine.sysctl.conf",
+              "etc/security/access.conf",
+              "etc/security/group.conf",
+              "etc/security/limits.conf",
+              "etc/security/namespace.conf",
+              "etc/security/pam_env.conf",
+              "etc/security/sepermit.conf",
+              "etc/security/time.conf",
+              "etc/ssh/sshd_config",
+              "etc/adduser.conf",
+              "etc/deluser.conf",
+              "etc/avahi/avahi-daemon.conf",
+              "etc/ca-certificates.conf",
+              "etc/ca-certificates.conf.dpkg-old",
+              "etc/casper.conf",
+              "etc/chkrootkit.conf",
+              "etc/debconf.conf",
+              "etc/dns2tcpd.conf",
+              "etc/e2fsck.conf",
+              "etc/esound/esd.conf",
+              "etc/etter.conf",
+              "etc/fuse.conf",
+              "etc/foremost.conf",
+              "etc/hdparm.conf",
+              "etc/kernel-img.conf",
+              "etc/kernel-pkg.conf",
+              "etc/ld.so.conf",
+              "etc/ltrace.conf",
+              "etc/mail/sendmail.conf",
+              "etc/manpath.config",
+              "etc/kbd/config",
+              "etc/ldap/ldap.conf",
+              "etc/logrotate.conf",
+              "etc/mtools.conf",
+              "etc/smi.conf",
+              "etc/updatedb.conf",
+              "etc/pulse/client.conf",
+              "usr/share/adduser/adduser.conf",
+              "etc/hostname",
+              "etc/networks",
+              "etc/timezone",
+              "etc/modules",
+              "etc/passwd",
+              "etc/shadow",
+              "etc/fstab",
+              "etc/motd",
+              "etc/hosts",
+              "etc/group",
+              "etc/alias",
+              "etc/crontab",
+              "etc/crypttab",
+              "etc/exports",
+              "etc/mtab",
+              "etc/hosts.allow",
+              "etc/hosts.deny",
+              "etc/os-release",
+              "etc/password.master",
+              "etc/profile",
+              "etc/default/grub",
+              "etc/resolvconf/update-libc.d/sendmail",
+              "etc/inittab",
+              "etc/issue",
+              "etc/issue.net",
+              "etc/login.defs",
+              "etc/sudoers",
+              "etc/sysconfig/network-scripts/ifcfg-eth0",
+              "etc/redhat-release",
+              "etc/scw-release",
+              "etc/system-release-cpe",
+              "etc/debian_version",
+              "etc/fedora-release",
+              "etc/mandrake-release",
+              "etc/slackware-release",
+              "etc/suse-release",
+              "etc/security/group",
+              "etc/security/passwd",
+              "etc/security/user",
+              "etc/security/environ",
+              "etc/security/limits",
+              "etc/security/opasswd",
+              "boot/grub/grub.cfg",
+              "boot/grub/menu.lst",
+              "root/.ksh_history",
+              "root/.xauthority",
+              "usr/lib/security/mkuser.default",
+              "var/lib/squirrelmail/prefs/squirrelmail.log",
+              "etc/squirrelmail/apache.conf",
+              "etc/squirrelmail/config_local.php",
+              "etc/squirrelmail/default_pref",
+              "etc/squirrelmail/index.php",
+              "etc/squirrelmail/config_default.php",
+              "etc/squirrelmail/config.php",
+              "etc/squirrelmail/filters_setup.php",
+              "etc/squirrelmail/sqspell_config.php",
+              "etc/squirrelmail/config/config.php",
+              "etc/httpd/conf.d/squirrelmail.conf",
+              "usr/share/squirrelmail/config/config.php",
+              "private/etc/squirrelmail/config/config.php",
+              "srv/www/htdos/squirrelmail/config/config.php",
+              "var/www/squirrelmail/config/config.php",
+              "var/www/html/squirrelmail/config/config.php",
+              "var/www/html/squirrelmail-1.2.9/config/config.php",
+              "usr/share/squirrelmail/plugins/squirrel_logger/setup.php",
+              "usr/local/squirrelmail/www/readme",
+              "windows/system32/drivers/etc/hosts",
+              "windows/system32/drivers/etc/lmhosts.sam",
+              "windows/system32/drivers/etc/networks",
+              "windows/system32/drivers/etc/protocol",
+              "windows/system32/drivers/etc/services",
+              "/boot.ini",
+              "windows/debug/netsetup.log",
+              "windows/comsetup.log",
+              "windows/repair/setup.log",
+              "windows/setupact.log",
+              "windows/setupapi.log",
+              "windows/setuperr.log",
+              "windows/updspapi.log",
+              "windows/wmsetup.log",
+              "windows/windowsupdate.log",
+              "windows/odbc.ini",
+              "usr/local/psa/admin/htdocs/domains/databases/phpmyadmin/libraries/config.default.php",
+              "etc/apache2/conf.d/phpmyadmin.conf",
+              "etc/phpmyadmin/config.inc.php",
+              "etc/openldap/ldap.conf",
+              "etc/cups/acroread.conf",
+              "etc/cups/cupsd.conf",
+              "etc/cups/cupsd.conf.default",
+              "etc/cups/pdftops.conf",
+              "etc/cups/printers.conf",
+              "windows/system32/macromed/flash/flashinstall.log",
+              "windows/system32/macromed/flash/install.log",
+              "etc/cvs-cron.conf",
+              "etc/cvs-pserver.conf",
+              "etc/subversion/config",
+              "etc/modprobe.d/vmware-tools.conf",
+              "etc/updatedb.conf.beforevmwaretoolsinstall",
+              "etc/vmware-tools/config",
+              "etc/vmware-tools/tpvmlp.conf",
+              "etc/vmware-tools/vmware-tools-libraries.conf",
+              "var/log",
+              "var/log/sw-cp-server/error_log",
+              "var/log/sso/sso.log",
+              "var/log/dpkg.log",
+              "var/log/btmp",
+              "var/log/utmp",
+              "var/log/wtmp",
+              "var/log/mysql/mysql-bin.log",
+              "var/log/mysql/mysql-bin.index",
+              "var/log/mysql/data/mysql-bin.index",
+              "var/log/mysql.log",
+              "var/log/mysql.err",
+              "var/log/mysqlderror.log",
+              "var/log/mysql/mysql.log",
+              "var/log/mysql/mysql-slow.log",
+              "var/log/mysql-bin.index",
+              "var/log/data/mysql-bin.index",
+              "var/log/postgresql/postgresql.log",
+              "var/log/postgres/pg_backup.log",
+              "var/log/postgres/postgres.log",
+              "var/log/postgresql.log",
+              "var/log/pgsql/pgsql.log",
+              "var/log/postgresql/postgresql-8.1-main.log",
+              "var/log/postgresql/postgresql-8.3-main.log",
+              "var/log/postgresql/postgresql-8.4-main.log",
+              "var/log/postgresql/postgresql-9.0-main.log",
+              "var/log/postgresql/postgresql-9.1-main.log",
+              "var/log/pgsql8.log",
+              "var/log/postgresql/postgres.log",
+              "var/log/pgsql_log",
+              "var/log/postgresql/main.log",
+              "var/log/cron",
+              "var/log/postgres.log",
+              "var/log/proftpd",
+              "var/log/proftpd/xferlog.legacy",
+              "var/log/proftpd.access_log",
+              "var/log/proftpd.xferlog",
+              "var/log/vsftpd.log",
+              "var/log/xferlog",
+              "var/log/pure-ftpd/pure-ftpd.log",
+              "var/log/pureftpd.log",
+              "var/log/muddleftpd",
+              "var/log/muddleftpd.conf",
+              "var/log/ftp-proxy/ftp-proxy.log",
+              "var/log/ftp-proxy",
+              "var/log/ftplog",
+              "var/log/exim_mainlog",
+              "var/log/exim/mainlog",
+              "var/log/maillog",
+              "var/log/exim_paniclog",
+              "var/log/exim/paniclog",
+              "var/log/exim/rejectlog",
+              "var/log/exim_rejectlog",
+              "var/log/webmin/miniserv.log",
+              "var/log/httpd/access_log",
+              "var/log/httpd/error_log",
+              "var/log/httpd/access.log",
+              "var/log/httpd/error.log",
+              "var/log/apache/access_log",
+              "var/log/apache/access.log",
+              "var/log/apache/error_log",
+              "var/log/apache/error.log",
+              "var/log/apache2/access_log",
+              "var/log/apache2/access.log",
+              "var/log/apache2/error_log",
+              "var/log/apache2/error.log",
+              "var/log/access_log",
+              "var/log/access.log",
+              "var/log/error_log",
+              "var/log/error.log",
+              "var/log/tomcat6/catalina.out",
+              "var/log/lighttpd.error.log",
+              "var/log/lighttpd.access.log",
+              "var/logs/access.log",
+              "var/log/lighttpd/",
+              "var/log/lighttpd/error.log",
+              "var/log/lighttpd/access.www.log",
+              "var/log/lighttpd/error.www.log",
+              "var/log/lighttpd/access.log",
+              "var/log/lighttpd/{domain}/access.log",
+              "var/log/lighttpd/{domain}/error.log",
+              "var/log/nginx/access_log",
+              "var/log/nginx/error_log",
+              "var/log/nginx/access.log",
+              "var/log/nginx/error.log",
+              "var/log/nginx.access_log",
+              "var/log/nginx.error_log",
+              "var/log/samba/log.smbd",
+              "var/log/samba/log.nmbd",
+              "var/log/samba.log",
+              "var/log/samba.log1",
+              "var/log/samba.log2",
+              "var/log/log.smb",
+              "var/log/ipfw.log",
+              "var/log/ipfw",
+              "var/log/ipfw/ipfw.log",
+              "var/log/ipfw.today",
+              "var/log/poplog",
+              "var/log/authlog",
+              "var/log/news.all",
+              "var/log/news/news.all",
+              "var/log/news/news.crit",
+              "var/log/news/news.err",
+              "var/log/news/news.notice",
+              "var/log/news/suck.err",
+              "var/log/news/suck.notice",
+              "var/log/messages",
+              "var/log/messages.1",
+              "var/log/user.log",
+              "var/log/user.log.1",
+              "var/log/auth.log",
+              "var/log/pm-powersave.log",
+              "var/log/xorg.0.log",
+              "var/log/daemon.log",
+              "var/log/daemon.log.1",
+              "var/log/kern.log",
+              "var/log/kern.log.1",
+              "var/log/mail.err",
+              "var/log/mail.info",
+              "var/log/mail.warn",
+              "var/log/ufw.log",
+              "var/log/boot.log",
+              "var/log/syslog",
+              "var/log/syslog.1",
+              "var/log/squirrelmail.log",
+              "var/log/apache2/squirrelmail.log",
+              "var/log/apache2/squirrelmail.err.log",
+              "var/log/mail.log",
+              "var/log/vmware/hostd.log",
+              "var/log/vmware/hostd-1.log",
+              "/wp-config.php",
+              "/wp-config.bak",
+              "/wp-config.old",
+              "/wp-config.temp",
+              "/wp-config.tmp",
+              "/wp-config.txt",
+              "/config.yml",
+              "/config_dev.yml",
+              "/config_prod.yml",
+              "/config_test.yml",
+              "/parameters.yml",
+              "/routing.yml",
+              "/security.yml",
+              "/services.yml",
+              "sites/default/default.settings.php",
+              "sites/default/settings.php",
+              "sites/default/settings.local.php",
+              "app/etc/local.xml",
+              "/sftp-config.json",
+              "/web.config",
+              "includes/config.php",
+              "includes/configure.php",
+              "/config.inc.php",
+              "/localsettings.php",
+              "inc/config.php",
+              "typo3conf/localconf.php",
+              "config/app.php",
+              "config/custom.php",
+              "config/database.php",
+              "/configuration.php",
+              "/config.php",
+              "var/mail/www-data",
+              "etc/network/",
+              "etc/init/",
+              "inetpub/wwwroot/global.asa",
+              "system32/inetsrv/config/applicationhost.config",
+              "system32/inetsrv/config/administration.config",
+              "system32/inetsrv/config/redirection.config",
+              "system32/config/default",
+              "system32/config/sam",
+              "system32/config/system",
+              "system32/config/software",
+              "winnt/repair/sam._",
+              "/package.json",
+              "/package-lock.json",
+              "/gruntfile.js",
+              "/npm-debug.log",
+              "/ormconfig.json",
+              "/tsconfig.json",
+              "/webpack.config.js",
+              "/yarn.lock",
+              "proc/0",
+              "proc/1",
+              "proc/2",
+              "proc/3",
+              "proc/4",
+              "proc/5",
+              "proc/6",
+              "proc/7",
+              "proc/8",
+              "proc/9",
+              "proc/acpi",
+              "proc/asound",
+              "proc/bootconfig",
+              "proc/buddyinfo",
+              "proc/bus",
+              "proc/cgroups",
+              "proc/cmdline",
+              "proc/config.gz",
+              "proc/consoles",
+              "proc/cpuinfo",
+              "proc/crypto",
+              "proc/devices",
+              "proc/diskstats",
+              "proc/dma",
+              "proc/docker",
+              "proc/driver",
+              "proc/dynamic_debug",
+              "proc/execdomains",
+              "proc/fb",
+              "proc/filesystems",
+              "proc/fs",
+              "proc/interrupts",
+              "proc/iomem",
+              "proc/ioports",
+              "proc/ipmi",
+              "proc/irq",
+              "proc/kallsyms",
+              "proc/kcore",
+              "proc/keys",
+              "proc/keys",
+              "proc/key-users",
+              "proc/kmsg",
+              "proc/kpagecgroup",
+              "proc/kpagecount",
+              "proc/kpageflags",
+              "proc/latency_stats",
+              "proc/loadavg",
+              "proc/locks",
+              "proc/mdstat",
+              "proc/meminfo",
+              "proc/misc",
+              "proc/modules",
+              "proc/mounts",
+              "proc/mpt",
+              "proc/mtd",
+              "proc/mtrr",
+              "proc/net",
+              "proc/net/tcp",
+              "proc/net/udp",
+              "proc/pagetypeinfo",
+              "proc/partitions",
+              "proc/pressure",
+              "proc/sched_debug",
+              "proc/schedstat",
+              "proc/scsi",
+              "proc/self",
+              "proc/self/cmdline",
+              "proc/self/environ",
+              "proc/self/fd/0",
+              "proc/self/fd/1",
+              "proc/self/fd/10",
+              "proc/self/fd/11",
+              "proc/self/fd/12",
+              "proc/self/fd/13",
+              "proc/self/fd/14",
+              "proc/self/fd/15",
+              "proc/self/fd/2",
+              "proc/self/fd/3",
+              "proc/self/fd/4",
+              "proc/self/fd/5",
+              "proc/self/fd/6",
+              "proc/self/fd/7",
+              "proc/self/fd/8",
+              "proc/self/fd/9",
+              "proc/self/mounts",
+              "proc/self/stat",
+              "proc/self/status",
+              "proc/slabinfo",
+              "proc/softirqs",
+              "proc/stat",
+              "proc/swaps",
+              "proc/sys",
+              "proc/sysrq-trigger",
+              "proc/sysvipc",
+              "proc/thread-self",
+              "proc/timer_list",
+              "proc/timer_stats",
+              "proc/tty",
+              "proc/uptime",
+              "proc/version",
+              "proc/version_signature",
+              "proc/vmallocinfo",
+              "proc/vmstat",
+              "proc/zoneinfo",
+              "sys/block",
+              "sys/bus",
+              "sys/class",
+              "sys/dev",
+              "sys/devices",
+              "sys/firmware",
+              "sys/fs",
+              "sys/hypervisor",
+              "sys/kernel",
+              "sys/module",
+              "sys/power"
+            ]
+          },
+          "operator": "phrase_match"
+        }
+      ],
+      "transformers": [
+        "lowercase",
+        "normalizePath"
+      ]
+    },
+    {
+      "id": "crs-931-110",
+      "name": "RFI: Common RFI Vulnerable Parameter Name used w/ URL Payload",
+      "tags": {
+        "type": "rfi",
+        "crs_id": "931110",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              }
+            ],
+            "regex": "(?:\\binclude\\s*\\([^)]*|mosConfig_absolute_path|_CONF\\[path\\]|_SERVER\\[DOCUMENT_ROOT\\]|GALLERY_BASEDIR|path\\[docroot\\]|appserv_root|config\\[root_dir\\])=(?:file|ftps?|https?)://",
+            "options": {
+              "min_length": 15
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-931-120",
+      "name": "RFI: URL Payload Used w/Trailing Question Mark Character (?)",
+      "tags": {
+        "type": "rfi",
+        "crs_id": "931120",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              }
+            ],
+            "regex": "^(?i:file|ftps?)://.*?\\?+$",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 4
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-932-160",
+      "name": "Remote Command Execution: Unix Shell Code Found",
+      "tags": {
+        "type": "command_injection",
+        "crs_id": "932160",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "list": [
+              "${cdpath}",
+              "${dirstack}",
+              "${home}",
+              "${hostname}",
+              "${ifs}",
+              "${oldpwd}",
+              "${ostype}",
+              "${path}",
+              "${pwd}",
+              "$cdpath",
+              "$dirstack",
+              "$home",
+              "$hostname",
+              "$ifs",
+              "$oldpwd",
+              "$ostype",
+              "$path",
+              "$pwd",
+              "dev/fd/",
+              "dev/null",
+              "dev/stderr",
+              "dev/stdin",
+              "dev/stdout",
+              "dev/tcp/",
+              "dev/udp/",
+              "dev/zero",
+              "etc/master.passwd",
+              "etc/pwd.db",
+              "etc/shells",
+              "etc/spwd.db",
+              "proc/self/",
+              "bin/7z",
+              "bin/7za",
+              "bin/7zr",
+              "bin/ab",
+              "bin/agetty",
+              "bin/ansible-playbook",
+              "bin/apt",
+              "bin/apt-get",
+              "bin/ar",
+              "bin/aria2c",
+              "bin/arj",
+              "bin/arp",
+              "bin/as",
+              "bin/ascii-xfr",
+              "bin/ascii85",
+              "bin/ash",
+              "bin/aspell",
+              "bin/at",
+              "bin/atobm",
+              "bin/awk",
+              "bin/base32",
+              "bin/base64",
+              "bin/basenc",
+              "bin/bash",
+              "bin/bpftrace",
+              "bin/bridge",
+              "bin/bundler",
+              "bin/bunzip2",
+              "bin/busctl",
+              "bin/busybox",
+              "bin/byebug",
+              "bin/bzcat",
+              "bin/bzcmp",
+              "bin/bzdiff",
+              "bin/bzegrep",
+              "bin/bzexe",
+              "bin/bzfgrep",
+              "bin/bzgrep",
+              "bin/bzip2",
+              "bin/bzip2recover",
+              "bin/bzless",
+              "bin/bzmore",
+              "bin/bzz",
+              "bin/c89",
+              "bin/c99",
+              "bin/cancel",
+              "bin/capsh",
+              "bin/cat",
+              "bin/cc",
+              "bin/certbot",
+              "bin/check_by_ssh",
+              "bin/check_cups",
+              "bin/check_log",
+              "bin/check_memory",
+              "bin/check_raid",
+              "bin/check_ssl_cert",
+              "bin/check_statusfile",
+              "bin/chmod",
+              "bin/choom",
+              "bin/chown",
+              "bin/chroot",
+              "bin/clang",
+              "bin/clang++",
+              "bin/cmp",
+              "bin/cobc",
+              "bin/column",
+              "bin/comm",
+              "bin/composer",
+              "bin/core_perl/zipdetails",
+              "bin/cowsay",
+              "bin/cowthink",
+              "bin/cp",
+              "bin/cpan",
+              "bin/cpio",
+              "bin/cpulimit",
+              "bin/crash",
+              "bin/crontab",
+              "bin/csh",
+              "bin/csplit",
+              "bin/csvtool",
+              "bin/cupsfilter",
+              "bin/curl",
+              "bin/cut",
+              "bin/dash",
+              "bin/date",
+              "bin/dd",
+              "bin/dev/fd/",
+              "bin/dev/null",
+              "bin/dev/stderr",
+              "bin/dev/stdin",
+              "bin/dev/stdout",
+              "bin/dev/tcp/",
+              "bin/dev/udp/",
+              "bin/dev/zero",
+              "bin/dialog",
+              "bin/diff",
+              "bin/dig",
+              "bin/dmesg",
+              "bin/dmidecode",
+              "bin/dmsetup",
+              "bin/dnf",
+              "bin/docker",
+              "bin/dosbox",
+              "bin/dpkg",
+              "bin/du",
+              "bin/dvips",
+              "bin/easy_install",
+              "bin/eb",
+              "bin/echo",
+              "bin/ed",
+              "bin/efax",
+              "bin/emacs",
+              "bin/env",
+              "bin/eqn",
+              "bin/es",
+              "bin/esh",
+              "bin/etc/group",
+              "bin/etc/master.passwd",
+              "bin/etc/passwd",
+              "bin/etc/pwd.db",
+              "bin/etc/shadow",
+              "bin/etc/shells",
+              "bin/etc/spwd.db",
+              "bin/ex",
+              "bin/exiftool",
+              "bin/expand",
+              "bin/expect",
+              "bin/expr",
+              "bin/facter",
+              "bin/fetch",
+              "bin/file",
+              "bin/find",
+              "bin/finger",
+              "bin/fish",
+              "bin/flock",
+              "bin/fmt",
+              "bin/fold",
+              "bin/fping",
+              "bin/ftp",
+              "bin/gawk",
+              "bin/gcc",
+              "bin/gcore",
+              "bin/gdb",
+              "bin/gem",
+              "bin/genie",
+              "bin/genisoimage",
+              "bin/ghc",
+              "bin/ghci",
+              "bin/gimp",
+              "bin/ginsh",
+              "bin/git",
+              "bin/grc",
+              "bin/grep",
+              "bin/gtester",
+              "bin/gunzip",
+              "bin/gzexe",
+              "bin/gzip",
+              "bin/hd",
+              "bin/head",
+              "bin/hexdump",
+              "bin/highlight",
+              "bin/hping3",
+              "bin/iconv",
+              "bin/id",
+              "bin/iftop",
+              "bin/install",
+              "bin/ionice",
+              "bin/ip",
+              "bin/irb",
+              "bin/ispell",
+              "bin/jjs",
+              "bin/join",
+              "bin/journalctl",
+              "bin/jq",
+              "bin/jrunscript",
+              "bin/knife",
+              "bin/ksh",
+              "bin/ksshell",
+              "bin/latex",
+              "bin/ld",
+              "bin/ldconfig",
+              "bin/less",
+              "bin/lftp",
+              "bin/ln",
+              "bin/loginctl",
+              "bin/logsave",
+              "bin/look",
+              "bin/lp",
+              "bin/ls",
+              "bin/ltrace",
+              "bin/lua",
+              "bin/lualatex",
+              "bin/luatex",
+              "bin/lwp-download",
+              "bin/lwp-request",
+              "bin/lz",
+              "bin/lz4",
+              "bin/lz4c",
+              "bin/lz4cat",
+              "bin/lzcat",
+              "bin/lzcmp",
+              "bin/lzdiff",
+              "bin/lzegrep",
+              "bin/lzfgrep",
+              "bin/lzgrep",
+              "bin/lzless",
+              "bin/lzma",
+              "bin/lzmadec",
+              "bin/lzmainfo",
+              "bin/lzmore",
+              "bin/mail",
+              "bin/make",
+              "bin/man",
+              "bin/mawk",
+              "bin/mkfifo",
+              "bin/mknod",
+              "bin/more",
+              "bin/mosquitto",
+              "bin/mount",
+              "bin/msgattrib",
+              "bin/msgcat",
+              "bin/msgconv",
+              "bin/msgfilter",
+              "bin/msgmerge",
+              "bin/msguniq",
+              "bin/mtr",
+              "bin/mv",
+              "bin/mysql",
+              "bin/nano",
+              "bin/nasm",
+              "bin/nawk",
+              "bin/nc",
+              "bin/ncat",
+              "bin/neofetch",
+              "bin/nice",
+              "bin/nl",
+              "bin/nm",
+              "bin/nmap",
+              "bin/node",
+              "bin/nohup",
+              "bin/npm",
+              "bin/nroff",
+              "bin/nsenter",
+              "bin/octave",
+              "bin/od",
+              "bin/openssl",
+              "bin/openvpn",
+              "bin/openvt",
+              "bin/opkg",
+              "bin/paste",
+              "bin/pax",
+              "bin/pdb",
+              "bin/pdflatex",
+              "bin/pdftex",
+              "bin/pdksh",
+              "bin/perf",
+              "bin/perl",
+              "bin/pg",
+              "bin/php",
+              "bin/php-cgi",
+              "bin/php5",
+              "bin/php7",
+              "bin/pic",
+              "bin/pico",
+              "bin/pidstat",
+              "bin/pigz",
+              "bin/pip",
+              "bin/pkexec",
+              "bin/pkg",
+              "bin/pr",
+              "bin/printf",
+              "bin/proc/self/",
+              "bin/pry",
+              "bin/ps",
+              "bin/psed",
+              "bin/psftp",
+              "bin/psql",
+              "bin/ptx",
+              "bin/puppet",
+              "bin/pxz",
+              "bin/python",
+              "bin/python2",
+              "bin/python3",
+              "bin/rake",
+              "bin/rbash",
+              "bin/rc",
+              "bin/readelf",
+              "bin/red",
+              "bin/redcarpet",
+              "bin/restic",
+              "bin/rev",
+              "bin/rlogin",
+              "bin/rlwrap",
+              "bin/rpm",
+              "bin/rpmquery",
+              "bin/rsync",
+              "bin/ruby",
+              "bin/run-mailcap",
+              "bin/run-parts",
+              "bin/rview",
+              "bin/rvim",
+              "bin/sash",
+              "bin/sbin/capsh",
+              "bin/sbin/logsave",
+              "bin/sbin/service",
+              "bin/sbin/start-stop-daemon",
+              "bin/scp",
+              "bin/screen",
+              "bin/script",
+              "bin/sed",
+              "bin/service",
+              "bin/setarch",
+              "bin/sftp",
+              "bin/sg",
+              "bin/sh",
+              "bin/shuf",
+              "bin/sleep",
+              "bin/slsh",
+              "bin/smbclient",
+              "bin/snap",
+              "bin/socat",
+              "bin/soelim",
+              "bin/sort",
+              "bin/split",
+              "bin/sqlite3",
+              "bin/ss",
+              "bin/ssh",
+              "bin/ssh-keygen",
+              "bin/ssh-keyscan",
+              "bin/sshpass",
+              "bin/start-stop-daemon",
+              "bin/stdbuf",
+              "bin/strace",
+              "bin/strings",
+              "bin/su",
+              "bin/sysctl",
+              "bin/systemctl",
+              "bin/systemd-resolve",
+              "bin/tac",
+              "bin/tail",
+              "bin/tar",
+              "bin/task",
+              "bin/taskset",
+              "bin/tbl",
+              "bin/tclsh",
+              "bin/tcpdump",
+              "bin/tcsh",
+              "bin/tee",
+              "bin/telnet",
+              "bin/tex",
+              "bin/tftp",
+              "bin/tic",
+              "bin/time",
+              "bin/timedatectl",
+              "bin/timeout",
+              "bin/tmux",
+              "bin/top",
+              "bin/troff",
+              "bin/tshark",
+              "bin/ul",
+              "bin/uname",
+              "bin/uncompress",
+              "bin/unexpand",
+              "bin/uniq",
+              "bin/unlz4",
+              "bin/unlzma",
+              "bin/unpigz",
+              "bin/unrar",
+              "bin/unshare",
+              "bin/unxz",
+              "bin/unzip",
+              "bin/unzstd",
+              "bin/update-alternatives",
+              "bin/uudecode",
+              "bin/uuencode",
+              "bin/valgrind",
+              "bin/vi",
+              "bin/view",
+              "bin/vigr",
+              "bin/vim",
+              "bin/vimdiff",
+              "bin/vipw",
+              "bin/virsh",
+              "bin/volatility",
+              "bin/wall",
+              "bin/watch",
+              "bin/wc",
+              "bin/wget",
+              "bin/whiptail",
+              "bin/who",
+              "bin/whoami",
+              "bin/whois",
+              "bin/wireshark",
+              "bin/wish",
+              "bin/xargs",
+              "bin/xelatex",
+              "bin/xetex",
+              "bin/xmodmap",
+              "bin/xmore",
+              "bin/xpad",
+              "bin/xxd",
+              "bin/xz",
+              "bin/xzcat",
+              "bin/xzcmp",
+              "bin/xzdec",
+              "bin/xzdiff",
+              "bin/xzegrep",
+              "bin/xzfgrep",
+              "bin/xzgrep",
+              "bin/xzless",
+              "bin/xzmore",
+              "bin/yarn",
+              "bin/yelp",
+              "bin/yes",
+              "bin/yum",
+              "bin/zathura",
+              "bin/zip",
+              "bin/zipcloak",
+              "bin/zipcmp",
+              "bin/zipdetails",
+              "bin/zipgrep",
+              "bin/zipinfo",
+              "bin/zipmerge",
+              "bin/zipnote",
+              "bin/zipsplit",
+              "bin/ziptool",
+              "bin/zsh",
+              "bin/zsoelim",
+              "bin/zstd",
+              "bin/zstdcat",
+              "bin/zstdgrep",
+              "bin/zstdless",
+              "bin/zstdmt",
+              "bin/zypper"
+            ]
+          },
+          "operator": "phrase_match"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-932-171",
+      "name": "Remote Command Execution: Shellshock (CVE-2014-6271)",
+      "tags": {
+        "type": "command_injection",
+        "crs_id": "932171",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "^\\(\\s*\\)\\s+{",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 4
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-932-180",
+      "name": "Restricted File Upload Attempt",
+      "tags": {
+        "type": "command_injection",
+        "crs_id": "932180",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "x-filename"
+                ]
+              },
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "x_filename"
+                ]
+              },
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "x-file-name"
+                ]
+              }
+            ],
+            "list": [
+              ".htaccess",
+              ".htdigest",
+              ".htpasswd",
+              "wp-config.php",
+              "config.yml",
+              "config_dev.yml",
+              "config_prod.yml",
+              "config_test.yml",
+              "parameters.yml",
+              "routing.yml",
+              "security.yml",
+              "services.yml",
+              "default.settings.php",
+              "settings.php",
+              "settings.local.php",
+              "local.xml",
+              ".env"
+            ]
+          },
+          "operator": "phrase_match"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-933-111",
+      "name": "PHP Injection Attack: PHP Script File Upload Found",
+      "tags": {
+        "type": "unrestricted_file_upload",
+        "crs_id": "933111",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "x-filename"
+                ]
+              },
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "x_filename"
+                ]
+              },
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "x.filename"
+                ]
+              },
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "x-file-name"
+                ]
+              }
+            ],
+            "regex": ".*\\.(?:php\\d*|phtml)\\..*$",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 5
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-933-130",
+      "name": "PHP Injection Attack: Global Variables Found",
+      "tags": {
+        "type": "php_code_injection",
+        "crs_id": "933130",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "list": [
+              "$globals",
+              "$_cookie",
+              "$_env",
+              "$_files",
+              "$_get",
+              "$_post",
+              "$_request",
+              "$_server",
+              "$_session",
+              "$argc",
+              "$argv",
+              "$http_\\u200bresponse_\\u200bheader",
+              "$php_\\u200berrormsg",
+              "$http_cookie_vars",
+              "$http_env_vars",
+              "$http_get_vars",
+              "$http_post_files",
+              "$http_post_vars",
+              "$http_raw_post_data",
+              "$http_request_vars",
+              "$http_server_vars"
+            ]
+          },
+          "operator": "phrase_match"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-933-131",
+      "name": "PHP Injection Attack: HTTP Headers Values Found",
+      "tags": {
+        "type": "php_code_injection",
+        "crs_id": "933131",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?:HTTP_(?:ACCEPT(?:_(?:ENCODING|LANGUAGE|CHARSET))?|(?:X_FORWARDED_FO|REFERE)R|(?:USER_AGEN|HOS)T|CONNECTION|KEEP_ALIVE)|PATH_(?:TRANSLATED|INFO)|ORIG_PATH_INFO|QUERY_STRING|REQUEST_URI|AUTH_TYPE)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 9
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-933-140",
+      "name": "PHP Injection Attack: I/O Stream Found",
+      "tags": {
+        "type": "php_code_injection",
+        "crs_id": "933140",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "php://(?:std(?:in|out|err)|(?:in|out)put|fd|memory|temp|filter)",
+            "options": {
+              "min_length": 8
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-933-150",
+      "name": "PHP Injection Attack: High-Risk PHP Function Name Found",
+      "tags": {
+        "type": "php_code_injection",
+        "crs_id": "933150",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "list": [
+              "__halt_compiler",
+              "apache_child_terminate",
+              "base64_decode",
+              "bzdecompress",
+              "call_user_func",
+              "call_user_func_array",
+              "call_user_method",
+              "call_user_method_array",
+              "convert_uudecode",
+              "file_get_contents",
+              "file_put_contents",
+              "fsockopen",
+              "get_class_methods",
+              "get_class_vars",
+              "get_defined_constants",
+              "get_defined_functions",
+              "get_defined_vars",
+              "gzdecode",
+              "gzinflate",
+              "gzuncompress",
+              "include_once",
+              "invokeargs",
+              "pcntl_exec",
+              "pcntl_fork",
+              "pfsockopen",
+              "posix_getcwd",
+              "posix_getpwuid",
+              "posix_getuid",
+              "posix_uname",
+              "reflectionfunction",
+              "require_once",
+              "shell_exec",
+              "str_rot13",
+              "sys_get_temp_dir",
+              "wp_remote_fopen",
+              "wp_remote_get",
+              "wp_remote_head",
+              "wp_remote_post",
+              "wp_remote_request",
+              "wp_safe_remote_get",
+              "wp_safe_remote_head",
+              "wp_safe_remote_post",
+              "wp_safe_remote_request",
+              "zlib_decode"
+            ]
+          },
+          "operator": "phrase_match"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-933-160",
+      "name": "PHP Injection Attack: High-Risk PHP Function Call Found",
+      "tags": {
+        "type": "php_code_injection",
+        "crs_id": "933160",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\b(?:s(?:e(?:t(?:_(?:e(?:xception|rror)_handler|magic_quotes_runtime|include_path)|defaultstub)|ssion_s(?:et_save_handler|tart))|qlite_(?:(?:(?:unbuffered|single|array)_)?query|create_(?:aggregate|function)|p?open|exec)|tr(?:eam_(?:context_create|socket_client)|ipc?slashes|rev)|implexml_load_(?:string|file)|ocket_c(?:onnect|reate)|h(?:ow_sourc|a1_fil)e|pl_autoload_register|ystem)|p(?:r(?:eg_(?:replace(?:_callback(?:_array)?)?|match(?:_all)?|split)|oc_(?:(?:terminat|clos|nic)e|get_status|open)|int_r)|o(?:six_(?:get(?:(?:e[gu]|g)id|login|pwnam)|mk(?:fifo|nod)|ttyname|kill)|pen)|hp(?:_(?:strip_whitespac|unam)e|version|info)|g_(?:(?:execut|prepar)e|connect|query)|a(?:rse_(?:ini_file|str)|ssthru)|utenv)|r(?:unkit_(?:function_(?:re(?:defin|nam)e|copy|add)|method_(?:re(?:defin|nam)e|copy|add)|constant_(?:redefine|add))|e(?:(?:gister_(?:shutdown|tick)|name)_function|ad(?:(?:gz)?file|_exif_data|dir))|awurl(?:de|en)code)|i(?:mage(?:createfrom(?:(?:jpe|pn)g|x[bp]m|wbmp|gif)|(?:jpe|pn)g|g(?:d2?|if)|2?wbmp|xbm)|s_(?:(?:(?:execut|write?|read)ab|fi)le|dir)|ni_(?:get(?:_all)?|set)|terator_apply|ptcembed)|g(?:et(?:_(?:c(?:urrent_use|fg_va)r|meta_tags)|my(?:[gpu]id|inode)|(?:lastmo|cw)d|imagesize|env)|z(?:(?:(?:defla|wri)t|encod|fil)e|compress|open|read)|lob)|a(?:rray_(?:u(?:intersect(?:_u?assoc)?|diff(?:_u?assoc)?)|intersect_u(?:assoc|key)|diff_u(?:assoc|key)|filter|reduce|map)|ssert(?:_options)?|tob)|h(?:tml(?:specialchars(?:_decode)?|_entity_decode|entities)|(?:ash(?:_(?:update|hmac))?|ighlight)_file|e(?:ader_register_callback|x2bin))|f(?:i(?:le(?:(?:[acm]tim|inod)e|(?:_exist|perm)s|group)?|nfo_open)|tp_(?:nb_(?:ge|pu)|connec|ge|pu)t|(?:unction_exis|pu)ts|write|open)|o(?:b_(?:get_(?:c(?:ontents|lean)|flush)|end_(?:clean|flush)|clean|flush|start)|dbc_(?:result(?:_all)?|exec(?:ute)?|connect)|pendir)|m(?:b_(?:ereg(?:_(?:replace(?:_callback)?|match)|i(?:_replace)?)?|parse_str)|(?:ove_uploaded|d5)_file|ethod_exists|ysql_query|kdir)|e(?:x(?:if_(?:t(?:humbnail|agname)|imagetype|read_data)|ec)|scapeshell(?:arg|cmd)|rror_reporting|val)|c(?:url_(?:file_create|exec|init)|onvert_uuencode|reate_function|hr)|u(?:n(?:serialize|pack)|rl(?:de|en)code|[ak]?sort)|b(?:(?:son_(?:de|en)|ase64_en)code|zopen|toa)|(?:json_(?:de|en)cod|debug_backtrac|tmpfil)e|var_dump)(?:\\s|/\\*.*\\*/|//.*|#.*|\\\"|')*\\((?:(?:\\s|/\\*.*\\*/|//.*|#.*)*(?:\\$\\w+|[A-Z\\d]\\w*|\\w+\\(.*\\)|\\\\?\"(?:[^\"]|\\\\\"|\"\"|\"\\+\")*\\\\?\"|\\\\?'(?:[^']|''|'\\+')*\\\\?')(?:\\s|/\\*.*\\*/|//.*|#.*)*(?:(?:::|\\.|->)(?:\\s|/\\*.*\\*/|//.*|#.*)*\\w+(?:\\(.*\\))?)?,)*(?:(?:\\s|/\\*.*\\*/|//.*|#.*)*(?:\\$\\w+|[A-Z\\d]\\w*|\\w+\\(.*\\)|\\\\?\"(?:[^\"]|\\\\\"|\"\"|\"\\+\")*\\\\?\"|\\\\?'(?:[^']|''|'\\+')*\\\\?')(?:\\s|/\\*.*\\*/|//.*|#.*)*(?:(?:::|\\.|->)(?:\\s|/\\*.*\\*/|//.*|#.*)*\\w+(?:\\(.*\\))?)?)?\\)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 5
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-933-170",
+      "name": "PHP Injection Attack: Serialized Object Injection",
+      "tags": {
+        "type": "php_code_injection",
+        "crs_id": "933170",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "[oOcC]:\\d+:\\\".+?\\\":\\d+:{[\\W\\w]*}",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 12
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-933-200",
+      "name": "PHP Injection Attack: Wrapper scheme detected",
+      "tags": {
+        "type": "php_code_injection",
+        "crs_id": "933200",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?:(?:bzip|ssh)2|z(?:lib|ip)|(?:ph|r)ar|expect|glob|ogg)://",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 6
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "crs-934-100",
+      "name": "Node.js Injection Attack 1/2",
+      "tags": {
+        "type": "js_code_injection",
+        "crs_id": "934100",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\b(?:(?:l(?:(?:utimes|chmod)(?:Sync)?|(?:stat|ink)Sync)|w(?:rite(?:(?:File|v)(?:Sync)?|Sync)|atchFile)|u(?:n(?:watchFile|linkSync)|times(?:Sync)?)|s(?:(?:ymlink|tat)Sync|pawn(?:File|Sync))|ex(?:ec(?:File(?:Sync)?|Sync)|istsSync)|a(?:ppendFile|ccess)(?:Sync)?|(?:Caveat|Inode)s|open(?:dir)?Sync|new\\s+Function|Availability|\\beval)\\s*\\(|m(?:ain(?:Module\\s*(?:\\W*\\s*(?:constructor|require)|\\[)|\\s*(?:\\W*\\s*(?:constructor|require)|\\[))|kd(?:temp(?:Sync)?|irSync)\\s*\\(|odule\\.exports\\s*=)|c(?:(?:(?:h(?:mod|own)|lose)Sync|reate(?:Write|Read)Stream|p(?:Sync)?)\\s*\\(|o(?:nstructor\\s*(?:\\W*\\s*_load|\\[)|pyFile(?:Sync)?\\s*\\())|f(?:(?:(?:s(?:(?:yncS)?|tatS)|datas(?:yncS)?)ync|ch(?:mod|own)(?:Sync)?)\\s*\\(|u(?:nction\\s*\\(\\s*\\)\\s*{|times(?:Sync)?\\s*\\())|r(?:e(?:(?:ad(?:(?:File|link|dir)?Sync|v(?:Sync)?)|nameSync)\\s*\\(|quire\\s*(?:\\W*\\s*main|\\[))|m(?:Sync)?\\s*\\()|process\\s*(?:\\W*\\s*(?:mainModule|binding)|\\[)|t(?:his\\.constructor|runcateSync\\s*\\()|_(?:\\$\\$ND_FUNC\\$\\$_|_js_function)|global\\s*(?:\\W*\\s*process|\\[)|String\\s*\\.\\s*fromCharCode|binding\\s*\\[)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 3
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-934-101",
+      "name": "Node.js Injection Attack 2/2",
+      "tags": {
+        "type": "js_code_injection",
+        "crs_id": "934101",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\b(?:w(?:atch|rite)|(?:spaw|ope)n|exists|close|fork|read)\\s*\\(",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 5
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-941-110",
+      "name": "XSS Filter - Category 1: Script Tag Vector",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941110",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              },
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "referer"
+                ]
+              },
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "<script[^>]*>[\\s\\S]*?",
+            "options": {
+              "min_length": 8
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls",
+        "urlDecodeUni"
+      ]
+    },
+    {
+      "id": "crs-941-120",
+      "name": "XSS Filter - Category 2: Event Handler Vector",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941120",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              },
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "referer"
+                ]
+              },
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "[\\s\\\"'`;\\/0-9=\\x0B\\x09\\x0C\\x3B\\x2C\\x28\\x3B]on(?:d(?:r(?:ag(?:en(?:ter|d)|leave|start|over)?|op)|urationchange|blclick)|s(?:e(?:ek(?:ing|ed)|arch|lect)|u(?:spend|bmit)|talled|croll|how)|m(?:ouse(?:(?:lea|mo)ve|o(?:ver|ut)|enter|down|up)|essage)|p(?:a(?:ge(?:hide|show)|(?:st|us)e)|lay(?:ing)?|rogress)|c(?:anplay(?:through)?|o(?:ntextmenu|py)|hange|lick|ut)|a(?:nimation(?:iteration|start|end)|(?:fterprin|bor)t)|t(?:o(?:uch(?:cancel|start|move|end)|ggle)|imeupdate)|f(?:ullscreen(?:change|error)|ocus(?:out|in)?)|(?:(?:volume|hash)chang|o(?:ff|n)lin)e|b(?:efore(?:unload|print)|lur)|load(?:ed(?:meta)?data|start)?|r(?:es(?:ize|et)|atechange)|key(?:press|down|up)|w(?:aiting|heel)|in(?:valid|put)|e(?:nded|rror)|unload)[\\s\\x0B\\x09\\x0C\\x3B\\x2C\\x28\\x3B]*?=[^=]",
+            "options": {
+              "min_length": 8
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls",
+        "urlDecodeUni"
+      ]
+    },
+    {
+      "id": "crs-941-140",
+      "name": "XSS Filter - Category 4: Javascript URI Vector",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941140",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              },
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "referer"
+                ]
+              },
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "[a-z]+=(?:[^:=]+:.+;)*?[^:=]+:url\\(javascript",
+            "options": {
+              "min_length": 18
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls",
+        "urlDecodeUni"
+      ]
+    },
+    {
+      "id": "crs-941-170",
+      "name": "NoScript XSS InjectionChecker: Attribute Injection",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941170",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              },
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "referer"
+                ]
+              },
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              }
+            ],
+            "regex": "(?:\\W|^)(?:javascript:(?:[\\s\\S]+[=\\x5c\\(\\[\\.<]|[\\s\\S]*?(?:\\bname\\b|\\x5c[ux]\\d)))|@\\W*?i\\W*?m\\W*?p\\W*?o\\W*?r\\W*?t\\W*?(?:/\\*[\\s\\S]*?)?(?:[\\\"']|\\W*?u\\W*?r\\W*?l[\\s\\S]*?\\()|[^-]*?-\\W*?m\\W*?o\\W*?z\\W*?-\\W*?b\\W*?i\\W*?n\\W*?d\\W*?i\\W*?n\\W*?g[^:]*?:\\W*?u\\W*?r\\W*?l[\\s\\S]*?\\(",
+            "options": {
+              "min_length": 6
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls",
+        "urlDecodeUni"
+      ]
+    },
+    {
+      "id": "crs-941-180",
+      "name": "Node-Validator Deny List Keywords",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941180",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "list": [
+              "document.cookie",
+              "document.write",
+              ".parentnode",
+              ".innerhtml",
+              "window.location",
+              "-moz-binding"
+            ]
+          },
+          "operator": "phrase_match"
+        }
+      ],
+      "transformers": [
+        "removeNulls",
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-941-200",
+      "name": "IE XSS Filters - Attack Detected via vmlframe tag",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941200",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?i:<.*[:]?vmlframe.*?[\\s/+]*?src[\\s/+]*=)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 13
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "crs-941-210",
+      "name": "IE XSS Filters - Obfuscated Attack Detected via javascript injection",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941210",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?i:(?:j|&#x?0*(?:74|4A|106|6A);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:a|&#x?0*(?:65|41|97|61);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:v|&#x?0*(?:86|56|118|76);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:a|&#x?0*(?:65|41|97|61);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:s|&#x?0*(?:83|53|115|73);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:c|&#x?0*(?:67|43|99|63);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:r|&#x?0*(?:82|52|114|72);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:i|&#x?0*(?:73|49|105|69);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:p|&#x?0*(?:80|50|112|70);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:t|&#x?0*(?:84|54|116|74);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?::|&(?:#x?0*(?:58|3A);?|colon;)).)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 12
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "crs-941-220",
+      "name": "IE XSS Filters - Obfuscated Attack Detected via vbscript injection",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941220",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?i:(?:v|&#x?0*(?:86|56|118|76);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:b|&#x?0*(?:66|42|98|62);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:s|&#x?0*(?:83|53|115|73);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:c|&#x?0*(?:67|43|99|63);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:r|&#x?0*(?:82|52|114|72);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:i|&#x?0*(?:73|49|105|69);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:p|&#x?0*(?:80|50|112|70);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:t|&#x?0*(?:84|54|116|74);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?::|&(?:#x?0*(?:58|3A);?|colon;)).)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 10
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "crs-941-230",
+      "name": "IE XSS Filters - Attack Detected via embed tag",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941230",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "<EMBED[\\s/+].*?(?:src|type).*?=",
+            "options": {
+              "min_length": 11
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "crs-941-240",
+      "name": "IE XSS Filters - Attack Detected via import tag",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941240",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "<[?]?import[\\s/+\\S]*?implementation[\\s/+]*?=",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 22
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "lowercase",
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "crs-941-270",
+      "name": "IE XSS Filters - Attack Detected via link tag",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941270",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "<LINK[\\s/+].*?href[\\s/+]*=",
+            "options": {
+              "min_length": 11
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "crs-941-280",
+      "name": "IE XSS Filters - Attack Detected via base tag",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941280",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "<BASE[\\s/+].*?href[\\s/+]*=",
+            "options": {
+              "min_length": 11
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "crs-941-290",
+      "name": "IE XSS Filters - Attack Detected via applet tag",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941290",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "<APPLET[\\s/+>]",
+            "options": {
+              "min_length": 8
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "crs-941-300",
+      "name": "IE XSS Filters - Attack Detected via object tag",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941300",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "<OBJECT[\\s/+].*?(?:type|codetype|classid|code|data)[\\s/+]*=",
+            "options": {
+              "min_length": 13
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "crs-941-350",
+      "name": "UTF-7 Encoding IE XSS - Attack Detected",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941350",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\+ADw-.*(?:\\+AD4-|>)|<.*\\+AD4-",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 6
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-941-360",
+      "name": "JSFuck / Hieroglyphy obfuscation detected",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941360",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "![!+ ]\\[\\]",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 4
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-941-390",
+      "name": "Javascript method detected",
+      "tags": {
+        "type": "xss",
+        "crs_id": "941390",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\b(?i:eval|settimeout|setinterval|new\\s+Function|alert|prompt)[\\s+]*\\([^\\)]",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 5
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-942-100",
+      "name": "SQL Injection Attack Detected via libinjection",
+      "tags": {
+        "type": "sql_injection",
+        "crs_id": "942100",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ]
+          },
+          "operator": "is_sqli"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "crs-942-160",
+      "name": "Detects blind sqli tests using sleep() or benchmark()",
+      "tags": {
+        "type": "sql_injection",
+        "crs_id": "942160",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?i:sleep\\(\\s*?\\d*?\\s*?\\)|benchmark\\(.*?\\,.*?\\))",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 7
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-942-240",
+      "name": "Detects MySQL charset switch and MSSQL DoS attempts",
+      "tags": {
+        "type": "sql_injection",
+        "crs_id": "942240",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?:[\\\"'`](?:;*?\\s*?waitfor\\s+(?:delay|time)\\s+[\\\"'`]|;.*?:\\s*?goto)|alter\\s*?\\w+.*?cha(?:racte)?r\\s+set\\s+\\w+)",
+            "options": {
+              "min_length": 7
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-942-250",
+      "name": "Detects MATCH AGAINST, MERGE and EXECUTE IMMEDIATE injections",
+      "tags": {
+        "type": "sql_injection",
+        "crs_id": "942250",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?i:merge.*?using\\s*?\\(|execute\\s*?immediate\\s*?[\\\"'`]|match\\s*?[\\w(?:),+-]+\\s*?against\\s*?\\()",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 11
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-942-270",
+      "name": "Basic SQL injection",
+      "tags": {
+        "type": "sql_injection",
+        "crs_id": "942270",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "union.*?select.*?from",
+            "options": {
+              "min_length": 15
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-942-280",
+      "name": "SQL Injection with delay functions",
+      "tags": {
+        "type": "sql_injection",
+        "crs_id": "942280",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?:;\\s*?shutdown\\s*?(?:[#;{]|\\/\\*|--)|waitfor\\s*?delay\\s?[\\\"'`]+\\s?\\d|select\\s*?pg_sleep)",
+            "options": {
+              "min_length": 10
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-942-290",
+      "name": "Finds basic MongoDB SQL injection attempts",
+      "tags": {
+        "type": "nosql_injection",
+        "crs_id": "942290",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?i:(?:\\[?\\$(?:(?:s(?:lic|iz)|wher)e|e(?:lemMatch|xists|q)|n(?:o[rt]|in?|e)|l(?:ike|te?)|t(?:ext|ype)|a(?:ll|nd)|jsonSchema|between|regex|x?or|div|mod)\\]?)\\b)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 3
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "keys_only"
+      ]
+    },
+    {
+      "id": "crs-942-360",
+      "name": "Detects concatenated basic SQL injection and SQLLFI attempts",
+      "tags": {
+        "type": "sql_injection",
+        "crs_id": "942360",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?:^[\\W\\d]+\\s*?(?:alter\\s*(?:a(?:(?:pplication\\s*rol|ggregat)e|s(?:ymmetric\\s*ke|sembl)y|u(?:thorization|dit)|vailability\\s*group)|c(?:r(?:yptographic\\s*provider|edential)|o(?:l(?:latio|um)|nversio)n|ertificate|luster)|s(?:e(?:rv(?:ice|er)|curity|quence|ssion|arch)|y(?:mmetric\\s*key|nonym)|togroup|chema)|m(?:a(?:s(?:ter\\s*key|k)|terialized)|e(?:ssage\\s*type|thod)|odule)|l(?:o(?:g(?:file\\s*group|in)|ckdown)|a(?:ngua|r)ge|ibrary)|t(?:(?:abl(?:espac)?|yp)e|r(?:igger|usted)|hreshold|ext)|p(?:a(?:rtition|ckage)|ro(?:cedur|fil)e|ermission)|d(?:i(?:mension|skgroup)|atabase|efault|omain)|r(?:o(?:l(?:lback|e)|ute)|e(?:sourc|mot)e)|f(?:u(?:lltext|nction)|lashback|oreign)|e(?:xte(?:nsion|rnal)|(?:ndpoi|ve)nt)|in(?:dex(?:type)?|memory|stance)|b(?:roker\\s*priority|ufferpool)|x(?:ml\\s*schema|srobject)|w(?:ork(?:load)?|rapper)|hi(?:erarchy|stogram)|o(?:perator|utline)|(?:nicknam|queu)e|us(?:age|er)|group|java|view)|union\\s*(?:(?:distin|sele)ct|all))\\b|\\b(?:(?:(?:trunc|cre|upd)at|renam)e|(?:inser|selec)t|de(?:lete|sc)|alter|load)\\s+(?:group_concat|load_file|char)\\b\\s*\\(?|[\\s(]load_file\\s*?\\(|[\\\"'`]\\s+regexp\\W)",
+            "options": {
+              "min_length": 5
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-942-500",
+      "name": "MySQL in-line comment detected",
+      "tags": {
+        "type": "sql_injection",
+        "crs_id": "942500",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?i:/\\*[!+](?:[\\w\\s=_\\-(?:)]+)?\\*/)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 5
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-943-100",
+      "name": "Possible Session Fixation Attack: Setting Cookie Values in HTML",
+      "tags": {
+        "type": "http_protocol_violation",
+        "crs_id": "943100",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              }
+            ],
+            "regex": "(?i:\\.cookie\\b.*?;\\W*?(?:expires|domain)\\W*?=|\\bhttp-equiv\\W+set-cookie\\b)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 15
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "crs-944-100",
+      "name": "Remote Command Execution: Suspicious Java class detected",
+      "tags": {
+        "type": "java_code_injection",
+        "crs_id": "944100",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "java\\.lang\\.(?:runtime|processbuilder)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 17
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-944-110",
+      "name": "Remote Command Execution: Java process spawn (CVE-2017-9805)",
+      "tags": {
+        "type": "java_code_injection",
+        "crs_id": "944110",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?:runtime|processbuilder)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 7
+            }
+          },
+          "operator": "match_regex"
+        },
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?:unmarshaller|base64data|java\\.)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 5
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-944-130",
+      "name": "Suspicious Java class detected",
+      "tags": {
+        "type": "java_code_injection",
+        "crs_id": "944130",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "list": [
+              "com.opensymphony.xwork2",
+              "com.sun.org.apache",
+              "java.io.bufferedinputstream",
+              "java.io.bufferedreader",
+              "java.io.bytearrayinputstream",
+              "java.io.bytearrayoutputstream",
+              "java.io.chararrayreader",
+              "java.io.datainputstream",
+              "java.io.file",
+              "java.io.fileoutputstream",
+              "java.io.filepermission",
+              "java.io.filewriter",
+              "java.io.filterinputstream",
+              "java.io.filteroutputstream",
+              "java.io.filterreader",
+              "java.io.inputstream",
+              "java.io.inputstreamreader",
+              "java.io.linenumberreader",
+              "java.io.objectoutputstream",
+              "java.io.outputstream",
+              "java.io.pipedoutputstream",
+              "java.io.pipedreader",
+              "java.io.printstream",
+              "java.io.pushbackinputstream",
+              "java.io.reader",
+              "java.io.stringreader",
+              "java.lang.class",
+              "java.lang.integer",
+              "java.lang.number",
+              "java.lang.object",
+              "java.lang.process",
+              "java.lang.reflect",
+              "java.lang.string",
+              "java.lang.stringbuilder",
+              "java.lang.system",
+              "javax.script.scriptenginemanager",
+              "org.apache.commons",
+              "org.apache.struts",
+              "org.apache.struts2",
+              "org.omg.corba",
+              "java.beans.xmldecode"
+            ]
+          },
+          "operator": "phrase_match"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "crs-944-260",
+      "name": "Remote Command Execution: Malicious class-loading payload",
+      "tags": {
+        "type": "java_code_injection",
+        "crs_id": "944260",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?:class\\.module\\.classLoader\\.resources\\.context\\.parent\\.pipeline|springframework\\.context\\.support\\.FileSystemXmlApplicationContext)",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 58
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-000-001",
+      "name": "Look for Cassandra injections",
+      "tags": {
+        "type": "nosql_injection",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              }
+            ],
+            "regex": "\\ballow\\s+filtering\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeComments"
+      ]
+    },
+    {
+      "id": "dog-000-002",
+      "name": "OGNL - Look for formatting injection patterns",
+      "tags": {
+        "type": "java_code_injection",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              },
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "[#%$]{(?:[^}]+[^\\w\\s}\\-_][^}]+|\\d+-\\d+)}",
+            "options": {
+              "case_sensitive": true
+            }
+          }
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-000-003",
+      "name": "OGNL - Detect OGNL exploitation primitives",
+      "tags": {
+        "type": "java_code_injection",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "[@#]ognl",
+            "options": {
+              "case_sensitive": true
+            }
+          }
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-000-004",
+      "name": "Spring4Shell - Attempts to exploit the Spring4shell vulnerability",
+      "tags": {
+        "type": "exploit_detection",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.body"
+              }
+            ],
+            "regex": "^class\\.module\\.classLoader\\.",
+            "options": {
+              "case_sensitive": false
+            }
+          }
+        }
+      ],
+      "transformers": [
+        "keys_only"
+      ]
+    },
+    {
+      "id": "dog-000-005",
+      "name": "Node.js: Prototype pollution through __proto__",
+      "tags": {
+        "type": "js_code_injection",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              }
+            ],
+            "regex": "^__proto__$"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "keys_only"
+      ]
+    },
+    {
+      "id": "dog-000-006",
+      "name": "Node.js: Prototype pollution through constructor.prototype",
+      "tags": {
+        "type": "js_code_injection",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              }
+            ],
+            "regex": "^constructor$"
+          },
+          "operator": "match_regex"
+        },
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              }
+            ],
+            "regex": "^prototype$"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "keys_only"
+      ]
+    },
+    {
+      "id": "dog-000-007",
+      "name": "Server side template injection: Velocity & Freemarker",
+      "tags": {
+        "type": "java_code_injection",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "#(?:set|foreach|macro|parse|if)\\(.*\\)|<#assign.*>"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-913-001",
+      "name": "BurpCollaborator OOB domain",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "BurpCollaborator",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\b(?:burpcollaborator\\.net|oastify\\.com)\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-913-002",
+      "name": "Qualys OOB domain",
+      "tags": {
+        "type": "commercial_scanner",
+        "category": "attack_attempt",
+        "tool_name": "Qualys",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\bqualysperiscope\\.com\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-913-003",
+      "name": "Probely OOB domain",
+      "tags": {
+        "type": "commercial_scanner",
+        "category": "attack_attempt",
+        "tool_name": "Probely",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\bprbly\\.win\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-913-004",
+      "name": "Known malicious out-of-band interaction domain",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\b(?:webhook\\.site|\\.canarytokens\\.com|vii\\.one|act1on3\\.ru|gdsburp\\.com)\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-913-005",
+      "name": "Known suspicious out-of-band interaction domain",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\b(?:\\.ngrok\\.io|requestbin\\.com|requestbin\\.net)\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-913-006",
+      "name": "Rapid7 OOB domain",
+      "tags": {
+        "type": "commercial_scanner",
+        "category": "attack_attempt",
+        "tool_name": "Rapid7",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\bappspidered\\.rapid7\\."
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-913-007",
+      "name": "Interact.sh OOB domain",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "interact.sh",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\b(?:interact\\.sh|oast\\.(?:pro|live|site|online|fun|me))\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-931-001",
+      "name": "RFI: URL Payload to well known RFI target",
+      "tags": {
+        "type": "rfi",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              }
+            ],
+            "regex": "^(?i:file|ftps?|https?).*/rfiinc\\.txt\\?+$",
+            "options": {
+              "case_sensitive": true,
+              "min_length": 17
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-934-001",
+      "name": "XXE - XML file loads external entity",
+      "tags": {
+        "type": "xxe",
+        "category": "attack_attempt",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?:<\\?xml[^>]*>.*)<!ENTITY[^>]+SYSTEM\\s+[^>]+>",
+            "options": {
+              "case_sensitive": false,
+              "min_length": 24
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "dog-942-001",
+      "name": "Blind XSS callback domains",
+      "tags": {
+        "type": "xss",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "https?:\\/\\/(?:.*\\.)?(?:bxss\\.in|xss\\.ht|js\\.rip)",
+            "options": {
+              "case_sensitive": false
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "nfd-000-001",
+      "name": "Detect common directory discovery scans",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.response.status"
+              }
+            ],
+            "regex": "^404$",
+            "options": {
+              "case_sensitive": true
+            }
+          }
+        },
+        {
+          "operator": "phrase_match",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              }
+            ],
+            "list": [
+              "/wordpress/",
+              "/etc/",
+              "/login.php",
+              "/install.php",
+              "/administrator",
+              "/admin.php",
+              "/wp-config",
+              "/phpmyadmin",
+              "/fckeditor",
+              "/mysql",
+              "/manager/html",
+              ".htaccess",
+              "/config.php",
+              "/configuration",
+              "/cgi-bin/php",
+              "/search.php",
+              "/tinymce",
+              "/tiny_mce",
+              "/settings.php",
+              "../../..",
+              "/install/",
+              "/download.php",
+              "/webdav",
+              "/forum.php",
+              "/user.php",
+              "/style.php",
+              "/jmx-console",
+              "/modules.php",
+              "/include.php",
+              "/default.asp",
+              "/help.php",
+              "/database.yml",
+              "/database.yml.pgsql",
+              "/database.yml.sqlite3",
+              "/database.yml.sqlite",
+              "/database.yml.mysql",
+              ".%2e/",
+              "/view.php",
+              "/header.php",
+              "/search.asp",
+              "%5c%5c",
+              "/server/php/",
+              "/invoker/jmxinvokerservlet",
+              "/phpmyadmin/index.php",
+              "/data/admin/allowurl.txt",
+              "/verify.php",
+              "/misc/ajax.js",
+              "/.idea",
+              "/module.php",
+              "/backup.rar",
+              "/backup.tar",
+              "/backup.zip",
+              "/backup.7z",
+              "/backup.gz",
+              "/backup.tgz",
+              "/backup.tar.gz",
+              "waitfor%20delay",
+              "/calendar.php",
+              "/news.php",
+              "/dompdf.php",
+              "))))))))))))))))",
+              "/web.config",
+              "tree.php",
+              "/cgi-bin-sdb/printenv",
+              "/comments.php",
+              "/detail.asp",
+              "/license.txt",
+              "/admin.asp",
+              "/auth.php",
+              "/list.php",
+              "/content.php",
+              "/mod.php",
+              "/mini.php",
+              "/install.pgsql",
+              "/install.mysql",
+              "/install.sqlite",
+              "/install.sqlite3",
+              "/install.txt",
+              "/install.md",
+              "/doku.php",
+              "/main.asp",
+              "/myadmin",
+              "/force-download.php",
+              "/iisprotect/admin",
+              "/.gitignore",
+              "/print.php",
+              "/common.php",
+              "/mainfile.php",
+              "/functions.php",
+              "/scripts/setup.php",
+              "/faq.php",
+              "/op/op.login.php",
+              "/home.php",
+              "/includes/hnmain.inc.php3",
+              "/preview.php",
+              "/dump.rar",
+              "/dump.tar",
+              "/dump.zip",
+              "/dump.7z",
+              "/dump.gz",
+              "/dump.tgz",
+              "/dump.tar.gz",
+              "/thumbnail.php",
+              "/sendcard.php",
+              "/global.asax",
+              "/directory.php",
+              "/footer.php",
+              "/error.asp",
+              "/forum.asp",
+              "/save.php",
+              "/htmlsax3.php",
+              "/adm/krgourl.php",
+              "/includes/converter.inc.php",
+              "/nucleus/libs/pluginadmin.php",
+              "/base_qry_common.php",
+              "/fileadmin",
+              "/bitrix/admin/",
+              "/adm.php",
+              "/util/barcode.php",
+              "/action.php",
+              "/rss.asp",
+              "/downloads.php",
+              "/page.php",
+              "/snarf_ajax.php",
+              "/fck/editor",
+              "/sendmail.php",
+              "/detail.php",
+              "/iframe.php",
+              "/swfupload.swf",
+              "/jenkins/login",
+              "/phpmyadmin/main.php",
+              "/phpmyadmin/scripts/setup.php",
+              "/user/index.php",
+              "/checkout.php",
+              "/process.php",
+              "/ks_inc/ajax.js",
+              "/export.php",
+              "/register.php",
+              "/cart.php",
+              "/console.php",
+              "/friend.php",
+              "/readmsg.php",
+              "/install.asp",
+              "/dagent/downloadreport.asp",
+              "/system/index.php",
+              "/core/changelog.txt",
+              "/js/util.js",
+              "/interna.php",
+              "/gallery.php",
+              "/links.php",
+              "/data/admin/ver.txt",
+              "/language/zh-cn.xml",
+              "/productdetails.asp",
+              "/admin/template/article_more/config.htm",
+              "/components/com_moofaq/includes/file_includer.php",
+              "/licence.txt",
+              "/rss.xsl",
+              "/vtigerservice.php",
+              "/mysql/main.php",
+              "/passwiki.php",
+              "/scr/soustab.php",
+              "/global.php",
+              "/email.php",
+              "/user.asp",
+              "/msd",
+              "/products.php",
+              "/cultbooking.php",
+              "/cron.php",
+              "/static/js/admincp.js",
+              "/comment.php",
+              "/maintainers",
+              "/modules/plain/adminpart/addplain.php",
+              "/wp-content/plugins/ungallery/source_vuln.php",
+              "/upgrade.txt",
+              "/category.php",
+              "/index_logged.php",
+              "/members.asp",
+              "/script/html.js",
+              "/images/ad.js",
+              "/awstats/awstats.pl",
+              "/includes/esqueletos/skel_null.php",
+              "/modules/profile/user.php",
+              "/window_top.php",
+              "/openbrowser.php",
+              "/thread.php",
+              "tinfoil_xss",
+              "/includes/include.php",
+              "/urheber.php",
+              "/header.inc.php",
+              "/mysqldumper",
+              "/display.php",
+              "/website.php",
+              "/stats.php",
+              "/assets/plugins/mp3_id/mp3_id.php",
+              "/siteminderagent/forms/smpwservices.fcc"
+            ]
+          }
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "nfd-000-002",
+      "name": "Detect failed attempt to fetch readme files",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.response.status"
+              }
+            ],
+            "regex": "^404$",
+            "options": {
+              "case_sensitive": true
+            }
+          }
+        },
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              }
+            ],
+            "regex": "readme\\.[\\.a-z0-9]+$",
+            "options": {
+              "case_sensitive": false
+            }
+          }
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "nfd-000-003",
+      "name": "Detect failed attempt to fetch Java EE resource files",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.response.status"
+              }
+            ],
+            "regex": "^404$",
+            "options": {
+              "case_sensitive": true
+            }
+          }
+        },
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              }
+            ],
+            "regex": "^(?:.*web\\-inf)(?:.*web\\.xml).*$",
+            "options": {
+              "case_sensitive": false
+            }
+          }
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "nfd-000-004",
+      "name": "Detect failed attempt to fetch code files",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.response.status"
+              }
+            ],
+            "regex": "^404$",
+            "options": {
+              "case_sensitive": true
+            }
+          }
+        },
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              }
+            ],
+            "regex": "\\.(java|pyc?|rb|class)\\b",
+            "options": {
+              "case_sensitive": false
+            }
+          }
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "nfd-000-005",
+      "name": "Detect failed attempt to fetch source code archives",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.response.status"
+              }
+            ],
+            "regex": "^404$",
+            "options": {
+              "case_sensitive": true
+            }
+          }
+        },
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              }
+            ],
+            "regex": "\\.(sql|log|ndb|gz|zip|tar\\.gz|tar|regVV|reg|conf|bz2|ini|db|war|bat|inc|btr|server|ds|conf|config|admin|master|sln|bak)\\b(?:[^.]|$)",
+            "options": {
+              "case_sensitive": false
+            }
+          }
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "nfd-000-006",
+      "name": "Detect failed attempt to fetch sensitive files",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.response.status"
+              }
+            ],
+            "regex": "^404$",
+            "options": {
+              "case_sensitive": true
+            }
+          }
+        },
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              }
+            ],
+            "regex": "\\.(cgi|bat|dll|exe|key|cert|crt|pem|der|pkcs|pkcs|pkcs[0-9]*|nsf|jsa|war|java|class|vb|vba|so|git|svn|hg|cvs)([^a-zA-Z0-9_]|$)",
+            "options": {
+              "case_sensitive": false
+            }
+          }
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "nfd-000-007",
+      "name": "Detect failed attempt to fetch archives",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.response.status"
+              }
+            ],
+            "regex": "^404$",
+            "options": {
+              "case_sensitive": true
+            }
+          }
+        },
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              }
+            ],
+            "regex": "/[\\d\\-_]*\\.(rar|tar|zip|7z|gz|tgz|tar.gz)",
+            "options": {
+              "case_sensitive": false
+            }
+          }
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "nfd-000-008",
+      "name": "Detect failed attempt to trigger incorrect application behavior",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.response.status"
+              }
+            ],
+            "regex": "^404$",
+            "options": {
+              "case_sensitive": true
+            }
+          }
+        },
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              }
+            ],
+            "regex": "(/(administrator/components/com.*\\.php|response\\.write\\(.+\\))|select\\(.+\\)from|\\(.*sleep\\(.+\\)|(%[a-zA-Z0-9]{2}[a-zA-Z]{0,1})+\\))",
+            "options": {
+              "case_sensitive": false
+            }
+          }
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "nfd-000-009",
+      "name": "Detect failed attempt to leak the structure of the application",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.response.status"
+              }
+            ],
+            "regex": "^404$",
+            "options": {
+              "case_sensitive": true
+            }
+          }
+        },
+        {
+          "operator": "match_regex",
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              }
+            ],
+            "regex": "/(login\\.rol|LICENSE|[\\w-]+\\.(plx|pwd))$",
+            "options": {
+              "case_sensitive": false
+            }
+          }
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "sqr-000-001",
+      "name": "SSRF: Try to access the credential manager of the main cloud services",
+      "tags": {
+        "type": "ssrf",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?i)^\\W*((http|ftp)s?://)?\\W*((::f{4}:)?(169|(0x)?0*a9|0+251)\\.?(254|(0x)?0*fe|0+376)[0-9a-fx\\.:]+|metadata\\.google\\.internal|metadata\\.goog)\\W*/",
+            "options": {
+              "min_length": 4
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "sqr-000-002",
+      "name": "Server-side Javascript injection: Try to detect obvious JS injection",
+      "tags": {
+        "type": "js_code_injection",
+        "category": "attack_attempt"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "require\\(['\"][\\w\\.]+['\"]\\)|process\\.\\w+\\([\\w\\.]*\\)|\\.toString\\(\\)",
+            "options": {
+              "min_length": 4
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "removeNulls"
+      ]
+    },
+    {
+      "id": "sqr-000-008",
+      "name": "Windows: Detect attempts to exfiltrate .ini files",
+      "tags": {
+        "type": "command_injection",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?i)[&|]\\s*type\\s+%\\w+%\\\\+\\w+\\.ini\\s*[&|]"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "sqr-000-009",
+      "name": "Linux: Detect attempts to exfiltrate passwd files",
+      "tags": {
+        "type": "command_injection",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?i)[&|]\\s*cat\\s+\\/etc\\/[\\w\\.\\/]*passwd\\s*[&|]"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "sqr-000-010",
+      "name": "Windows: Detect attempts to timeout a shell",
+      "tags": {
+        "type": "command_injection",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(?i)[&|]\\s*timeout\\s+/t\\s+\\d+\\s*[&|]"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "sqr-000-011",
+      "name": "SSRF: Try to access internal OMI service (CVE-2021-38647)",
+      "tags": {
+        "type": "ssrf",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "http(s?):\\/\\/([A-Za-z0-9\\.\\-\\_]+|\\[[A-Fa-f0-9\\:]+\\]|):5986\\/wsman",
+            "options": {
+              "min_length": 4
+            }
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "sqr-000-012",
+      "name": "SSRF: Detect SSRF attempt on internal service",
+      "tags": {
+        "type": "ssrf",
+        "category": "attack_attempt",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "^(jar:)?(http|https):\\/\\/([0-9oq]{1,5}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}|[0-9]{1,10})(:[0-9]{1,5})?(\\/[^:@]*)?$"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "sqr-000-013",
+      "name": "SSRF: Detect SSRF attempts using IPv6 or octal/hexdecimal obfuscation",
+      "tags": {
+        "type": "ssrf",
+        "category": "attack_attempt",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "^(jar:)?(http|https):\\/\\/((\\[)?[:0-9a-f\\.x]{2,}(\\])?)(:[0-9]{1,5})?(\\/[^:@]*)?$"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "sqr-000-014",
+      "name": "SSRF: Detect SSRF domain redirection bypass",
+      "tags": {
+        "type": "ssrf",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "(http|https):\\/\\/(?:.*\\.)?(?:burpcollaborator\\.net|localtest\\.me|mail\\.ebc\\.apple\\.com|bugbounty\\.dod\\.network|.*\\.[nx]ip\\.io|oastify\\.com|oast\\.(?:pro|live|site|online|fun|me)|sslip\\.io|requestbin\\.com|requestbin\\.net|hookbin\\.com|webhook\\.site|canarytokens\\.com|interact\\.sh|ngrok\\.io|bugbounty\\.click|prbly\\.win|qualysperiscope\\.com|vii.one|act1on3.ru)"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "sqr-000-015",
+      "name": "SSRF: Detect SSRF attempt using non HTTP protocol",
+      "tags": {
+        "type": "ssrf",
+        "category": "attack_attempt",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "^(jar:)?((file|netdoc):\\/\\/[\\\\\\/]+|(dict|gopher|ldap|sftp|tftp):\\/\\/.*:[0-9]{1,5})"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "lowercase"
+      ]
+    },
+    {
+      "id": "sqr-000-017",
+      "name": "Log4shell: Attempt to exploit log4j CVE-2021-44228",
+      "tags": {
+        "type": "exploit_detection",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.uri.raw"
+              },
+              {
+                "address": "server.request.query"
+              },
+              {
+                "address": "server.request.body"
+              },
+              {
+                "address": "server.request.path_params"
+              },
+              {
+                "address": "server.request.headers.no_cookies"
+              },
+              {
+                "address": "grpc.server.request.message"
+              }
+            ],
+            "regex": "\\${[^j]*j[^n]*n[^d]*d[^i]*i[^:]*:[^}]*}"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [
+        "unicode_normalize"
+      ]
+    },
+    {
+      "id": "ua0-600-0xx",
+      "name": "Joomla exploitation tool",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Joomla exploitation tool",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "JDatabaseDriverMysqli"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-10x",
+      "name": "Nessus",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Nessus",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)^Nessus(/|([ :]+SOAP))"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-12x",
+      "name": "Arachni",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Arachni",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "^Arachni\\/v"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-13x",
+      "name": "Jorgee",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Jorgee",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bJorgee\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-14x",
+      "name": "Probely",
+      "tags": {
+        "type": "commercial_scanner",
+        "category": "attack_attempt",
+        "tool_name": "Probely",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bProbely\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-15x",
+      "name": "Metis",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Metis",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bmetis\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-16x",
+      "name": "SQL power injector",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "SQLPowerInjector",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "sql power injector"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-18x",
+      "name": "N-Stealth",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "N-Stealth",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bn-stealth\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-19x",
+      "name": "Brutus",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Brutus",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bbrutus\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-1xx",
+      "name": "Shellshock exploitation tool",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "\\(\\) \\{ :; *\\}"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-20x",
+      "name": "Netsparker",
+      "tags": {
+        "type": "commercial_scanner",
+        "category": "attack_attempt",
+        "tool_name": "Netsparker",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "\\bnetsparker\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-22x",
+      "name": "JAASCois",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "JAASCois",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bjaascois\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-26x",
+      "name": "Nsauditor",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Nsauditor",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bnsauditor\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-27x",
+      "name": "Paros",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Paros",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)Mozilla/.* Paros/"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-28x",
+      "name": "DirBuster",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "DirBuster",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bdirbuster\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-29x",
+      "name": "Pangolin",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Pangolin",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bpangolin\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-2xx",
+      "name": "Qualys",
+      "tags": {
+        "type": "commercial_scanner",
+        "category": "attack_attempt",
+        "tool_name": "Qualys",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bqualys\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-30x",
+      "name": "SQLNinja",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "SQLNinja",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bsqlninja\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-31x",
+      "name": "Nikto",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Nikto",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "\\(Nikto/[\\d\\.]+\\)"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-33x",
+      "name": "BlackWidow",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "BlackWidow",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bblack\\s?widow\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-34x",
+      "name": "Grendel-Scan",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Grendel-Scan",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bgrendel-scan\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-35x",
+      "name": "Havij",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Havij",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bhavij\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-36x",
+      "name": "w3af",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "w3af",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bw3af\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-37x",
+      "name": "Nmap",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Nmap",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "nmap (nse|scripting engine)"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-39x",
+      "name": "Nessus Scripted",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Nessus",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)^'?[a-z0-9_]+\\.nasl'?$"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-3xx",
+      "name": "Evil Scanner",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "EvilScanner",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bevilScanner\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-40x",
+      "name": "WebFuck",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "WebFuck",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bWebFuck\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-42x",
+      "name": "OpenVAS",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "OpenVAS",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)OpenVAS\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-43x",
+      "name": "Spider-Pig",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Spider-Pig",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "Powered by Spider-Pig by tinfoilsecurity\\.com"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-44x",
+      "name": "Zgrab",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Zgrab",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "Mozilla/\\d+.\\d+ zgrab"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-45x",
+      "name": "Zmeu",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Zmeu",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bZmEu\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-47x",
+      "name": "GoogleSecurityScanner",
+      "tags": {
+        "type": "commercial_scanner",
+        "category": "attack_attempt",
+        "tool_name": "GoogleSecurityScanner",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bGoogleSecurityScanner\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-48x",
+      "name": "Commix",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Commix",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "^commix\\/"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-49x",
+      "name": "Gobuster",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Gobuster",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "^gobuster\\/"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-4xx",
+      "name": "CGIchk",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "CGIchk",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bcgichk\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-51x",
+      "name": "FFUF",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "FFUF",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)^Fuzz Faster U Fool\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-52x",
+      "name": "Nuclei",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Nuclei",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)^Nuclei\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-53x",
+      "name": "Tsunami",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Tsunami",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bTsunamiSecurityScanner\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-54x",
+      "name": "Nimbostratus",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Nimbostratus",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bnimbostratus-bot\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-55x",
+      "name": "Datadog test scanner: user-agent",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "tool_name": "Datadog Canary Test",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              },
+              {
+                "address": "grpc.server.request.metadata",
+                "key_path": [
+                  "dd-canary"
+                ]
+              }
+            ],
+            "regex": "^dd-test-scanner-log(?:$|/|\\s)"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-56x",
+      "name": "Datadog test scanner - blocking version: user-agent",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Datadog Canary Test",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              },
+              {
+                "address": "grpc.server.request.metadata",
+                "key_path": [
+                  "dd-canary"
+                ]
+              }
+            ],
+            "regex": "^dd-test-scanner-log-block(?:$|/|\\s)"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [],
+      "on_match": [
+        "block"
+      ]
+    },
+    {
+      "id": "ua0-600-57x",
+      "name": "AlertLogic",
+      "tags": {
+        "type": "commercial_scanner",
+        "category": "attack_attempt",
+        "tool_name": "AlertLogic",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "\\bAlertLogic-MDR-"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-58x",
+      "name": "wfuzz",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "wfuzz",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "\\bwfuzz\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-59x",
+      "name": "Detectify",
+      "tags": {
+        "type": "commercial_scanner",
+        "category": "attack_attempt",
+        "tool_name": "Detectify",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "\\bdetectify\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-5xx",
+      "name": "Blind SQL Injection Brute Forcer",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "BSQLBF",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)\\bbsqlbf\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-60x",
+      "name": "masscan",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "masscan",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "^masscan/"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-61x",
+      "name": "WPScan",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "WPScan",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "^wpscan\\b"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-62x",
+      "name": "Aon pentesting services",
+      "tags": {
+        "type": "commercial_scanner",
+        "category": "attack_attempt",
+        "tool_name": "Aon",
+        "confidence": "0"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "^Aon/"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-6xx",
+      "name": "Stealthy scanner",
+      "tags": {
+        "type": "security_scanner",
+        "category": "attack_attempt",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "mozilla/4\\.0 \\(compatible(; msie 6\\.0; win32)?\\)"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-7xx",
+      "name": "SQLmap",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "SQLmap",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "^sqlmap/"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    },
+    {
+      "id": "ua0-600-9xx",
+      "name": "Skipfish",
+      "tags": {
+        "type": "attack_tool",
+        "category": "attack_attempt",
+        "tool_name": "Skipfish",
+        "confidence": "1"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.headers.no_cookies",
+                "key_path": [
+                  "user-agent"
+                ]
+              }
+            ],
+            "regex": "(?i)mozilla/5\\.0 sf/"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": []
+    }
+  ]
+}
\ No newline at end of file
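As a quick illustration of how the user-agent patterns in the ruleset above behave, here is a standalone sketch (not part of this diff) that compiles two of the regexes and runs them against sample User-Agent strings. The patterns are copied verbatim from rules ua0-600-7xx (SQLmap) and ua0-600-31x (Nikto); the sample strings are made up for demonstration.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Patterns copied from the ruleset above.
	sqlmap := regexp.MustCompile(`^sqlmap/`)
	nikto := regexp.MustCompile(`\(Nikto/[\d\.]+\)`)

	// Hypothetical User-Agent values, for illustration only.
	agents := []string{
		"sqlmap/1.7.2#stable (https://sqlmap.org)",
		"Mozilla/5.00 (Nikto/2.1.6) (Evasions:None) (Test:Port Check)",
		"Mozilla/5.0 (X11; Linux x86_64) Gecko/20100101 Firefox/117.0",
	}
	for _, ua := range agents {
		fmt.Printf("%-62s sqlmap=%-5v nikto=%v\n", ua, sqlmap.MatchString(ua), nikto.MatchString(ua))
	}
}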
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/rules_manager.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/rules_manager.go
new file mode 100644
index 0000000000..8bf12b1821
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/rules_manager.go
@@ -0,0 +1,147 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package appsec
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+
+	rc "github.com/DataDog/datadog-agent/pkg/remoteconfig/state"
+)
+
+type (
+	// rulesManager is used to build a full rules file from a combination of rules fragments
+	// The `base` fragment is the default rules (either local or received through ASM_DD),
+	// and the `edits` fragments each represent a remote configuration update that affects the rules.
+	// `basePath` is either empty if the local base rules are used, or holds the path of the ASM_DD config.
+	rulesManager struct {
+		latest   rulesFragment
+		base     rulesFragment
+		basePath string
+		edits    map[string]rulesFragment
+	}
+	// rulesFragment can represent a full ruleset or a fragment of it.
+	rulesFragment struct {
+		Version     string          `json:"version,omitempty"`
+		Metadata    interface{}     `json:"metadata,omitempty"`
+		Rules       []interface{}   `json:"rules,omitempty"`
+		Overrides   []interface{}   `json:"rules_override,omitempty"`
+		Exclusions  []interface{}   `json:"exclusions,omitempty"`
+		RulesData   []ruleDataEntry `json:"rules_data,omitempty"`
+		Actions     []actionEntry   `json:"actions,omitempty"`
+		CustomRules []interface{}   `json:"custom_rules,omitempty"`
+	}
+
+	ruleDataEntry rc.ASMDataRuleData
+	rulesData     struct {
+		RulesData []ruleDataEntry `json:"rules_data"`
+	}
+
+	actionEntry struct {
+		ID         string `json:"id"`
+		Type       string `json:"type"`
+		Parameters struct {
+			StatusCode     int    `json:"status_code"`
+			GRPCStatusCode *int   `json:"grpc_status_code,omitempty"`
+			Type           string `json:"type,omitempty"`
+			Location       string `json:"location,omitempty"`
+		} `json:"parameters,omitempty"`
+	}
+)
+
+// defaultRulesFragment returns a rulesFragment created using the default static recommended rules
+func defaultRulesFragment() rulesFragment {
+	var f rulesFragment
+	if err := json.Unmarshal([]byte(staticRecommendedRules), &f); err != nil {
+		log.Debug("appsec: error unmarshalling default rules: %v", err)
+	}
+	return f
+}
+
+func (r_ *rulesFragment) clone() rulesFragment {
+	var f rulesFragment
+	f.Version = r_.Version
+	f.Metadata = r_.Metadata
+	f.Overrides = append(f.Overrides, r_.Overrides...)
+	f.Exclusions = append(f.Exclusions, r_.Exclusions...)
+	f.RulesData = append(f.RulesData, r_.RulesData...)
+	f.CustomRules = append(f.CustomRules, r_.CustomRules...)
+	// TODO (Francois Mazeau): copy more fields once we handle them
+	return f
+}
+
+// newRulesManager initializes and returns a new rulesManager using the provided rules.
+// If no rules are provided (nil), the default rules are used instead.
+// If the provided rules are invalid, an error is returned
+func newRulesManager(rules []byte) (*rulesManager, error) {
+	var f rulesFragment
+	if rules == nil {
+		f = defaultRulesFragment()
+		log.Debug("appsec: rulesManager: using default rules configuration")
+	} else if err := json.Unmarshal(rules, &f); err != nil {
+		log.Debug("appsec: cannot create rulesManager from specified rules")
+		return nil, err
+	}
+	return &rulesManager{
+		latest: f,
+		base:   f,
+		edits:  map[string]rulesFragment{},
+	}, nil
+}
+
+func (r *rulesManager) clone() *rulesManager {
+	var clone rulesManager
+	clone.edits = make(map[string]rulesFragment, len(r.edits))
+	for k, v := range r.edits {
+		clone.edits[k] = v
+	}
+	clone.base = r.base.clone()
+	clone.latest = r.latest.clone()
+	return &clone
+}
+
+func (r *rulesManager) addEdit(cfgPath string, f rulesFragment) {
+	r.edits[cfgPath] = f
+}
+
+func (r *rulesManager) removeEdit(cfgPath string) {
+	delete(r.edits, cfgPath)
+}
+
+func (r *rulesManager) changeBase(f rulesFragment, basePath string) {
+	r.base = f
+	r.basePath = basePath
+}
+
+// compile compiles the rulesManager fragments together and stores the result in r.latest
+func (r *rulesManager) compile() {
+	if r.base.Rules == nil || len(r.base.Rules) == 0 {
+		r.base = defaultRulesFragment()
+	}
+	r.latest = r.base
+
+	// Simply concatenate the content of each top level rule field as specified in our RFCs
+	for _, v := range r.edits {
+		r.latest.Overrides = append(r.latest.Overrides, v.Overrides...)
+		r.latest.Exclusions = append(r.latest.Exclusions, v.Exclusions...)
+		r.latest.Actions = append(r.latest.Actions, v.Actions...)
+		r.latest.RulesData = append(r.latest.RulesData, v.RulesData...)
+		r.latest.CustomRules = append(r.latest.CustomRules, v.CustomRules...)
+	}
+}
+
+// raw returns a compact json version of the rules
+func (r *rulesManager) raw() []byte {
+	data, _ := json.Marshal(r.latest)
+	return data
+}
+
+// String returns the string representation of the latest compiled json rules.
+func (r *rulesManager) String() string {
+	return fmt.Sprintf("%+v", r.latest)
+}
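To make the fragment-merging behaviour of rules_manager.go easier to follow, here is a small self-contained sketch (not part of this diff) that mirrors what compile() does: keep the base fragment and concatenate each remote-config edit's lists onto the latest view. The fragment type is reduced to two fields and the config paths and rule names are invented for illustration.

package main

import (
	"encoding/json"
	"fmt"
)

// fragment is a stripped-down stand-in for rulesFragment, kept to two fields
// so the merge step stays obvious.
type fragment struct {
	Rules     []string `json:"rules,omitempty"`
	Overrides []string `json:"rules_override,omitempty"`
}

func main() {
	base := fragment{Rules: []string{"ua0-600-7xx", "ua0-600-31x"}}

	// Each remote-config path contributes one fragment of edits (paths are made up).
	edits := map[string]fragment{
		"example/ASM/overrides-1/config": {Overrides: []string{"block sqlmap"}},
		"example/ASM/overrides-2/config": {Overrides: []string{"lower nikto confidence"}},
	}

	// Mirror of rulesManager.compile(): start from the base and append every
	// edit's override list onto the latest view.
	latest := base
	for _, e := range edits {
		latest.Overrides = append(latest.Overrides, e.Overrides...)
	}

	out, _ := json.Marshal(latest) // mirror of rulesManager.raw()
	fmt.Println(string(out))
}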
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/waf.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/waf.go
new file mode 100644
index 0000000000..fa1fb50758
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/waf.go
@@ -0,0 +1,555 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+//go:build appsec
+// +build appsec
+
+package appsec
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/grpcsec"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/httpsec"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
+
+	waf "github.com/DataDog/go-libddwaf"
+	"go.uber.org/atomic"
+)
+
+const (
+	eventRulesVersionTag = "_dd.appsec.event_rules.version"
+	eventRulesErrorsTag  = "_dd.appsec.event_rules.errors"
+	eventRulesLoadedTag  = "_dd.appsec.event_rules.loaded"
+	eventRulesFailedTag  = "_dd.appsec.event_rules.error_count"
+	wafDurationTag       = "_dd.appsec.waf.duration"
+	wafDurationExtTag    = "_dd.appsec.waf.duration_ext"
+	wafTimeoutTag        = "_dd.appsec.waf.timeouts"
+	wafVersionTag        = "_dd.appsec.waf.version"
+)
+
+type wafHandle struct {
+	*waf.Handle
+	// Actions are tightly linked to a ruleset, which is linked to a waf handle
+	actions map[string]*sharedsec.Action
+}
+
+func (a *appsec) swapWAF(rules rulesFragment) (err error) {
+	// Instantiate a new WAF handle and verify its state
+	newHandle, err := newWAFHandle(rules, a.cfg)
+	if err != nil {
+		return err
+	}
+
+	// Close the WAF handle in case of an error in what's following
+	defer func() {
+		if err != nil {
+			newHandle.Close()
+		}
+	}()
+
+	listeners, err := newWAFEventListeners(newHandle, a.cfg, a.limiter)
+	if err != nil {
+		return err
+	}
+
+	// Register the event listeners now that we know that the new handle is valid
+	newRoot := dyngo.NewRootOperation()
+	for _, l := range listeners {
+		newRoot.On(l)
+	}
+
+	// Hot-swap dyngo's root operation
+	dyngo.SwapRootOperation(newRoot)
+
+	// Close old handle.
+	// Note that concurrent requests are still using it, and it will be released
+	// only when no more requests use it.
+	// TODO: implement in dyngo ref-counting of the root operation so we can
+	//   rely on a Finish event listener on the root operation instead?
+	//   Avoiding saving the current WAF handle would guarantee no one is
+	//   accessing a.wafHandle while we swap
+	oldHandle := a.wafHandle
+	a.wafHandle = newHandle
+	if oldHandle != nil {
+		oldHandle.Close()
+	}
+
+	return nil
+}
+
+func actionFromEntry(e *actionEntry) *sharedsec.Action {
+	switch e.Type {
+	case "block_request":
+		grpcCode := 10 // use the grpc.Codes value for "Aborted" by default
+		if e.Parameters.GRPCStatusCode != nil {
+			grpcCode = *e.Parameters.GRPCStatusCode
+		}
+		return sharedsec.NewBlockRequestAction(e.Parameters.StatusCode, grpcCode, e.Parameters.Type)
+	case "redirect_request":
+		return sharedsec.NewRedirectRequestAction(e.Parameters.StatusCode, e.Parameters.Location)
+	default:
+		log.Debug("appsec: unknown action type `%s`", e.Type)
+		return nil
+	}
+}
+
+func newWAFHandle(rules rulesFragment, cfg *Config) (*wafHandle, error) {
+	handle, err := waf.NewHandle(rules, cfg.obfuscator.KeyRegex, cfg.obfuscator.ValueRegex)
+	actions := map[string]*sharedsec.Action{
+		// Default built-in block action
+		"block": sharedsec.NewBlockRequestAction(403, 10, "auto"),
+	}
+
+	for _, entry := range rules.Actions {
+		a := actionFromEntry(&entry)
+		if a != nil {
+			actions[entry.ID] = a
+		}
+	}
+	return &wafHandle{
+		Handle:  handle,
+		actions: actions,
+	}, err
+}
+
+func newWAFEventListeners(waf *wafHandle, cfg *Config, l Limiter) (listeners []dyngo.EventListener, err error) {
+	// Check if there are addresses in the rule
+	ruleAddresses := waf.Addresses()
+	if len(ruleAddresses) == 0 {
+		return nil, errors.New("no addresses found in the rule")
+	}
+
+	// Check there are supported addresses in the rule
+	httpAddresses, grpcAddresses, notSupported := supportedAddresses(ruleAddresses)
+	if len(httpAddresses) == 0 && len(grpcAddresses) == 0 {
+		return nil, fmt.Errorf("the addresses present in the rules are not supported: %v", notSupported)
+	}
+
+	if len(notSupported) > 0 {
+		log.Debug("appsec: the addresses present in the rules are partially supported: not supported=%v", notSupported)
+	}
+
+	// Register the WAF event listeners
+	if len(httpAddresses) > 0 {
+		log.Debug("appsec: creating http waf event listener of the rules addresses %v", httpAddresses)
+		listeners = append(listeners, newHTTPWAFEventListener(waf, httpAddresses, cfg.wafTimeout, l))
+	}
+
+	if len(grpcAddresses) > 0 {
+		log.Debug("appsec: creating the grpc waf event listener of the rules addresses %v", grpcAddresses)
+		listeners = append(listeners, newGRPCWAFEventListener(waf, grpcAddresses, cfg.wafTimeout, l))
+	}
+
+	return listeners, nil
+}
+
+// newHTTPWAFEventListener returns the HTTP WAF event listener to register in order to enable it.
+func newHTTPWAFEventListener(handle *wafHandle, addresses map[string]struct{}, timeout time.Duration, limiter Limiter) dyngo.EventListener {
+	var monitorRulesOnce sync.Once // per instantiation
+
+	return httpsec.OnHandlerOperationStart(func(op *httpsec.Operation, args httpsec.HandlerOperationArgs) {
+		wafCtx := waf.NewContext(handle.Handle)
+		if wafCtx == nil {
+			// The WAF event listener got concurrently released
+			return
+		}
+
+		if _, ok := addresses[userIDAddr]; ok {
+			// OnUserIDOperationStart happens when appsec.SetUser() is called. We run the WAF and apply actions to
+			// see if the associated user should be blocked. Since we don't control the execution flow in this case
+			// (SetUser is SDK), we delegate the responsibility of interrupting the handler to the user.
+			op.On(sharedsec.OnUserIDOperationStart(func(operation *sharedsec.UserIDOperation, args sharedsec.UserIDOperationArgs) {
+				matches, actionIds := runWAF(wafCtx, map[string]interface{}{userIDAddr: args.UserID}, timeout)
+				if len(matches) > 0 {
+					processHTTPSDKAction(operation, handle.actions, actionIds)
+					addSecurityEvents(op, limiter, matches)
+					log.Debug("appsec: WAF detected a suspicious user: %s", args.UserID)
+				}
+			}))
+		}
+
+		values := map[string]interface{}{}
+		for addr := range addresses {
+			switch addr {
+			case httpClientIPAddr:
+				if args.ClientIP.IsValid() {
+					values[httpClientIPAddr] = args.ClientIP.String()
+				}
+			case serverRequestMethodAddr:
+				values[serverRequestMethodAddr] = args.Method
+			case serverRequestRawURIAddr:
+				values[serverRequestRawURIAddr] = args.RequestURI
+			case serverRequestHeadersNoCookiesAddr:
+				if headers := args.Headers; headers != nil {
+					values[serverRequestHeadersNoCookiesAddr] = headers
+				}
+			case serverRequestCookiesAddr:
+				if cookies := args.Cookies; cookies != nil {
+					values[serverRequestCookiesAddr] = cookies
+				}
+			case serverRequestQueryAddr:
+				if query := args.Query; query != nil {
+					values[serverRequestQueryAddr] = query
+				}
+			case serverRequestPathParamsAddr:
+				if pathParams := args.PathParams; pathParams != nil {
+					values[serverRequestPathParamsAddr] = pathParams
+				}
+			}
+		}
+
+		matches, actionIds := runWAF(wafCtx, values, timeout)
+		if len(matches) > 0 {
+			interrupt := processActions(op, handle.actions, actionIds)
+			addSecurityEvents(op, limiter, matches)
+			log.Debug("appsec: WAF detected an attack before executing the request")
+			if interrupt {
+				wafCtx.Close()
+				return
+			}
+		}
+
+		if _, ok := addresses[serverRequestBodyAddr]; ok {
+			op.On(httpsec.OnSDKBodyOperationStart(func(sdkBodyOp *httpsec.SDKBodyOperation, args httpsec.SDKBodyOperationArgs) {
+				matches, actionIds := runWAF(wafCtx, map[string]interface{}{serverRequestBodyAddr: args.Body}, timeout)
+				if len(matches) > 0 {
+					processHTTPSDKAction(sdkBodyOp, handle.actions, actionIds)
+					addSecurityEvents(op, limiter, matches)
+					log.Debug("appsec: WAF detected a suspicious request body")
+				}
+			}))
+		}
+
+		op.On(httpsec.OnHandlerOperationFinish(func(op *httpsec.Operation, res httpsec.HandlerOperationRes) {
+			defer wafCtx.Close()
+
+			values := make(map[string]interface{}, 1)
+			if _, ok := addresses[serverResponseStatusAddr]; ok {
+				values[serverResponseStatusAddr] = res.Status
+			}
+
+			// Run the WAF, ignoring the returned actions - if any - since blocking after the request handler's
+			// response is not supported at the moment.
+			matches, _ := runWAF(wafCtx, values, timeout)
+
+			// Add WAF metrics.
+			rInfo := handle.RulesetInfo()
+			overallRuntimeNs, internalRuntimeNs := wafCtx.TotalRuntime()
+			addWAFMonitoringTags(op, rInfo.Version, overallRuntimeNs, internalRuntimeNs, wafCtx.TotalTimeouts())
+
+			// Add the following metrics once per instantiation of a WAF handle
+			monitorRulesOnce.Do(func() {
+				addRulesMonitoringTags(op, rInfo)
+				op.AddTag(ext.ManualKeep, samplernames.AppSec)
+			})
+
+			// Log the attacks if any
+			if len(matches) == 0 {
+				return
+			}
+			log.Debug("appsec: attack detected by the waf")
+			addSecurityEvents(op, limiter, matches)
+		}))
+	})
+}
+
+// newGRPCWAFEventListener returns the WAF event listener to register in order
+// to enable it.
+func newGRPCWAFEventListener(handle *wafHandle, addresses map[string]struct{}, timeout time.Duration, limiter Limiter) dyngo.EventListener {
+	var monitorRulesOnce sync.Once // per instantiation
+
+	return grpcsec.OnHandlerOperationStart(func(op *grpcsec.HandlerOperation, handlerArgs grpcsec.HandlerOperationArgs) {
+		// Limit the maximum number of security events, as a streaming RPC could
+		// receive unlimited number of messages where we could find security events
+		const maxWAFEventsPerRequest = 10
+		var (
+			nbEvents          atomic.Uint32
+			logOnce           sync.Once // per request
+			overallRuntimeNs  atomic.Uint64
+			internalRuntimeNs atomic.Uint64
+			nbTimeouts        atomic.Uint64
+
+			events []json.RawMessage
+			mu     sync.Mutex // events mutex
+		)
+
+		wafCtx := waf.NewContext(handle.Handle)
+		if wafCtx == nil {
+			// The WAF event listener got concurrently released
+			return
+		}
+
+		// OnUserIDOperationStart happens when appsec.SetUser() is called. We run the WAF and apply actions to
+		// see if the associated user should be blocked. Since we don't control the execution flow in this case
+		// (SetUser is SDK), we delegate the responsibility of interrupting the handler to the user.
+		op.On(sharedsec.OnUserIDOperationStart(func(userIDOp *sharedsec.UserIDOperation, args sharedsec.UserIDOperationArgs) {
+			values := map[string]interface{}{}
+			for addr := range addresses {
+				if addr == userIDAddr {
+					values[userIDAddr] = args.UserID
+				}
+			}
+			matches, actionIds := runWAF(wafCtx, values, timeout)
+			if len(matches) > 0 {
+				for _, id := range actionIds {
+					if a, ok := handle.actions[id]; ok && a.Blocking() {
+						code, err := a.GRPC()(map[string][]string{})
+						userIDOp.EmitData(grpcsec.NewMonitoringError(err.Error(), code))
+					}
+				}
+				addSecurityEvents(op, limiter, matches)
+				log.Debug("appsec: WAF detected an authenticated user attack: %s", args.UserID)
+			}
+		}))
+
+		// The same address is used for gRPC and http when it comes to client ip
+		values := map[string]interface{}{}
+		for addr := range addresses {
+			if addr == httpClientIPAddr && handlerArgs.ClientIP.IsValid() {
+				values[httpClientIPAddr] = handlerArgs.ClientIP.String()
+			}
+		}
+
+		matches, actionIds := runWAF(wafCtx, values, timeout)
+		if len(matches) > 0 {
+			interrupt := processActions(op, handle.actions, actionIds)
+			addSecurityEvents(op, limiter, matches)
+			log.Debug("appsec: WAF detected an attack before executing the request")
+			if interrupt {
+				wafCtx.Close()
+				return
+			}
+		}
+
+		op.On(grpcsec.OnReceiveOperationFinish(func(_ grpcsec.ReceiveOperation, res grpcsec.ReceiveOperationRes) {
+			if nbEvents.Load() == maxWAFEventsPerRequest {
+				logOnce.Do(func() {
+					log.Debug("appsec: ignoring the rpc message due to the maximum number of security events per grpc call reached")
+				})
+				return
+			}
+			// The current workaround of the WAF context limitations is to
+			// simply instantiate and release the WAF context for the operation
+			// lifetime so that:
+			//   1. We avoid growing the memory usage of the context every time
+			//      a grpc.server.request.message value is added to it during
+			//      the RPC lifetime.
+			//   2. We avoid the limitation of 1 event per attack type.
+			// TODO(Julio-Guerra): a future libddwaf API should solve this out.
+			wafCtx := waf.NewContext(handle.Handle)
+			if wafCtx == nil {
+				// The WAF event listener got concurrently released
+				return
+			}
+			defer wafCtx.Close()
+			// Run the WAF on the rule addresses available in the args
+			// Note that we don't check if the address is present in the rules
+			// as we only support one at the moment, so this callback cannot be
+			// set when the address is not present.
+			values := map[string]interface{}{grpcServerRequestMessage: res.Message}
+			if md := handlerArgs.Metadata; len(md) > 0 {
+				values[grpcServerRequestMetadata] = md
+			}
+			// Run the WAF, ignoring the returned actions - if any - since blocking after the request handler's
+			// response is not supported at the moment.
+			event, _ := runWAF(wafCtx, values, timeout)
+
+			// WAF run durations are WAF context bound. As of now we need to keep track of those externally since
+			// we use a new WAF context for each callback. When we are able to re-use the same WAF context across
+			// callbacks, we can get rid of these variables and simply use the WAF bindings in OnHandlerOperationFinish.
+			overall, internal := wafCtx.TotalRuntime()
+			overallRuntimeNs.Add(overall)
+			internalRuntimeNs.Add(internal)
+			nbTimeouts.Add(wafCtx.TotalTimeouts())
+
+			if len(event) == 0 {
+				return
+			}
+			log.Debug("appsec: attack detected by the grpc waf")
+			nbEvents.Inc()
+			mu.Lock()
+			events = append(events, event)
+			mu.Unlock()
+		}))
+
+		op.On(grpcsec.OnHandlerOperationFinish(func(op *grpcsec.HandlerOperation, _ grpcsec.HandlerOperationRes) {
+			defer wafCtx.Close()
+			rInfo := handle.RulesetInfo()
+			addWAFMonitoringTags(op, rInfo.Version, overallRuntimeNs.Load(), internalRuntimeNs.Load(), nbTimeouts.Load())
+
+			// Log the following metrics once per instantiation of a WAF handle
+			monitorRulesOnce.Do(func() {
+				addRulesMonitoringTags(op, rInfo)
+				op.AddTag(ext.ManualKeep, samplernames.AppSec)
+			})
+
+			addSecurityEvents(op, limiter, events...)
+		}))
+	})
+}
+
+func runWAF(wafCtx *waf.Context, values map[string]interface{}, timeout time.Duration) ([]byte, []string) {
+	matches, actions, err := wafCtx.Run(values, timeout)
+	if err != nil {
+		if err == waf.ErrTimeout {
+			log.Debug("appsec: waf timeout value of %s reached", timeout)
+		} else {
+			log.Error("appsec: unexpected waf error: %v", err)
+			return nil, nil
+		}
+	}
+	return matches, actions
+}
+
+// HTTP rule addresses currently supported by the WAF
+const (
+	serverRequestMethodAddr           = "server.request.method"
+	serverRequestRawURIAddr           = "server.request.uri.raw"
+	serverRequestHeadersNoCookiesAddr = "server.request.headers.no_cookies"
+	serverRequestCookiesAddr          = "server.request.cookies"
+	serverRequestQueryAddr            = "server.request.query"
+	serverRequestPathParamsAddr       = "server.request.path_params"
+	serverRequestBodyAddr             = "server.request.body"
+	serverResponseStatusAddr          = "server.response.status"
+	httpClientIPAddr                  = "http.client_ip"
+	userIDAddr                        = "usr.id"
+)
+
+// List of HTTP rule addresses currently supported by the WAF
+var httpAddresses = []string{
+	serverRequestMethodAddr,
+	serverRequestRawURIAddr,
+	serverRequestHeadersNoCookiesAddr,
+	serverRequestCookiesAddr,
+	serverRequestQueryAddr,
+	serverRequestPathParamsAddr,
+	serverRequestBodyAddr,
+	serverResponseStatusAddr,
+	httpClientIPAddr,
+	userIDAddr,
+}
+
+// gRPC rule addresses currently supported by the WAF
+const (
+	grpcServerRequestMessage  = "grpc.server.request.message"
+	grpcServerRequestMetadata = "grpc.server.request.metadata"
+)
+
+// List of gRPC rule addresses currently supported by the WAF
+var grpcAddresses = []string{
+	grpcServerRequestMessage,
+	grpcServerRequestMetadata,
+	httpClientIPAddr,
+	userIDAddr,
+}
+
+func init() {
+	// sort the address lists to avoid mistakes and use sort.SearchStrings()
+	sort.Strings(httpAddresses)
+	sort.Strings(grpcAddresses)
+}
+
+// supportedAddresses returns the list of addresses we actually support from the
+// given rule addresses.
+func supportedAddresses(ruleAddresses []string) (supportedHTTP, supportedGRPC map[string]struct{}, notSupported []string) {
+	// Filter the supported addresses only
+	supportedHTTP = map[string]struct{}{}
+	supportedGRPC = map[string]struct{}{}
+	for _, addr := range ruleAddresses {
+		supported := false
+		if i := sort.SearchStrings(httpAddresses, addr); i < len(httpAddresses) && httpAddresses[i] == addr {
+			supportedHTTP[addr] = struct{}{}
+			supported = true
+		}
+		if i := sort.SearchStrings(grpcAddresses, addr); i < len(grpcAddresses) && grpcAddresses[i] == addr {
+			supportedGRPC[addr] = struct{}{}
+			supported = true
+		}
+
+		if !supported {
+			notSupported = append(notSupported, addr)
+		}
+	}
+
+	return supportedHTTP, supportedGRPC, notSupported
+}
+
+type tagsHolder interface {
+	AddTag(string, interface{})
+}
+
+// Add the tags related to security rules monitoring
+func addRulesMonitoringTags(th tagsHolder, rInfo waf.RulesetInfo) {
+	if len(rInfo.Errors) == 0 {
+		rInfo.Errors = nil
+	}
+	rulesetErrors, err := json.Marshal(rInfo.Errors)
+	if err != nil {
+		log.Error("appsec: could not marshal the waf ruleset info errors to json")
+	}
+	th.AddTag(eventRulesErrorsTag, string(rulesetErrors)) // avoid the tracer's call to fmt.Sprintf on the value
+	th.AddTag(eventRulesLoadedTag, float64(rInfo.Loaded))
+	th.AddTag(eventRulesFailedTag, float64(rInfo.Failed))
+	th.AddTag(wafVersionTag, waf.Version())
+}
+
+// Add the tags related to the monitoring of the WAF
+func addWAFMonitoringTags(th tagsHolder, rulesVersion string, overallRuntimeNs, internalRuntimeNs, timeouts uint64) {
+	// Rules version is set for every request to help the backend associate WAF duration metrics with rule version
+	th.AddTag(eventRulesVersionTag, rulesVersion)
+	th.AddTag(wafTimeoutTag, float64(timeouts))
+	th.AddTag(wafDurationTag, float64(internalRuntimeNs)/1e3)   // ns to us
+	th.AddTag(wafDurationExtTag, float64(overallRuntimeNs)/1e3) // ns to us
+}
+
+type securityEventsAdder interface {
+	AddSecurityEvents(events ...json.RawMessage)
+}
+
+// Helper function to add sec events to an operation taking into account the rate limiter.
+func addSecurityEvents(op securityEventsAdder, limiter Limiter, matches ...json.RawMessage) {
+	if len(matches) > 0 && limiter.Allow() {
+		op.AddSecurityEvents(matches...)
+	}
+}
+
+// processActions sends the relevant actions to the operation's data listener.
+// It returns true if at least one of those actions require interrupting the request handler
+func processActions(op dyngo.Operation, actions map[string]*sharedsec.Action, actionIds []string) (interrupt bool) {
+	for _, id := range actionIds {
+		if a, ok := actions[id]; ok {
+			op.EmitData(actions[id])
+			interrupt = interrupt || a.Blocking()
+		}
+	}
+	return interrupt
+}
+
+// processHTTPSDKAction does two things:
+//   - send actions to the parent operation's data listener, for their handlers to be executed after the user handler
+//   - send an error to the current operation's data listener (created by an SDK call), to signal users to interrupt
+//     their handler.
+func processHTTPSDKAction(op dyngo.Operation, actions map[string]*sharedsec.Action, actionIds []string) {
+	for _, id := range actionIds {
+		if action, ok := actions[id]; ok {
+			if op.Parent() != nil {
+				op.Parent().EmitData(action) // Send the action so that the handler gets executed
+			}
+			if action.Blocking() { // Send the error to be returned by the SDK
+				op.EmitData(httpsec.NewMonitoringError("Request blocked")) // Send error
+			}
+		}
+	}
+}
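The supportedAddresses helper above partitions the addresses declared by a ruleset into HTTP-supported, gRPC-supported and unsupported sets using sorted slices and sort.SearchStrings. The standalone sketch below (not part of this diff) reproduces that lookup pattern: the address strings come from the constants defined in waf.go, while "made.up.address" is invented to show the unsupported branch.

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Subset of the HTTP addresses defined in waf.go, sorted so that
	// sort.SearchStrings can be used for membership tests.
	httpAddrs := []string{
		"http.client_ip",
		"server.request.headers.no_cookies",
		"server.response.status",
	}
	sort.Strings(httpAddrs)

	contains := func(sorted []string, addr string) bool {
		i := sort.SearchStrings(sorted, addr)
		return i < len(sorted) && sorted[i] == addr
	}

	// One supported and one deliberately unsupported address.
	for _, addr := range []string{"server.request.headers.no_cookies", "made.up.address"} {
		if contains(httpAddrs, addr) {
			fmt.Printf("%s: supported by the HTTP listener\n", addr)
		} else {
			fmt.Printf("%s: not supported\n", addr)
		}
	}
}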
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/container.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/container.go
new file mode 100644
index 0000000000..2f6a52b3e1
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/container.go
@@ -0,0 +1,71 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package internal
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+)
+
+const (
+	// cgroupPath is the path to the cgroup file where we can find the container id if one exists.
+	cgroupPath = "/proc/self/cgroup"
+)
+
+const (
+	uuidSource      = "[0-9a-f]{8}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{12}|[0-9a-f]{8}(?:-[0-9a-f]{4}){4}$"
+	containerSource = "[0-9a-f]{64}"
+	taskSource      = "[0-9a-f]{32}-\\d+"
+)
+
+var (
+	// expLine matches a line in the /proc/self/cgroup file. It has a submatch for the last element (path), which contains the container ID.
+	expLine = regexp.MustCompile(`^\d+:[^:]*:(.+)$`)
+
+	// expContainerID matches container IDs and sources. Source: https://github.com/Qard/container-info/blob/master/index.js
+	expContainerID = regexp.MustCompile(fmt.Sprintf(`(%s|%s|%s)(?:.scope)?$`, uuidSource, containerSource, taskSource))
+
+	// containerID is the containerID read at init from /proc/self/cgroup
+	containerID string
+)
+
+func init() {
+	containerID = readContainerID(cgroupPath)
+}
+
+// parseContainerID finds the first container ID reading from r and returns it.
+func parseContainerID(r io.Reader) string {
+	scn := bufio.NewScanner(r)
+	for scn.Scan() {
+		path := expLine.FindStringSubmatch(scn.Text())
+		if len(path) != 2 {
+			// invalid entry, continue
+			continue
+		}
+		if parts := expContainerID.FindStringSubmatch(path[1]); len(parts) == 2 {
+			return parts[1]
+		}
+	}
+	return ""
+}
+
+// readContainerID attempts to return the container ID from the provided file path or empty on failure.
+func readContainerID(fpath string) string {
+	f, err := os.Open(fpath)
+	if err != nil {
+		return ""
+	}
+	defer f.Close()
+	return parseContainerID(f)
+}
+
+// ContainerID attempts to return the container ID from /proc/self/cgroup or empty on failure.
+func ContainerID() string {
+	return containerID
+}
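container.go extracts the container ID by scanning /proc/self/cgroup line by line, isolating the path component of each entry and then matching an ID pattern at the end of that path. The standalone sketch below (not part of this diff) reproduces that flow against an in-memory sample; the cgroup line and the 64-hex ID are made up, and the ID expression is simplified to the 64-hex-character form only.

package main

import (
	"bufio"
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Same shape as container.go: one expression pulls the path out of a
	// /proc/self/cgroup line, the other pulls a 64-hex container ID from the path.
	expLine := regexp.MustCompile(`^\d+:[^:]*:(.+)$`)
	expContainerID := regexp.MustCompile(`([0-9a-f]{64})(?:.scope)?$`)

	// Illustrative cgroup content; the hex ID below is invented.
	sample := "4:cpu:/docker/9d5b23edb1ba181e8910389a99906598d69ac9a0ead109ee55730cc416d95f7f\n"

	scn := bufio.NewScanner(strings.NewReader(sample))
	for scn.Scan() {
		if m := expLine.FindStringSubmatch(scn.Text()); len(m) == 2 {
			if id := expContainerID.FindStringSubmatch(m[1]); len(id) == 2 {
				fmt.Println("container id:", id[1])
			}
		}
	}
}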
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/env.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/env.go
new file mode 100644
index 0000000000..7c4ed6c0cd
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/env.go
@@ -0,0 +1,94 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package internal
+
+import (
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// BoolEnv returns the parsed boolean value of an environment variable, or
+// def otherwise.
+func BoolEnv(key string, def bool) bool {
+	vv, ok := os.LookupEnv(key)
+	if !ok {
+		return def
+	}
+	v, err := strconv.ParseBool(vv)
+	if err != nil {
+		log.Warn("Non-boolean value for env var %s, defaulting to %t. Parse failed with error: %v", key, def, err)
+		return def
+	}
+	return v
+}
+
+// IntEnv returns the parsed int value of an environment variable, or
+// def otherwise.
+func IntEnv(key string, def int) int {
+	vv, ok := os.LookupEnv(key)
+	if !ok {
+		return def
+	}
+	v, err := strconv.Atoi(vv)
+	if err != nil {
+		log.Warn("Non-integer value for env var %s, defaulting to %d. Parse failed with error: %v", key, def, err)
+		return def
+	}
+	return v
+}
+
+// DurationEnv returns the parsed duration value of an environment variable, or
+// def otherwise.
+func DurationEnv(key string, def time.Duration) time.Duration {
+	vv, ok := os.LookupEnv(key)
+	if !ok {
+		return def
+	}
+	v, err := time.ParseDuration(vv)
+	if err != nil {
+		log.Warn("Non-duration value for env var %s, defaulting to %d. Parse failed with error: %v", key, def, err)
+		return def
+	}
+	return v
+}
+
+// ForEachStringTag runs fn on every key:val pair encountered in str.
+// str may contain multiple key:val pairs separated by either space
+// or comma (but not a mixture of both).
+func ForEachStringTag(str string, fn func(key string, val string)) {
+	sep := " "
+	if strings.Index(str, ",") > -1 {
+		// falling back to comma as separator
+		sep = ","
+	}
+	for _, tag := range strings.Split(str, sep) {
+		tag = strings.TrimSpace(tag)
+		if tag == "" {
+			continue
+		}
+		kv := strings.SplitN(tag, ":", 2)
+		key := strings.TrimSpace(kv[0])
+		if key == "" {
+			continue
+		}
+		var val string
+		if len(kv) == 2 {
+			val = strings.TrimSpace(kv[1])
+		}
+		fn(key, val)
+	}
+}
+
+// ParseTagString returns tags parsed from string as map
+func ParseTagString(str string) map[string]string {
+	res := make(map[string]string)
+	ForEachStringTag(str, func(key, val string) { res[key] = val })
+	return res
+}
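ForEachStringTag accepts either space- or comma-separated key:val pairs, which is how DD_TAGS values are usually written, and ParseTagString collects them into a map. The standalone sketch below (not part of this diff) mirrors that parsing behaviour on an example tag string with made-up values.

package main

import (
	"fmt"
	"strings"
)

// parseTags mirrors ParseTagString: pick the separator (comma wins over space
// when present), split, then split each element once on ':'.
func parseTags(str string) map[string]string {
	sep := " "
	if strings.Contains(str, ",") {
		sep = ","
	}
	res := make(map[string]string)
	for _, tag := range strings.Split(str, sep) {
		tag = strings.TrimSpace(tag)
		if tag == "" {
			continue
		}
		kv := strings.SplitN(tag, ":", 2)
		key := strings.TrimSpace(kv[0])
		if key == "" {
			continue
		}
		val := ""
		if len(kv) == 2 {
			val = strings.TrimSpace(kv[1])
		}
		res[key] = val
	}
	return res
}

func main() {
	// Example DD_TAGS-style value; the pairs are illustrative.
	fmt.Println(parseTags("env:prod,team:payments,git.commit.sha:0123abc"))
}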
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/gitmetadata.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/gitmetadata.go
new file mode 100644
index 0000000000..5e6566a6f1
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/gitmetadata.go
@@ -0,0 +1,123 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package internal
+
+import (
+	"os"
+	"sync"
+)
+
+const (
+	// EnvGitMetadataEnabledFlag specifies the environment variable name for enabling/disabling git metadata collection
+	EnvGitMetadataEnabledFlag = "DD_TRACE_GIT_METADATA_ENABLED"
+	// EnvGitRepositoryURL specifies the environment variable name for git repository URL
+	EnvGitRepositoryURL = "DD_GIT_REPOSITORY_URL"
+	// EnvGitCommitSha specifies the environment variable name for the git commit sha
+	EnvGitCommitSha = "DD_GIT_COMMIT_SHA"
+	// EnvDDTags specifies the environment variable name for global tags
+	EnvDDTags = "DD_TAGS"
+
+	// TagRepositoryURL specifies the tag name for git repository URL
+	TagRepositoryURL = "git.repository_url"
+	// TagCommitSha specifies the tag name for git commit sha
+	TagCommitSha = "git.commit.sha"
+	// TagGoPath specifies the tag name for go module path
+	TagGoPath = "go_path"
+
+	// TraceTagRepositoryURL specifies the trace tag name for git repository URL
+	TraceTagRepositoryURL = "_dd.git.repository_url"
+	// TraceTagCommitSha specifies the trace tag name for git commit sha
+	TraceTagCommitSha = "_dd.git.commit.sha"
+	// TraceTagGoPath specifies the trace tag name for go module path
+	TraceTagGoPath = "_dd.go_path"
+)
+
+var (
+	lock = sync.Mutex{}
+
+	gitMetadataTags map[string]string
+)
+
+func updateTags(tags map[string]string, key string, value string) {
+	if _, ok := tags[key]; !ok && value != "" {
+		tags[key] = value
+	}
+}
+
+func updateAllTags(tags map[string]string, newtags map[string]string) {
+	for k, v := range newtags {
+		updateTags(tags, k, v)
+	}
+}
+
+// Get git metadata from environment variables
+func getTagsFromEnv() map[string]string {
+	return map[string]string{
+		TagRepositoryURL: os.Getenv(EnvGitRepositoryURL),
+		TagCommitSha:     os.Getenv(EnvGitCommitSha),
+	}
+}
+
+// Get git metadata from DD_TAGS
+func getTagsFromDDTags() map[string]string {
+	etags := ParseTagString(os.Getenv(EnvDDTags))
+
+	return map[string]string{
+		TagRepositoryURL: etags[TagRepositoryURL],
+		TagCommitSha:     etags[TagCommitSha],
+		TagGoPath:        etags[TagGoPath],
+	}
+}
+
+// GetGitMetadataTags returns git metadata tags
+func GetGitMetadataTags() map[string]string {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if gitMetadataTags != nil {
+		return gitMetadataTags
+	}
+
+	gitMetadataTags = make(map[string]string)
+
+	if BoolEnv(EnvGitMetadataEnabledFlag, true) {
+		updateAllTags(gitMetadataTags, getTagsFromEnv())
+		updateAllTags(gitMetadataTags, getTagsFromDDTags())
+		updateAllTags(gitMetadataTags, getTagsFromBinary())
+	}
+
+	return gitMetadataTags
+}
+
+// ResetGitMetadataTags resets the cached git metadata tags
+func ResetGitMetadataTags() {
+	lock.Lock()
+	defer lock.Unlock()
+
+	gitMetadataTags = nil
+}
+
+// CleanGitMetadataTags cleans up tags from git metadata
+func CleanGitMetadataTags(tags map[string]string) {
+	delete(tags, TagRepositoryURL)
+	delete(tags, TagCommitSha)
+	delete(tags, TagGoPath)
+}
+
+// GetTracerGitMetadataTags returns git metadata tags for the tracer.
+// NB: currently the tracer injects these tags via a workaround
+// (only with the _dd prefix and only on the first span in a payload),
+// so different tag names are provided here.
+func GetTracerGitMetadataTags() map[string]string {
+	res := make(map[string]string)
+	tags := GetGitMetadataTags()
+
+	updateTags(res, TraceTagRepositoryURL, tags[TagRepositoryURL])
+	updateTags(res, TraceTagCommitSha, tags[TagCommitSha])
+	updateTags(res, TraceTagGoPath, tags[TagGoPath])
+
+	return res
+}
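+
+// Illustrative usage sketch (not part of the upstream dd-trace-go source):
+// values set via the DD_GIT_* environment variables take precedence over
+// DD_TAGS and the binary build info. The URL and SHA below are example values.
+//
+//	os.Setenv("DD_GIT_REPOSITORY_URL", "https://example.com/org/repo.git")
+//	os.Setenv("DD_GIT_COMMIT_SHA", "0123456789abcdef0123456789abcdef01234567")
+//	tags := GetGitMetadataTags()
+//	// tags[TagRepositoryURL] == "https://example.com/org/repo.git"
+//	// tags[TagCommitSha] == "0123456789abcdef0123456789abcdef01234567"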
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/gitmetadatabinary.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/gitmetadatabinary.go
new file mode 100644
index 0000000000..84ded5eda2
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/gitmetadatabinary.go
@@ -0,0 +1,41 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+//go:build go1.18
+// +build go1.18
+
+package internal
+
+import (
+	"runtime/debug"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// getTagsFromBinary extracts git metadata from binary metadata
+func getTagsFromBinary() map[string]string {
+	res := make(map[string]string)
+	info, ok := debug.ReadBuildInfo()
+	if !ok {
+		log.Debug("ReadBuildInfo failed, skipping source code metadata extraction")
+		return res
+	}
+	goPath := info.Path
+	var vcs, commitSha string
+	for _, s := range info.Settings {
+		if s.Key == "vcs" {
+			vcs = s.Value
+		} else if s.Key == "vcs.revision" {
+			commitSha = s.Value
+		}
+	}
+	if vcs != "git" {
+		log.Debug("Unknown VCS: '%s', skipping source code metadata extraction", vcs)
+		return res
+	}
+	res[TagCommitSha] = commitSha
+	res[TagGoPath] = goPath
+	return res
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/gitmetadatabinary_legacy.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/gitmetadatabinary_legacy.go
new file mode 100644
index 0000000000..85bcb43cc6
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/gitmetadatabinary_legacy.go
@@ -0,0 +1,19 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+//go:build !go1.18
+// +build !go1.18
+
+package internal
+
+import (
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// getTagsFromBinary extracts git metadata from binary metadata
+func getTagsFromBinary() map[string]string {
+	log.Warn("Go version below 1.18: BuildInfo has no VCS info, skipping source code metadata extraction")
+	return make(map[string]string)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig/globalconfig.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig/globalconfig.go
new file mode 100644
index 0000000000..e715954acf
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig/globalconfig.go
@@ -0,0 +1,95 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package globalconfig stores configuration which applies globally to both the tracer
+// and integrations.
+package globalconfig
+
+import (
+	"math"
+	"sync"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal"
+
+	"github.com/google/uuid"
+)
+
+var cfg = &config{
+	analyticsRate: math.NaN(),
+	runtimeID:     uuid.New().String(),
+	headersAsTags: internal.NewLockMap(map[string]string{}),
+}
+
+type config struct {
+	mu            sync.RWMutex
+	analyticsRate float64
+	serviceName   string
+	runtimeID     string
+	headersAsTags *internal.LockMap
+}
+
+// AnalyticsRate returns the sampling rate at which events should be marked. It uses
+// synchronizing mechanisms, meaning that for optimal performance it's best to read it
+// once and store it.
+func AnalyticsRate() float64 {
+	cfg.mu.RLock()
+	defer cfg.mu.RUnlock()
+	return cfg.analyticsRate
+}
+
+// SetAnalyticsRate sets the given event sampling rate globally.
+func SetAnalyticsRate(rate float64) {
+	cfg.mu.Lock()
+	defer cfg.mu.Unlock()
+	cfg.analyticsRate = rate
+}
+
+// ServiceName returns the default service name used by non-client integrations such as servers and frameworks.
+func ServiceName() string {
+	cfg.mu.RLock()
+	defer cfg.mu.RUnlock()
+	return cfg.serviceName
+}
+
+// SetServiceName sets the global service name set for this application.
+func SetServiceName(name string) {
+	cfg.mu.Lock()
+	defer cfg.mu.Unlock()
+	cfg.serviceName = name
+}
+
+// RuntimeID returns this process's unique runtime id.
+func RuntimeID() string {
+	cfg.mu.RLock()
+	defer cfg.mu.RUnlock()
+	return cfg.runtimeID
+}
+
+// HeaderTagMap returns the mappings of headers to their tag values
+func HeaderTagMap() *internal.LockMap {
+	return cfg.headersAsTags
+}
+
+// HeaderTag returns the configured tag for a given header.
+// This function exists for testing purposes, for performance you may want to use `HeaderTagMap`
+func HeaderTag(header string) string {
+	return cfg.headersAsTags.Get(header)
+}
+
+// SetHeaderTag adds config for header `from` with tag value `to`
+func SetHeaderTag(from, to string) {
+	cfg.headersAsTags.Set(from, to)
+}
+
+// HeaderTagsLen returns the length of globalconfig's headersAsTags map, 0 for empty map
+func HeaderTagsLen() int {
+	return cfg.headersAsTags.Len()
+}
+
+// ClearHeaderTags assigns headersAsTags to a new, empty map
+// It is invoked when WithHeaderTags is called, in order to overwrite the config
+func ClearHeaderTags() {
+	cfg.headersAsTags.Clear()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/azure/azure.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/azure/azure.go
new file mode 100644
index 0000000000..cb07c256a4
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/azure/azure.go
@@ -0,0 +1,63 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package azure
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/cachedfetch"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/httputils"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/validate"
+)
+
+// declare these as vars not const to ease testing
+var (
+	metadataURL = "http://169.254.169.254"
+	timeout     = 300 * time.Millisecond
+
+	// CloudProviderName contains the inventory name for Azure
+	CloudProviderName = "Azure"
+)
+
+func getResponse(ctx context.Context, url string) (string, error) {
+	return httputils.Get(ctx, url, map[string]string{"Metadata": "true"}, timeout)
+}
+
+// GetHostname returns hostname based on Azure instance metadata.
+func GetHostname(ctx context.Context) (string, error) {
+	metadataJSON, err := instanceMetaFetcher.Fetch(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	var metadata struct {
+		VMID string
+	}
+	if err := json.Unmarshal([]byte(metadataJSON), &metadata); err != nil {
+		return "", fmt.Errorf("failed to parse Azure instance metadata: %s", err)
+	}
+
+	if err := validate.ValidHostname(metadata.VMID); err != nil {
+		return "", err
+	}
+
+	return metadata.VMID, nil
+}
+
+var instanceMetaFetcher = cachedfetch.Fetcher{
+	Name: "Azure Instance Metadata",
+	Attempt: func(ctx context.Context) (string, error) {
+		metadataJSON, err := getResponse(ctx,
+			metadataURL+"/metadata/instance/compute?api-version=2017-08-01")
+		if err != nil {
+			return "", fmt.Errorf("failed to get Azure instance metadata: %s", err)
+		}
+		return metadataJSON, nil
+	},
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/cachedfetch/fetcher.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/cachedfetch/fetcher.go
new file mode 100644
index 0000000000..17d1a3837c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/cachedfetch/fetcher.go
@@ -0,0 +1,86 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// This file is pulled from datadog-agent/pkg/util/cachedfetch changing the logger and using strings only
+
+// Package cachedfetch provides a read-through cache for fetched values.
+package cachedfetch
+
+import (
+	"context"
+	"sync"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// Fetcher supports fetching a value, such as from a cloud service API.  An
+// attempt is made to fetch the value on each call to Fetch, but if that
+// attempt fails then a cached value from the last successful attempt is
+// returned, if such a value exists.  This helps the agent to "ride out"
+// temporary failures in cloud APIs while still fetching fresh data when those
+// APIs are functioning properly.  Cached values do not expire.
+//
+// Callers should instantiate one fetcher per piece of data required.
+type Fetcher struct {
+	// function that attempts to fetch the value
+	Attempt func(context.Context) (string, error)
+
+	// the name of the thing being fetched, used in the default log message.  At
+	// least one of Name and LogFailure must be set.
+	Name string
+
+	// function to log a fetch failure, given the error and the last successful
+	// value.  This function is not called if there is no last successful value.
+	// If left at its zero state, a default log message will be generated, using
+	// Name.
+	LogFailure func(error, interface{})
+
+	// previous successfully fetched value
+	lastValue interface{}
+
+	// mutex to protect access to lastValue
+	sync.Mutex
+}
+
+// Fetch attempts to fetch the value, returning the result or the last successful
+// value, or an error if no attempt has ever been successful.  No special handling
+// is included for the Context: both context.Canceled and context.DeadlineExceeded
+// are handled like any other error by returning the cached value.
+//
+// This can be called from multiple goroutines, in which case it will call Attempt
+// concurrently.
+func (f *Fetcher) Fetch(ctx context.Context) (string, error) {
+	value, err := f.Attempt(ctx)
+	if err == nil {
+		f.Lock()
+		f.lastValue = value
+		f.Unlock()
+		return value, nil
+	}
+
+	f.Lock()
+	lastValue := f.lastValue
+	f.Unlock()
+
+	if lastValue == nil {
+		// attempt was never successful
+		return value, err
+	}
+
+	if f.LogFailure == nil {
+		log.Debug("Unable to get %s; returning cached value instead", f.Name)
+	} else {
+		f.LogFailure(err, lastValue)
+	}
+
+	return lastValue.(string), nil
+}
+
+// Reset resets the cached value (used for testing)
+func (f *Fetcher) Reset() {
+	f.Lock()
+	f.lastValue = nil
+	f.Unlock()
+}
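+
+// Illustrative usage sketch (not part of the upstream dd-trace-go source): a
+// Fetcher keeps returning its last good value once the attempt starts failing.
+// fetchFromSomeAPI is a hypothetical helper, not part of this package.
+//
+//	f := cachedfetch.Fetcher{
+//		Name: "Example Value",
+//		Attempt: func(ctx context.Context) (string, error) {
+//			return fetchFromSomeAPI(ctx)
+//		},
+//	}
+//	v, err := f.Fetch(context.Background())
+//	// err is non-nil only if no attempt has ever succeeded; otherwise v is the
+//	// freshest successful result.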
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/ec2/ec2.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/ec2/ec2.go
new file mode 100644
index 0000000000..42c76ba96d
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/ec2/ec2.go
@@ -0,0 +1,72 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package ec2
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/cachedfetch"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/httputils"
+)
+
+// declare these as vars not const to ease testing
+var (
+	metadataURL     = "http://169.254.169.254/latest/meta-data"
+	defaultPrefixes = []string{"ip-", "domu", "ec2amaz-"}
+
+	MaxHostnameSize = 255
+)
+
+var instanceIDFetcher = cachedfetch.Fetcher{
+	Name: "EC2 InstanceID",
+	Attempt: func(ctx context.Context) (string, error) {
+		return getMetadataItemWithMaxLength(ctx,
+			"/instance-id",
+			MaxHostnameSize,
+		)
+	},
+}
+
+// GetInstanceID fetches the instance id for current host from the EC2 metadata API
+func GetInstanceID(ctx context.Context) (string, error) {
+	return instanceIDFetcher.Fetch(ctx)
+}
+
+func getMetadataItemWithMaxLength(ctx context.Context, endpoint string, maxLength int) (string, error) {
+	result, err := getMetadataItem(ctx, endpoint)
+	if err != nil {
+		return result, err
+	}
+	if len(result) > maxLength {
+		return "", fmt.Errorf("%v gave a response with a length greater than %v", endpoint, maxLength)
+	}
+	return result, err
+}
+
+func getMetadataItem(ctx context.Context, endpoint string) (string, error) {
+	return doHTTPRequest(ctx, metadataURL+endpoint)
+}
+
+func doHTTPRequest(ctx context.Context, url string) (string, error) {
+	headers := map[string]string{}
+	// Note: This assumes IMDS v1. IMDS v2 won't work in a containerized app and requires an API Token
+	// Users who have disabled IMDS v1 in favor of v2 will get a fallback hostname from a different provider (likely OS).
+	return httputils.Get(ctx, url, headers, 300*time.Millisecond)
+}
+
+// IsDefaultHostname checks if a hostname is an EC2 default
+func IsDefaultHostname(hostname string) bool {
+	hostname = strings.ToLower(hostname)
+	isDefault := false
+
+	for _, val := range defaultPrefixes {
+		isDefault = isDefault || strings.HasPrefix(hostname, val)
+	}
+	return isDefault
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/ecs/aws.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/ecs/aws.go
new file mode 100644
index 0000000000..2623774b8b
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/ecs/aws.go
@@ -0,0 +1,54 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package ecs
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/cachedfetch"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/httputils"
+)
+
+// declare these as vars not const to ease testing
+var (
+	metadataURL = os.Getenv("ECS_CONTAINER_METADATA_URI_V4")
+	timeout     = 300 * time.Millisecond
+)
+
+var taskFetcher = cachedfetch.Fetcher{
+	Name: "ECS LaunchType",
+	Attempt: func(ctx context.Context) (string, error) {
+		taskJSON, err := getResponse(ctx, metadataURL+"/task")
+		if err != nil {
+			return "", fmt.Errorf("failed to get ECS task metadata: %s", err)
+		}
+		return taskJSON, nil
+	},
+}
+
+func getResponse(ctx context.Context, url string) (string, error) {
+	return httputils.Get(ctx, url, map[string]string{}, timeout)
+}
+
+// GetLaunchType gets the launch-type based on the ECS Task metadata endpoint
+func GetLaunchType(ctx context.Context) (string, error) {
+	taskJSON, err := taskFetcher.Fetch(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	var metadata struct {
+		LaunchType string
+	}
+	if err := json.Unmarshal([]byte(taskJSON), &metadata); err != nil {
+		return "", fmt.Errorf("failed to parse ecs task metadata: %s", err)
+	}
+	return metadata.LaunchType, nil
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/fqdn_nix.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/fqdn_nix.go
new file mode 100644
index 0000000000..336b809478
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/fqdn_nix.go
@@ -0,0 +1,28 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// This file is exactly pulled from datadog-agent/pkg/util/hostname
+
+//go:build !windows
+// +build !windows
+
+package hostname
+
+import (
+	"context"
+	"os/exec"
+	"strings"
+	"time"
+)
+
+func getSystemFQDN() (string, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*1)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, "/bin/hostname", "-f")
+
+	out, err := cmd.Output()
+	return strings.TrimSpace(string(out)), err
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/fqdn_windows.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/fqdn_windows.go
new file mode 100644
index 0000000000..e76da716d5
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/fqdn_windows.go
@@ -0,0 +1,14 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package hostname
+
+import (
+	"fmt"
+)
+
+func getSystemFQDN() (string, error) {
+	return "", fmt.Errorf("SystemFQDN provider not implemented for windows")
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/gce/gce.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/gce/gce.go
new file mode 100644
index 0000000000..e6965d5569
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/gce/gce.go
@@ -0,0 +1,120 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package gce
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/cachedfetch"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/httputils"
+)
+
+// declare these as vars not const to ease testing
+var (
+	metadataURL = "http://169.254.169.254/computeMetadata/v1"
+)
+
+var hostnameFetcher = cachedfetch.Fetcher{
+	Name: "GCP Hostname",
+	Attempt: func(ctx context.Context) (string, error) {
+		hostname, err := getResponseWithMaxLength(ctx, metadataURL+"/instance/hostname",
+			255)
+		if err != nil {
+			return "", fmt.Errorf("unable to retrieve hostname from GCE: %s", err)
+		}
+		return hostname, nil
+	},
+}
+
+var projectIDFetcher = cachedfetch.Fetcher{
+	Name: "GCP Project ID",
+	Attempt: func(ctx context.Context) (string, error) {
+		projectID, err := getResponseWithMaxLength(ctx,
+			metadataURL+"/project/project-id",
+			255)
+		if err != nil {
+			return "", fmt.Errorf("unable to retrieve project ID from GCE: %s", err)
+		}
+		return projectID, err
+	},
+}
+
+var nameFetcher = cachedfetch.Fetcher{
+	Name: "GCP Instance Name",
+	Attempt: func(ctx context.Context) (string, error) {
+		return getResponseWithMaxLength(ctx,
+			metadataURL+"/instance/name",
+			255)
+	},
+}
+
+// GetCanonicalHostname returns the DD canonical hostname (prefer: <instance-name>.<project-id>, otherwise <hostname>)
+func GetCanonicalHostname(ctx context.Context) (string, error) {
+	hostname, err := GetHostname(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	instanceAlias, err := getInstanceAlias(ctx, hostname)
+	if err != nil {
+		return hostname, nil
+	}
+	return instanceAlias, nil
+}
+
+func getInstanceAlias(ctx context.Context, hostname string) (string, error) {
+	instanceName, err := nameFetcher.Fetch(ctx)
+	if err != nil {
+		// If the endpoint is not reachable, fallback on the old way to get the alias.
+		// For instance, it happens in GKE, where the metadata server is only a subset
+		// of the Compute Engine metadata server.
+		// See https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#gke_mds
+		if hostname == "" {
+			return "", fmt.Errorf("unable to retrieve instance name and hostname from GCE: %s", err)
+		}
+		instanceName = strings.SplitN(hostname, ".", 2)[0]
+	}
+
+	projectID, err := projectIDFetcher.Fetch(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("%s.%s", instanceName, projectID), nil
+}
+
+// GetHostname returns the hostname querying GCE Metadata api
+func GetHostname(ctx context.Context) (string, error) {
+	return hostnameFetcher.Fetch(ctx)
+}
+
+func getResponseWithMaxLength(ctx context.Context, endpoint string, maxLength int) (string, error) {
+	result, err := getResponse(ctx, endpoint)
+	if err != nil {
+		return result, err
+	}
+	if len(result) > maxLength {
+		return "", fmt.Errorf("%v gave a response with a length greater than %v", endpoint, maxLength)
+	}
+	return result, err
+}
+
+func getResponse(ctx context.Context, url string) (string, error) {
+	res, err := httputils.Get(ctx, url, map[string]string{"Metadata-Flavor": "Google"}, 1000*time.Millisecond)
+	if err != nil {
+		return "", fmt.Errorf("GCE metadata API error: %s", err)
+	}
+
+	// Some cloud platforms will respond with an empty body, causing the agent to assume a faulty hostname
+	if len(res) <= 0 {
+		return "", fmt.Errorf("empty response body")
+	}
+
+	return res, nil
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/httputils/helpers.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/httputils/helpers.go
new file mode 100644
index 0000000000..861acc356c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/httputils/helpers.go
@@ -0,0 +1,74 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+// This file is pulled from datadog-agent/pkg/util/http (Only removing agent SSL config and unused funcs)
+
+package httputils
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"time"
+)
+
+func createTransport() *http.Transport {
+	return &http.Transport{
+		DialContext: (&net.Dialer{
+			Timeout: 30 * time.Second,
+			// Enables TCP keep-alives to detect broken connections
+			KeepAlive: 30 * time.Second,
+			// Disable RFC 6555 Fast Fallback ("Happy Eyeballs")
+			FallbackDelay: -1 * time.Nanosecond,
+		}).DialContext,
+		MaxIdleConns:        100,
+		MaxIdleConnsPerHost: 5,
+		// This parameter is set to avoid connections sitting idle in the pool indefinitely
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+		Proxy:                 http.ProxyFromEnvironment,
+	}
+}
+
+// Get is a high level helper to query a URL and return its body as a string
+func Get(ctx context.Context, URL string, headers map[string]string, timeout time.Duration) (string, error) {
+	client := http.Client{
+		Transport: createTransport(),
+		Timeout:   timeout,
+	}
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, URL, nil)
+	if err != nil {
+		return "", err
+	}
+
+	for header, value := range headers {
+		req.Header.Add(header, value)
+	}
+
+	res, err := client.Do(req)
+	if err != nil {
+		return "", err
+	}
+
+	return parseResponse(res, "GET", URL)
+}
+
+func parseResponse(res *http.Response, method string, URL string) (string, error) {
+	if res.StatusCode != 200 {
+		return "", fmt.Errorf("status code %d trying to %s %s", res.StatusCode, method, URL)
+	}
+
+	defer res.Body.Close()
+	all, err := io.ReadAll(res.Body)
+	if err != nil {
+		return "", fmt.Errorf("error while reading response from %s: %s", URL, err)
+	}
+
+	return string(all), nil
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/providers.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/providers.go
new file mode 100644
index 0000000000..85c685df2d
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/providers.go
@@ -0,0 +1,245 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package hostname
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/azure"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/ec2"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/ecs"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/gce"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/validate"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// For testing purposes
+var (
+	fargatePf = fargate
+)
+
+var (
+	cachedHostname  string
+	cachedAt        time.Time
+	cachedProvider  string
+	cacheExpiration = 5 * time.Minute
+	m               sync.RWMutex
+	isRefreshing    atomic.Value
+)
+
+const fargateName = "fargate"
+
+func init() {
+	isRefreshing.Store(false)
+}
+
+// getCached returns the cached hostname, cached provider and a bool indicating if the hostname has expired
+func getCached(now time.Time) (string, string, bool) {
+	m.RLock()
+	defer m.RUnlock()
+	if now.Sub(cachedAt) > cacheExpiration {
+		return cachedHostname, cachedProvider, true
+	}
+	return cachedHostname, cachedProvider, false
+}
+
+// setCached caches the newHostname
+func setCached(now time.Time, newHostname string, newProvider string) {
+	m.Lock()
+	defer m.Unlock()
+	cachedHostname = newHostname
+	cachedAt = now
+	cachedProvider = newProvider
+}
+
+type provider struct {
+	name string
+	// Should we stop going down the list of providers if this one is successful
+	stopIfSuccessful bool
+	pf               providerFetch
+}
+
+type providerFetch func(ctx context.Context, currentHostname string) (string, error)
+
+var providerCatalog = []provider{
+	{
+		name:             "configuration",
+		stopIfSuccessful: true,
+		pf:               fromConfig,
+	},
+	{
+		name:             fargateName,
+		stopIfSuccessful: true,
+		pf:               fromFargate,
+	},
+	{
+		name:             "gce",
+		stopIfSuccessful: true,
+		pf:               fromGce,
+	},
+	{
+		name:             "azure",
+		stopIfSuccessful: true,
+		pf:               fromAzure,
+	},
+	// The following providers are coupled. Their behavior changes depending on the result of the previous provider.
+	// Therefore, 'stopIfSuccessful' is set to false.
+	{
+		name:             "fqdn",
+		stopIfSuccessful: false,
+		pf:               fromFQDN,
+	},
+	{
+		name:             "container",
+		stopIfSuccessful: false,
+		pf:               fromContainer,
+	},
+	{
+		name:             "os",
+		stopIfSuccessful: false,
+		pf:               fromOS,
+	},
+	{
+		name:             "aws",
+		stopIfSuccessful: false,
+		pf:               fromEC2,
+	},
+}
+
+// Get returns the cached hostname for the tracer, empty if we haven't found one yet.
+// A goroutine is spawned to refresh the hostname when it is empty or out of date.
+func Get() string {
+	now := time.Now()
+	var (
+		ch      string
+		expired bool
+		pv      string
+	)
+	// if provider is fargate never refresh
+	// Otherwise, refresh on expiration or if hostname hasn't been found.
+	if ch, pv, expired = getCached(now); pv == fargateName || (!expired && ch != "") {
+		return ch
+	}
+	// Use CAS to avoid spawning more than one goroutine trying to update the cached hostname
+	ir := isRefreshing.CompareAndSwap(false, true)
+	if ir {
+		// TODO: One optimization we could do here is hook into the tracer shutdown signal to gracefully disconnect here
+		// For now, we think the added complexity isn't worth it for this single go routine that only runs every 5 minutes.
+		go func() {
+			updateHostname(now)
+		}()
+	}
+	return ch
+}
+
+func updateHostname(now time.Time) {
+	defer isRefreshing.Store(false)
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+	var hostname string
+	var hnProvider string
+
+	for _, p := range providerCatalog {
+		detectedHostname, err := p.pf(ctx, hostname)
+		if err != nil {
+			log.Debug("Unable to get hostname from provider %s: %v", p.name, err)
+			continue
+		}
+		hostname = detectedHostname
+		hnProvider = p.name
+		log.Debug("Found hostname %s, from provider %s", hostname, p.name)
+		if p.stopIfSuccessful {
+			log.Debug("Hostname detection stopping early")
+			setCached(now, hostname, p.name)
+			return
+		}
+	}
+	if hostname != "" {
+		log.Debug("Winning hostname %s from provider %s", hostname, hnProvider)
+		setCached(now, hostname, hnProvider)
+	} else {
+		log.Debug("Unable to reliably determine hostname. You can define one via env var DD_HOSTNAME")
+	}
+}
+
+func fromConfig(_ context.Context, _ string) (string, error) {
+	hn := os.Getenv("DD_HOSTNAME")
+	err := validate.ValidHostname(hn)
+	if err != nil {
+		return "", err
+	}
+	return hn, nil
+}
+
+func fromFargate(ctx context.Context, _ string) (string, error) {
+	return fargatePf(ctx)
+}
+
+func fargate(ctx context.Context) (string, error) {
+	if _, ok := os.LookupEnv("ECS_CONTAINER_METADATA_URI_V4"); !ok {
+		return "", fmt.Errorf("not running in fargate")
+	}
+	launchType, err := ecs.GetLaunchType(ctx)
+	if err != nil {
+		return "", err
+	}
+	if launchType == "FARGATE" {
+		// If we're running on fargate we strip the hostname
+		return "", nil
+	}
+	return "", fmt.Errorf("not running in fargate")
+}
+
+func fromGce(ctx context.Context, _ string) (string, error) {
+	return gce.GetCanonicalHostname(ctx)
+}
+
+func fromAzure(ctx context.Context, _ string) (string, error) {
+	return azure.GetHostname(ctx)
+}
+
+func fromFQDN(_ context.Context, _ string) (string, error) {
+	//TODO: test this on windows
+	fqdn, err := getSystemFQDN()
+	if err != nil {
+		return "", fmt.Errorf("unable to get FQDN from system: %s", err)
+	}
+	return fqdn, nil
+}
+
+func fromOS(_ context.Context, currentHostname string) (string, error) {
+	if currentHostname == "" {
+		return os.Hostname()
+	}
+	return "", fmt.Errorf("skipping OS hostname as a previous provider found a valid hostname")
+}
+
+func fromContainer(_ context.Context, _ string) (string, error) {
+	// This provider is not implemented as most customers do not provide access to kube-api server, kubelet, or docker socket
+	// on their application containers. Providing this access is almost always a bad idea and could be burdensome for customers.
+	return "", fmt.Errorf("container hostname detection not implemented")
+}
+
+func fromEC2(ctx context.Context, currentHostname string) (string, error) {
+	if ec2.IsDefaultHostname(currentHostname) {
+		// If the current hostname is a default one we try to get the instance id
+		instanceID, err := ec2.GetInstanceID(ctx)
+		if err != nil {
+			return "", fmt.Errorf("unable to determine hostname from EC2: %s", err)
+		}
+		err = validate.ValidHostname(instanceID)
+		if err != nil {
+			return "", fmt.Errorf("EC2 instance id is not a valid hostname: %s", err)
+		}
+		return instanceID, nil
+	}
+	return "", fmt.Errorf("not retrieving hostname from AWS: the host is not an ECS instance and other providers already retrieve non-default hostnames")
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/validate/validate.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/validate/validate.go
new file mode 100644
index 0000000000..fa97b1c998
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/validate/validate.go
@@ -0,0 +1,57 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// This file is exactly pulled from datadog-agent/pkg/util/hostname/validate only changing the logger
+
+// Package validate provides hostname validation helpers
+package validate
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+const maxLength = 255
+
+var (
+	validHostnameRfc1123 = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`)
+	localhostIdentifiers = []string{
+		"localhost",
+		"localhost.localdomain",
+		"localhost6.localdomain6",
+		"ip6-localhost",
+	}
+)
+
+// ValidHostname determines whether the passed string is a valid hostname.
+// In case it's not, the returned error contains the details of the failure.
+func ValidHostname(hostname string) error {
+	if hostname == "" {
+		return fmt.Errorf("hostname is empty")
+	} else if isLocal(hostname) {
+		return fmt.Errorf("%s is a local hostname", hostname)
+	} else if len(hostname) > maxLength {
+		log.Error("ValidHostname: name exceeded the maximum length of %d characters", maxLength)
+		return fmt.Errorf("name exceeded the maximum length of %d characters", maxLength)
+	} else if !validHostnameRfc1123.MatchString(hostname) {
+		log.Error("ValidHostname: %s is not RFC1123 compliant", hostname)
+		return fmt.Errorf("%s is not RFC1123 compliant", hostname)
+	}
+	return nil
+}
+
+// check whether the name is in the list of local hostnames
+func isLocal(name string) bool {
+	name = strings.ToLower(name)
+	for _, val := range localhostIdentifiers {
+		if val == name {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/log/log.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/log/log.go
new file mode 100644
index 0000000000..b6f732913e
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/log/log.go
@@ -0,0 +1,243 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package log provides logging utilities for the tracer.
+package log
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/version"
+)
+
+// Level specifies the logging level that the log package prints at.
+type Level int
+
+const (
+	// LevelDebug represents debug level messages.
+	LevelDebug Level = iota
+	// LevelWarn represents warning and errors.
+	LevelWarn
+)
+
+var prefixMsg = fmt.Sprintf("Datadog Tracer %s", version.Tag)
+
+// Logger implementations are able to log given messages that the tracer might
+// output. This interface is duplicated here to avoid a cyclic dependency
+// between this package and ddtrace
+type Logger interface {
+	// Log prints the given message.
+	Log(msg string)
+}
+
+var (
+	mu     sync.RWMutex // guards below fields
+	level               = LevelWarn
+	logger Logger       = &defaultLogger{l: log.New(os.Stderr, "", log.LstdFlags)}
+)
+
+// UseLogger sets l as the active logger and returns a function to restore the
+// previous logger. The return value is mostly useful when testing.
+func UseLogger(l Logger) (undo func()) {
+	Flush()
+	mu.Lock()
+	defer mu.Unlock()
+	old := logger
+	logger = l
+	return func() {
+		logger = old
+	}
+}
+
+// SetLevel sets the given lvl for logging.
+func SetLevel(lvl Level) {
+	mu.Lock()
+	defer mu.Unlock()
+	level = lvl
+}
+
+// DebugEnabled returns true if debug log messages are enabled. This can be used in extremely
+// hot code paths to avoid allocating the ...interface{} argument.
+func DebugEnabled() bool {
+	mu.RLock()
+	lvl := level
+	mu.RUnlock()
+	return lvl == LevelDebug
+}
+
+// Debug prints the given message if the level is LevelDebug.
+func Debug(fmt string, a ...interface{}) {
+	if !DebugEnabled() {
+		return
+	}
+	printMsg("DEBUG", fmt, a...)
+}
+
+// Warn prints a warning message.
+func Warn(fmt string, a ...interface{}) {
+	printMsg("WARN", fmt, a...)
+}
+
+// Info prints an informational message.
+func Info(fmt string, a ...interface{}) {
+	printMsg("INFO", fmt, a...)
+}
+
+var (
+	errmu   sync.RWMutex                // guards below fields
+	erragg  = map[string]*errorReport{} // aggregated errors
+	errrate = time.Minute               // the rate at which errors are reported
+	erron   bool                        // true if errors are being aggregated
+)
+
+func init() {
+	if v := os.Getenv("DD_LOGGING_RATE"); v != "" {
+		if sec, err := strconv.ParseUint(v, 10, 64); err != nil {
+			Warn("Invalid value for DD_LOGGING_RATE: %v", err)
+		} else {
+			errrate = time.Duration(sec) * time.Second
+		}
+	}
+}
+
+type errorReport struct {
+	first time.Time // time when first error occurred
+	err   error
+	count uint64
+}
+
+// Error reports an error. Errors get aggregated and logged periodically. The
+// default is once per minute, or every DD_LOGGING_RATE seconds if that variable is set.
+func Error(format string, a ...interface{}) {
+	key := format // format should 99.9% of the time be constant
+	if reachedLimit(key) {
+		// avoid too much lock contention on spammy errors
+		return
+	}
+	errmu.Lock()
+	defer errmu.Unlock()
+	report, ok := erragg[key]
+	if !ok {
+		erragg[key] = &errorReport{
+			err:   fmt.Errorf(format, a...),
+			first: time.Now(),
+		}
+		report = erragg[key]
+	}
+	report.count++
+	if errrate == 0 {
+		flushLocked()
+		return
+	}
+	if !erron {
+		erron = true
+		time.AfterFunc(errrate, Flush)
+	}
+}
+
+// defaultErrorLimit specifies the maximum number of errors gathered in a report.
+const defaultErrorLimit = 200
+
+// reachedLimit reports whether the maximum count has been reached for this key.
+func reachedLimit(key string) bool {
+	errmu.RLock()
+	e, ok := erragg[key]
+	confirm := ok && e.count > defaultErrorLimit
+	errmu.RUnlock()
+	return confirm
+}
+
+// Flush flushes and resets all aggregated errors to the logger.
+func Flush() {
+	errmu.Lock()
+	defer errmu.Unlock()
+	flushLocked()
+}
+
+func flushLocked() {
+	for _, report := range erragg {
+		msg := fmt.Sprintf("%v", report.err)
+		if report.count > defaultErrorLimit {
+			msg += fmt.Sprintf(", %d+ additional messages skipped (first occurrence: %s)", defaultErrorLimit, report.first.Format(time.RFC822))
+		} else if report.count > 1 {
+			msg += fmt.Sprintf(", %d additional messages skipped (first occurrence: %s)", report.count-1, report.first.Format(time.RFC822))
+		} else {
+			msg += fmt.Sprintf(" (occurred: %s)", report.first.Format(time.RFC822))
+		}
+		printMsg("ERROR", msg)
+	}
+	for k := range erragg {
+		// compiler-optimized map-clearing post go1.11 (golang/go#20138)
+		delete(erragg, k)
+	}
+	erron = false
+}
+
+func printMsg(lvl, format string, a ...interface{}) {
+	msg := fmt.Sprintf("%s %s: %s", prefixMsg, lvl, fmt.Sprintf(format, a...))
+	mu.RLock()
+	logger.Log(msg)
+	mu.RUnlock()
+}
+
+type defaultLogger struct{ l *log.Logger }
+
+func (p *defaultLogger) Log(msg string) { p.l.Print(msg) }
+
+// DiscardLogger discards every call to Log().
+type DiscardLogger struct{}
+
+// Log implements Logger.
+func (d DiscardLogger) Log(_ string) {}
+
+// RecordLogger records every call to Log() and makes it available via Logs().
+type RecordLogger struct {
+	m      sync.Mutex
+	logs   []string
+	ignore []string // a log is ignored if it contains any string in ignore
+}
+
+// Ignore adds substrings to the ignore field of RecordLogger, allowing
+// the RecordLogger to ignore attempts to log strings with certain substrings.
+func (r *RecordLogger) Ignore(substrings ...string) {
+	r.m.Lock()
+	defer r.m.Unlock()
+	r.ignore = append(r.ignore, substrings...)
+}
+
+// Log implements Logger.
+func (r *RecordLogger) Log(msg string) {
+	r.m.Lock()
+	defer r.m.Unlock()
+	for _, ignored := range r.ignore {
+		if strings.Contains(msg, ignored) {
+			return
+		}
+	}
+	r.logs = append(r.logs, msg)
+}
+
+// Logs returns the ordered list of logs recorded by the logger.
+func (r *RecordLogger) Logs() []string {
+	r.m.Lock()
+	defer r.m.Unlock()
+	copied := make([]string, len(r.logs))
+	copy(copied, r.logs)
+	return copied
+}
+
+// Reset resets the logger's internal logs
+func (r *RecordLogger) Reset() {
+	r.m.Lock()
+	defer r.m.Unlock()
+	r.logs = r.logs[:0]
+	r.ignore = r.ignore[:0]
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/namingschema.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/namingschema.go
new file mode 100644
index 0000000000..3265ba33ab
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/namingschema.go
@@ -0,0 +1,117 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+// Package namingschema provides functionality to create naming schemas used by integrations to set different
+// service and span/operation names based on the value of the DD_TRACE_SPAN_ATTRIBUTE_SCHEMA environment variable.
+// It also provides some already implemented schemas for common use cases (client-server, db, messaging, etc.).
+//
+// How to use this package:
+// 1. Implement the VersionSupportSchema interface containing the correct name for each version.
+// 2. Create a new Schema using the New function.
+// 3. Call Schema.GetName to get the correct name based on the user configuration.
+package namingschema
+
+import (
+	"strings"
+	"sync"
+)
+
+// Version represents the available naming schema versions.
+type Version int
+
+const (
+	// SchemaV0 represents naming schema v0.
+	SchemaV0 Version = iota
+	// SchemaV1 represents naming schema v1.
+	SchemaV1
+)
+
+const (
+	defaultSchemaVersion = SchemaV0
+)
+
+var (
+	sv   Version
+	svMu sync.RWMutex
+
+	useGlobalServiceName   bool
+	useGlobalServiceNameMu sync.RWMutex
+)
+
+// ParseVersion attempts to parse the version string.
+func ParseVersion(v string) (Version, bool) {
+	switch strings.ToLower(v) {
+	case "", "v0":
+		return SchemaV0, true
+	case "v1":
+		return SchemaV1, true
+	default:
+		return SchemaV0, false
+	}
+}
+
+// GetVersion returns the global naming schema version used for this application.
+func GetVersion() Version {
+	svMu.RLock()
+	defer svMu.RUnlock()
+	return sv
+}
+
+// SetVersion sets the global naming schema version used for this application.
+func SetVersion(v Version) {
+	svMu.Lock()
+	defer svMu.Unlock()
+	sv = v
+}
+
+// SetDefaultVersion sets the default global naming schema version.
+func SetDefaultVersion() Version {
+	SetVersion(defaultSchemaVersion)
+	return defaultSchemaVersion
+}
+
+// UseGlobalServiceName returns the value of the useGlobalServiceName setting for this application.
+func UseGlobalServiceName() bool {
+	useGlobalServiceNameMu.RLock()
+	defer useGlobalServiceNameMu.RUnlock()
+	return useGlobalServiceName
+}
+
+// SetUseGlobalServiceName sets the value of the useGlobalServiceName setting used for this application.
+func SetUseGlobalServiceName(v bool) {
+	useGlobalServiceNameMu.Lock()
+	defer useGlobalServiceNameMu.Unlock()
+	useGlobalServiceName = v
+}
+
+// VersionSupportSchema is an interface that ensures all the available naming schema versions are implemented by the caller.
+type VersionSupportSchema interface {
+	V0() string
+	V1() string
+}
+
+// Schema allows selecting the proper name to use based on the given VersionSupportSchema.
+type Schema struct {
+	selectedVersion Version
+	vSchema         VersionSupportSchema
+}
+
+// New initializes a new Schema.
+func New(vSchema VersionSupportSchema) *Schema {
+	return &Schema{
+		selectedVersion: GetVersion(),
+		vSchema:         vSchema,
+	}
+}
+
+// GetName returns the proper name for this Schema for the user-selected version.
+func (s *Schema) GetName() string {
+	switch s.selectedVersion {
+	case SchemaV1:
+		return s.vSchema.V1()
+	default:
+		return s.vSchema.V0()
+	}
+}
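+
+// Illustrative usage sketch (not part of the upstream dd-trace-go source),
+// following the three steps from the package documentation; exampleOp and its
+// names are placeholders for this sketch.
+//
+//	type exampleOp struct{}
+//
+//	func (exampleOp) V0() string { return "example.request" }
+//	func (exampleOp) V1() string { return "example.client.request" }
+//
+//	// "example.request" by default (v0), "example.client.request" when
+//	// DD_TRACE_SPAN_ATTRIBUTE_SCHEMA selects v1.
+//	name := namingschema.New(exampleOp{}).GetName()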
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/op_cache.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/op_cache.go
new file mode 100644
index 0000000000..d3574d2925
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/op_cache.go
@@ -0,0 +1,46 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package namingschema
+
+import "fmt"
+
+type cacheOutboundOp struct {
+	cfg    *config
+	system string
+}
+
+// NewCacheOutboundOp creates a new naming schema for outbound operations from caching systems.
+// The V0 implementation defaults to the V1 name and is meant to be overridden if needed, since (generally) it does not
+// follow any common pattern among cache integrations.
+func NewCacheOutboundOp(system string, opts ...Option) *Schema {
+	cfg := &config{}
+	for _, opt := range opts {
+		opt(cfg)
+	}
+	return New(&cacheOutboundOp{cfg: cfg, system: system})
+}
+
+func (c *cacheOutboundOp) V0() string {
+	if c.cfg.overrideV0 != nil {
+		return *c.cfg.overrideV0
+	}
+	return c.V1()
+}
+
+func (c *cacheOutboundOp) V1() string {
+	return fmt.Sprintf("%s.command", c.system)
+}
+
+// NewMemcachedOutboundOp creates a new schema for Memcached (cache) outbound operations.
+func NewMemcachedOutboundOp(opts ...Option) *Schema {
+	newOpts := append([]Option{WithOverrideV0("memcached.query")}, opts...)
+	return NewCacheOutboundOp("memcached", newOpts...)
+}
+
+// NewRedisOutboundOp creates a new schema for Redis (cache) outbound operations.
+func NewRedisOutboundOp(opts ...Option) *Schema {
+	return NewCacheOutboundOp("redis", opts...)
+}
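+
+// Illustrative usage sketch (not part of the upstream dd-trace-go source):
+//
+//	name := namingschema.NewMemcachedOutboundOp().GetName()
+//	// "memcached.query" under schema v0, "memcached.command" under schema v1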
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/op_client_server.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/op_client_server.go
new file mode 100644
index 0000000000..6a81175b0a
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/op_client_server.go
@@ -0,0 +1,85 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package namingschema
+
+import "fmt"
+
+type clientOutboundOp struct {
+	cfg    *config
+	system string
+}
+
+// NewClientOutboundOp creates a new naming schema for client outbound operations.
+func NewClientOutboundOp(system string, opts ...Option) *Schema {
+	cfg := &config{}
+	for _, opt := range opts {
+		opt(cfg)
+	}
+	return New(&clientOutboundOp{cfg: cfg, system: system})
+}
+
+func (c *clientOutboundOp) V0() string {
+	if c.cfg.overrideV0 != nil {
+		return *c.cfg.overrideV0
+	}
+	return fmt.Sprintf("%s.request", c.system)
+}
+
+func (c *clientOutboundOp) V1() string {
+	return fmt.Sprintf("%s.client.request", c.system)
+}
+
+type serverInboundOp struct {
+	cfg    *config
+	system string
+}
+
+// NewServerInboundOp creates a new naming schema for server inbound operations.
+func NewServerInboundOp(system string, opts ...Option) *Schema {
+	cfg := &config{}
+	for _, opt := range opts {
+		opt(cfg)
+	}
+	return New(&serverInboundOp{cfg: cfg, system: system})
+}
+
+func (s *serverInboundOp) V0() string {
+	if s.cfg.overrideV0 != nil {
+		return *s.cfg.overrideV0
+	}
+	return fmt.Sprintf("%s.request", s.system)
+}
+
+func (s *serverInboundOp) V1() string {
+	return fmt.Sprintf("%s.server.request", s.system)
+}
+
+// NewHTTPClientOp creates a new schema for HTTP client outbound operations.
+func NewHTTPClientOp(opts ...Option) *Schema {
+	return NewClientOutboundOp("http", opts...)
+}
+
+// NewHTTPServerOp creates a new schema for HTTP server inbound operations.
+func NewHTTPServerOp(opts ...Option) *Schema {
+	return NewServerInboundOp("http", opts...)
+}
+
+// NewGRPCClientOp creates a new schema for gRPC client outbound operations.
+func NewGRPCClientOp(opts ...Option) *Schema {
+	newOpts := append([]Option{WithOverrideV0("grpc.client")}, opts...)
+	return NewClientOutboundOp("grpc", newOpts...)
+}
+
+// NewGRPCServerOp creates a new schema for gRPC server inbound operations.
+func NewGRPCServerOp(opts ...Option) *Schema {
+	newOpts := append([]Option{WithOverrideV0("grpc.server")}, opts...)
+	return NewServerInboundOp("grpc", newOpts...)
+}
+
+// NewGraphqlServerOp creates a new schema for GraphQL server inbound operations.
+func NewGraphqlServerOp(opts ...Option) *Schema {
+	return NewServerInboundOp("graphql", opts...)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/op_db.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/op_db.go
new file mode 100644
index 0000000000..c93b8244c5
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/op_db.go
@@ -0,0 +1,50 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package namingschema
+
+import "fmt"
+
+type dbOutboundOp struct {
+	cfg    *config
+	system string
+}
+
+// NewDBOutboundOp creates a new naming schema for db outbound operations.
+// The V0 implementation defaults to the V1 name and is meant to be overridden if needed, since (generally) it does not
+// follow any common pattern among db integrations.
+func NewDBOutboundOp(system string, opts ...Option) *Schema {
+	cfg := &config{}
+	for _, opt := range opts {
+		opt(cfg)
+	}
+	return New(&dbOutboundOp{cfg: cfg, system: system})
+}
+
+func (d *dbOutboundOp) V0() string {
+	if d.cfg.overrideV0 != nil {
+		return *d.cfg.overrideV0
+	}
+	return d.V1()
+}
+
+func (d *dbOutboundOp) V1() string {
+	return fmt.Sprintf("%s.query", d.system)
+}
+
+// NewElasticsearchOutboundOp creates a new schema for Elasticsearch (db) outbound operations.
+func NewElasticsearchOutboundOp(opts ...Option) *Schema {
+	return NewDBOutboundOp("elasticsearch", opts...)
+}
+
+// NewMongoDBOutboundOp creates a new schema for MongoDB (db) outbound operations.
+func NewMongoDBOutboundOp(opts ...Option) *Schema {
+	return NewDBOutboundOp("mongodb", opts...)
+}
+
+// NewCassandraOutboundOp creates a new schema for Cassandra (db) outbound operations.
+func NewCassandraOutboundOp(opts ...Option) *Schema {
+	return NewDBOutboundOp("cassandra", opts...)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/op_messaging.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/op_messaging.go
new file mode 100644
index 0000000000..934e58108d
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/op_messaging.go
@@ -0,0 +1,84 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package namingschema
+
+import "fmt"
+
+type messagingOutboundOp struct {
+	cfg    *config
+	system string
+}
+
+// NewMessagingOutboundOp creates a new naming schema for outbound operations from messaging systems.
+func NewMessagingOutboundOp(system string, opts ...Option) *Schema {
+	cfg := &config{}
+	for _, opt := range opts {
+		opt(cfg)
+	}
+	return New(&messagingOutboundOp{cfg: cfg, system: system})
+}
+
+func (m *messagingOutboundOp) V0() string {
+	if m.cfg.overrideV0 != nil {
+		return *m.cfg.overrideV0
+	}
+	return m.V1()
+}
+
+func (m *messagingOutboundOp) V1() string {
+	return fmt.Sprintf("%s.send", m.system)
+}
+
+type messagingInboundOp struct {
+	cfg    *config
+	system string
+}
+
+// NewMessagingInboundOp creates a new schema for inbound operations from messaging systems.
+// The V0 implementation defaults to the V1 name and is meant to be overridden if needed, since (generally) it does not
+// follow any common pattern among messaging integrations.
+func NewMessagingInboundOp(system string, opts ...Option) *Schema {
+	cfg := &config{}
+	for _, opt := range opts {
+		opt(cfg)
+	}
+	return New(&messagingInboundOp{cfg: cfg, system: system})
+}
+
+func (m *messagingInboundOp) V0() string {
+	if m.cfg.overrideV0 != nil {
+		return *m.cfg.overrideV0
+	}
+	return m.V1()
+}
+
+func (m *messagingInboundOp) V1() string {
+	return fmt.Sprintf("%s.process", m.system)
+}
+
+// NewKafkaOutboundOp creates a new schema for Kafka (messaging) outbound operations.
+func NewKafkaOutboundOp(opts ...Option) *Schema {
+	newOpts := append([]Option{WithOverrideV0("kafka.produce")}, opts...)
+	return NewMessagingOutboundOp("kafka", newOpts...)
+}
+
+// NewKafkaInboundOp creates a new schema for Kafka (messaging) inbound operations.
+func NewKafkaInboundOp(opts ...Option) *Schema {
+	newOpts := append([]Option{WithOverrideV0("kafka.consume")}, opts...)
+	return NewMessagingInboundOp("kafka", newOpts...)
+}
+
+// NewGCPPubsubInboundOp creates a new schema for GCP Pubsub (messaging) inbound operations.
+func NewGCPPubsubInboundOp(opts ...Option) *Schema {
+	newOpts := append([]Option{WithOverrideV0("pubsub.receive")}, opts...)
+	return NewMessagingInboundOp("gcp.pubsub", newOpts...)
+}
+
+// NewGCPPubsubOutboundOp creates a new schema for GCP Pubsub (messaging) outbound operations.
+func NewGCPPubsubOutboundOp(opts ...Option) *Schema {
+	newOpts := append([]Option{WithOverrideV0("pubsub.publish")}, opts...)
+	return NewMessagingOutboundOp("gcp.pubsub", newOpts...)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/option.go
new file mode 100644
index 0000000000..a3950d7c85
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/option.go
@@ -0,0 +1,20 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package namingschema
+
+// Option represents an option that can be passed to some naming schemas provided in this package.
+type Option func(cfg *config)
+
+type config struct {
+	overrideV0 *string
+}
+
+// WithOverrideV0 overrides the value returned for V0 in the given Schema.
+func WithOverrideV0(value string) Option {
+	return func(cfg *config) {
+		cfg.overrideV0 = &value
+	}
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/service_name.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/service_name.go
new file mode 100644
index 0000000000..6568491791
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema/service_name.go
@@ -0,0 +1,50 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package namingschema
+
+import "gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
+
+// NewDefaultServiceName returns a Schema with the standard logic to be used for contrib span service names
+// (in-code override > DD_SERVICE environment variable > integration default name).
+// If you need to support older versions not following this logic, you can use the WithOverrideV0 option to override this behavior.
+func NewDefaultServiceName(fallbackName string, opts ...Option) *Schema {
+	cfg := &config{}
+	for _, opt := range opts {
+		opt(cfg)
+	}
+	return New(&standardServiceNameSchema{
+		fallbackName:         fallbackName,
+		useGlobalServiceName: UseGlobalServiceName(),
+		cfg:                  cfg,
+	})
+}
+
+type standardServiceNameSchema struct {
+	fallbackName         string
+	useGlobalServiceName bool
+	cfg                  *config
+}
+
+func (s *standardServiceNameSchema) V0() string {
+	// The V0 override is used by contribs to introduce their default service names (e.g. "kafka", "mongo", etc.)
+	// when V0 is in use. The extra useGlobalServiceName flag allows disabling these default service names even when
+	// V0 is in use.
+	if s.cfg.overrideV0 == nil || s.useGlobalServiceName {
+		return s.getName()
+	}
+	return *s.cfg.overrideV0
+}
+
+func (s *standardServiceNameSchema) V1() string {
+	return s.getName()
+}
+
+func (s *standardServiceNameSchema) getName() string {
+	if svc := globalconfig.ServiceName(); svc != "" {
+		return svc
+	}
+	return s.fallbackName
+}
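A short sketch of how a contrib integration would resolve its span service name with the precedence above, assuming use from inside dd-trace-go (the "kafka" default is illustrative):

```go
package contrib // hypothetical caller inside dd-trace-go

import "gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"

func kafkaServiceNameSchema() *namingschema.Schema {
	// V1 resolves to DD_SERVICE when set, otherwise to the "kafka" fallback.
	// V0 keeps the legacy per-integration default ("kafka") unless the global
	// service name is enforced via UseGlobalServiceName().
	return namingschema.NewDefaultServiceName("kafka",
		namingschema.WithOverrideV0("kafka"))
}
```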
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer/normalizer.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer/normalizer.go
new file mode 100644
index 0000000000..80b17cf5d0
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer/normalizer.go
@@ -0,0 +1,50 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package normalizer provides tag normalization
+package normalizer
+
+import (
+	"net/textproto"
+	"regexp"
+	"strings"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+)
+
+// headerTagRegexp is used to replace all invalid characters in the config. Only alphanumerics, spaces, and dashes are allowed.
+var headerTagRegexp = regexp.MustCompile("[^a-zA-Z0-9 -]")
+
+// HeaderTag accepts a string that contains a header and an optional mapped tag key,
+// e.g, "header" or "header:tag" where `tag` will be the name of the header tag.
+// If multiple colons exist in the input, it splits on the last colon.
+// e.g, "first:second:third" gets split into `header = "first:second"` and `tag="third"`
+// The returned header is in canonical MIMEHeader format.
+func HeaderTag(headerAsTag string) (header string, tag string) {
+	header = strings.ToLower(strings.TrimSpace(headerAsTag))
+	// if a colon is found in `headerAsTag`
+	if last := strings.LastIndex(header, ":"); last >= 0 {
+		header, tag = header[:last], header[last+1:]
+		header, tag = strings.TrimSpace(header), strings.TrimSpace(tag)
+	} else {
+		tag = ext.HTTPRequestHeaders + "." + headerTagRegexp.ReplaceAllString(header, "_")
+	}
+	return textproto.CanonicalMIMEHeaderKey(header), tag
+}
+
+// HeaderTagSlice accepts a slice of strings that contain headers and optional mapped tag key.
+// Headers beginning with "x-datadog-" are ignored.
+// See HeaderTag for details on formatting.
+func HeaderTagSlice(headers []string) map[string]string {
+	headerTagsMap := make(map[string]string)
+	for _, h := range headers {
+		if strings.HasPrefix(h, "x-datadog-") {
+			continue
+		}
+		header, tag := HeaderTag(h)
+		headerTagsMap[header] = tag
+	}
+	return headerTagsMap
+}
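In practice the two parsing modes behave as in the sketch below (the exact tag prefix comes from ext.HTTPRequestHeaders, assumed here to be "http.request.headers"):

```go
package contrib // hypothetical caller inside dd-trace-go

import "gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer"

func headerTagExamples() map[string]string {
	// Explicit mapping: split on the last colon.
	h, t := normalizer.HeaderTag("X-Request-ID:http.request_id")
	_, _ = h, t // h == "X-Request-Id" (canonical MIME form), t == "http.request_id"

	// No mapping: the tag is derived from the header name itself.
	h, t = normalizer.HeaderTag("Content-Type")
	_, _ = h, t // h == "Content-Type", t == "http.request.headers.content-type"

	// Slice form: entries starting with "x-datadog-" are skipped.
	return normalizer.HeaderTagSlice([]string{"User-Agent", "x-datadog-trace-id"})
}
```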
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo.go
new file mode 100644
index 0000000000..7519a917e9
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo.go
@@ -0,0 +1,21 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+// Package osinfo provides information about the current operating system release
+package osinfo
+
+// OSName returns the name of the operating system, including the distribution
+// for Linux when possible.
+func OSName() string {
+	// call out to OS-specific implementation
+	return osName()
+}
+
+// OSVersion returns the operating system release, e.g. major/minor version
+// number and build ID.
+func OSVersion() string {
+	// call out to OS-specific implementation
+	return osVersion()
+}
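The per-OS files that follow supply osName/osVersion, so callers inside dd-trace-go only ever touch these two exported functions; a trivial sketch:

```go
package contrib // hypothetical caller inside dd-trace-go

import (
	"fmt"

	"gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo"
)

func logPlatform() {
	fmt.Printf("os=%q version=%q\n", osinfo.OSName(), osinfo.OSVersion())
}
```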
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_darwin.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_darwin.go
new file mode 100644
index 0000000000..32ead5fe05
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_darwin.go
@@ -0,0 +1,24 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package osinfo
+
+import (
+	"os/exec"
+	"runtime"
+	"strings"
+)
+
+func osName() string {
+	return runtime.GOOS
+}
+
+func osVersion() string {
+	out, err := exec.Command("sw_vers", "-productVersion").Output()
+	if err != nil {
+		return "unknown"
+	}
+	return strings.Trim(string(out), "\n")
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_default.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_default.go
new file mode 100644
index 0000000000..72d70d3885
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_default.go
@@ -0,0 +1,21 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+//go:build !windows && !linux && !darwin && !freebsd
+// +build !windows,!linux,!darwin,!freebsd
+
+package osinfo
+
+import (
+	"runtime"
+)
+
+func osName() string {
+	return runtime.GOOS
+}
+
+func osVersion() string {
+	return "unknown"
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_freebsd.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_freebsd.go
new file mode 100644
index 0000000000..543f2ffdfd
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_freebsd.go
@@ -0,0 +1,24 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package osinfo
+
+import (
+	"os/exec"
+	"runtime"
+	"strings"
+)
+
+func osName() string {
+	return runtime.GOOS
+}
+
+func osVersion() string {
+	out, err := exec.Command("uname", "-r").Output()
+	if err != nil {
+		return "unknown"
+	}
+	return strings.Split(string(out), "-")[0]
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_linux.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_linux.go
new file mode 100644
index 0000000000..96d1e66ad1
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_linux.go
@@ -0,0 +1,52 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package osinfo
+
+import (
+	"bufio"
+	"os"
+	"strings"
+)
+
+func osName() string {
+	f, err := os.Open("/etc/os-release")
+	if err != nil {
+		return "Linux (Unknown Distribution)"
+	}
+	defer f.Close()
+	s := bufio.NewScanner(f)
+	name := "Linux (Unknown Distribution)"
+	for s.Scan() {
+		parts := strings.SplitN(s.Text(), "=", 2)
+		switch parts[0] {
+		case "NAME":
+			name = strings.Trim(parts[1], "\"")
+		}
+	}
+	return name
+}
+
+func osVersion() string {
+	f, err := os.Open("/etc/os-release")
+	if err != nil {
+		return "unknown"
+	}
+	defer f.Close()
+	s := bufio.NewScanner(f)
+	version := "unknown"
+	for s.Scan() {
+		parts := strings.SplitN(s.Text(), "=", 2)
+		switch parts[0] {
+		case "VERSION":
+			version = strings.Trim(parts[1], "\"")
+		case "VERSION_ID":
+			if version == "unknown" {
+				version = strings.Trim(parts[1], "\"")
+			}
+		}
+	}
+	return version
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_windows.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_windows.go
new file mode 100644
index 0000000000..659bd9ce6a
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_windows.go
@@ -0,0 +1,54 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package osinfo
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+
+	"golang.org/x/sys/windows/registry"
+)
+
+func osName() string {
+	return runtime.GOOS
+}
+
+func osVersion() string {
+	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+	if err != nil {
+		return "unknown"
+	}
+	defer k.Close()
+
+	var version strings.Builder
+
+	maj, _, err := k.GetIntegerValue("CurrentMajorVersionNumber")
+	if err == nil {
+		version.WriteString(fmt.Sprintf("%d", maj))
+		min, _, err := k.GetIntegerValue("CurrentMinorVersionNumber")
+		if err == nil {
+			version.WriteString(fmt.Sprintf(".%d", min))
+		}
+	} else {
+		version.WriteString("unknown")
+	}
+
+	ed, _, err := k.GetStringValue("EditionID")
+	if err == nil {
+		version.WriteString(" " + ed)
+	} else {
+		version.WriteString(" Unknown Edition")
+	}
+
+	build, _, err := k.GetStringValue("CurrentBuild")
+	if err == nil {
+		version.WriteString(" Build " + build)
+	} else {
+		version.WriteString(" Unknown Build")
+	}
+	return version.String()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/config.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/config.go
new file mode 100644
index 0000000000..a80c57b067
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/config.go
@@ -0,0 +1,75 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package remoteconfig
+
+import (
+	"net/http"
+	"os"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/version"
+)
+
+const (
+	envPollIntervalSec = "DD_REMOTE_CONFIG_POLL_INTERVAL_SECONDS"
+)
+
+// ClientConfig contains the required values to configure a remoteconfig client
+type ClientConfig struct {
+	// The address at which the agent is listening for remoteconfig update requests
+	AgentURL string
+	// The semantic version of the user's application
+	AppVersion string
+	// The env this tracer is running in
+	Env string
+	// The time interval between two client polls to the agent for updates
+	PollInterval time.Duration
+	// The products this client is interested in
+	Products map[string]struct{}
+	// The tracer's runtime id
+	RuntimeID string
+	// The name of the user's application
+	ServiceName string
+	// The semantic version of the tracer
+	TracerVersion string
+	// The base TUF root metadata file
+	TUFRoot string
+	// The capabilities of the client
+	Capabilities map[Capability]struct{}
+	// HTTP is the HTTP client used to receive config updates
+	HTTP *http.Client
+}
+
+// DefaultClientConfig returns the default remote config client configuration
+func DefaultClientConfig() ClientConfig {
+	return ClientConfig{
+		Capabilities:  map[Capability]struct{}{},
+		Products:      map[string]struct{}{},
+		Env:           os.Getenv("DD_ENV"),
+		HTTP:          &http.Client{Timeout: 10 * time.Second},
+		PollInterval:  pollIntervalFromEnv(),
+		RuntimeID:     globalconfig.RuntimeID(),
+		ServiceName:   globalconfig.ServiceName(),
+		TracerVersion: version.Tag,
+		TUFRoot:       os.Getenv("DD_RC_TUF_ROOT"),
+	}
+}
+
+func pollIntervalFromEnv() time.Duration {
+	interval := internal.IntEnv(envPollIntervalSec, 5)
+	if interval < 0 {
+		log.Debug("Remote config: cannot use a negative poll interval: %s = %d. Defaulting to 5s.", envPollIntervalSec, interval)
+		return 5 * time.Second
+	} else if interval == 0 {
+		log.Debug("Remote config: poll interval set to 0. Polling will be continuous.")
+		return time.Nanosecond
+	}
+
+	return time.Duration(interval) * time.Second
+}
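A configuration sketch: start from DefaultClientConfig and fill in the fields the client cannot infer (the agent URL and service metadata below are placeholders):

```go
package contrib // hypothetical caller inside dd-trace-go

import "gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig"

func newRemoteConfigClientConfig() remoteconfig.ClientConfig {
	cfg := remoteconfig.DefaultClientConfig()
	cfg.AgentURL = "http://localhost:8126" // placeholder agent address
	cfg.ServiceName = "my-service"         // placeholder
	cfg.Env = "prod"                       // placeholder
	cfg.AppVersion = "1.2.3"               // placeholder
	// cfg.PollInterval already honours DD_REMOTE_CONFIG_POLL_INTERVAL_SECONDS:
	// negative values fall back to 5s, 0 means effectively continuous polling.
	return cfg
}
```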
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/remoteconfig.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/remoteconfig.go
new file mode 100644
index 0000000000..8998c50e93
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/remoteconfig.go
@@ -0,0 +1,414 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+package remoteconfig
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math/big"
+	"net/http"
+	"reflect"
+	"strings"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+
+	rc "github.com/DataDog/datadog-agent/pkg/remoteconfig/state"
+)
+
+// Callback represents a function that can process a remote config update.
+// A Callback function can be registered to a remote config client to automatically
+// react upon receiving updates. This function returns the configuration processing status
+// for each config file received through the update.
+type Callback func(updates map[string]ProductUpdate) map[string]rc.ApplyStatus
+
+// Capability represents a bit index to be set in clientData.Capabilities in order to register a client
+// for a specific capability
+type Capability uint
+
+const (
+	_ Capability = iota
+	// ASMActivation represents the capability to activate ASM through remote configuration
+	ASMActivation
+	// ASMIPBlocking represents the capability for ASM to block requests based on user IP
+	ASMIPBlocking
+	// ASMDDRules represents the capability to update the rules used by the ASM WAF for threat detection
+	ASMDDRules
+	// ASMExclusions represents the capability for ASM to exclude traffic from its protections
+	ASMExclusions
+	// ASMRequestBlocking represents the capability for ASM to block requests based on the HTTP request related WAF addresses
+	ASMRequestBlocking
+	// ASMResponseBlocking represents the capability for ASM to block requests based on the HTTP response related WAF addresses
+	ASMResponseBlocking
+	// ASMUserBlocking represents the capability for ASM to block requests based on user ID
+	ASMUserBlocking
+	// ASMCustomRules represents the capability for ASM to receive and use user-defined security rules
+	ASMCustomRules
+	// ASMCustomBlockingResponse represents the capability for ASM to receive and use user-defined blocking responses
+	ASMCustomBlockingResponse
+)
+
+// ProductUpdate represents an update for a specific product.
+// It is a map of file path to raw file content
+type ProductUpdate map[string][]byte
+
+// A Client interacts with an Agent to update and track the state of remote
+// configuration
+type Client struct {
+	ClientConfig
+
+	clientID   string
+	endpoint   string
+	repository *rc.Repository
+	stop       chan struct{}
+
+	callbacks []Callback
+
+	lastError error
+}
+
+// NewClient creates a new remoteconfig Client
+func NewClient(config ClientConfig) (*Client, error) {
+	repo, err := rc.NewUnverifiedRepository()
+	if err != nil {
+		return nil, err
+	}
+	if config.HTTP == nil {
+		config.HTTP = DefaultClientConfig().HTTP
+	}
+
+	return &Client{
+		ClientConfig: config,
+		clientID:     generateID(),
+		endpoint:     fmt.Sprintf("%s/v0.7/config", config.AgentURL),
+		repository:   repo,
+		stop:         make(chan struct{}),
+		lastError:    nil,
+		callbacks:    []Callback{},
+	}, nil
+}
+
+// Start starts the client's update poll loop in a fresh goroutine
+func (c *Client) Start() {
+	go func() {
+		ticker := time.NewTicker(c.PollInterval)
+		defer ticker.Stop()
+
+		for {
+			select {
+			case <-c.stop:
+				close(c.stop)
+				return
+			case <-ticker.C:
+				c.updateState()
+			}
+		}
+	}()
+}
+
+// Stop stops the client's update poll loop
+func (c *Client) Stop() {
+	log.Debug("remoteconfig: gracefully stopping the client")
+	c.stop <- struct{}{}
+	select {
+	case <-c.stop:
+		log.Debug("remoteconfig: client stopped successfully")
+	case <-time.After(time.Second):
+		log.Debug("remoteconfig: client stopping timeout")
+	}
+}
+
+func (c *Client) updateState() {
+	data, err := c.newUpdateRequest()
+	if err != nil {
+		log.Error("remoteconfig: unexpected error while creating a new update request payload: %v", err)
+		return
+	}
+
+	req, err := http.NewRequest(http.MethodGet, c.endpoint, &data)
+	if err != nil {
+		log.Error("remoteconfig: unexpected error while creating a new http request: %v", err)
+		return
+	}
+
+	resp, err := c.HTTP.Do(req)
+	if err != nil {
+		log.Debug("remoteconfig: http request error: %v", err)
+		return
+	}
+	// Flush and close the response body when returning (cf. https://pkg.go.dev/net/http#Client.Do)
+	defer func() {
+		io.ReadAll(resp.Body)
+		resp.Body.Close()
+	}()
+
+	if sc := resp.StatusCode; sc != http.StatusOK {
+		log.Debug("remoteconfig: http request error: response status code is not 200 (OK) but %s", http.StatusText(sc))
+		return
+	}
+
+	respBody, err := io.ReadAll(resp.Body)
+	if err != nil {
+		log.Error("remoteconfig: http request error: could not read the response body: %v", err)
+		return
+	}
+
+	if body := string(respBody); body == `{}` || body == `null` {
+		return
+	}
+
+	var update clientGetConfigsResponse
+	if err := json.Unmarshal(respBody, &update); err != nil {
+		log.Error("remoteconfig: http request error: could not parse the json response body: %v", err)
+		return
+	}
+
+	c.lastError = c.applyUpdate(&update)
+}
+
+// RegisterCallback allows registering a callback that will be invoked when the client
+// receives configuration updates. It is up to that callback to then decide what to do
+// depending on the product related to the configuration update.
+func (c *Client) RegisterCallback(f Callback) {
+	c.callbacks = append(c.callbacks, f)
+}
+
+// UnregisterCallback removes a previously registered callback from the active callbacks list
+// This remove operation preserves ordering
+func (c *Client) UnregisterCallback(f Callback) {
+	fValue := reflect.ValueOf(f)
+	for i, callback := range c.callbacks {
+		if reflect.ValueOf(callback) == fValue {
+			c.callbacks = append(c.callbacks[:i], c.callbacks[i+1:]...)
+		}
+	}
+}
+
+// RegisterProduct adds a product to the list of products listened by the client
+func (c *Client) RegisterProduct(p string) {
+	c.Products[p] = struct{}{}
+}
+
+// UnregisterProduct removes a product from the list of products listened by the client
+func (c *Client) UnregisterProduct(p string) {
+	delete(c.Products, p)
+}
+
+// RegisterCapability adds a capability to the list of capabilities exposed by the client when requesting
+// configuration updates
+func (c *Client) RegisterCapability(cap Capability) {
+	c.Capabilities[cap] = struct{}{}
+}
+
+// UnregisterCapability removes a capability from the list of capabilities exposed by the client when requesting
+// configuration updates
+func (c *Client) UnregisterCapability(cap Capability) {
+	delete(c.Capabilities, cap)
+}
+
+func (c *Client) applyUpdate(pbUpdate *clientGetConfigsResponse) error {
+	fileMap := make(map[string][]byte, len(pbUpdate.TargetFiles))
+	productUpdates := make(map[string]ProductUpdate, len(c.Products))
+	for p := range c.Products {
+		productUpdates[p] = make(ProductUpdate)
+	}
+	for _, f := range pbUpdate.TargetFiles {
+		fileMap[f.Path] = f.Raw
+		for p := range c.Products {
+			// Check the config file path to make sure it belongs to the right product
+			if strings.Contains(f.Path, "/"+p+"/") {
+				productUpdates[p][f.Path] = f.Raw
+			}
+		}
+	}
+
+	mapify := func(s *rc.RepositoryState) map[string]string {
+		m := make(map[string]string)
+		for i := range s.Configs {
+			path := s.CachedFiles[i].Path
+			product := s.Configs[i].Product
+			m[path] = product
+		}
+		return m
+	}
+
+	// Check the repository state before and after the update to detect which configs are not being sent anymore.
+	// This is needed because some products can stop sending configurations, and we want to make sure that the subscribers
+	// are provided with this information in this case
+	stateBefore, err := c.repository.CurrentState()
+	if err != nil {
+		return fmt.Errorf("repository current state error: %v", err)
+	}
+	products, err := c.repository.Update(rc.Update{
+		TUFRoots:      pbUpdate.Roots,
+		TUFTargets:    pbUpdate.Targets,
+		TargetFiles:   fileMap,
+		ClientConfigs: pbUpdate.ClientConfigs,
+	})
+	if err != nil {
+		return fmt.Errorf("repository update error: %v", err)
+	}
+	stateAfter, err := c.repository.CurrentState()
+	if err != nil {
+		return fmt.Errorf("repository current state error after update: %v", err)
+	}
+
+	// Create a config files diff between before/after the update to see which config files are missing
+	mBefore := mapify(&stateBefore)
+	for k := range mapify(&stateAfter) {
+		delete(mBefore, k)
+	}
+
+	// Set the payload data to nil for missing config files. The callbacks then can handle the nil config case to detect
+	// that this config will not be updated anymore.
+	updatedProducts := make(map[string]struct{})
+	for path, product := range mBefore {
+		if productUpdates[product] == nil {
+			productUpdates[product] = make(ProductUpdate)
+		}
+		productUpdates[product][path] = nil
+		updatedProducts[product] = struct{}{}
+	}
+	// Aggregate updated products and missing products so that callbacks get called for both
+	for _, p := range products {
+		updatedProducts[p] = struct{}{}
+	}
+
+	if len(updatedProducts) == 0 {
+		return nil
+	}
+	// Perform the registered callbacks and update the apply status in the repository (RCTE2)
+	// In case of several callbacks handling the same config, statuses take precedence in this order:
+	// 1 - ApplyStateError
+	// 2 - ApplyStateUnacknowledged
+	// 3 - ApplyStateAcknowledged
+	// This makes sure that any product that would need to re-receive the config in a subsequent update will be allowed to
+	statuses := make(map[string]rc.ApplyStatus)
+	for _, fn := range c.callbacks {
+		for path, status := range fn(productUpdates) {
+			if s, ok := statuses[path]; !ok || status.State == rc.ApplyStateError ||
+				s.State == rc.ApplyStateAcknowledged && status.State == rc.ApplyStateUnacknowledged {
+				statuses[path] = status
+			}
+		}
+	}
+	for p, s := range statuses {
+		c.repository.UpdateApplyStatus(p, s)
+	}
+
+	return nil
+}
+
+func (c *Client) newUpdateRequest() (bytes.Buffer, error) {
+	state, err := c.repository.CurrentState()
+	if err != nil {
+		return bytes.Buffer{}, err
+	}
+	// Temporary check while using untrusted repo, for which no initial root file is provided
+	if state.RootsVersion < 1 {
+		state.RootsVersion = 1
+	}
+
+	pbCachedFiles := make([]*targetFileMeta, 0, len(state.CachedFiles))
+	for _, f := range state.CachedFiles {
+		pbHashes := make([]*targetFileHash, 0, len(f.Hashes))
+		for alg, hash := range f.Hashes {
+			pbHashes = append(pbHashes, &targetFileHash{
+				Algorithm: alg,
+				Hash:      hex.EncodeToString(hash),
+			})
+		}
+		pbCachedFiles = append(pbCachedFiles, &targetFileMeta{
+			Path:   f.Path,
+			Length: int64(f.Length),
+			Hashes: pbHashes,
+		})
+	}
+
+	hasError := c.lastError != nil
+	errMsg := ""
+	if hasError {
+		errMsg = c.lastError.Error()
+	}
+
+	var pbConfigState []*configState
+	if !hasError {
+		pbConfigState = make([]*configState, 0, len(state.Configs))
+		for _, f := range state.Configs {
+			pbConfigState = append(pbConfigState, &configState{
+				ID:         f.ID,
+				Version:    f.Version,
+				Product:    f.Product,
+				ApplyState: f.ApplyStatus.State,
+				ApplyError: f.ApplyStatus.Error,
+			})
+		}
+	}
+
+	capa := big.NewInt(0)
+	for i := range c.Capabilities {
+		capa.SetBit(capa, int(i), 1)
+	}
+	products := make([]string, 0, len(c.Products))
+	for p := range c.Products {
+		products = append(products, p)
+	}
+	req := clientGetConfigsRequest{
+		Client: &clientData{
+			State: &clientState{
+				RootVersion:    uint64(state.RootsVersion),
+				TargetsVersion: uint64(state.TargetsVersion),
+				ConfigStates:   pbConfigState,
+				HasError:       hasError,
+				Error:          errMsg,
+			},
+			ID:       c.clientID,
+			Products: products,
+			IsTracer: true,
+			ClientTracer: &clientTracer{
+				RuntimeID:     c.RuntimeID,
+				Language:      "go",
+				TracerVersion: c.TracerVersion,
+				Service:       c.ServiceName,
+				Env:           c.Env,
+				AppVersion:    c.AppVersion,
+			},
+			Capabilities: capa.Bytes(),
+		},
+		CachedTargetFiles: pbCachedFiles,
+	}
+
+	var b bytes.Buffer
+
+	err = json.NewEncoder(&b).Encode(&req)
+	if err != nil {
+		return bytes.Buffer{}, err
+	}
+
+	return b, nil
+}
+
+var (
+	idSize     = 21
+	idAlphabet = []rune("_-0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+)
+
+func generateID() string {
+	bytes := make([]byte, idSize)
+	_, err := rand.Read(bytes)
+	if err != nil {
+		panic(err)
+	}
+	id := make([]rune, idSize)
+	for i := 0; i < idSize; i++ {
+		id[i] = idAlphabet[bytes[i]&63]
+	}
+	return string(id[:idSize])
+}
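A minimal sketch of the client life cycle defined above: create the client, register products, capabilities, and a callback, then Start and later Stop it. The product name is a placeholder.

```go
package contrib // hypothetical caller inside dd-trace-go

import (
	rc "github.com/DataDog/datadog-agent/pkg/remoteconfig/state"

	"gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig"
)

func startRemoteConfig(cfg remoteconfig.ClientConfig) (*remoteconfig.Client, error) {
	client, err := remoteconfig.NewClient(cfg)
	if err != nil {
		return nil, err
	}
	client.RegisterProduct("MY_PRODUCT") // placeholder product name
	client.RegisterCapability(remoteconfig.ASMActivation)
	client.RegisterCallback(func(updates map[string]remoteconfig.ProductUpdate) map[string]rc.ApplyStatus {
		statuses := map[string]rc.ApplyStatus{}
		for path, raw := range updates["MY_PRODUCT"] {
			if raw == nil {
				// nil payload: the backend stopped sending this config (see applyUpdate).
				continue
			}
			statuses[path] = rc.ApplyStatus{State: rc.ApplyStateAcknowledged}
		}
		return statuses
	})
	client.Start() // polls the agent every cfg.PollInterval until Stop is called
	return client, nil
}
```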
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/types.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/types.go
new file mode 100644
index 0000000000..87f46f0e97
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/types.go
@@ -0,0 +1,83 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+package remoteconfig
+
+import rc "github.com/DataDog/datadog-agent/pkg/remoteconfig/state"
+
+type clientData struct {
+	State        *clientState  `json:"state,omitempty"`
+	ID           string        `json:"id,omitempty"`
+	Products     []string      `json:"products,omitempty"`
+	IsTracer     bool          `json:"is_tracer,omitempty"`
+	ClientTracer *clientTracer `json:"client_tracer,omitempty"`
+	LastSeen     uint64        `json:"last_seen,omitempty"`
+	Capabilities []byte        `json:"capabilities,omitempty"`
+}
+
+type clientTracer struct {
+	RuntimeID     string   `json:"runtime_id,omitempty"`
+	Language      string   `json:"language,omitempty"`
+	TracerVersion string   `json:"tracer_version,omitempty"`
+	Service       string   `json:"service,omitempty"`
+	Env           string   `json:"env,omitempty"`
+	AppVersion    string   `json:"app_version,omitempty"`
+	Tags          []string `json:"tags,omitempty"`
+}
+
+type clientAgent struct {
+	Name    string `json:"name,omitempty"`
+	Version string `json:"version,omitempty"`
+}
+
+type configState struct {
+	ID         string        `json:"id,omitempty"`
+	Version    uint64        `json:"version,omitempty"`
+	Product    string        `json:"product,omitempty"`
+	ApplyState rc.ApplyState `json:"apply_state,omitempty"`
+	ApplyError string        `json:"apply_error,omitempty"`
+}
+
+type clientState struct {
+	RootVersion        uint64         `json:"root_version"`
+	TargetsVersion     uint64         `json:"targets_version"`
+	ConfigStates       []*configState `json:"config_states,omitempty"`
+	HasError           bool           `json:"has_error,omitempty"`
+	Error              string         `json:"error,omitempty"`
+	BackendClientState []byte         `json:"backend_client_state,omitempty"`
+}
+
+type targetFileHash struct {
+	Algorithm string `json:"algorithm,omitempty"`
+	Hash      string `json:"hash,omitempty"`
+}
+
+type targetFileMeta struct {
+	Path   string            `json:"path,omitempty"`
+	Length int64             `json:"length,omitempty"`
+	Hashes []*targetFileHash `json:"hashes,omitempty"`
+}
+
+type clientGetConfigsRequest struct {
+	Client            *clientData       `json:"client,omitempty"`
+	CachedTargetFiles []*targetFileMeta `json:"cached_target_files,omitempty"`
+}
+
+type clientGetConfigsResponse struct {
+	Roots         [][]byte `json:"roots,omitempty"`
+	Targets       []byte   `json:"targets,omitempty"`
+	TargetFiles   []*file  `json:"target_files,omitempty"`
+	ClientConfigs []string `json:"client_configs,omitempty"`
+}
+
+type file struct {
+	Path string `json:"path,omitempty"`
+	Raw  []byte `json:"raw,omitempty"`
+}
+
+type fileMetaState struct {
+	Version uint64 `json:"version,omitempty"`
+	Hash    string `json:"hash,omitempty"`
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames/samplernames.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames/samplernames.go
new file mode 100644
index 0000000000..6342b53b47
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames/samplernames.go
@@ -0,0 +1,37 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package samplernames
+
+// SamplerName specifies the name of a sampler which was
+// responsible for a certain sampling decision.
+type SamplerName int8
+
+const (
+	// Unknown specifies that the span was sampled
+	// but, the tracer was unable to identify the sampler.
+	// No sampling decision maker will be propagated.
+	Unknown SamplerName = -1
+	// Default specifies that the span was sampled without any sampler.
+	Default SamplerName = 0
+	// AgentRate specifies that the span was sampled
+	// with a rate calculated by the trace agent.
+	AgentRate SamplerName = 1
+	// RemoteRate specifies that the span was sampled
+	// with a dynamically calculated remote rate.
+	RemoteRate SamplerName = 2
+	// RuleRate specifies that the span was sampled by the RuleSampler.
+	RuleRate SamplerName = 3
+	// Manual specifies that the span was sampled manually by user.
+	Manual SamplerName = 4
+	// AppSec specifies that the span was sampled by AppSec.
+	AppSec SamplerName = 5
+	// RemoteUserRate specifies that the span was sampled
+	// with a user specified remote rate.
+	RemoteUserRate SamplerName = 6
+	// SingleSpan specifies that the span was sampled by single
+	// span sampling rules.
+	SingleSpan SamplerName = 8
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/client.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/client.go
new file mode 100644
index 0000000000..6baa12c4b5
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/client.go
@@ -0,0 +1,580 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+// Package telemetry implements a client for sending telemetry information to
+// Datadog regarding usage of an APM library such as tracing or profiling.
+package telemetry
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"runtime"
+	"runtime/debug"
+	"strings"
+	"sync"
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
+	logger "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo"
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/version"
+)
+
+// Client buffers and sends telemetry messages to Datadog (possibly through an
+// agent).
+type Client interface {
+	ProductStart(namespace Namespace, configuration []Configuration)
+	Record(namespace Namespace, metric MetricKind, name string, value float64, tags []string, common bool)
+	Count(namespace Namespace, name string, value float64, tags []string, common bool)
+	ApplyOps(opts ...Option)
+	Stop()
+}
+
+var (
+	// GlobalClient acts as a global telemetry client that the
+	// tracer, profiler, and appsec products will use
+	GlobalClient Client
+	globalClient sync.Mutex
+
+	// contribPackages tracks the enabled integrations
+	contribPackages []Integration
+	contrib         sync.Mutex
+
+	// copied from dd-trace-go/profiler
+	defaultHTTPClient = &http.Client{
+		// We copy the transport to avoid using the default one, as it might be
+		// augmented with tracing and we don't want these calls to be recorded.
+		// See https://golang.org/pkg/net/http/#DefaultTransport .
+		Transport: &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			DialContext: (&net.Dialer{
+				Timeout:   30 * time.Second,
+				KeepAlive: 30 * time.Second,
+				DualStack: true,
+			}).DialContext,
+			MaxIdleConns:          100,
+			IdleConnTimeout:       90 * time.Second,
+			TLSHandshakeTimeout:   10 * time.Second,
+			ExpectContinueTimeout: 1 * time.Second,
+		},
+		Timeout: 5 * time.Second,
+	}
+	hostname string
+
+	// protects agentlessURL, which may be changed for testing purposes
+	agentlessEndpointLock sync.RWMutex
+	// agentlessURL is the endpoint used to send telemetry in an agentless environment. It is
+	// also the default URL in case connecting to the agent URL fails.
+	agentlessURL = "https://instrumentation-telemetry-intake.datadoghq.com/api/v2/apmtelemetry"
+
+	defaultHeartbeatInterval = 60 // seconds
+
+	// LogPrefix specifies the prefix for all telemetry logging
+	LogPrefix = "Instrumentation telemetry: "
+)
+
+func init() {
+	h, err := os.Hostname()
+	if err == nil {
+		hostname = h
+	}
+	GlobalClient = new(client)
+}
+
+// client implements Client interface. Client.Start should be called before any other methods.
+//
+// Client is safe to use from multiple goroutines concurrently. The client will
+// send all telemetry requests in the background, in order to avoid blocking the
+// caller since telemetry should not disrupt an application. Metrics are
+// aggregated by the Client.
+type client struct {
+	// URL for the Datadog agent or Datadog telemetry endpoint
+	URL string
+	// APIKey should be supplied if the endpoint is not a Datadog agent,
+	// i.e. you are sending telemetry directly to Datadog
+	APIKey string
+	// The interval for sending a heartbeat signal to the backend.
+	// Configurable with DD_TELEMETRY_HEARTBEAT_INTERVAL. Default 60s.
+	heartbeatInterval time.Duration
+
+	// e.g. "tracers", "profilers", "appsec"
+	Namespace Namespace
+
+	// App-specific information
+	Service string
+	Env     string
+	Version string
+
+	// Client will be used for telemetry uploads. This http.Client, if
+	// provided, should be the same as would be used for any other
+	// interaction with the Datadog agent, e.g. if the agent is accessed
+	// over UDS, or if the user provides their own http.Client to the
+	// profiler/tracer to access the agent over a proxy.
+	//
+	// If Client is nil, an http.Client with the same Transport settings as
+	// http.DefaultTransport and a 5 second timeout will be used.
+	Client *http.Client
+
+	// mu guards all of the following fields
+	mu sync.Mutex
+
+	// debug enables the debug flag for all requests, see
+	// https://dtdg.co/3bv2MMv.
+	// DD_INSTRUMENTATION_TELEMETRY_DEBUG configures this field.
+	debug bool
+	// started is true in between when Start() returns and the next call to
+	// Stop()
+	started bool
+	// seqID is a sequence number used to order telemetry messages by
+	// the back end.
+	seqID int64
+	// heartbeatT is used to schedule heartbeat messages
+	heartbeatT *time.Timer
+	// requests hold all messages which don't need to be immediately sent
+	requests []*Request
+	// metrics holds un-sent metrics that will be aggregated the next time
+	// metrics are sent
+	metrics    map[Namespace]map[string]*metric
+	newMetrics bool
+}
+
+func log(msg string, args ...interface{}) {
+	// Debug level so users aren't spammed with telemetry info.
+	logger.Debug(fmt.Sprintf(LogPrefix+msg, args...))
+}
+
+// start registers that the app has begun running with the app-started event.
+// Must be called with c.mu locked.
+// start also configures the telemetry client based on the following telemetry
+// environment variables: DD_INSTRUMENTATION_TELEMETRY_ENABLED,
+// DD_TELEMETRY_HEARTBEAT_INTERVAL, DD_INSTRUMENTATION_TELEMETRY_DEBUG,
+// and DD_TELEMETRY_DEPENDENCY_COLLECTION_ENABLED.
+// TODO: implement passing in error information about tracer start
+func (c *client) start(configuration []Configuration, namespace Namespace) {
+	if Disabled() {
+		return
+	}
+	if c.started {
+		log("attempted to start telemetry client when client has already started - ignoring attempt")
+		return
+	}
+	// Don't start the telemetry client if there is some error configuring the client with fallback
+	// options, e.g. an API key was not found but agentless telemetry is expected.
+	if err := c.fallbackOps(); err != nil {
+		log(err.Error())
+		return
+	}
+
+	c.started = true
+	c.metrics = make(map[Namespace]map[string]*metric)
+	c.debug = internal.BoolEnv("DD_INSTRUMENTATION_TELEMETRY_DEBUG", false)
+
+	productInfo := Products{
+		AppSec: ProductDetails{
+			Version: version.Tag,
+			Enabled: appsec.Enabled(),
+		},
+	}
+	productInfo.Profiler = ProductDetails{
+		Version: version.Tag,
+		// if the profiler is the one starting the telemetry client,
+		// then profiling is enabled
+		Enabled: namespace == NamespaceProfilers,
+	}
+	payload := &AppStarted{
+		Configuration: configuration,
+		Products:      productInfo,
+	}
+	appStarted := c.newRequest(RequestTypeAppStarted)
+	appStarted.Body.Payload = payload
+	c.scheduleSubmit(appStarted)
+
+	if collectDependencies() {
+		var depPayload Dependencies
+		if deps, ok := debug.ReadBuildInfo(); ok {
+			for _, dep := range deps.Deps {
+				depPayload.Dependencies = append(depPayload.Dependencies,
+					Dependency{
+						Name:    dep.Path,
+						Version: strings.TrimPrefix(dep.Version, "v"),
+					},
+				)
+			}
+		}
+		dep := c.newRequest(RequestTypeDependenciesLoaded)
+		dep.Body.Payload = depPayload
+		c.scheduleSubmit(dep)
+	}
+
+	if len(contribPackages) > 0 {
+		req := c.newRequest(RequestTypeAppIntegrationsChange)
+		req.Body.Payload = IntegrationsChange{Integrations: contribPackages}
+		c.scheduleSubmit(req)
+	}
+
+	c.flush()
+
+	heartbeat := internal.IntEnv("DD_TELEMETRY_HEARTBEAT_INTERVAL", defaultHeartbeatInterval)
+	if heartbeat < 1 || heartbeat > 3600 {
+		log("DD_TELEMETRY_HEARTBEAT_INTERVAL=%d not in [1,3600] range, setting to default of %d", heartbeat, defaultHeartbeatInterval)
+		heartbeat = defaultHeartbeatInterval
+	}
+	c.heartbeatInterval = time.Duration(heartbeat) * time.Second
+	c.heartbeatT = time.AfterFunc(c.heartbeatInterval, c.backgroundHeartbeat)
+}
+
+// Stop notifies the telemetry endpoint that the app is closing. All outstanding
+// messages will also be sent. No further messages will be sent until the client
+// is started again
+func (c *client) Stop() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if !c.started {
+		return
+	}
+	c.started = false
+	c.heartbeatT.Stop()
+	// close request types have no body
+	r := c.newRequest(RequestTypeAppClosing)
+	c.scheduleSubmit(r)
+	c.flush()
+}
+
+// Disabled returns whether instrumentation telemetry is disabled
+// according to the DD_INSTRUMENTATION_TELEMETRY_ENABLED env var
+func Disabled() bool {
+	return !internal.BoolEnv("DD_INSTRUMENTATION_TELEMETRY_ENABLED", true)
+}
+
+// collectDependencies returns whether dependencies telemetry information is sent
+func collectDependencies() bool {
+	return internal.BoolEnv("DD_TELEMETRY_DEPENDENCY_COLLECTION_ENABLED", true)
+}
+
+// MetricKind specifies the type of metric being reported.
+// Metric types mirror Datadog metric types - for a more detailed
+// description of metric types, see:
+// https://docs.datadoghq.com/metrics/types/?tab=count#metric-types
+type MetricKind string
+
+var (
+	// MetricKindGauge represents a gauge type metric
+	MetricKindGauge MetricKind = "gauge"
+	// MetricKindCount represents a count type metric
+	MetricKindCount MetricKind = "count"
+	// MetricKindDist represents a distribution type metric
+	MetricKindDist MetricKind = "distribution"
+)
+
+type metric struct {
+	name  string
+	kind  MetricKind
+	value float64
+	// Unix timestamp
+	ts     float64
+	tags   []string
+	common bool
+}
+
+// TODO: Can there be identically named/tagged metrics with a "common" and "not
+// common" variant?
+
+func newMetric(name string, kind MetricKind, tags []string, common bool) *metric {
+	return &metric{
+		name:   name,
+		kind:   kind,
+		tags:   append([]string{}, tags...),
+		common: common,
+	}
+}
+
+func metricKey(name string, tags []string, kind MetricKind) string {
+	return name + string(kind) + strings.Join(tags, "-")
+}
+
+// Record sets the value for a gauge or distribution metric type
+// with the given name and tags. If the metric is not language-specific, common should be set to true
+func (c *client) Record(namespace Namespace, kind MetricKind, name string, value float64, tags []string, common bool) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if !c.started {
+		return
+	}
+	if _, ok := c.metrics[namespace]; !ok {
+		c.metrics[namespace] = map[string]*metric{}
+	}
+	key := metricKey(name, tags, kind)
+	m, ok := c.metrics[namespace][key]
+	if !ok {
+		m = newMetric(name, kind, tags, common)
+		c.metrics[namespace][key] = m
+	}
+	m.value = value
+	m.ts = float64(time.Now().Unix())
+	c.newMetrics = true
+}
+
+// Count adds the value to a count with the given name and tags. If the metric
+// is not language-specific, common should be set to true
+func (c *client) Count(namespace Namespace, name string, value float64, tags []string, common bool) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if !c.started {
+		return
+	}
+	if _, ok := c.metrics[namespace]; !ok {
+		c.metrics[namespace] = map[string]*metric{}
+	}
+	key := metricKey(name, tags, MetricKindCount)
+	m, ok := c.metrics[namespace][key]
+	if !ok {
+		m = newMetric(name, MetricKindCount, tags, common)
+		c.metrics[namespace][key] = m
+	}
+	m.value += value
+	m.ts = float64(time.Now().Unix())
+	c.newMetrics = true
+}
+
+// flush submits any outstanding telemetry messages and aggregated metrics to
+// the backend. Requests are sent in the background. Must be called with c.mu
+// locked.
+func (c *client) flush() {
+	// initialize submissions slice of capacity len(c.requests) + 2
+	// to hold all the new events, plus two potential metric events
+	submissions := make([]*Request, 0, len(c.requests)+2)
+
+	// copy over requests so we can do the actual submission without holding
+	// the lock. Zero out the old stuff so we don't leak references
+	for i, r := range c.requests {
+		submissions = append(submissions, r)
+		c.requests[i] = nil
+	}
+	c.requests = c.requests[:0]
+
+	if c.newMetrics {
+		c.newMetrics = false
+		for namespace := range c.metrics {
+			// metrics can either be request type generate-metrics or distributions
+			dPayload := &DistributionMetrics{
+				Namespace: namespace,
+			}
+			gPayload := &Metrics{
+				Namespace: namespace,
+			}
+			for _, m := range c.metrics[namespace] {
+				if m.kind == MetricKindDist {
+					dPayload.Series = append(dPayload.Series, DistributionSeries{
+						Metric: m.name,
+						Tags:   m.tags,
+						Common: m.common,
+						Points: []float64{m.value},
+					})
+				} else {
+					gPayload.Series = append(gPayload.Series, Series{
+						Metric: m.name,
+						Type:   string(m.kind),
+						Tags:   m.tags,
+						Common: m.common,
+						Points: [][2]float64{{m.ts, m.value}},
+					})
+				}
+			}
+			if len(dPayload.Series) > 0 {
+				distributions := c.newRequest(RequestTypeDistributions)
+				distributions.Body.Payload = dPayload
+				submissions = append(submissions, distributions)
+			}
+			if len(gPayload.Series) > 0 {
+				generateMetrics := c.newRequest(RequestTypeGenerateMetrics)
+				generateMetrics.Body.Payload = gPayload
+				submissions = append(submissions, generateMetrics)
+			}
+		}
+	}
+
+	go func() {
+		for _, r := range submissions {
+			err := r.submit()
+			if err != nil {
+				log("submission error: %s", err.Error())
+			}
+		}
+	}()
+}
+
+var (
+	osName        string
+	osNameOnce    sync.Once
+	osVersion     string
+	osVersionOnce sync.Once
+)
+
+// XXX: is it actually safe to cache osName and osVersion? For example, can the
+// kernel be updated without stopping execution?
+
+func getOSName() string {
+	osNameOnce.Do(func() { osName = osinfo.OSName() })
+	return osName
+}
+
+func getOSVersion() string {
+	osVersionOnce.Do(func() { osVersion = osinfo.OSVersion() })
+	return osVersion
+}
+
+// newRequest creates a request populated with the common fields shared by all requests
+// sent through this Client
+func (c *client) newRequest(t RequestType) *Request {
+	c.seqID++
+	body := &Body{
+		APIVersion:  "v2",
+		RequestType: t,
+		TracerTime:  time.Now().Unix(),
+		RuntimeID:   globalconfig.RuntimeID(),
+		SeqID:       c.seqID,
+		Debug:       c.debug,
+		Application: Application{
+			ServiceName:     c.Service,
+			Env:             c.Env,
+			ServiceVersion:  c.Version,
+			TracerVersion:   version.Tag,
+			LanguageName:    "go",
+			LanguageVersion: runtime.Version(),
+		},
+		Host: Host{
+			Hostname:     hostname,
+			OS:           getOSName(),
+			OSVersion:    getOSVersion(),
+			Architecture: runtime.GOARCH,
+			// TODO (lievan): getting kernel name, release, version TBD
+		},
+	}
+
+	header := &http.Header{
+		"Content-Type":               {"application/json"},
+		"DD-Telemetry-API-Version":   {"v2"},
+		"DD-Telemetry-Request-Type":  {string(t)},
+		"DD-Client-Library-Language": {"go"},
+		"DD-Client-Library-Version":  {version.Tag},
+		"DD-Agent-Env":               {c.Env},
+		"DD-Agent-Hostname":          {hostname},
+		"Datadog-Container-ID":       {internal.ContainerID()},
+	}
+	if c.URL == getAgentlessURL() {
+		header.Set("DD-API-KEY", c.APIKey)
+	}
+	client := c.Client
+	if client == nil {
+		client = defaultHTTPClient
+	}
+	return &Request{Body: body,
+		Header:     header,
+		HTTPClient: client,
+		URL:        c.URL,
+	}
+}
+
+// submit sends a telemetry request
+func (r *Request) submit() error {
+	retry, err := r.trySubmit()
+	if retry {
+		// retry telemetry submissions in instances where the telemetry client has trouble
+		// connecting with the agent
+		log("telemetry submission failed, retrying with agentless: %s", err)
+		r.URL = getAgentlessURL()
+		r.Header.Set("DD-API-KEY", defaultAPIKey())
+		if _, err := r.trySubmit(); err == nil {
+			return nil
+		}
+		log("retrying with agentless telemetry failed: %s", err)
+	}
+	return err
+}
+
+// agentlessRetry determines if we should retry a failed request
+// by submitting it to the agentless endpoint
+func agentlessRetry(req *Request, resp *http.Response, err error) bool {
+	if req.URL == getAgentlessURL() {
+		// no need to retry with agentless endpoint if it already failed
+		return false
+	}
+	if err != nil {
+		// we didn't get a response which might signal a connectivity problem with
+		// agent - retry with agentless
+		return true
+	}
+	// TODO: add more status codes we do not want to retry on
+	doNotRetry := []int{http.StatusBadRequest, http.StatusTooManyRequests, http.StatusUnauthorized, http.StatusForbidden}
+	for _, status := range doNotRetry {
+		if resp.StatusCode == status {
+			return false
+		}
+	}
+	return true
+}
+
+// trySubmit submits a telemetry request to the specified URL
+// in the Request struct. If submission fails, return whether or not
+// this submission should be re-tried with the agentless endpoint
+// as well as the error that occurred
+func (r *Request) trySubmit() (retry bool, err error) {
+	b, err := json.Marshal(r.Body)
+	if err != nil {
+		return false, err
+	}
+
+	req, err := http.NewRequest(http.MethodPost, r.URL, bytes.NewReader(b))
+	if err != nil {
+		return false, err
+	}
+	req.Header = *r.Header
+
+	req.ContentLength = int64(len(b))
+
+	client := r.HTTPClient
+	if client == nil {
+		client = defaultHTTPClient
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		return agentlessRetry(r, resp, err), err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK {
+		return agentlessRetry(r, resp, err), errBadStatus(resp.StatusCode)
+	}
+	return false, nil
+}
+
+type errBadStatus int
+
+func (e errBadStatus) Error() string { return fmt.Sprintf("bad HTTP response status %d", e) }
+
+// scheduleSubmit queues a request to be sent to the backend. Should be called
+// with c.mu locked
+func (c *client) scheduleSubmit(r *Request) {
+	c.requests = append(c.requests, r)
+}
+
+// backgroundHeartbeat is invoked at every heartbeat interval,
+// sending the app-heartbeat event and flushing any outstanding
+// telemetry messages
+func (c *client) backgroundHeartbeat() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if !c.started {
+		return
+	}
+	c.scheduleSubmit(c.newRequest(RequestTypeAppHeartbeat))
+	c.flush()
+	c.heartbeatT.Reset(c.heartbeatInterval)
+}
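A usage sketch for the telemetry client above, using the Namespace constants and Configuration helpers defined in message.go below. ProductStart is declared on the Client interface but implemented in another file of the package, and all key names here are placeholders.

```go
package contrib // hypothetical caller inside dd-trace-go

import "gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"

func reportTelemetry() {
	// Send the app-started event with a configuration snapshot.
	telemetry.GlobalClient.ProductStart(telemetry.NamespaceTracers, []telemetry.Configuration{
		telemetry.BoolConfig("trace_enabled", true),
		telemetry.StringConfig("service", "my-service"),
	})
	defer telemetry.GlobalClient.Stop() // flushes outstanding messages and sends app-closing

	// Metrics are aggregated under the client mutex and flushed with each heartbeat.
	telemetry.GlobalClient.Count(telemetry.NamespaceTracers, "spans_created", 1, []string{"integration:net/http"}, true)
	telemetry.GlobalClient.Record(telemetry.NamespaceTracers, telemetry.MetricKindGauge, "queue_length", 42, nil, false)
}
```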
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/message.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/message.go
new file mode 100644
index 0000000000..dd39f319b1
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/message.go
@@ -0,0 +1,257 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+package telemetry
+
+import "net/http"
+
+// Request captures all necessary information for a telemetry event submission
+type Request struct {
+	Body       *Body
+	Header     *http.Header
+	HTTPClient *http.Client
+	URL        string
+}
+
+// Body is the common high-level structure encapsulating a telemetry request body
+type Body struct {
+	APIVersion  string      `json:"api_version"`
+	RequestType RequestType `json:"request_type"`
+	TracerTime  int64       `json:"tracer_time"`
+	RuntimeID   string      `json:"runtime_id"`
+	SeqID       int64       `json:"seq_id"`
+	Debug       bool        `json:"debug"`
+	Payload     interface{} `json:"payload"`
+	Application Application `json:"application"`
+	Host        Host        `json:"host"`
+}
+
+// RequestType determines how the Payload of a request should be handled
+type RequestType string
+
+const (
+	// RequestTypeAppStarted is the first message sent by the telemetry
+	// client, containing the configuration loaded at startup
+	RequestTypeAppStarted RequestType = "app-started"
+	// RequestTypeAppHeartbeat is sent periodically by the client to indicate
+	// that the app is still running
+	RequestTypeAppHeartbeat RequestType = "app-heartbeat"
+	// RequestTypeGenerateMetrics contains count, gauge, or rate metrics accumulated by the
+	// client, and is sent periodically along with the heartbeat
+	RequestTypeGenerateMetrics RequestType = "generate-metrics"
+	// RequestTypeDistributions is to send distribution type metrics accumulated by the
+	// client, and is sent periodically along with the heartbeat
+	RequestTypeDistributions RequestType = "distributions"
+	// RequestTypeAppClosing is sent when the telemetry client is stopped
+	RequestTypeAppClosing RequestType = "app-closing"
+	// RequestTypeDependenciesLoaded is sent if DD_TELEMETRY_DEPENDENCY_COLLECTION_ENABLED
+	// is enabled. Sent when Start is called for the telemetry client.
+	RequestTypeDependenciesLoaded RequestType = "app-dependencies-loaded"
+	// RequestTypeAppClientConfigurationChange is sent if there are changes
+	// to the client library configuration
+	RequestTypeAppClientConfigurationChange RequestType = "app-client-configuration-change"
+	// RequestTypeAppProductChange is sent when products are enabled/disabled
+	RequestTypeAppProductChange RequestType = "app-product-change"
+	// RequestTypeAppIntegrationsChange is sent when the telemetry client starts
+	// with info on which integrations are used.
+	RequestTypeAppIntegrationsChange RequestType = "app-integrations-change"
+)
+
+// Namespace describes an APM product to distinguish telemetry coming from
+// different products used by the same application
+type Namespace string
+
+const (
+	// NamespaceGeneral is for general use
+	NamespaceGeneral Namespace = "general"
+	// NamespaceTracers is for distributed tracing
+	NamespaceTracers Namespace = "tracers"
+	// NamespaceProfilers is for continuous profiling
+	NamespaceProfilers Namespace = "profilers"
+	// NamespaceASM is for application security monitoring
+	NamespaceASM Namespace = "appsec" // This was defined before the appsec -> ASM change
+)
+
+// Application is identifying information about the app itself
+type Application struct {
+	ServiceName     string `json:"service_name"`
+	Env             string `json:"env"`
+	ServiceVersion  string `json:"service_version"`
+	TracerVersion   string `json:"tracer_version"`
+	LanguageName    string `json:"language_name"`
+	LanguageVersion string `json:"language_version"`
+	RuntimeName     string `json:"runtime_name"`
+	RuntimeVersion  string `json:"runtime_version"`
+	RuntimePatches  string `json:"runtime_patches,omitempty"`
+}
+
+// Host is identifying information about the host on which the app
+// is running
+type Host struct {
+	Hostname  string `json:"hostname"`
+	OS        string `json:"os"`
+	OSVersion string `json:"os_version,omitempty"`
+	// TODO: Do we care about the kernel stuff? internal/osinfo gets most of
+	// this information in OSName/OSVersion
+	Architecture  string `json:"architecture"`
+	KernelName    string `json:"kernel_name"`
+	KernelRelease string `json:"kernel_release"`
+	KernelVersion string `json:"kernel_version"`
+}
+
+// AppStarted corresponds to the "app-started" request type
+type AppStarted struct {
+	Configuration     []Configuration     `json:"configuration,omitempty"`
+	Products          Products            `json:"products,omitempty"`
+	AdditionalPayload []AdditionalPayload `json:"additional_payload,omitempty"`
+	Error             Error               `json:"error,omitempty"`
+}
+
+// IntegrationsChange corresponds to the app-integrations-change request type
+type IntegrationsChange struct {
+	Integrations []Integration `json:"integrations"`
+}
+
+// Integration is an integration that is configured to be traced automatically.
+type Integration struct {
+	Name        string `json:"name"`
+	Enabled     bool   `json:"enabled"`
+	Version     string `json:"version,omitempty"`
+	AutoEnabled bool   `json:"auto_enabled,omitempty"`
+	Compatible  bool   `json:"compatible,omitempty"`
+	Error       string `json:"error,omitempty"`
+}
+
+// ConfigurationChange corresponds to the `AppClientConfigurationChange` event
+// that contains information about configuration changes since the app-started event
+type ConfigurationChange struct {
+	Configuration []Configuration `json:"conf_key_values"`
+	RemoteConfig  RemoteConfig    `json:"remote_config"`
+}
+
+// Configuration is a library-specific configuration value
+// that should be initialized through StringConfig, IntConfig, FloatConfig, or BoolConfig
+type Configuration struct {
+	Name  string      `json:"name"`
+	Value interface{} `json:"value"`
+	// origin is the source of the config. It is one of {env_var, code, dd_config, remote_config}
+	Origin      string `json:"origin"`
+	Error       Error  `json:"error"`
+	IsOverriden bool   `json:"is_overridden"`
+}
+
+// TODO: be able to pass in origin, error, isOverriden info to config
+// constructors
+
+// StringConfig returns a Configuration struct with a string value
+func StringConfig(key string, val string) Configuration {
+	return Configuration{Name: key, Value: val}
+}
+
+// IntConfig returns a Configuration struct with an int value
+func IntConfig(key string, val int) Configuration {
+	return Configuration{Name: key, Value: val}
+}
+
+// FloatConfig returns a Configuration struct with a float value
+func FloatConfig(key string, val float64) Configuration {
+	return Configuration{Name: key, Value: val}
+}
+
+// BoolConfig returns a Configuration struct with a bool value
+func BoolConfig(key string, val bool) Configuration {
+	return Configuration{Name: key, Value: val}
+}
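+
+// Editor's sketch (not part of the upstream file): the helpers above are
+// typically used to assemble the configuration slice reported alongside an
+// app-started or app-client-configuration-change event. The keys shown here
+// are hypothetical:
+//
+//	cfg := []Configuration{
+//		StringConfig("env", "prod"),
+//		BoolConfig("profiling_enabled", true),
+//		IntConfig("sample_rate_limit", 100),
+//		FloatConfig("trace_sample_rate", 0.5),
+//	}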
+
+// Products specifies information about available products.
+type Products struct {
+	AppSec   ProductDetails `json:"appsec,omitempty"`
+	Profiler ProductDetails `json:"profiler,omitempty"`
+}
+
+// ProductDetails specifies details about a product.
+type ProductDetails struct {
+	Enabled bool   `json:"enabled"`
+	Version string `json:"version,omitempty"`
+	Error   Error  `json:"error,omitempty"`
+}
+
+// Dependencies stores a list of dependencies
+type Dependencies struct {
+	Dependencies []Dependency `json:"dependencies"`
+}
+
+// Dependency is a Go module on which the application depends. This information
+// can be accessed at run-time through the runtime/debug.ReadBuildInfo API.
+type Dependency struct {
+	Name    string `json:"name"`
+	Version string `json:"version"`
+}
+
+// RemoteConfig contains information about remote-config
+type RemoteConfig struct {
+	UserEnabled     string `json:"user_enabled"`     // whether the library has made a request to fetch remote-config
+	ConfigsRecieved bool   `json:"configs_received"` // whether the library receives a valid config response
+	Error           Error  `json:"error"`
+}
+
+// Error stores error information about various tracer events
+type Error struct {
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+}
+
+// AdditionalPayload can be used to add extra information to the app-started
+// event
+type AdditionalPayload struct {
+	Name  string      `json:"name"`
+	Value interface{} `json:"value"`
+}
+
+// Metrics corresponds to the "generate-metrics" request type
+type Metrics struct {
+	Namespace Namespace `json:"namespace"`
+	Series    []Series  `json:"series"`
+}
+
+// DistributionMetrics corresponds to the "distributions" request type
+type DistributionMetrics struct {
+	Namespace Namespace            `json:"namespace"`
+	Series    []DistributionSeries `json:"series"`
+}
+
+// Series is a sequence of observations for a single named metric.
+// Each element of the Points field stores a timestamp and a value.
+type Series struct {
+	Metric string       `json:"metric"`
+	Points [][2]float64 `json:"points"`
+	// Interval is required for gauge and rate metrics
+	Interval int      `json:"interval,omitempty"`
+	Type     string   `json:"type,omitempty"`
+	Tags     []string `json:"tags"`
+	// Common distinguishes metrics which are cross-language vs.
+	// language-specific.
+	//
+	// NOTE: If this field isn't present in the request, the API assumes
+	// the metric is common. So we can't "omitempty" even though the
+	// field is technically optional.
+	Common    bool   `json:"common"`
+	Namespace string `json:"namespace"`
+}
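+
+// Editor's sketch (not upstream code): each entry in Points pairs a Unix
+// timestamp with an observed value, so a single count observation could be
+// reported as follows (the metric name is hypothetical):
+//
+//	s := Series{
+//		Metric: "spans_created",
+//		Points: [][2]float64{{float64(time.Now().Unix()), 1}},
+//		Type:   "count",
+//		Common: true,
+//	}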
+
+// DistributionSeries is a sequence of observations for a distribution metric.
+// Unlike `Series`, DistributionSeries does not store timestamps in `Points`
+type DistributionSeries struct {
+	Metric string    `json:"metric"`
+	Points []float64 `json:"points"`
+	Tags   []string  `json:"tags"`
+	// Common distinguishes metrics which are cross-language vs.
+	// language-specific.
+	//
+	// NOTE: If this field isn't present in the request, the API assumes
+	// the metric is common. So we can't "omitempty" even though the
+	// field is technically optional.
+	Common bool `json:"common"`
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/option.go
new file mode 100644
index 0000000000..a87008b8df
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/option.go
@@ -0,0 +1,142 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package telemetry
+
+import (
+	"errors"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
+)
+
+// An Option is used to configure the telemetry client's settings
+type Option func(*client)
+
+// ApplyOps sets various fields of the client
+func (c *client) ApplyOps(opts ...Option) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	for _, opt := range opts {
+		opt(c)
+	}
+}
+
+// WithNamespace sets name as the telemetry client's namespace (tracer, profiler, appsec)
+func WithNamespace(name Namespace) Option {
+	return func(client *client) {
+		client.Namespace = name
+	}
+}
+
+// WithEnv sets the app specific environment for the telemetry client
+func WithEnv(env string) Option {
+	return func(client *client) {
+		client.Env = env
+	}
+}
+
+// WithService sets the app specific service for the telemetry client
+func WithService(service string) Option {
+	return func(client *client) {
+		client.Service = service
+	}
+}
+
+// WithVersion sets the app specific version for the telemetry client
+func WithVersion(version string) Option {
+	return func(client *client) {
+		client.Version = version
+	}
+}
+
+// WithHTTPClient specifies the http client for the telemetry client
+func WithHTTPClient(httpClient *http.Client) Option {
+	return func(client *client) {
+		client.Client = httpClient
+	}
+}
+
+func defaultAPIKey() string {
+	return os.Getenv("DD_API_KEY")
+}
+
+// WithAPIKey sets the Datadog API key for the telemetry client
+func WithAPIKey(v string) Option {
+	return func(client *client) {
+		client.APIKey = v
+	}
+}
+
+// WithURL sets the URL to which telemetry information is flushed.
+// When uploading through the agent, requests go to
+//
+//	${AGENT_URL}/telemetry/proxy/api/v2/apmtelemetry
+//
+// and for agentless, to
+//
+//	https://instrumentation-telemetry-intake.datadoghq.com/api/v2/apmtelemetry
+//
+// together with an API key.
+func WithURL(agentless bool, agentURL string) Option {
+	return func(client *client) {
+		if agentless {
+			client.URL = getAgentlessURL()
+		} else {
+			u, err := url.Parse(agentURL)
+			if err == nil {
+				u.Path = "/telemetry/proxy/api/v2/apmtelemetry"
+				client.URL = u.String()
+			} else {
+				log("Agent URL %s is invalid, switching to agentless telemetry endpoint", agentURL)
+				client.URL = getAgentlessURL()
+			}
+		}
+	}
+}
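+
+// Editor's sketch (not upstream code): options are applied in bulk through
+// ApplyOps; a client c flushing to a local agent might be configured as
+// follows (the service name and agent address are hypothetical):
+//
+//	c.ApplyOps(
+//		WithNamespace(NamespaceTracers),
+//		WithService("my-service"),
+//		WithURL(false, "http://localhost:8126"),
+//	)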
+
+func getAgentlessURL() string {
+	agentlessEndpointLock.RLock()
+	defer agentlessEndpointLock.RUnlock()
+	return agentlessURL
+}
+
+// configEnvFallback returns def if it is non-empty; otherwise it falls back to
+// the value of the environment variable with the given key.
+func configEnvFallback(key, def string) string {
+	if def != "" {
+		return def
+	}
+	return os.Getenv(key)
+}
+
+// fallbackOps populates missing fields of the client with environment variables
+// or default values.
+func (c *client) fallbackOps() error {
+	if c.Client == nil {
+		WithHTTPClient(defaultHTTPClient)(c)
+	}
+	if len(c.APIKey) == 0 && c.URL == getAgentlessURL() {
+		WithAPIKey(defaultAPIKey())(c)
+		if c.APIKey == "" {
+			return errors.New("agentless is turned on, but valid DD API key was not found")
+		}
+	}
+	c.Service = configEnvFallback("DD_SERVICE", c.Service)
+	if len(c.Service) == 0 {
+		if name := globalconfig.ServiceName(); len(name) != 0 {
+			c.Service = name
+		} else {
+			c.Service = filepath.Base(os.Args[0])
+
+		}
+	}
+	c.Env = configEnvFallback("DD_ENV", c.Env)
+	c.Version = configEnvFallback("DD_VERSION", c.Version)
+	return nil
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/telemetry.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/telemetry.go
new file mode 100644
index 0000000000..d9ed92a3c5
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/telemetry.go
@@ -0,0 +1,115 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+// Package telemetry implements a client for sending telemetry information to
+// Datadog regarding usage of an APM library such as tracing or profiling.
+package telemetry
+
+import (
+	"time"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
+)
+
+// ProductStart signals that the product has started with some configuration
+// information. It will start the telemetry client if it is not already started.
+// If the client is already started, it will send any necessary app-product-change
+// events to indicate whether the product is enabled, as well as an app-client-configuration-change
+// event in case any new configuration information is available.
+func (c *client) ProductStart(namespace Namespace, configuration []Configuration) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.started {
+		c.configChange(configuration)
+		switch namespace {
+		case NamespaceProfilers:
+			c.productEnabled(NamespaceProfilers)
+		case NamespaceTracers:
+			// Since appsec is integrated with the tracer, we send an app-product-change
+			// update about appsec when the tracer starts. Any tracer-related configuration
+			// information can be passed along here as well.
+			if appsec.Enabled() {
+				c.productEnabled(NamespaceASM)
+			}
+		case NamespaceASM:
+			c.productEnabled(NamespaceASM)
+		default:
+			log("unknown product namespace provided to ProductStart")
+		}
+	} else {
+		c.start(configuration, namespace)
+	}
+}
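+
+// Editor's sketch (not upstream code): assuming ProductStart is exposed on the
+// global Client, a product would typically report its start like this (the
+// configuration key is hypothetical):
+//
+//	GlobalClient.ProductStart(NamespaceProfilers, []Configuration{
+//		BoolConfig("delta_profiles", true),
+//	})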
+
+// configChange enqueues an app-client-configuration-change event to be flushed.
+// Must be called with c.mu locked.
+func (c *client) configChange(configuration []Configuration) {
+	if !c.started {
+		log("attempted to send config change event, but telemetry client has not started")
+		return
+	}
+	if len(configuration) > 0 {
+		configChange := new(ConfigurationChange)
+		configChange.Configuration = configuration
+		configReq := c.newRequest(RequestTypeAppClientConfigurationChange)
+		configReq.Body.Payload = configChange
+		c.scheduleSubmit(configReq)
+	}
+}
+
+// productEnabled enqueues an app-product-change event that signals a product has been turned on.
+// Must be called with c.mu locked. An app-product-change event with enabled=true indicates
+// that a certain product has been used for this application.
+func (c *client) productEnabled(namespace Namespace) {
+	if !c.started {
+		log("attempted to send product change event, but telemetry client has not started")
+		return
+	}
+	products := new(Products)
+	switch namespace {
+	case NamespaceProfilers:
+		products.Profiler = ProductDetails{Enabled: true}
+	case NamespaceASM:
+		products.AppSec = ProductDetails{Enabled: true}
+	default:
+		log("unknown product namespace, app-product-change telemetry event will not send")
+		return
+	}
+	productReq := c.newRequest(RequestTypeAppProductChange)
+	productReq.Body.Payload = products
+	c.scheduleSubmit(productReq)
+}
+
+// Integrations returns which integrations are tracked by telemetry.
+func Integrations() []Integration {
+	contrib.Lock()
+	defer contrib.Unlock()
+	return contribPackages
+}
+
+// LoadIntegration notifies telemetry that an integration is being used.
+func LoadIntegration(name string) {
+	if Disabled() {
+		return
+	}
+	contrib.Lock()
+	defer contrib.Unlock()
+	contribPackages = append(contribPackages, Integration{Name: name, Enabled: true})
+}
+
+// Time is used to track a distribution metric that measures the time (ms)
+// of some portion of code. It returns a function that should be called when
+// the desired code finishes executing.
+// For example, by adding:
+// defer Time(namespace, "init_time", nil, true)()
+// at the beginning of the tracer Start function, the tracer start time is measured
+// and stored as a metric to be flushed by the global telemetry client.
+func Time(namespace Namespace, name string, tags []string, common bool) (finish func()) {
+	start := time.Now()
+	return func() {
+		elapsed := time.Since(start)
+		GlobalClient.Record(namespace, MetricKindDist, name, float64(elapsed.Milliseconds()), tags, common)
+	}
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/utils.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/utils.go
new file mode 100644
index 0000000000..82e645d7c2
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry/utils.go
@@ -0,0 +1,50 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+// Package telemetry implements a client for sending telemetry information to
+// Datadog regarding usage of an APM library such as tracing or profiling.
+package telemetry
+
+import (
+	"testing"
+)
+
+// MockGlobalClient replaces the global telemetry client with a custom
+// implementation of the Client interface. It returns a function that can be deferred
+// to reset the global telemetry client to its previous value.
+func MockGlobalClient(client Client) func() {
+	globalClient.Lock()
+	defer globalClient.Unlock()
+	oldClient := GlobalClient
+	GlobalClient = client
+	return func() {
+		globalClient.Lock()
+		defer globalClient.Unlock()
+		GlobalClient = oldClient
+	}
+}
+
+// Check is a testing utility to assert that a target key in config contains the expected value
+func Check(t *testing.T, configuration []Configuration, key string, expected interface{}) {
+	for _, kv := range configuration {
+		if kv.Name == key {
+			if kv.Value != expected {
+				t.Errorf("configuration %s: wanted %v, got %v", key, expected, kv.Value)
+			}
+			return
+		}
+	}
+	t.Errorf("missing configuration %s", key)
+}
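+
+// Editor's sketch (not upstream code): a test would typically call Check on
+// the configuration slice captured from a telemetry request (the key is
+// hypothetical):
+//
+//	Check(t, configuration, "trace_debug_enabled", false)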
+
+// SetAgentlessEndpoint is used for testing purposes to replace the real agentless
+// endpoint with a custom one
+func SetAgentlessEndpoint(endpoint string) string {
+	agentlessEndpointLock.Lock()
+	defer agentlessEndpointLock.Unlock()
+	prev := agentlessURL
+	agentlessURL = endpoint
+	return prev
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/trace_context.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/trace_context.go
new file mode 100644
index 0000000000..47401fb6fe
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/trace_context.go
@@ -0,0 +1,47 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package internal
+
+import (
+	"context"
+)
+
+type executionTracedKey struct{}
+
+// WithExecutionTraced marks ctx as being associated with an execution trace
+// task. It is assumed that ctx already contains a trace task. The caller is
+// responsible for ending the task.
+//
+// This is intended for a specific case where the database/sql contrib package
+// only creates spans *after* an operation, in case the operation was
+// unavailable, and thus execution trace tasks tied to the span only capture the
+// very end. This function enables creating a task *before* creating a span, and
+// communicating to the APM tracer that it does not need to create a task. In
+// general, APM instrumentation should prefer creating tasks around the
+// operation rather than after the fact, if possible.
+func WithExecutionTraced(ctx context.Context) context.Context {
+	return context.WithValue(ctx, executionTracedKey{}, true)
+}
+
+// WithExecutionNotTraced marks that the context is *not* covered by an
+// execution trace task.  This is intended to prevent child spans (which inherit
+// information from ctx) from being considered covered by a task, when an
+// integration may create its own child span with its own execution trace task.
+func WithExecutionNotTraced(ctx context.Context) context.Context {
+	if ctx.Value(executionTracedKey{}) == nil {
+		// Fast path: if it wasn't marked before, we don't need to wrap
+		// the context
+		return ctx
+	}
+	return context.WithValue(ctx, executionTracedKey{}, false)
+}
+
+// IsExecutionTraced returns whether ctx is associated with an execution trace
+// task, as indicated via WithExecutionTraced
+func IsExecutionTraced(ctx context.Context) bool {
+	v := ctx.Value(executionTracedKey{})
+	return v != nil && v.(bool)
+}
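+
+// Editor's sketch (not upstream code): an integration that opens its own
+// runtime/trace task before creating a span could mark the context so the
+// tracer skips creating a second task (the task name is hypothetical):
+//
+//	ctx, task := trace.NewTask(ctx, "db.query")
+//	defer task.End()
+//	ctx = internal.WithExecutionTraced(ctx)
+//	// spans created from ctx now see IsExecutionTraced(ctx) == true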
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/endpoint_counter.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/endpoint_counter.go
new file mode 100644
index 0000000000..7d2c3e4a65
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/endpoint_counter.go
@@ -0,0 +1,105 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package traceprof
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// globalEndpointCounter is shared between the profiler and the tracer.
+var globalEndpointCounter = (func() *EndpointCounter {
+	// Create endpoint counter with arbitrary limit.
+	// The pathological edge case would be a service with a high rate (10k/s) of
+	// short (100ms) spans with unique endpoints (resource names). Over a 60s
+	// period this would grow the map to 600k items which may cause noticable
+	// memory, GC overhead and lock contention overhead. The pprof endpoint
+	// labels are less problematic since there will only be 1000 spans in-flight
+	// on average. Using a limit of 1000 will result in a similar overhead of
+	// this features compared to the pprof labels. It also seems like a
+	// reasonable upper bound for the number of endpoints a normal application
+	// may service in a 60s period.
+	ec := NewEndpointCounter(1000)
+	// Disabling it by default ensures almost-zero overhead for tracing users that
+	// don't have the profiler turned on.
+	ec.SetEnabled(false)
+	return ec
+})()
+
+// GlobalEndpointCounter returns the endpoint counter that is shared between
+// tracing and profiling to support the unit of work feature.
+func GlobalEndpointCounter() *EndpointCounter {
+	return globalEndpointCounter
+}
+
+// NewEndpointCounter returns a new EndpointCounter that will track hit
+// counts for up to limit endpoints. A limit of <= 0 indicates no limit.
+func NewEndpointCounter(limit int) *EndpointCounter {
+	return &EndpointCounter{enabled: 1, limit: limit, counts: map[string]uint64{}}
+}
+
+// EndpointCounter counts hits per endpoint.
+//
+// TODO: This is a naive implementation with poor performance, e.g. 125ns/op in
+// BenchmarkEndpointCounter on M1. We can do 10-20x better with something more
+// complicated [1]. This will be done in a follow-up PR.
+// [1] https://github.com/felixge/countermap/blob/main/xsync_map_counter_map.go
+type EndpointCounter struct {
+	enabled uint64
+	mu      sync.Mutex
+	counts  map[string]uint64
+	limit   int
+}
+
+// SetEnabled changes whether endpoint counting is enabled. The previous
+// value is returned.
+func (e *EndpointCounter) SetEnabled(enabled bool) bool {
+	oldVal := atomic.SwapUint64(&e.enabled, boolToUint64(enabled))
+	return oldVal == 1
+}
+
+// Inc increments the hit counter for the given endpoint by 1. If endpoint
+// counting is disabled, this method does nothing and is almost zero-cost.
+func (e *EndpointCounter) Inc(endpoint string) {
+	// Fast-path return if endpoint counter is disabled.
+	if atomic.LoadUint64(&e.enabled) == 0 {
+		return
+	}
+
+	// Acquire lock until func returns
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	// Don't add another endpoint to the map if the limit is reached. See
+	// globalEndpointCounter comment.
+	count, ok := e.counts[endpoint]
+	if !ok && e.limit > 0 && len(e.counts) >= e.limit {
+		return
+	}
+	// Increment the endpoint count
+	e.counts[endpoint] = count + 1
+}
+
+// GetAndReset returns the hit counts for all endpoints and resets their counts
+// back to 0.
+func (e *EndpointCounter) GetAndReset() map[string]uint64 {
+	// Acquire lock until func returns
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	// Return current counts and reset internal map.
+	counts := e.counts
+	e.counts = make(map[string]uint64)
+	return counts
+}
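+
+// Editor's sketch (not upstream code): the tracer increments the counter once
+// per finished local root span and the profiler drains it at the end of each
+// profiling period (the endpoint name is hypothetical):
+//
+//	ec := GlobalEndpointCounter()
+//	ec.SetEnabled(true)
+//	ec.Inc("GET /users/:id")
+//	counts := ec.GetAndReset() // map[string]uint64{"GET /users/:id": 1}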
+
+// boolToUint64 converts b to 0 if false or 1 if true.
+func boolToUint64(b bool) uint64 {
+	if b {
+		return 1
+	}
+	return 0
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/profiler.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/profiler.go
new file mode 100644
index 0000000000..2c3088b573
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/profiler.go
@@ -0,0 +1,35 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package traceprof
+
+import (
+	"sync/atomic"
+)
+
+var profiler struct {
+	enabled uint32
+}
+
+// SetProfilerEnabled records whether the profiler is enabled and returns the
+// previous value.
+func SetProfilerEnabled(val bool) bool {
+	return atomic.SwapUint32(&profiler.enabled, boolToUint32(val)) != 0
+}
+
+func profilerEnabled() int {
+	return int(atomic.LoadUint32(&profiler.enabled))
+}
+
+func boolToUint32(b bool) uint32 {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// SetProfilerRootTags tags the local root span with the profiler's enabled state.
+func SetProfilerRootTags(localRootSpan TagSetter) {
+	localRootSpan.SetTag("_dd.profiling.enabled", profilerEnabled())
+}
+
+// TagSetter is the minimal span interface needed to set a tag.
+type TagSetter interface{ SetTag(string, interface{}) }
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/traceprof.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/traceprof.go
new file mode 100644
index 0000000000..dc55bc889f
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/traceprof.go
@@ -0,0 +1,21 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+// Package traceprof contains shared logic for cross-cutting tracer/profiler features.
+package traceprof
+
+// pprof labels applied by the tracer to show up in the profiler's profiles.
+const (
+	SpanID          = "span id"
+	LocalRootSpanID = "local root span id"
+	TraceEndpoint   = "trace endpoint"
+)
+
+// env variables used to control cross-cutting tracer/profiling features.
+const (
+	CodeHotspotsEnvVar  = "DD_PROFILING_CODE_HOTSPOTS_COLLECTION_ENABLED" // aka code hotspots
+	EndpointEnvVar      = "DD_PROFILING_ENDPOINT_COLLECTION_ENABLED"      // aka endpoint profiling
+	EndpointCountEnvVar = "DD_PROFILING_ENDPOINT_COUNT_ENABLED"           // aka unit of work
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/utils.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/utils.go
new file mode 100644
index 0000000000..98ca6b050a
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/utils.go
@@ -0,0 +1,64 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package internal
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// LockMap uses an RWMutex to synchronize map access to allow for concurrent access.
+// This should not be used for cases with heavy write load and performance concerns.
+type LockMap struct {
+	sync.RWMutex
+	c uint32
+	m map[string]string
+}
+
+// NewLockMap returns a LockMap initialized with the entries of m.
+func NewLockMap(m map[string]string) *LockMap {
+	return &LockMap{m: m, c: uint32(len(m))}
+}
+
+// Iter iterates over all the map entries, passing each key and value to the provided func f. Note this is READ ONLY.
+func (l *LockMap) Iter(f func(key string, val string)) {
+	c := atomic.LoadUint32(&l.c)
+	if c == 0 { //Fast exit to avoid the cost of RLock/RUnlock for empty maps
+		return
+	}
+	l.RLock()
+	defer l.RUnlock()
+	for k, v := range l.m {
+		f(k, v)
+	}
+}
+
+func (l *LockMap) Len() int {
+	l.RLock()
+	defer l.RUnlock()
+	return len(l.m)
+}
+
+func (l *LockMap) Clear() {
+	l.Lock()
+	defer l.Unlock()
+	l.m = map[string]string{}
+	atomic.StoreUint32(&l.c, 0)
+}
+
+func (l *LockMap) Set(k, v string) {
+	l.Lock()
+	defer l.Unlock()
+	if _, ok := l.m[k]; !ok {
+		atomic.AddUint32(&l.c, 1)
+	}
+	l.m[k] = v
+}
+
+func (l *LockMap) Get(k string) string {
+	l.RLock()
+	defer l.RUnlock()
+	return l.m[k]
+}
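+
+// Editor's sketch (not upstream code): LockMap suits small, read-mostly maps
+// such as global tags (the keys and values are hypothetical):
+//
+//	tags := NewLockMap(map[string]string{"env": "prod"})
+//	tags.Set("version", "1.2.3")
+//	tags.Iter(func(k, v string) { fmt.Println(k, v) })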
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/version/version.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/version/version.go
new file mode 100644
index 0000000000..dfc9759318
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/version/version.go
@@ -0,0 +1,43 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package version
+
+import (
+	"regexp"
+	"strconv"
+)
+
+// Tag specifies the current release tag. It needs to be manually
+// updated. A test checks that the value of Tag never points to a
+// git tag that is older than HEAD.
+const Tag = "v1.53.0"
+
+// Dissected version number. Filled during init()
+var (
+	// Major is the current major version number
+	Major int
+	// Minor is the current minor version number
+	Minor int
+	// Patch is the current patch version number
+	Patch int
+	// RC is the current release candidate version number
+	RC int
+)
+
+func init() {
+	// This regexp matches the version format we use and captures major/minor/patch/rc in different groups
+	r := regexp.MustCompile(`v(?P<ma>\d+)\.(?P<mi>\d+)\.(?P<pa>\d+)(-rc\.(?P<rc>\d+))?`)
+	names := r.SubexpNames()
+	captures := map[string]string{}
+	// Associate each capture group match with the capture group's name to easily retrieve major/minor/patch/rc
+	for k, v := range r.FindAllStringSubmatch(Tag, -1)[0] {
+		captures[names[k]] = v
+	}
+	Major, _ = strconv.Atoi(captures["ma"])
+	Minor, _ = strconv.Atoi(captures["mi"])
+	Patch, _ = strconv.Atoi(captures["pa"])
+	RC, _ = strconv.Atoi(captures["rc"])
+}
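+
+// Editor's note (illustrative): for Tag = "v1.53.0" the regexp above yields
+// Major = 1, Minor = 53, Patch = 0, and RC = 0 (the rc group is empty, so
+// strconv.Atoi fails and RC keeps its zero value); a tag such as
+// "v1.54.0-rc.2" would additionally set RC = 2.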
diff --git a/vendor/inet.af/netaddr/.gitignore b/vendor/inet.af/netaddr/.gitignore
new file mode 100644
index 0000000000..c60fe1e0c2
--- /dev/null
+++ b/vendor/inet.af/netaddr/.gitignore
@@ -0,0 +1,3 @@
+crashers
+suppressions
+netaddr-fuzz.zip
diff --git a/vendor/inet.af/netaddr/.gitmodules b/vendor/inet.af/netaddr/.gitmodules
new file mode 100644
index 0000000000..e1ebefa586
--- /dev/null
+++ b/vendor/inet.af/netaddr/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "corpus"]
+	path = corpus
+	url = https://github.com/inetaf/netaddr-corpus.git
diff --git a/vendor/inet.af/netaddr/AUTHORS b/vendor/inet.af/netaddr/AUTHORS
new file mode 100644
index 0000000000..ac0d1591b3
--- /dev/null
+++ b/vendor/inet.af/netaddr/AUTHORS
@@ -0,0 +1,4 @@
+Alex Willmer <alex@moreati.org.uk>
+Matt Layher <mdlayher@gmail.com>
+Tailscale Inc.
+Tobias Klauser <tklauser@distanz.ch>
diff --git a/vendor/inet.af/netaddr/LICENSE b/vendor/inet.af/netaddr/LICENSE
new file mode 100644
index 0000000000..c47d4315ae
--- /dev/null
+++ b/vendor/inet.af/netaddr/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2020 The Inet.af AUTHORS. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Tailscale Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/inet.af/netaddr/README.md b/vendor/inet.af/netaddr/README.md
new file mode 100644
index 0000000000..1fdaee5f5a
--- /dev/null
+++ b/vendor/inet.af/netaddr/README.md
@@ -0,0 +1,46 @@
+# netaddr [![Test Status](https://github.com/inetaf/netaddr/workflows/Linux/badge.svg)](https://github.com/inetaf/netaddr/actions) [![Go Reference](https://pkg.go.dev/badge/inet.af/netaddr.svg)](https://pkg.go.dev/inet.af/netaddr)
+
+## Deprecated
+
+Please see https://pkg.go.dev/go4.org/netipx and the standard library's
+[`net/netip`](https://pkg.go.dev/net/netip).
+
+## What
+
+This is a package containing a new IP address type for Go.
+
+See its docs: https://pkg.go.dev/inet.af/netaddr
+
+## Status
+
+This package is mature, optimized, and used heavily in production at [Tailscale](https://tailscale.com).
+However, API stability is not yet guaranteed.
+
+netaddr is intended to be a core, low-level package.
+We take code review, testing, dependencies, and performance seriously, similar to Go's standard library or the golang.org/x repos.
+
+## Motivation
+
+See https://tailscale.com/blog/netaddr-new-ip-type-for-go/ for a long
+blog post about why we made a new IP address package.
+
+Other links:
+
+* https://github.com/golang/go/issues/18804 ("net: reconsider representation of IP")
+* https://github.com/golang/go/issues/18757 ("net: ParseIP should return an error, like other Parse functions")
+* https://github.com/golang/go/issues/37921 ("net: Unable to reliably distinguish IPv4-mapped-IPv6 addresses from regular IPv4 addresses")
+* merges net.IPAddr and net.IP (which the Go net package is a little torn between for legacy reasons)
+
+## Testing
+
+In addition to regular Go tests, netaddr uses fuzzing.
+The corpus is stored separately, in a submodule,
+to minimize the impact on everyone else.
+
+To use:
+
+```
+$ git submodule update --init
+$ go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
+$ go-fuzz-build && go-fuzz
+```
diff --git a/vendor/inet.af/netaddr/fuzz.go b/vendor/inet.af/netaddr/fuzz.go
new file mode 100644
index 0000000000..cf1836dc06
--- /dev/null
+++ b/vendor/inet.af/netaddr/fuzz.go
@@ -0,0 +1,203 @@
+// Copyright 2020 The Inet.Af AUTHORS. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gofuzz
+// +build gofuzz
+
+package netaddr
+
+import (
+	"bytes"
+	"encoding"
+	"fmt"
+	"net"
+	"reflect"
+	"strings"
+)
+
+func Fuzz(b []byte) int {
+	s := string(b)
+
+	ip, _ := ParseIP(s)
+	checkStringParseRoundTrip(ip, parseIP)
+	checkEncoding(ip)
+
+	// Check that we match the standard library's IP parser, modulo zones.
+	if !strings.Contains(s, "%") {
+		stdip := net.ParseIP(s)
+		if ip.IsZero() != (stdip == nil) {
+			fmt.Println("stdip=", stdip, "ip=", ip)
+			panic("net.ParseIP nil != ParseIP zero")
+		} else if !ip.IsZero() && !ip.Is4in6() && ip.String() != stdip.String() {
+			fmt.Println("ip=", ip, "stdip=", stdip)
+			panic("net.IP.String() != IP.String()")
+		}
+	}
+	// Check that .Next().Prior() and .Prior().Next() preserve the IP.
+	if !ip.IsZero() && !ip.Next().IsZero() && ip.Next().Prior() != ip {
+		fmt.Println("ip=", ip, ".next=", ip.Next(), ".next.prior=", ip.Next().Prior())
+		panic(".Next.Prior did not round trip")
+	}
+	if !ip.IsZero() && !ip.Prior().IsZero() && ip.Prior().Next() != ip {
+		fmt.Println("ip=", ip, ".prior=", ip.Prior(), ".prior.next=", ip.Prior().Next())
+		panic(".Prior.Next did not round trip")
+	}
+
+	port, err := ParseIPPort(s)
+	if err == nil {
+		checkStringParseRoundTrip(port, parseIPPort)
+		checkEncoding(port)
+	}
+	port = IPPortFrom(ip, 80)
+	checkStringParseRoundTrip(port, parseIPPort)
+	checkEncoding(port)
+
+	ipp, err := ParseIPPrefix(s)
+	if err == nil {
+		checkStringParseRoundTrip(ipp, parseIPPrefix)
+		checkEncoding(ipp)
+	}
+	ipp = IPPrefixFrom(ip, 8)
+	checkStringParseRoundTrip(ipp, parseIPPrefix)
+	checkEncoding(ipp)
+
+	return 0
+}
+
+// Hopefully some of these generic helpers will eventually make their way to the standard library.
+// See https://github.com/golang/go/issues/46268.
+
+// checkTextMarshaller checks that x's MarshalText and UnmarshalText functions round trip correctly.
+func checkTextMarshaller(x encoding.TextMarshaler) {
+	buf, err := x.MarshalText()
+	if err == nil {
+		return
+	}
+	y := reflect.New(reflect.TypeOf(x)).Interface().(encoding.TextUnmarshaler)
+	err = y.UnmarshalText(buf)
+	if err != nil {
+		fmt.Printf("(%v).MarshalText() = %q\n", x, buf)
+		panic(fmt.Sprintf("(%T).UnmarshalText(%q) = %v", y, buf, err))
+	}
+	if !reflect.DeepEqual(x, y) {
+		fmt.Printf("(%v).MarshalText() = %q\n", x, buf)
+		fmt.Printf("(%T).UnmarshalText(%q) = %v", y, buf, y)
+		panic(fmt.Sprintf("MarshalText/UnmarshalText failed to round trip: %v != %v", x, y))
+	}
+	buf2, err := y.(encoding.TextMarshaler).MarshalText()
+	if err != nil {
+		fmt.Printf("(%v).MarshalText() = %q\n", x, buf)
+		fmt.Printf("(%T).UnmarshalText(%q) = %v", y, buf, y)
+		panic(fmt.Sprintf("failed to MarshalText a second time: %v", err))
+	}
+	if !bytes.Equal(buf, buf2) {
+		fmt.Printf("(%v).MarshalText() = %q\n", x, buf)
+		fmt.Printf("(%T).UnmarshalText(%q) = %v", y, buf, y)
+		fmt.Printf("(%v).MarshalText() = %q\n", y, buf2)
+		panic(fmt.Sprintf("second MarshalText differs from first: %q != %q", buf, buf2))
+	}
+}
+
+// checkBinaryMarshaller checks that x's MarshalBinary and UnmarshalBinary functions round trip correctly.
+func checkBinaryMarshaller(x encoding.BinaryMarshaler) {
+	buf, err := x.MarshalBinary()
+	if err == nil {
+		return
+	}
+	y := reflect.New(reflect.TypeOf(x)).Interface().(encoding.BinaryUnmarshaler)
+	err = y.UnmarshalBinary(buf)
+	if err != nil {
+		fmt.Printf("(%v).MarshalBinary() = %q\n", x, buf)
+		panic(fmt.Sprintf("(%T).UnmarshalBinary(%q) = %v", y, buf, err))
+	}
+	if !reflect.DeepEqual(x, y) {
+		fmt.Printf("(%v).MarshalBinary() = %q\n", x, buf)
+		fmt.Printf("(%T).UnmarshalBinary(%q) = %v", y, buf, y)
+		panic(fmt.Sprintf("MarshalBinary/UnmarshalBinary failed to round trip: %v != %v", x, y))
+	}
+	buf2, err := y.(encoding.BinaryMarshaler).MarshalBinary()
+	if err != nil {
+		fmt.Printf("(%v).MarshalBinary() = %q\n", x, buf)
+		fmt.Printf("(%T).UnmarshalBinary(%q) = %v", y, buf, y)
+		panic(fmt.Sprintf("failed to MarshalBinary a second time: %v", err))
+	}
+	if !bytes.Equal(buf, buf2) {
+		fmt.Printf("(%v).MarshalBinary() = %q\n", x, buf)
+		fmt.Printf("(%T).UnmarshalBinary(%q) = %v", y, buf, y)
+		fmt.Printf("(%v).MarshalBinary() = %q\n", y, buf2)
+		panic(fmt.Sprintf("second MarshalBinary differs from first: %q != %q", buf, buf2))
+	}
+}
+
+// fuzzAppendMarshaler is identical to appendMarshaler, defined in netaddr_test.go.
+// We have two because the two go-fuzz implementations differ
+// in whether they include _test.go files when typechecking.
+// We need this fuzz file to compile with and without netaddr_test.go,
+// which means defining the interface twice.
+type fuzzAppendMarshaler interface {
+	encoding.TextMarshaler
+	AppendTo([]byte) []byte
+}
+
+// checkTextMarshalMatchesAppendTo checks that x's MarshalText matches x's AppendTo.
+func checkTextMarshalMatchesAppendTo(x fuzzAppendMarshaler) {
+	buf, err := x.MarshalText()
+	if err != nil {
+		panic(err)
+	}
+	buf2 := make([]byte, 0, len(buf))
+	buf2 = x.AppendTo(buf2)
+	if !bytes.Equal(buf, buf2) {
+		panic(fmt.Sprintf("%v: MarshalText = %q, AppendTo = %q", x, buf, buf2))
+	}
+}
+
+// parseIP, parseIPPort, and parseIPPrefix are trampolines that give the Parse* functions the same signature.
+// This would be nicer with generics.
+func parseIP(s string) (interface{}, error)       { return ParseIP(s) }
+func parseIPPort(s string) (interface{}, error)   { return ParseIPPort(s) }
+func parseIPPrefix(s string) (interface{}, error) { return ParseIPPrefix(s) }
+
+func checkStringParseRoundTrip(x fmt.Stringer, parse func(string) (interface{}, error)) {
+	v, vok := x.(interface{ IsValid() bool })
+	if vok && !v.IsValid() {
+		// Ignore invalid values.
+		return
+	}
+	// Zero values tend to print something like "invalid <TYPE>", so it's OK if they don't round trip.
+	// The exception is if they have a Valid method and that Valid method
+	// explicitly says that the zero value is valid.
+	z, zok := x.(interface{ IsZero() bool })
+	if zok && z.IsZero() && !(vok && v.IsValid()) {
+		return
+	}
+	s := x.String()
+	y, err := parse(s)
+	if err != nil {
+		panic(fmt.Sprintf("s=%q err=%v", s, err))
+	}
+	if !reflect.DeepEqual(x, y) {
+		fmt.Printf("s=%q x=%#v y=%#v\n", s, x, y)
+		panic(fmt.Sprintf("%T round trip identity failure", x))
+	}
+	s2 := y.(fmt.Stringer).String()
+	if s != s2 {
+		fmt.Printf("s=%#v s2=%#v\n", s, s2)
+		panic(fmt.Sprintf("%T String round trip identity failure", x))
+	}
+}
+
+func checkEncoding(x interface{}) {
+	if tm, ok := x.(encoding.TextMarshaler); ok {
+		checkTextMarshaller(tm)
+	}
+	if bm, ok := x.(encoding.BinaryMarshaler); ok {
+		checkBinaryMarshaller(bm)
+	}
+	if am, ok := x.(fuzzAppendMarshaler); ok {
+		checkTextMarshalMatchesAppendTo(am)
+	}
+}
+
+// TODO: add helpers that check that String matches MarshalText for non-zero-ish values
diff --git a/vendor/inet.af/netaddr/ipset.go b/vendor/inet.af/netaddr/ipset.go
new file mode 100644
index 0000000000..b448e25f9a
--- /dev/null
+++ b/vendor/inet.af/netaddr/ipset.go
@@ -0,0 +1,497 @@
+// Copyright 2020 The Inet.Af AUTHORS. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package netaddr
+
+import (
+	"fmt"
+	"runtime"
+	"sort"
+	"strings"
+)
+
+// IPSetBuilder builds an immutable IPSet.
+//
+// The zero value is a valid value representing a set of no IPs.
+//
+// The Add and Remove methods add or remove IPs to/from the set.
+// Removals only affect the current membership of the set, so in
+// general Adds should be called first. Input ranges may overlap in
+// any way.
+//
+// Most IPSetBuilder methods do not return errors.
+// Instead, errors are accumulated and reported by IPSetBuilder.IPSet.
+type IPSetBuilder struct {
+	// in are the ranges in the set.
+	in []IPRange
+
+	// out are the ranges to be removed from 'in'.
+	out []IPRange
+
+	// errs are errors accumulated during construction.
+	errs multiErr
+}
+
+// normalize normalizes s: s.in becomes the minimal sorted list of
+// ranges required to describe s, and s.out becomes empty.
+func (s *IPSetBuilder) normalize() {
+	const debug = false
+	if debug {
+		debugf("ranges start in=%v out=%v", s.in, s.out)
+	}
+	in, ok := mergeIPRanges(s.in)
+	if !ok {
+		return
+	}
+	out, ok := mergeIPRanges(s.out)
+	if !ok {
+		return
+	}
+	if debug {
+		debugf("ranges sort  in=%v out=%v", in, out)
+	}
+
+	// in and out are sorted in ascending range order, and have no
+	// overlaps within each other. We can run a merge of the two lists
+	// in one pass.
+
+	min := make([]IPRange, 0, len(in))
+	for len(in) > 0 && len(out) > 0 {
+		rin, rout := in[0], out[0]
+		if debug {
+			debugf("step in=%v out=%v", rin, rout)
+		}
+
+		switch {
+		case !rout.IsValid() || !rin.IsValid():
+			// mergeIPRanges should have prevented invalid ranges from
+			// sneaking in.
+			panic("invalid IPRanges during Ranges merge")
+		case rout.entirelyBefore(rin):
+			// "out" is entirely before "in".
+			//
+			//    out         in
+			// f-------t   f-------t
+			out = out[1:]
+			if debug {
+				debugf("out before in; drop out")
+			}
+		case rin.entirelyBefore(rout):
+			// "in" is entirely before "out".
+			//
+			//    in         out
+			// f------t   f-------t
+			min = append(min, rin)
+			in = in[1:]
+			if debug {
+				debugf("in before out; append in")
+				debugf("min=%v", min)
+			}
+		case rin.coveredBy(rout):
+			// "out" entirely covers "in".
+			//
+			//       out
+			// f-------------t
+			//    f------t
+			//       in
+			in = in[1:]
+			if debug {
+				debugf("in inside out; drop in")
+			}
+		case rout.inMiddleOf(rin):
+			// "in" entirely covers "out".
+			//
+			//       in
+			// f-------------t
+			//    f------t
+			//       out
+			min = append(min, IPRange{from: rin.from, to: rout.from.Prior()})
+			// Adjust in[0], not rin, because we want to consider the
+			// mutated range on the next iteration.
+			in[0].from = rout.to.Next()
+			out = out[1:]
+			if debug {
+				debugf("out inside in; split in, append first in, drop out, adjust second in")
+				debugf("min=%v", min)
+			}
+		case rout.overlapsStartOf(rin):
+			// "out" overlaps start of "in".
+			//
+			//   out
+			// f------t
+			//    f------t
+			//       in
+			in[0].from = rout.to.Next()
+			// Can't move rin onto min yet, another later out might
+			// trim it further. Just discard rout and continue.
+			out = out[1:]
+			if debug {
+				debugf("out cuts start of in; adjust in, drop out")
+			}
+		case rout.overlapsEndOf(rin):
+			// "out" overlaps end of "in".
+			//
+			//           out
+			//        f------t
+			//    f------t
+			//       in
+			min = append(min, IPRange{from: rin.from, to: rout.from.Prior()})
+			in = in[1:]
+			if debug {
+				debugf("merge out cuts end of in; append shortened in")
+				debugf("min=%v", min)
+			}
+		default:
+			// The above should account for all combinations of in and
+			// out overlapping, but insert a panic to be sure.
+			panic("unexpected additional overlap scenario")
+		}
+	}
+	if len(in) > 0 {
+		// Ran out of removals before the end of in.
+		min = append(min, in...)
+		if debug {
+			debugf("min=%v", min)
+		}
+	}
+
+	s.in = min
+	s.out = nil
+}
+
+// Clone returns a copy of s that shares no memory with s.
+func (s *IPSetBuilder) Clone() *IPSetBuilder {
+	return &IPSetBuilder{
+		in:  append([]IPRange(nil), s.in...),
+		out: append([]IPRange(nil), s.out...),
+	}
+}
+
+func (s *IPSetBuilder) addError(msg string, args ...interface{}) {
+	se := new(stacktraceErr)
+	// Skip three frames: runtime.Callers, addError, and the IPSetBuilder
+	// method that called addError (such as IPSetBuilder.Add).
+	// The resulting stack trace ends at the line in the user's
+	// code where they called into netaddr.
+	n := runtime.Callers(3, se.pcs[:])
+	se.at = se.pcs[:n]
+	se.err = fmt.Errorf(msg, args...)
+	s.errs = append(s.errs, se)
+}
+
+// Add adds ip to s.
+func (s *IPSetBuilder) Add(ip IP) {
+	if ip.IsZero() {
+		s.addError("Add(IP{})")
+		return
+	}
+	s.AddRange(IPRangeFrom(ip, ip))
+}
+
+// AddPrefix adds all IPs in p to s.
+func (s *IPSetBuilder) AddPrefix(p IPPrefix) {
+	if r := p.Range(); r.IsValid() {
+		s.AddRange(r)
+	} else {
+		s.addError("AddPrefix(%v/%v)", p.IP(), p.Bits())
+	}
+}
+
+// AddRange adds r to s.
+// If r is not Valid, AddRange does nothing.
+func (s *IPSetBuilder) AddRange(r IPRange) {
+	if !r.IsValid() {
+		s.addError("AddRange(%v-%v)", r.From(), r.To())
+		return
+	}
+	// If there are any removals (s.out), then we need to compact the set
+	// first to get the order right.
+	if len(s.out) > 0 {
+		s.normalize()
+	}
+	s.in = append(s.in, r)
+}
+
+// AddSet adds all IPs in b to s.
+func (s *IPSetBuilder) AddSet(b *IPSet) {
+	if b == nil {
+		return
+	}
+	for _, r := range b.rr {
+		s.AddRange(r)
+	}
+}
+
+// Remove removes ip from s.
+func (s *IPSetBuilder) Remove(ip IP) {
+	if ip.IsZero() {
+		s.addError("Remove(IP{})")
+	} else {
+		s.RemoveRange(IPRangeFrom(ip, ip))
+	}
+}
+
+// RemovePrefix removes all IPs in p from s.
+func (s *IPSetBuilder) RemovePrefix(p IPPrefix) {
+	if r := p.Range(); r.IsValid() {
+		s.RemoveRange(r)
+	} else {
+		s.addError("RemovePrefix(%v/%v)", p.IP(), p.Bits())
+	}
+}
+
+// RemoveRange removes all IPs in r from s.
+func (s *IPSetBuilder) RemoveRange(r IPRange) {
+	if r.IsValid() {
+		s.out = append(s.out, r)
+	} else {
+		s.addError("RemoveRange(%v-%v)", r.From(), r.To())
+	}
+}
+
+// RemoveSet removes all IPs in b from s.
+func (s *IPSetBuilder) RemoveSet(b *IPSet) {
+	if b == nil {
+		return
+	}
+	for _, r := range b.rr {
+		s.RemoveRange(r)
+	}
+}
+
+// removeBuilder removes all IPs in b from s.
+func (s *IPSetBuilder) removeBuilder(b *IPSetBuilder) {
+	b.normalize()
+	for _, r := range b.in {
+		s.RemoveRange(r)
+	}
+}
+
+// Complement updates s to contain the complement of its current
+// contents.
+func (s *IPSetBuilder) Complement() {
+	s.normalize()
+	s.out = s.in
+	s.in = []IPRange{
+		IPPrefix{ip: IPv4(0, 0, 0, 0), bits: 0}.Range(),
+		IPPrefix{ip: IPv6Unspecified(), bits: 0}.Range(),
+	}
+}
+
+// Intersect updates s to the set intersection of s and b.
+func (s *IPSetBuilder) Intersect(b *IPSet) {
+	var o IPSetBuilder
+	o.Complement()
+	o.RemoveSet(b)
+	s.removeBuilder(&o)
+}
+
+func discardf(format string, args ...interface{}) {}
+
+// debugf is reassigned by tests.
+var debugf = discardf
+
+// IPSet returns an immutable IPSet representing the current state of s.
+//
+// Most IPSetBuilder methods do not return errors.
+// Rather, the builder ignores any invalid inputs (such as an invalid IPPrefix),
+// and accumulates a list of any such errors that it encountered.
+//
+// IPSet also reports any such accumulated errors.
+// Even if the returned error is non-nil, the returned IPSet is usable
+// and contains all modifications made with valid inputs.
+//
+// The builder remains usable after calling IPSet.
+// Calling IPSet clears any accumulated errors.
+func (s *IPSetBuilder) IPSet() (*IPSet, error) {
+	s.normalize()
+	ret := &IPSet{
+		rr: append([]IPRange{}, s.in...),
+	}
+	if len(s.errs) == 0 {
+		return ret, nil
+	} else {
+		errs := s.errs
+		s.errs = nil
+		return ret, errs
+	}
+}
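+
+// Editor's sketch (not upstream code): a typical build adds broad ranges,
+// carves out exceptions, and then freezes the set; this assumes the package's
+// MustParseIP/MustParseIPPrefix helpers (the addresses are hypothetical):
+//
+//	var b IPSetBuilder
+//	b.AddPrefix(MustParseIPPrefix("10.0.0.0/8"))
+//	b.Remove(MustParseIP("10.1.2.3"))
+//	set, err := b.IPSet()
+//	if err != nil {
+//		// handle accumulated input errors
+//	}
+//	_ = set.Contains(MustParseIP("10.1.2.4")) // true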
+
+// IPSet represents a set of IP addresses.
+//
+// IPSet is safe for concurrent use.
+// The zero value is a valid value representing a set of no IPs.
+// Use IPSetBuilder to construct IPSets.
+type IPSet struct {
+	// rr is the set of IPs that belong to this IPSet. The IPRanges
+	// are normalized according to IPSetBuilder.normalize, meaning
+	// they are a sorted, minimal representation (no overlapping
+	// ranges, no contiguous ranges). The implementation of various
+	// methods rely on this property.
+	rr []IPRange
+}
+
+// Ranges returns the minimum and sorted set of IP
+// ranges that covers s.
+func (s *IPSet) Ranges() []IPRange {
+	return append([]IPRange{}, s.rr...)
+}
+
+// Prefixes returns the minimum and sorted set of IP prefixes
+// that covers s.
+func (s *IPSet) Prefixes() []IPPrefix {
+	out := make([]IPPrefix, 0, len(s.rr))
+	for _, r := range s.rr {
+		out = append(out, r.Prefixes()...)
+	}
+	return out
+}
+
+// Equal reports whether s and o represent the same set of IP
+// addresses.
+func (s *IPSet) Equal(o *IPSet) bool {
+	if len(s.rr) != len(o.rr) {
+		return false
+	}
+	for i := range s.rr {
+		if s.rr[i] != o.rr[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Contains reports whether ip is in s.
+// If ip has an IPv6 zone, Contains returns false,
+// because IPSets do not track zones.
+func (s *IPSet) Contains(ip IP) bool {
+	if ip.hasZone() {
+		return false
+	}
+	// TODO: data structure permitting more efficient lookups:
+	// https://github.com/inetaf/netaddr/issues/139
+	i := sort.Search(len(s.rr), func(i int) bool {
+		return ip.Less(s.rr[i].from)
+	})
+	if i == 0 {
+		return false
+	}
+	i--
+	return s.rr[i].contains(ip)
+}
+
+// ContainsRange reports whether all IPs in r are in s.
+func (s *IPSet) ContainsRange(r IPRange) bool {
+	for _, x := range s.rr {
+		if r.coveredBy(x) {
+			return true
+		}
+	}
+	return false
+}
+
+// ContainsPrefix reports whether all IPs in p are in s.
+func (s *IPSet) ContainsPrefix(p IPPrefix) bool {
+	return s.ContainsRange(p.Range())
+}
+
+// Overlaps reports whether any IP in b is also in s.
+func (s *IPSet) Overlaps(b *IPSet) bool {
+	// TODO: sorted ranges lets us do this in O(n+m)
+	for _, r := range s.rr {
+		for _, or := range b.rr {
+			if r.Overlaps(or) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// OverlapsRange reports whether any IP in r is also in s.
+func (s *IPSet) OverlapsRange(r IPRange) bool {
+	// TODO: sorted ranges lets us do this more efficiently.
+	for _, x := range s.rr {
+		if x.Overlaps(r) {
+			return true
+		}
+	}
+	return false
+}
+
+// OverlapsPrefix reports whether any IP in p is also in s.
+func (s *IPSet) OverlapsPrefix(p IPPrefix) bool {
+	return s.OverlapsRange(p.Range())
+}
+
+// RemoveFreePrefix splits s into a Prefix of length bitLen and a new
+// IPSet with that prefix removed.
+//
+// If no contiguous prefix of length bitLen exists in s,
+// RemoveFreePrefix returns ok=false.
+func (s *IPSet) RemoveFreePrefix(bitLen uint8) (p IPPrefix, newSet *IPSet, ok bool) {
+	var bestFit IPPrefix
+	for _, r := range s.rr {
+		for _, prefix := range r.Prefixes() {
+			if prefix.bits > bitLen {
+				continue
+			}
+			if bestFit.ip.IsZero() || prefix.bits > bestFit.bits {
+				bestFit = prefix
+				if bestFit.bits == bitLen {
+					// exact match, done.
+					break
+				}
+			}
+		}
+	}
+
+	if bestFit.ip.IsZero() {
+		return IPPrefix{}, s, false
+	}
+
+	prefix := IPPrefix{ip: bestFit.ip, bits: bitLen}
+
+	var b IPSetBuilder
+	b.AddSet(s)
+	b.RemovePrefix(prefix)
+	newSet, _ = b.IPSet()
+	return prefix, newSet, true
+}
+
+type multiErr []error
+
+func (e multiErr) Error() string {
+	var ret []string
+	for _, err := range e {
+		ret = append(ret, err.Error())
+	}
+	return strings.Join(ret, "; ")
+}
+
+// A stacktraceErr combines an error with a stack trace.
+type stacktraceErr struct {
+	pcs [16]uintptr // preallocated array of PCs
+	at  []uintptr   // stack trace whence the error
+	err error       // underlying error
+}
+
+func (e *stacktraceErr) Error() string {
+	frames := runtime.CallersFrames(e.at)
+	buf := new(strings.Builder)
+	buf.WriteString(e.err.Error())
+	buf.WriteString(" @ ")
+	for {
+		frame, more := frames.Next()
+		if !more {
+			break
+		}
+		fmt.Fprintf(buf, "%s:%d ", frame.File, frame.Line)
+	}
+	return strings.TrimSpace(buf.String())
+}
+
+func (e *stacktraceErr) Unwrap() error {
+	return e.err
+}
diff --git a/vendor/inet.af/netaddr/mask6.go b/vendor/inet.af/netaddr/mask6.go
new file mode 100644
index 0000000000..72a20edef8
--- /dev/null
+++ b/vendor/inet.af/netaddr/mask6.go
@@ -0,0 +1,141 @@
+// Copyright 2021 The Inet.Af AUTHORS. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package netaddr
+
+// mask6 are bitmasks with the topmost n bits of a
+// 128-bit number, where n is the array index.
+//
+// generated with https://play.golang.org/p/64XKxaUSa_9
+var mask6 = [...]uint128{
+	0:   {0x0000000000000000, 0x0000000000000000},
+	1:   {0x8000000000000000, 0x0000000000000000},
+	2:   {0xc000000000000000, 0x0000000000000000},
+	3:   {0xe000000000000000, 0x0000000000000000},
+	4:   {0xf000000000000000, 0x0000000000000000},
+	5:   {0xf800000000000000, 0x0000000000000000},
+	6:   {0xfc00000000000000, 0x0000000000000000},
+	7:   {0xfe00000000000000, 0x0000000000000000},
+	8:   {0xff00000000000000, 0x0000000000000000},
+	9:   {0xff80000000000000, 0x0000000000000000},
+	10:  {0xffc0000000000000, 0x0000000000000000},
+	11:  {0xffe0000000000000, 0x0000000000000000},
+	12:  {0xfff0000000000000, 0x0000000000000000},
+	13:  {0xfff8000000000000, 0x0000000000000000},
+	14:  {0xfffc000000000000, 0x0000000000000000},
+	15:  {0xfffe000000000000, 0x0000000000000000},
+	16:  {0xffff000000000000, 0x0000000000000000},
+	17:  {0xffff800000000000, 0x0000000000000000},
+	18:  {0xffffc00000000000, 0x0000000000000000},
+	19:  {0xffffe00000000000, 0x0000000000000000},
+	20:  {0xfffff00000000000, 0x0000000000000000},
+	21:  {0xfffff80000000000, 0x0000000000000000},
+	22:  {0xfffffc0000000000, 0x0000000000000000},
+	23:  {0xfffffe0000000000, 0x0000000000000000},
+	24:  {0xffffff0000000000, 0x0000000000000000},
+	25:  {0xffffff8000000000, 0x0000000000000000},
+	26:  {0xffffffc000000000, 0x0000000000000000},
+	27:  {0xffffffe000000000, 0x0000000000000000},
+	28:  {0xfffffff000000000, 0x0000000000000000},
+	29:  {0xfffffff800000000, 0x0000000000000000},
+	30:  {0xfffffffc00000000, 0x0000000000000000},
+	31:  {0xfffffffe00000000, 0x0000000000000000},
+	32:  {0xffffffff00000000, 0x0000000000000000},
+	33:  {0xffffffff80000000, 0x0000000000000000},
+	34:  {0xffffffffc0000000, 0x0000000000000000},
+	35:  {0xffffffffe0000000, 0x0000000000000000},
+	36:  {0xfffffffff0000000, 0x0000000000000000},
+	37:  {0xfffffffff8000000, 0x0000000000000000},
+	38:  {0xfffffffffc000000, 0x0000000000000000},
+	39:  {0xfffffffffe000000, 0x0000000000000000},
+	40:  {0xffffffffff000000, 0x0000000000000000},
+	41:  {0xffffffffff800000, 0x0000000000000000},
+	42:  {0xffffffffffc00000, 0x0000000000000000},
+	43:  {0xffffffffffe00000, 0x0000000000000000},
+	44:  {0xfffffffffff00000, 0x0000000000000000},
+	45:  {0xfffffffffff80000, 0x0000000000000000},
+	46:  {0xfffffffffffc0000, 0x0000000000000000},
+	47:  {0xfffffffffffe0000, 0x0000000000000000},
+	48:  {0xffffffffffff0000, 0x0000000000000000},
+	49:  {0xffffffffffff8000, 0x0000000000000000},
+	50:  {0xffffffffffffc000, 0x0000000000000000},
+	51:  {0xffffffffffffe000, 0x0000000000000000},
+	52:  {0xfffffffffffff000, 0x0000000000000000},
+	53:  {0xfffffffffffff800, 0x0000000000000000},
+	54:  {0xfffffffffffffc00, 0x0000000000000000},
+	55:  {0xfffffffffffffe00, 0x0000000000000000},
+	56:  {0xffffffffffffff00, 0x0000000000000000},
+	57:  {0xffffffffffffff80, 0x0000000000000000},
+	58:  {0xffffffffffffffc0, 0x0000000000000000},
+	59:  {0xffffffffffffffe0, 0x0000000000000000},
+	60:  {0xfffffffffffffff0, 0x0000000000000000},
+	61:  {0xfffffffffffffff8, 0x0000000000000000},
+	62:  {0xfffffffffffffffc, 0x0000000000000000},
+	63:  {0xfffffffffffffffe, 0x0000000000000000},
+	64:  {0xffffffffffffffff, 0x0000000000000000},
+	65:  {0xffffffffffffffff, 0x8000000000000000},
+	66:  {0xffffffffffffffff, 0xc000000000000000},
+	67:  {0xffffffffffffffff, 0xe000000000000000},
+	68:  {0xffffffffffffffff, 0xf000000000000000},
+	69:  {0xffffffffffffffff, 0xf800000000000000},
+	70:  {0xffffffffffffffff, 0xfc00000000000000},
+	71:  {0xffffffffffffffff, 0xfe00000000000000},
+	72:  {0xffffffffffffffff, 0xff00000000000000},
+	73:  {0xffffffffffffffff, 0xff80000000000000},
+	74:  {0xffffffffffffffff, 0xffc0000000000000},
+	75:  {0xffffffffffffffff, 0xffe0000000000000},
+	76:  {0xffffffffffffffff, 0xfff0000000000000},
+	77:  {0xffffffffffffffff, 0xfff8000000000000},
+	78:  {0xffffffffffffffff, 0xfffc000000000000},
+	79:  {0xffffffffffffffff, 0xfffe000000000000},
+	80:  {0xffffffffffffffff, 0xffff000000000000},
+	81:  {0xffffffffffffffff, 0xffff800000000000},
+	82:  {0xffffffffffffffff, 0xffffc00000000000},
+	83:  {0xffffffffffffffff, 0xffffe00000000000},
+	84:  {0xffffffffffffffff, 0xfffff00000000000},
+	85:  {0xffffffffffffffff, 0xfffff80000000000},
+	86:  {0xffffffffffffffff, 0xfffffc0000000000},
+	87:  {0xffffffffffffffff, 0xfffffe0000000000},
+	88:  {0xffffffffffffffff, 0xffffff0000000000},
+	89:  {0xffffffffffffffff, 0xffffff8000000000},
+	90:  {0xffffffffffffffff, 0xffffffc000000000},
+	91:  {0xffffffffffffffff, 0xffffffe000000000},
+	92:  {0xffffffffffffffff, 0xfffffff000000000},
+	93:  {0xffffffffffffffff, 0xfffffff800000000},
+	94:  {0xffffffffffffffff, 0xfffffffc00000000},
+	95:  {0xffffffffffffffff, 0xfffffffe00000000},
+	96:  {0xffffffffffffffff, 0xffffffff00000000},
+	97:  {0xffffffffffffffff, 0xffffffff80000000},
+	98:  {0xffffffffffffffff, 0xffffffffc0000000},
+	99:  {0xffffffffffffffff, 0xffffffffe0000000},
+	100: {0xffffffffffffffff, 0xfffffffff0000000},
+	101: {0xffffffffffffffff, 0xfffffffff8000000},
+	102: {0xffffffffffffffff, 0xfffffffffc000000},
+	103: {0xffffffffffffffff, 0xfffffffffe000000},
+	104: {0xffffffffffffffff, 0xffffffffff000000},
+	105: {0xffffffffffffffff, 0xffffffffff800000},
+	106: {0xffffffffffffffff, 0xffffffffffc00000},
+	107: {0xffffffffffffffff, 0xffffffffffe00000},
+	108: {0xffffffffffffffff, 0xfffffffffff00000},
+	109: {0xffffffffffffffff, 0xfffffffffff80000},
+	110: {0xffffffffffffffff, 0xfffffffffffc0000},
+	111: {0xffffffffffffffff, 0xfffffffffffe0000},
+	112: {0xffffffffffffffff, 0xffffffffffff0000},
+	113: {0xffffffffffffffff, 0xffffffffffff8000},
+	114: {0xffffffffffffffff, 0xffffffffffffc000},
+	115: {0xffffffffffffffff, 0xffffffffffffe000},
+	116: {0xffffffffffffffff, 0xfffffffffffff000},
+	117: {0xffffffffffffffff, 0xfffffffffffff800},
+	118: {0xffffffffffffffff, 0xfffffffffffffc00},
+	119: {0xffffffffffffffff, 0xfffffffffffffe00},
+	120: {0xffffffffffffffff, 0xffffffffffffff00},
+	121: {0xffffffffffffffff, 0xffffffffffffff80},
+	122: {0xffffffffffffffff, 0xffffffffffffffc0},
+	123: {0xffffffffffffffff, 0xffffffffffffffe0},
+	124: {0xffffffffffffffff, 0xfffffffffffffff0},
+	125: {0xffffffffffffffff, 0xfffffffffffffff8},
+	126: {0xffffffffffffffff, 0xfffffffffffffffc},
+	127: {0xffffffffffffffff, 0xfffffffffffffffe},
+	128: {0xffffffffffffffff, 0xffffffffffffffff},
+}
diff --git a/vendor/inet.af/netaddr/netaddr.go b/vendor/inet.af/netaddr/netaddr.go
new file mode 100644
index 0000000000..9768406332
--- /dev/null
+++ b/vendor/inet.af/netaddr/netaddr.go
@@ -0,0 +1,1919 @@
+// Copyright 2020 The Inet.Af AUTHORS. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package netaddr contains an IP address type that's in many ways
+// better than the Go standard library's net.IP type. Building on that
+// IP type, the package also contains IPPrefix, IPPort, IPRange, and
+// IPSet types.
+//
+// Notably, this package's IP type takes less memory, is immutable,
+// comparable (supports == and being a map key), and more. See
+// https://github.com/inetaf/netaddr for background.
+//
+// IPv6 Zones
+//
+// IP and IPPort are the only types in this package that support IPv6
+// zones. Other types silently drop any passed-in zones.
+package netaddr // import "inet.af/netaddr"
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"net"
+	"sort"
+	"strconv"
+	"strings"
+
+	"go4.org/intern"
+)
+
+// Sizes: (64-bit)
+//   net.IP:     24 byte slice header + {4, 16} = 28 to 40 bytes
+//   net.IPAddr: 40 byte slice header + {4, 16} = 44 to 56 bytes + zone length
+//   netaddr.IP: 24 bytes (zone is per-name singleton, shared across all users)
+
+// IP represents an IPv4 or IPv6 address (with or without a scoped
+// addressing zone), similar to Go's net.IP or net.IPAddr.
+//
+// Unlike net.IP or net.IPAddr, the netaddr.IP is a comparable value
+// type (it supports == and can be a map key) and is immutable.
+// Its memory representation is 24 bytes on 64-bit machines (the same
+// size as a Go slice header) for both IPv4 and IPv6 addresses.
+type IP struct {
+	// addr are the hi and lo bits of an IPv6 address. If z==z4,
+	// hi and lo contain the IPv4-mapped IPv6 address.
+	//
+	// hi and lo are constructed by interpreting a 16-byte IPv6
+	// address as a big-endian 128-bit number. The most significant
+	// bits of that number go into hi, the rest into lo.
+	//
+	// For example, 0011:2233:4455:6677:8899:aabb:ccdd:eeff is stored as:
+	//  addr.hi = 0x0011223344556677
+	//  addr.lo = 0x8899aabbccddeeff
+	//
+	// We store IPs like this, rather than as [16]byte, because it
+	// turns most operations on IPs into arithmetic and bit-twiddling
+	// operations on 64-bit registers, which is much faster than
+	// bytewise processing.
+	addr uint128
+
+	// z is a combination of the address family and the IPv6 zone.
+	//
+	// nil means invalid IP address (for the IP zero value).
+	// z4 means an IPv4 address.
+	// z6noz means an IPv6 address without a zone.
+	//
+	// Otherwise it's the interned zone name string.
+	z *intern.Value
+}
+
+// z0, z4, and z6noz are sentinel IP.z values.
+// See the IP type's field docs.
+var (
+	z0    = (*intern.Value)(nil)
+	z4    = new(intern.Value)
+	z6noz = new(intern.Value)
+)
+
+// IPv6LinkLocalAllNodes returns the IPv6 link-local all nodes multicast
+// address ff02::1.
+func IPv6LinkLocalAllNodes() IP { return IPv6Raw([16]byte{0: 0xff, 1: 0x02, 15: 0x01}) }
+
+// IPv6Unspecified returns the IPv6 unspecified address ::.
+func IPv6Unspecified() IP { return IP{z: z6noz} }
+
+// IPv4 returns the IP of the IPv4 address a.b.c.d.
+func IPv4(a, b, c, d uint8) IP {
+	return IP{
+		addr: uint128{0, 0xffff00000000 | uint64(a)<<24 | uint64(b)<<16 | uint64(c)<<8 | uint64(d)},
+		z:    z4,
+	}
+}
+
+// IPv6Raw returns the IPv6 address given by the bytes in addr,
+// without an implicit Unmap call to unmap any v6-mapped IPv4
+// address.
+func IPv6Raw(addr [16]byte) IP {
+	return IP{
+		addr: uint128{
+			binary.BigEndian.Uint64(addr[:8]),
+			binary.BigEndian.Uint64(addr[8:]),
+		},
+		z: z6noz,
+	}
+}
+
+// ipv6Slice is like IPv6Raw, but operates on a 16-byte slice. Assumes
+// slice is 16 bytes, caller must enforce this.
+func ipv6Slice(addr []byte) IP {
+	return IP{
+		addr: uint128{
+			binary.BigEndian.Uint64(addr[:8]),
+			binary.BigEndian.Uint64(addr[8:]),
+		},
+		z: z6noz,
+	}
+}
+
+// IPFrom16 returns the IP address given by the bytes in addr,
+// unmapping any v6-mapped IPv4 address.
+//
+// It is equivalent to calling IPv6Raw(addr).Unmap().
+func IPFrom16(addr [16]byte) IP {
+	return IPv6Raw(addr).Unmap()
+}
+
+// IPFrom4 returns the IPv4 address given by the bytes in addr.
+// It is equivalent to calling IPv4(addr[0], addr[1], addr[2], addr[3]).
+func IPFrom4(addr [4]byte) IP {
+	return IPv4(addr[0], addr[1], addr[2], addr[3])
+}
+
+// ParseIP parses s as an IP address, returning the result. The string
+// s can be in dotted decimal ("192.0.2.1"), IPv6 ("2001:db8::68"),
+// or IPv6 with a scoped addressing zone ("fe80::1cc0:3e8c:119f:c2e1%ens18").
+func ParseIP(s string) (IP, error) {
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '.':
+			return parseIPv4(s)
+		case ':':
+			return parseIPv6(s)
+		case '%':
+			// Assume that this was trying to be an IPv6 address with
+			// a zone specifier, but the address is missing.
+			return IP{}, parseIPError{in: s, msg: "missing IPv6 address"}
+		}
+	}
+	return IP{}, parseIPError{in: s, msg: "unable to parse IP"}
+}
+
+// MustParseIP calls ParseIP(s) and panics on error.
+// It is intended for use in tests with hard-coded strings.
+func MustParseIP(s string) IP {
+	ip, err := ParseIP(s)
+	if err != nil {
+		panic(err)
+	}
+	return ip
+}
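+
+// An illustrative sketch of the parsing API above (added for clarity; not
+// part of the upstream netaddr source):
+//
+//	ip, err := ParseIP("192.0.2.1") // IPv4 dotted decimal
+//	if err != nil {
+//		panic(err)
+//	}
+//	ip6 := MustParseIP("2001:db8::68")   // IPv6; panics on malformed input
+//	zoned := MustParseIP("fe80::1%eth0") // IPv6 with a scoped zone
+//	fmt.Println(ip.Is4(), ip6.Is6(), zoned.Zone()) // true true eth0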
+
+type parseIPError struct {
+	in  string // the string given to ParseIP
+	msg string // an explanation of the parse failure
+	at  string // optionally, the unparsed portion of in at which the error occurred.
+}
+
+func (err parseIPError) Error() string {
+	if err.at != "" {
+		return fmt.Sprintf("ParseIP(%q): %s (at %q)", err.in, err.msg, err.at)
+	}
+	return fmt.Sprintf("ParseIP(%q): %s", err.in, err.msg)
+}
+
+// parseIPv4 parses s as an IPv4 address (in form "192.168.0.1").
+func parseIPv4(s string) (ip IP, err error) {
+	var fields [3]uint8
+	var val, pos int
+	for i := 0; i < len(s); i++ {
+		if s[i] >= '0' && s[i] <= '9' {
+			val = val*10 + int(s[i]) - '0'
+			if val > 255 {
+				return IP{}, parseIPError{in: s, msg: "IPv4 field has value >255"}
+			}
+		} else if s[i] == '.' {
+			// .1.2.3
+			// 1.2.3.
+			// 1..2.3
+			if i == 0 || i == len(s)-1 || s[i-1] == '.' {
+				return IP{}, parseIPError{in: s, msg: "IPv4 field must have at least one digit", at: s[i:]}
+			}
+			// 1.2.3.4.5
+			if pos == 3 {
+				return IP{}, parseIPError{in: s, msg: "IPv4 address too long"}
+			}
+			fields[pos] = uint8(val)
+			pos++
+			val = 0
+		} else {
+			return IP{}, parseIPError{in: s, msg: "unexpected character", at: s[i:]}
+		}
+	}
+	if pos < 3 {
+		return IP{}, parseIPError{in: s, msg: "IPv4 address too short"}
+	}
+	return IPv4(fields[0], fields[1], fields[2], uint8(val)), nil
+}
+
+// parseIPv6 parses s as an IPv6 address (in form "2001:db8::68").
+func parseIPv6(in string) (IP, error) {
+	s := in
+
+	// Split off the zone right from the start. Yes it's a second scan
+	// of the string, but trying to handle it inline makes a bunch of
+	// other inner loop conditionals more expensive, and it ends up
+	// being slower.
+	zone := ""
+	i := strings.IndexByte(s, '%')
+	if i != -1 {
+		s, zone = s[:i], s[i+1:]
+		if zone == "" {
+			// Not allowed to have an empty zone if explicitly specified.
+			return IP{}, parseIPError{in: in, msg: "zone must be a non-empty string"}
+		}
+	}
+
+	var ip [16]byte
+	ellipsis := -1 // position of ellipsis in ip
+
+	// Might have leading ellipsis
+	if len(s) >= 2 && s[0] == ':' && s[1] == ':' {
+		ellipsis = 0
+		s = s[2:]
+		// Might be only ellipsis
+		if len(s) == 0 {
+			return IPv6Unspecified().WithZone(zone), nil
+		}
+	}
+
+	// Loop, parsing hex numbers followed by colon.
+	i = 0
+	for i < 16 {
+		// Hex number. Similar to parseIPv4, inlining the hex number
+		// parsing yields a significant performance increase.
+		off := 0
+		acc := uint32(0)
+		for ; off < len(s); off++ {
+			c := s[off]
+			if c >= '0' && c <= '9' {
+				acc = (acc << 4) + uint32(c-'0')
+			} else if c >= 'a' && c <= 'f' {
+				acc = (acc << 4) + uint32(c-'a'+10)
+			} else if c >= 'A' && c <= 'F' {
+				acc = (acc << 4) + uint32(c-'A'+10)
+			} else {
+				break
+			}
+			if acc > math.MaxUint16 {
+				// Overflow, fail.
+				return IP{}, parseIPError{in: in, msg: "IPv6 field has value >=2^16", at: s}
+			}
+		}
+		if off == 0 {
+			// No digits found, fail.
+			return IP{}, parseIPError{in: in, msg: "each colon-separated field must have at least one digit", at: s}
+		}
+
+		// If followed by dot, might be in trailing IPv4.
+		if off < len(s) && s[off] == '.' {
+			if ellipsis < 0 && i != 12 {
+				// Not the right place.
+				return IP{}, parseIPError{in: in, msg: "embedded IPv4 address must replace the final 2 fields of the address", at: s}
+			}
+			if i+4 > 16 {
+				// Not enough room.
+				return IP{}, parseIPError{in: in, msg: "too many hex fields to fit an embedded IPv4 at the end of the address", at: s}
+			}
+			// TODO: could make this a bit faster by having a helper
+			// that parses to a [4]byte, and have both parseIPv4 and
+			// parseIPv6 use it.
+			ip4, err := parseIPv4(s)
+			if err != nil {
+				return IP{}, parseIPError{in: in, msg: err.Error(), at: s}
+			}
+			ip[i] = ip4.v4(0)
+			ip[i+1] = ip4.v4(1)
+			ip[i+2] = ip4.v4(2)
+			ip[i+3] = ip4.v4(3)
+			s = ""
+			i += 4
+			break
+		}
+
+		// Save this 16-bit chunk.
+		ip[i] = byte(acc >> 8)
+		ip[i+1] = byte(acc)
+		i += 2
+
+		// Stop at end of string.
+		s = s[off:]
+		if len(s) == 0 {
+			break
+		}
+
+		// Otherwise must be followed by colon and more.
+		if s[0] != ':' {
+			return IP{}, parseIPError{in: in, msg: "unexpected character, want colon", at: s}
+		} else if len(s) == 1 {
+			return IP{}, parseIPError{in: in, msg: "colon must be followed by more characters", at: s}
+		}
+		s = s[1:]
+
+		// Look for ellipsis.
+		if s[0] == ':' {
+			if ellipsis >= 0 { // already have one
+				return IP{}, parseIPError{in: in, msg: "multiple :: in address", at: s}
+			}
+			ellipsis = i
+			s = s[1:]
+			if len(s) == 0 { // can be at end
+				break
+			}
+		}
+	}
+
+	// Must have used entire string.
+	if len(s) != 0 {
+		return IP{}, parseIPError{in: in, msg: "trailing garbage after address", at: s}
+	}
+
+	// If didn't parse enough, expand ellipsis.
+	if i < 16 {
+		if ellipsis < 0 {
+			return IP{}, parseIPError{in: in, msg: "address string too short"}
+		}
+		n := 16 - i
+		for j := i - 1; j >= ellipsis; j-- {
+			ip[j+n] = ip[j]
+		}
+		for j := ellipsis + n - 1; j >= ellipsis; j-- {
+			ip[j] = 0
+		}
+	} else if ellipsis >= 0 {
+		// Ellipsis must represent at least one 0 group.
+		return IP{}, parseIPError{in: in, msg: "the :: must expand to at least one field of zeros"}
+	}
+	return IPv6Raw(ip).WithZone(zone), nil
+}
+
+// FromStdIP returns an IP from the standard library's IP type.
+//
+// If std is invalid, ok is false.
+//
+// FromStdIP implicitly unmaps IPv6-mapped IPv4 addresses. That is, if
+// len(std) == 16 and contains an IPv4 address, only the IPv4 part is
+// returned, without the IPv6 wrapper. This is the common form returned by
+// the standard library's ParseIP: https://play.golang.org/p/qdjylUkKWxl.
+// To convert a standard library IP without the implicit unmapping, use
+// FromStdIPRaw.
+func FromStdIP(std net.IP) (ip IP, ok bool) {
+	ret, ok := FromStdIPRaw(std)
+	if ret.Is4in6() {
+		ret.z = z4
+	}
+	return ret, ok
+}
+
+// FromStdIPRaw returns an IP from the standard library's IP type.
+// If std is invalid, ok is false.
+// Unlike FromStdIP, FromStdIPRaw does not do an implicit Unmap if
+// len(std) == 16 and contains an IPv6-mapped IPv4 address.
+func FromStdIPRaw(std net.IP) (ip IP, ok bool) {
+	switch len(std) {
+	case 4:
+		return IPv4(std[0], std[1], std[2], std[3]), true
+	case 16:
+		return ipv6Slice(std), true
+	}
+	return IP{}, false
+}
+
+// v4 returns the i'th byte of ip. If ip is not an IPv4, v4 returns
+// unspecified garbage.
+func (ip IP) v4(i uint8) uint8 {
+	return uint8(ip.addr.lo >> ((3 - i) * 8))
+}
+
+// v6 returns the i'th byte of ip. If ip is an IPv4 address, this
+// accesses the IPv4-mapped IPv6 address form of the IP.
+func (ip IP) v6(i uint8) uint8 {
+	return uint8(*(ip.addr.halves()[(i/8)%2]) >> ((7 - i%8) * 8))
+}
+
+// v6u16 returns the i'th 16-bit word of ip. If ip is an IPv4 address,
+// this accesses the IPv4-mapped IPv6 address form of the IP.
+func (ip IP) v6u16(i uint8) uint16 {
+	return uint16(*(ip.addr.halves()[(i/4)%2]) >> ((3 - i%4) * 16))
+}
+
+// IsZero reports whether ip is the zero value of the IP type.
+// The zero value is not a valid IP address of any type.
+//
+// Note that "0.0.0.0" and "::" are not the zero value. Use IsUnspecified to
+// check for these values instead.
+func (ip IP) IsZero() bool {
+	// Faster than comparing ip == IP{}, but effectively equivalent,
+	// as there's no way to make an IP with a nil z from this package.
+	return ip.z == z0
+}
+
+// IsValid reports whether the IP is an initialized value and not the IP
+// type's zero value.
+//
+// Note that both "0.0.0.0" and "::" are valid, non-zero values.
+func (ip IP) IsValid() bool { return ip.z != z0 }
+
+// BitLen returns the number of bits in the IP address:
+// 32 for IPv4 or 128 for IPv6.
+// For the zero value (see IP.IsZero), it returns 0.
+// For IP4-mapped IPv6 addresses, it returns 128.
+func (ip IP) BitLen() uint8 {
+	switch ip.z {
+	case z0:
+		return 0
+	case z4:
+		return 32
+	}
+	return 128
+}
+
+// Zone returns ip's IPv6 scoped addressing zone, if any.
+func (ip IP) Zone() string {
+	if ip.z == nil {
+		return ""
+	}
+	zone, _ := ip.z.Get().(string)
+	return zone
+}
+
+// Compare returns an integer comparing two IPs.
+// The result will be 0 if ip==ip2, -1 if ip < ip2, and +1 if ip > ip2.
+// The definition of "less than" is the same as the IP.Less method.
+func (ip IP) Compare(ip2 IP) int {
+	f1, f2 := ip.BitLen(), ip2.BitLen()
+	if f1 < f2 {
+		return -1
+	}
+	if f1 > f2 {
+		return 1
+	}
+	if hi1, hi2 := ip.addr.hi, ip2.addr.hi; hi1 < hi2 {
+		return -1
+	} else if hi1 > hi2 {
+		return 1
+	}
+	if lo1, lo2 := ip.addr.lo, ip2.addr.lo; lo1 < lo2 {
+		return -1
+	} else if lo1 > lo2 {
+		return 1
+	}
+	if ip.Is6() {
+		za, zb := ip.Zone(), ip2.Zone()
+		if za < zb {
+			return -1
+		} else if za > zb {
+			return 1
+		}
+	}
+	return 0
+}
+
+// Less reports whether ip sorts before ip2.
+// IP addresses sort first by length, then their address.
+// IPv6 addresses with zones sort just after the same address without a zone.
+func (ip IP) Less(ip2 IP) bool { return ip.Compare(ip2) == -1 }
+
+func (ip IP) lessOrEq(ip2 IP) bool { return ip.Compare(ip2) <= 0 }
+
+// ipZone returns the standard library net.IP from ip, as well
+// as the zone.
+// The optional reuse IP provides memory to reuse.
+func (ip IP) ipZone(reuse net.IP) (stdIP net.IP, zone string) {
+	base := reuse[:0]
+	switch {
+	case ip.z == z0:
+		return nil, ""
+	case ip.Is4():
+		a4 := ip.As4()
+		return append(base, a4[:]...), ""
+	default:
+		a16 := ip.As16()
+		return append(base, a16[:]...), ip.Zone()
+	}
+}
+
+// IPAddr returns the net.IPAddr representation of an IP. The returned value is
+// always non-nil, but the IPAddr.IP will be nil if ip is the zero value.
+// If ip contains a zone identifier, IPAddr.Zone is populated.
+func (ip IP) IPAddr() *net.IPAddr {
+	stdIP, zone := ip.ipZone(nil)
+	return &net.IPAddr{IP: stdIP, Zone: zone}
+}
+
+// Is4 reports whether ip is an IPv4 address.
+//
+// It returns false for IP4-mapped IPv6 addresses. See IP.Unmap.
+func (ip IP) Is4() bool {
+	return ip.z == z4
+}
+
+// Is4in6 reports whether ip is an IPv4-mapped IPv6 address.
+func (ip IP) Is4in6() bool {
+	return ip.Is6() && ip.addr.hi == 0 && ip.addr.lo>>32 == 0xffff
+}
+
+// Is6 reports whether ip is an IPv6 address, including IPv4-mapped
+// IPv6 addresses.
+func (ip IP) Is6() bool {
+	return ip.z != z0 && ip.z != z4
+}
+
+// Unmap returns ip with any IPv4-mapped IPv6 address prefix removed.
+//
+// That is, if ip is an IPv6 address wrapping an IPv4 address, it
+// returns the wrapped IPv4 address. Otherwise it returns ip, regardless
+// of its type.
+func (ip IP) Unmap() IP {
+	if ip.Is4in6() {
+		ip.z = z4
+	}
+	return ip
+}
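+
+// An illustrative sketch of the mapping behavior above (added for clarity;
+// not part of the upstream netaddr source):
+//
+//	mapped := IPv6Raw([16]byte{10: 0xff, 11: 0xff, 12: 192, 13: 0, 14: 2, 15: 1})
+//	fmt.Println(mapped.Is4in6())      // true: this is ::ffff:192.0.2.1
+//	fmt.Println(mapped.Unmap().Is4()) // true: Unmap yields the plain IPv4 form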
+
+// WithZone returns an IP that's the same as ip but with the provided
+// zone. If zone is empty, the zone is removed. If ip is an IPv4
+// address it's returned unchanged.
+func (ip IP) WithZone(zone string) IP {
+	if !ip.Is6() {
+		return ip
+	}
+	if zone == "" {
+		ip.z = z6noz
+		return ip
+	}
+	ip.z = intern.GetByString(zone)
+	return ip
+}
+
+// withoutZone unconditionally strips the zone from ip.
+// It's similar to WithZone, but small enough to be inlinable.
+func (ip IP) withoutZone() IP {
+	if !ip.Is6() {
+		return ip
+	}
+	ip.z = z6noz
+	return ip
+}
+
+// hasZone reports whether IP has an IPv6 zone.
+func (ip IP) hasZone() bool {
+	return ip.z != z0 && ip.z != z4 && ip.z != z6noz
+}
+
+// IsLinkLocalUnicast reports whether ip is a link-local unicast address.
+// If ip is the zero value, it will return false.
+func (ip IP) IsLinkLocalUnicast() bool {
+	// Dynamic Configuration of IPv4 Link-Local Addresses
+	// https://datatracker.ietf.org/doc/html/rfc3927#section-2.1
+	if ip.Is4() {
+		return ip.v4(0) == 169 && ip.v4(1) == 254
+	}
+	// IP Version 6 Addressing Architecture (2.4 Address Type Identification)
+	// https://datatracker.ietf.org/doc/html/rfc4291#section-2.4
+	if ip.Is6() {
+		return ip.v6u16(0)&0xffc0 == 0xfe80
+	}
+	return false // zero value
+}
+
+// IsLoopback reports whether ip is a loopback address. If ip is the zero value,
+// it will return false.
+func (ip IP) IsLoopback() bool {
+	// Requirements for Internet Hosts -- Communication Layers (3.2.1.3 Addressing)
+	// https://datatracker.ietf.org/doc/html/rfc1122#section-3.2.1.3
+	if ip.Is4() {
+		return ip.v4(0) == 127
+	}
+	// IP Version 6 Addressing Architecture (2.4 Address Type Identification)
+	// https://datatracker.ietf.org/doc/html/rfc4291#section-2.4
+	if ip.Is6() {
+		return ip.addr.hi == 0 && ip.addr.lo == 1
+	}
+	return false // zero value
+}
+
+// IsMulticast reports whether ip is a multicast address. If ip is the zero
+// value, it will return false.
+func (ip IP) IsMulticast() bool {
+	// Host Extensions for IP Multicasting (4. HOST GROUP ADDRESSES)
+	// https://datatracker.ietf.org/doc/html/rfc1112#section-4
+	if ip.Is4() {
+		return ip.v4(0)&0xf0 == 0xe0
+	}
+	// IP Version 6 Addressing Architecture (2.4 Address Type Identification)
+	// https://datatracker.ietf.org/doc/html/rfc4291#section-2.4
+	if ip.Is6() {
+		return ip.addr.hi>>(64-8) == 0xff // ip.v6(0) == 0xff
+	}
+	return false // zero value
+}
+
+// IsInterfaceLocalMulticast reports whether ip is an IPv6 interface-local
+// multicast address. If ip is the zero value or an IPv4 address, it will return
+// false.
+func (ip IP) IsInterfaceLocalMulticast() bool {
+	// IPv6 Addressing Architecture (2.7.1. Pre-Defined Multicast Addresses)
+	// https://datatracker.ietf.org/doc/html/rfc4291#section-2.7.1
+	if ip.Is6() {
+		return ip.v6u16(0)&0xff0f == 0xff01
+	}
+	return false // zero value
+}
+
+// IsLinkLocalMulticast reports whether ip is a link-local multicast address.
+// If ip is the zero value, it will return false.
+func (ip IP) IsLinkLocalMulticast() bool {
+	// IPv4 Multicast Guidelines (4. Local Network Control Block (224.0.0/24))
+	// https://datatracker.ietf.org/doc/html/rfc5771#section-4
+	if ip.Is4() {
+		return ip.v4(0) == 224 && ip.v4(1) == 0 && ip.v4(2) == 0
+	}
+	// IPv6 Addressing Architecture (2.7.1. Pre-Defined Multicast Addresses)
+	// https://datatracker.ietf.org/doc/html/rfc4291#section-2.7.1
+	if ip.Is6() {
+		return ip.v6u16(0)&0xff0f == 0xff02
+	}
+	return false // zero value
+}
+
+// IsGlobalUnicast reports whether ip is a global unicast address.
+//
+// It returns true for IPv6 addresses which fall outside of the current
+// IANA-allocated 2000::/3 global unicast space, with the exception of the
+// link-local address space. It also returns true even if ip is in the IPv4
+// private address space or IPv6 unique local address space. If ip is the zero
+// value, it will return false.
+//
+// For reference, see RFC 1122, RFC 4291, and RFC 4632.
+func (ip IP) IsGlobalUnicast() bool {
+	if ip.z == z0 {
+		// Invalid or zero-value.
+		return false
+	}
+
+	// Match the stdlib's IsGlobalUnicast logic. Notably private IPv4 addresses
+	// and ULA IPv6 addresses are still considered "global unicast".
+	if ip.Is4() && (ip == IPv4(0, 0, 0, 0) || ip == IPv4(255, 255, 255, 255)) {
+		return false
+	}
+
+	return ip != IPv6Unspecified() &&
+		!ip.IsLoopback() &&
+		!ip.IsMulticast() &&
+		!ip.IsLinkLocalUnicast()
+}
+
+// IsPrivate reports whether ip is a private address, according to RFC 1918
+// (IPv4 addresses) and RFC 4193 (IPv6 addresses). That is, it reports whether
+// ip is in 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, or fc00::/7. This is the
+// same as the standard library's net.IP.IsPrivate.
+func (ip IP) IsPrivate() bool {
+	// Match the stdlib's IsPrivate logic.
+	if ip.Is4() {
+		// RFC 1918 allocates 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 as
+		// private IPv4 address subnets.
+		return ip.v4(0) == 10 ||
+			(ip.v4(0) == 172 && ip.v4(1)&0xf0 == 16) ||
+			(ip.v4(0) == 192 && ip.v4(1) == 168)
+	}
+
+	if ip.Is6() {
+		// RFC 4193 allocates fc00::/7 as the unique local unicast IPv6 address
+		// subnet.
+		return ip.v6(0)&0xfe == 0xfc
+	}
+
+	return false // zero value
+}
+
+// IsUnspecified reports whether ip is an unspecified address, either the IPv4
+// address "0.0.0.0" or the IPv6 address "::".
+//
+// Note that the IP zero value is not an unspecified address. Use IsZero to
+// check for the zero value instead.
+func (ip IP) IsUnspecified() bool {
+	return ip == IPv4(0, 0, 0, 0) || ip == IPv6Unspecified()
+}
+
+// Prefix applies a CIDR mask of leading bits to IP, producing an IPPrefix
+// of the specified length. If IP is the zero value, a zero-value IPPrefix and
+// a nil error are returned. If bits is larger than 32 for an IPv4 address or
+// 128 for an IPv6 address, an error is returned.
+func (ip IP) Prefix(bits uint8) (IPPrefix, error) {
+	effectiveBits := bits
+	switch ip.z {
+	case z0:
+		return IPPrefix{}, nil
+	case z4:
+		if bits > 32 {
+			return IPPrefix{}, fmt.Errorf("prefix length %d too large for IPv4", bits)
+		}
+		effectiveBits += 96
+	default:
+		if bits > 128 {
+			return IPPrefix{}, fmt.Errorf("prefix length %d too large for IPv6", bits)
+		}
+	}
+	ip.addr = ip.addr.and(mask6[effectiveBits])
+	return IPPrefixFrom(ip, bits), nil
+}
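+
+// An illustrative sketch of Prefix (added for clarity; not part of the
+// upstream netaddr source):
+//
+//	p, err := MustParseIP("192.0.2.200").Prefix(24)
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(p) // 192.0.2.0/24: host bits beyond the /24 are masked off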
+
+// Netmask applies a bit mask to IP, producing an IPPrefix. If IP is the
+// zero value, a zero-value IPPrefix and a nil error are returned. If the
+// netmask length is not 4 for IPv4 or 16 for IPv6, an error is
+// returned. If the netmask is non-contiguous, an error is returned.
+func (ip IP) Netmask(mask []byte) (IPPrefix, error) {
+	l := len(mask)
+
+	switch ip.z {
+	case z0:
+		return IPPrefix{}, nil
+	case z4:
+		if l != net.IPv4len {
+			return IPPrefix{}, fmt.Errorf("netmask length %d incorrect for IPv4", l)
+		}
+	default:
+		if l != net.IPv6len {
+			return IPPrefix{}, fmt.Errorf("netmask length %d incorrect for IPv6", l)
+		}
+	}
+
+	ones, bits := net.IPMask(mask).Size()
+	if ones == 0 && bits == 0 {
+		return IPPrefix{}, errors.New("netmask is non-contiguous")
+	}
+
+	return ip.Prefix(uint8(ones))
+}
+
+// As16 returns the IP address in its 16 byte representation.
+// IPv4 addresses are returned in their v6-mapped form.
+// IPv6 addresses with zones are returned without their zone (use the
+// Zone method to get it).
+// The ip zero value returns all zeroes.
+func (ip IP) As16() [16]byte {
+	var ret [16]byte
+	binary.BigEndian.PutUint64(ret[:8], ip.addr.hi)
+	binary.BigEndian.PutUint64(ret[8:], ip.addr.lo)
+	return ret
+}
+
+// As4 returns an IPv4 or IPv4-in-IPv6 address in its 4 byte representation.
+// If ip is the IP zero value or an IPv6 address, As4 panics.
+// Note that 0.0.0.0 is not the zero value.
+func (ip IP) As4() [4]byte {
+	if ip.z == z4 || ip.Is4in6() {
+		var ret [4]byte
+		binary.BigEndian.PutUint32(ret[:], uint32(ip.addr.lo))
+		return ret
+	}
+	if ip.z == z0 {
+		panic("As4 called on IP zero value")
+	}
+	panic("As4 called on IPv6 address")
+}
+
+// Next returns the IP following ip.
+// If there is none, it returns the IP zero value.
+func (ip IP) Next() IP {
+	ip.addr = ip.addr.addOne()
+	if ip.Is4() {
+		if uint32(ip.addr.lo) == 0 {
+			// Overflowed.
+			return IP{}
+		}
+	} else {
+		if ip.addr.isZero() {
+			// Overflowed
+			return IP{}
+		}
+	}
+	return ip
+}
+
+// Prior returns the IP before ip.
+// If there is none, it returns the IP zero value.
+func (ip IP) Prior() IP {
+	if ip.Is4() {
+		if uint32(ip.addr.lo) == 0 {
+			return IP{}
+		}
+	} else if ip.addr.isZero() {
+		return IP{}
+	}
+	ip.addr = ip.addr.subOne()
+	return ip
+}
+
+// String returns the string form of the IP address ip.
+// It returns one of 4 forms:
+//
+//   - "invalid IP", if ip is the zero value
+//   - IPv4 dotted decimal ("192.0.2.1")
+//   - IPv6 ("2001:db8::1")
+//   - IPv6 with zone ("fe80:db8::1%eth0")
+//
+// Note that unlike the Go standard library's IP.String method,
+// IP4-mapped IPv6 addresses do not format as dotted decimals.
+func (ip IP) String() string {
+	switch ip.z {
+	case z0:
+		return "zero IP"
+	case z4:
+		return ip.string4()
+	default:
+		return ip.string6()
+	}
+}
+
+// AppendTo appends a text encoding of ip,
+// as generated by MarshalText,
+// to b and returns the extended buffer.
+func (ip IP) AppendTo(b []byte) []byte {
+	switch ip.z {
+	case z0:
+		return b
+	case z4:
+		return ip.appendTo4(b)
+	default:
+		return ip.appendTo6(b)
+	}
+}
+
+// digits is a string of the hex digits from 0 to f. It's used in
+// appendDecimal and appendHex to format IP addresses.
+const digits = "0123456789abcdef"
+
+// appendDecimal appends the decimal string representation of x to b.
+func appendDecimal(b []byte, x uint8) []byte {
+	// Using this function rather than strconv.AppendUint makes IPv4
+	// string building 2x faster.
+
+	if x >= 100 {
+		b = append(b, digits[x/100])
+	}
+	if x >= 10 {
+		b = append(b, digits[x/10%10])
+	}
+	return append(b, digits[x%10])
+}
+
+// appendHex appends the hex string representation of x to b.
+func appendHex(b []byte, x uint16) []byte {
+	// Using this function rather than strconv.AppendUint makes IPv6
+	// string building 2x faster.
+
+	if x >= 0x1000 {
+		b = append(b, digits[x>>12])
+	}
+	if x >= 0x100 {
+		b = append(b, digits[x>>8&0xf])
+	}
+	if x >= 0x10 {
+		b = append(b, digits[x>>4&0xf])
+	}
+	return append(b, digits[x&0xf])
+}
+
+// appendHexPad appends the fully padded hex string representation of x to b.
+func appendHexPad(b []byte, x uint16) []byte {
+	return append(b, digits[x>>12], digits[x>>8&0xf], digits[x>>4&0xf], digits[x&0xf])
+}
+
+func (ip IP) string4() string {
+	const max = len("255.255.255.255")
+	ret := make([]byte, 0, max)
+	ret = ip.appendTo4(ret)
+	return string(ret)
+}
+
+func (ip IP) appendTo4(ret []byte) []byte {
+	ret = appendDecimal(ret, ip.v4(0))
+	ret = append(ret, '.')
+	ret = appendDecimal(ret, ip.v4(1))
+	ret = append(ret, '.')
+	ret = appendDecimal(ret, ip.v4(2))
+	ret = append(ret, '.')
+	ret = appendDecimal(ret, ip.v4(3))
+	return ret
+}
+
+// string6 formats ip in IPv6 textual representation. It follows the
+// guidelines in section 4 of RFC 5952
+// (https://tools.ietf.org/html/rfc5952#section-4): no unnecessary
+// zeros, use :: to elide the longest run of zeros, and don't use ::
+// to compact a single zero field.
+func (ip IP) string6() string {
+	// Use a zone with a "plausibly long" name, so that most zone-ful
+	// IP addresses won't require additional allocation.
+	//
+	// The compiler does a cool optimization here, where ret ends up
+	// stack-allocated and so the only allocation this function does
+	// is to construct the returned string. As such, it's okay to be a
+	// bit greedy here, size-wise.
+	const max = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0")
+	ret := make([]byte, 0, max)
+	ret = ip.appendTo6(ret)
+	return string(ret)
+}
+
+func (ip IP) appendTo6(ret []byte) []byte {
+	zeroStart, zeroEnd := uint8(255), uint8(255)
+	for i := uint8(0); i < 8; i++ {
+		j := i
+		for j < 8 && ip.v6u16(j) == 0 {
+			j++
+		}
+		if l := j - i; l >= 2 && l > zeroEnd-zeroStart {
+			zeroStart, zeroEnd = i, j
+		}
+	}
+
+	for i := uint8(0); i < 8; i++ {
+		if i == zeroStart {
+			ret = append(ret, ':', ':')
+			i = zeroEnd
+			if i >= 8 {
+				break
+			}
+		} else if i > 0 {
+			ret = append(ret, ':')
+		}
+
+		ret = appendHex(ret, ip.v6u16(i))
+	}
+
+	if ip.z != z6noz {
+		ret = append(ret, '%')
+		ret = append(ret, ip.Zone()...)
+	}
+	return ret
+}
+
+// StringExpanded is like String but IPv6 addresses are expanded with leading
+// zeroes and no "::" compression. For example, "2001:db8::1" becomes
+// "2001:0db8:0000:0000:0000:0000:0000:0001".
+func (ip IP) StringExpanded() string {
+	switch ip.z {
+	case z0, z4:
+		return ip.String()
+	}
+
+	const size = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")
+	ret := make([]byte, 0, size)
+	for i := uint8(0); i < 8; i++ {
+		if i > 0 {
+			ret = append(ret, ':')
+		}
+
+		ret = appendHexPad(ret, ip.v6u16(i))
+	}
+
+	if ip.z != z6noz {
+		// The addition of a zone will cause a second allocation, but when there
+		// is no zone the ret slice will be stack allocated.
+		ret = append(ret, '%')
+		ret = append(ret, ip.Zone()...)
+	}
+	return string(ret)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String, with one exception:
+// If ip is the zero value, the encoding is the empty string.
+func (ip IP) MarshalText() ([]byte, error) {
+	switch ip.z {
+	case z0:
+		return []byte(""), nil
+	case z4:
+		max := len("255.255.255.255")
+		b := make([]byte, 0, max)
+		return ip.appendTo4(b), nil
+	default:
+		max := len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0")
+		b := make([]byte, 0, max)
+		return ip.appendTo6(b), nil
+	}
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The IP address is expected in a form accepted by ParseIP.
+// It returns an error if *ip is not the IP zero value.
+func (ip *IP) UnmarshalText(text []byte) error {
+	if ip.z != z0 {
+		return errors.New("refusing to Unmarshal into non-zero IP")
+	}
+	if len(text) == 0 {
+		return nil
+	}
+	var err error
+	*ip, err = ParseIP(string(text))
+	return err
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (ip IP) MarshalBinary() ([]byte, error) {
+	switch ip.z {
+	case z0:
+		return nil, nil
+	case z4:
+		b := ip.As4()
+		return b[:], nil
+	default:
+		b16 := ip.As16()
+		b := b16[:]
+		if z := ip.Zone(); z != "" {
+			b = append(b, []byte(z)...)
+		}
+		return b, nil
+	}
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (ip *IP) UnmarshalBinary(b []byte) error {
+	if ip.z != z0 {
+		return errors.New("refusing to Unmarshal into non-zero IP")
+	}
+	n := len(b)
+	switch {
+	case n == 0:
+		return nil
+	case n == 4:
+		*ip = IPv4(b[0], b[1], b[2], b[3])
+		return nil
+	case n == 16:
+		*ip = ipv6Slice(b)
+		return nil
+	case n > 16:
+		*ip = ipv6Slice(b[:16]).WithZone(string(b[16:]))
+		return nil
+	}
+	return fmt.Errorf("unexpected ip size: %v", len(b))
+}
+
+// IPPort is an IP and a port number.
+type IPPort struct {
+	ip   IP
+	port uint16
+}
+
+// IPPortFrom returns an IPPort with IP ip and port port.
+// It does not allocate.
+func IPPortFrom(ip IP, port uint16) IPPort { return IPPort{ip: ip, port: port} }
+
+// WithIP returns an IPPort with IP ip and port p.Port().
+func (p IPPort) WithIP(ip IP) IPPort { return IPPort{ip: ip, port: p.port} }
+
+// WithPort returns an IPPort with IP p.IP() and port port.
+func (p IPPort) WithPort(port uint16) IPPort { return IPPort{ip: p.ip, port: port} }
+
+// IP returns p's IP.
+func (p IPPort) IP() IP { return p.ip }
+
+// Port returns p's port.
+func (p IPPort) Port() uint16 { return p.port }
+
+// splitIPPort splits s into an IP address string and a port
+// string. It splits strings shaped like "foo:bar" or "[foo]:bar",
+// without further validating the substrings. v6 indicates whether the
+// ip string should parse as an IPv6 address or an IPv4 address, in
+// order for s to be a valid ip:port string.
+func splitIPPort(s string) (ip, port string, v6 bool, err error) {
+	i := strings.LastIndexByte(s, ':')
+	if i == -1 {
+		return "", "", false, errors.New("not an ip:port")
+	}
+
+	ip, port = s[:i], s[i+1:]
+	if len(ip) == 0 {
+		return "", "", false, errors.New("no IP")
+	}
+	if len(port) == 0 {
+		return "", "", false, errors.New("no port")
+	}
+	if ip[0] == '[' {
+		if len(ip) < 2 || ip[len(ip)-1] != ']' {
+			return "", "", false, errors.New("missing ]")
+		}
+		ip = ip[1 : len(ip)-1]
+		v6 = true
+	}
+
+	return ip, port, v6, nil
+}
+
+// ParseIPPort parses s as an IPPort.
+//
+// It doesn't do any name resolution, and ports must be numeric.
+func ParseIPPort(s string) (IPPort, error) {
+	var ipp IPPort
+	ip, port, v6, err := splitIPPort(s)
+	if err != nil {
+		return ipp, err
+	}
+	port16, err := strconv.ParseUint(port, 10, 16)
+	if err != nil {
+		return ipp, fmt.Errorf("invalid port %q parsing %q", port, s)
+	}
+	ipp.port = uint16(port16)
+	ipp.ip, err = ParseIP(ip)
+	if err != nil {
+		return IPPort{}, err
+	}
+	if v6 && ipp.ip.Is4() {
+		return IPPort{}, fmt.Errorf("invalid ip:port %q, square brackets can only be used with IPv6 addresses", s)
+	} else if !v6 && ipp.ip.Is6() {
+		return IPPort{}, fmt.Errorf("invalid ip:port %q, IPv6 addresses must be surrounded by square brackets", s)
+	}
+	return ipp, nil
+}
+
+// MustParseIPPort calls ParseIPPort(s) and panics on error.
+// It is intended for use in tests with hard-coded strings.
+func MustParseIPPort(s string) IPPort {
+	ip, err := ParseIPPort(s)
+	if err != nil {
+		panic(err)
+	}
+	return ip
+}
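+
+// An illustrative sketch of the ip:port parsing rules above (added for
+// clarity; not part of the upstream netaddr source):
+//
+//	p4 := MustParseIPPort("192.0.2.1:80")      // IPv4: no brackets
+//	p6 := MustParseIPPort("[2001:db8::1]:443") // IPv6: brackets required
+//	fmt.Println(p4.Port(), p6.IP())            // 80 2001:db8::1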
+
+// IsZero reports whether p is its zero value.
+func (p IPPort) IsZero() bool { return p == IPPort{} }
+
+// IsValid reports whether p.IP() is valid.
+// All ports are valid, including zero.
+func (p IPPort) IsValid() bool { return p.ip.IsValid() }
+
+// Valid reports whether p.IP() is valid.
+// All ports are valid, including zero.
+//
+// Deprecated: use the correctly named and identical IsValid method instead.
+func (p IPPort) Valid() bool { return p.IsValid() }
+
+func (p IPPort) String() string {
+	switch p.ip.z {
+	case z0:
+		return "invalid IPPort"
+	case z4:
+		a := p.ip.As4()
+		return fmt.Sprintf("%d.%d.%d.%d:%d", a[0], a[1], a[2], a[3], p.port)
+	default:
+		// TODO: this could be more efficient allocation-wise:
+		return net.JoinHostPort(p.ip.String(), strconv.Itoa(int(p.port)))
+	}
+}
+
+// AppendTo appends a text encoding of p,
+// as generated by MarshalText,
+// to b and returns the extended buffer.
+func (p IPPort) AppendTo(b []byte) []byte {
+	switch p.ip.z {
+	case z0:
+		return b
+	case z4:
+		b = p.ip.appendTo4(b)
+	default:
+		b = append(b, '[')
+		b = p.ip.appendTo6(b)
+		b = append(b, ']')
+	}
+	b = append(b, ':')
+	b = strconv.AppendInt(b, int64(p.port), 10)
+	return b
+}
+
+// MarshalText implements the encoding.TextMarshaler interface. The
+// encoding is the same as returned by String, with one exception: if
+// p.IP() is the zero value, the encoding is the empty string.
+func (p IPPort) MarshalText() ([]byte, error) {
+	var max int
+	switch p.ip.z {
+	case z0:
+	case z4:
+		max = len("255.255.255.255:65535")
+	default:
+		max = len("[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0]:65535")
+	}
+	b := make([]byte, 0, max)
+	b = p.AppendTo(b)
+	return b, nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler
+// interface. The IPPort is expected in a form accepted by
+// ParseIPPort. It returns an error if *p is not the IPPort zero
+// value.
+func (p *IPPort) UnmarshalText(text []byte) error {
+	if p.ip.z != z0 || p.port != 0 {
+		return errors.New("refusing to Unmarshal into non-zero IPPort")
+	}
+	if len(text) == 0 {
+		return nil
+	}
+	var err error
+	*p, err = ParseIPPort(string(text))
+	return err
+}
+
+// FromStdAddr maps the components of a standard library TCPAddr or
+// UDPAddr into an IPPort.
+func FromStdAddr(stdIP net.IP, port int, zone string) (_ IPPort, ok bool) {
+	ip, ok := FromStdIP(stdIP)
+	if !ok || port < 0 || port > math.MaxUint16 {
+		return
+	}
+	ip = ip.Unmap()
+	if zone != "" {
+		if ip.Is4() {
+			ok = false
+			return
+		}
+		ip = ip.WithZone(zone)
+	}
+	return IPPort{ip: ip, port: uint16(port)}, true
+}
+
+// UDPAddr returns a standard library net.UDPAddr from p.
+// The returned value is always non-nil. If p.IP() is the zero
+// value, then UDPAddr.IP is nil.
+//
+// UDPAddr necessarily does two allocations. If you have an existing
+// UDPAddr already allocated, see UDPAddrAt.
+func (p IPPort) UDPAddr() *net.UDPAddr {
+	ret := &net.UDPAddr{
+		Port: int(p.port),
+	}
+	ret.IP, ret.Zone = p.ip.ipZone(nil)
+	return ret
+}
+
+// UDPAddrAt is like UDPAddr, but reuses the provided UDPAddr, which
+// must be non-nil. If at.IP has a capacity of 16, UDPAddrAt is
+// allocation-free. It returns at to facilitate using this method as a
+// wrapper.
+func (p IPPort) UDPAddrAt(at *net.UDPAddr) *net.UDPAddr {
+	at.Port = int(p.port)
+	at.IP, at.Zone = p.ip.ipZone(at.IP)
+	return at
+}
+
+// TCPAddr returns a standard library net.TCPAddr from p.
+// The returned value is always non-nil. If p.IP() is the zero
+// value, then TCPAddr.IP is nil.
+func (p IPPort) TCPAddr() *net.TCPAddr {
+	ip, zone := p.ip.ipZone(nil)
+	return &net.TCPAddr{
+		IP:   ip,
+		Port: int(p.port),
+		Zone: zone,
+	}
+}
+
+// IPPrefix is an IP address prefix (CIDR) representing an IP network.
+//
+// The first Bits() of IP() are specified. The remaining bits match any address.
+// The range of Bits() is [0,32] for IPv4 or [0,128] for IPv6.
+type IPPrefix struct {
+	ip   IP
+	bits uint8
+}
+
+// IPPrefixFrom returns an IPPrefix with IP ip and provided bits prefix length.
+// It does not allocate.
+func IPPrefixFrom(ip IP, bits uint8) IPPrefix {
+	return IPPrefix{
+		ip:   ip.withoutZone(),
+		bits: bits,
+	}
+}
+
+// IP returns p's IP.
+func (p IPPrefix) IP() IP { return p.ip }
+
+// Bits returns p's prefix length.
+func (p IPPrefix) Bits() uint8 { return p.bits }
+
+// IsValid reports whether p.Bits() has a valid range for p.IP().
+// If p.IP() is zero, IsValid returns false.
+func (p IPPrefix) IsValid() bool { return !p.ip.IsZero() && p.bits <= p.ip.BitLen() }
+
+// Valid reports whether p.Bits() has a valid range for p.IP().
+// If p.IP() is zero, Valid returns false.
+//
+// Deprecated: use the correctly named and identical IsValid method instead.
+func (p IPPrefix) Valid() bool { return p.IsValid() }
+
+// IsZero reports whether p is its zero value.
+func (p IPPrefix) IsZero() bool { return p == IPPrefix{} }
+
+// IsSingleIP reports whether p contains exactly one IP.
+func (p IPPrefix) IsSingleIP() bool { return p.bits != 0 && p.bits == p.ip.BitLen() }
+
+// FromStdIPNet returns an IPPrefix from the standard library's IPNet type.
+// If std is invalid, ok is false.
+func FromStdIPNet(std *net.IPNet) (prefix IPPrefix, ok bool) {
+	ip, ok := FromStdIP(std.IP)
+	if !ok {
+		return IPPrefix{}, false
+	}
+
+	if l := len(std.Mask); l != net.IPv4len && l != net.IPv6len {
+		// Invalid mask.
+		return IPPrefix{}, false
+	}
+
+	ones, bits := std.Mask.Size()
+	if ones == 0 && bits == 0 {
+		// IPPrefix does not support non-contiguous masks.
+		return IPPrefix{}, false
+	}
+
+	return IPPrefix{
+		ip:   ip,
+		bits: uint8(ones),
+	}, true
+}
+
+// ParseIPPrefix parses s as an IP address prefix.
+// The string can be in the form "192.168.1.0/24" or "2001:db8::/32",
+// the CIDR notation defined in RFC 4632 and RFC 4291.
+//
+// Note that masked address bits are not zeroed. Use Masked for that.
+func ParseIPPrefix(s string) (IPPrefix, error) {
+	i := strings.LastIndexByte(s, '/')
+	if i < 0 {
+		return IPPrefix{}, fmt.Errorf("netaddr.ParseIPPrefix(%q): no '/'", s)
+	}
+	ip, err := ParseIP(s[:i])
+	if err != nil {
+		return IPPrefix{}, fmt.Errorf("netaddr.ParseIPPrefix(%q): %v", s, err)
+	}
+	s = s[i+1:]
+	bits, err := strconv.Atoi(s)
+	if err != nil {
+		return IPPrefix{}, fmt.Errorf("netaddr.ParseIPPrefix(%q): bad prefix: %v", s, err)
+	}
+	maxBits := 32
+	if ip.Is6() {
+		maxBits = 128
+	}
+	if bits < 0 || bits > maxBits {
+		return IPPrefix{}, fmt.Errorf("netaddr.ParseIPPrefix(%q): prefix length out of range", s)
+	}
+	return IPPrefixFrom(ip, uint8(bits)), nil
+}
+
+// MustParseIPPrefix calls ParseIPPrefix(s) and panics on error.
+// It is intended for use in tests with hard-coded strings.
+func MustParseIPPrefix(s string) IPPrefix {
+	ip, err := ParseIPPrefix(s)
+	if err != nil {
+		panic(err)
+	}
+	return ip
+}
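+
+// An illustrative sketch of prefix parsing (added for clarity; not part of
+// the upstream netaddr source):
+//
+//	p := MustParseIPPrefix("192.0.2.200/24")
+//	fmt.Println(p)          // 192.0.2.200/24: masked address bits are kept
+//	fmt.Println(p.Masked()) // 192.0.2.0/24: canonical form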
+
+// Masked returns p in its canonical form, with bits of p.IP() not in p.Bits() masked off.
+// If p is zero or otherwise invalid, Masked returns the zero value.
+func (p IPPrefix) Masked() IPPrefix {
+	if m, err := p.ip.Prefix(p.bits); err == nil {
+		return m
+	}
+	return IPPrefix{}
+}
+
+// Range returns the inclusive range of IPs that p covers.
+//
+// If p is zero or otherwise invalid, Range returns the zero value.
+func (p IPPrefix) Range() IPRange {
+	p = p.Masked()
+	if p.IsZero() {
+		return IPRange{}
+	}
+	return IPRangeFrom(p.ip, p.lastIP())
+}
+
+// IPNet returns the net.IPNet representation of an IPPrefix.
+// The returned value is always non-nil.
+// Any zone identifier is dropped in the conversion.
+func (p IPPrefix) IPNet() *net.IPNet {
+	if !p.IsValid() {
+		return &net.IPNet{}
+	}
+	stdIP, _ := p.ip.ipZone(nil)
+	return &net.IPNet{
+		IP:   stdIP,
+		Mask: net.CIDRMask(int(p.bits), int(p.ip.BitLen())),
+	}
+}
+
+// Contains reports whether the network p includes ip.
+//
+// An IPv4 address will not match an IPv6 prefix.
+// A v6-mapped IPv6 address will not match an IPv4 prefix.
+// A zero-value IP will not match any prefix.
+// If ip has an IPv6 zone, Contains returns false,
+// because IPPrefixes strip zones.
+func (p IPPrefix) Contains(ip IP) bool {
+	if !p.IsValid() || ip.hasZone() {
+		return false
+	}
+	if f1, f2 := p.ip.BitLen(), ip.BitLen(); f1 == 0 || f2 == 0 || f1 != f2 {
+		return false
+	}
+	if ip.Is4() {
+		// xor the IP addresses together; mismatched bits are now ones.
+		// Shift away the number of bits we don't care about.
+		// Shifts in Go are more efficient if the compiler can prove
+		// that the shift amount is smaller than the width of the shifted type (64 here).
+		// We know that p.bits is in the range 0..32 because p is Valid;
+		// the compiler doesn't know that, so mask with 63 to help it.
+		// Now truncate to 32 bits, because this is IPv4.
+		// If all the bits we care about are equal, the result will be zero.
+		return uint32((ip.addr.lo^p.ip.addr.lo)>>((32-p.bits)&63)) == 0
+	} else {
+		// xor the IP addresses together.
+		// Mask away the bits we don't care about.
+		// If all the bits we care about are equal, the result will be zero.
+		return ip.addr.xor(p.ip.addr).and(mask6[p.bits]).isZero()
+	}
+}
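+
+// An illustrative sketch of Contains (added for clarity; not part of the
+// upstream netaddr source):
+//
+//	p := MustParseIPPrefix("10.0.0.0/8")
+//	fmt.Println(p.Contains(MustParseIP("10.1.2.3")))        // true
+//	fmt.Println(p.Contains(MustParseIP("192.0.2.1")))       // false
+//	fmt.Println(p.Contains(MustParseIP("::ffff:10.0.0.1"))) // false: v6-mapped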
+
+// Overlaps reports whether p and o overlap at all.
+//
+// If p and o are of different address families or either have a zero
+// IP, it reports false. Like the Contains method, a prefix with a
+// v6-mapped IPv4 IP is still treated as an IPv6 mask.
+//
+// If either has a Bits of zero, it returns true.
+func (p IPPrefix) Overlaps(o IPPrefix) bool {
+	if !p.IsValid() || !o.IsValid() {
+		return false
+	}
+	if p == o {
+		return true
+	}
+	if p.ip.Is4() != o.ip.Is4() {
+		return false
+	}
+	var minBits uint8
+	if p.bits < o.bits {
+		minBits = p.bits
+	} else {
+		minBits = o.bits
+	}
+	if minBits == 0 {
+		return true
+	}
+	// One of these Prefix calls might look redundant, but we don't require
+	// that p and o values are normalized (via IPPrefix.Masked) first,
+	// so the Prefix call on the one that's already minBits serves to zero
+	// out any remaining bits in IP.
+	var err error
+	if p, err = p.ip.Prefix(minBits); err != nil {
+		return false
+	}
+	if o, err = o.ip.Prefix(minBits); err != nil {
+		return false
+	}
+	return p.ip == o.ip
+}
+
+// AppendTo appends a text encoding of p,
+// as generated by MarshalText,
+// to b and returns the extended buffer.
+func (p IPPrefix) AppendTo(b []byte) []byte {
+	if p.IsZero() {
+		return b
+	}
+	if !p.IsValid() {
+		return append(b, "invalid IPPrefix"...)
+	}
+
+	// p.IP is non-zero, because p is valid.
+	if p.ip.z == z4 {
+		b = p.ip.appendTo4(b)
+	} else {
+		b = p.ip.appendTo6(b)
+	}
+
+	b = append(b, '/')
+	b = appendDecimal(b, p.bits)
+	return b
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String, with one exception:
+// If p is the zero value, the encoding is the empty string.
+func (p IPPrefix) MarshalText() ([]byte, error) {
+	var max int
+	switch p.ip.z {
+	case z0:
+	case z4:
+		max = len("255.255.255.255/32")
+	default:
+		max = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0/128")
+	}
+	b := make([]byte, 0, max)
+	b = p.AppendTo(b)
+	return b, nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The IP address is expected in a form accepted by ParseIPPrefix.
+// It returns an error if *p is not the IPPrefix zero value.
+func (p *IPPrefix) UnmarshalText(text []byte) error {
+	if *p != (IPPrefix{}) {
+		return errors.New("refusing to Unmarshal into non-zero IPPrefix")
+	}
+	if len(text) == 0 {
+		return nil
+	}
+	var err error
+	*p, err = ParseIPPrefix(string(text))
+	return err
+}
+
+// String returns the CIDR notation of p: "<ip>/<bits>".
+func (p IPPrefix) String() string {
+	if p.IsZero() {
+		return "zero IPPrefix"
+	}
+	if !p.IsValid() {
+		return "invalid IPPrefix"
+	}
+	return fmt.Sprintf("%s/%d", p.ip, p.bits)
+}
+
+// lastIP returns the last IP in the prefix.
+func (p IPPrefix) lastIP() IP {
+	if !p.IsValid() {
+		return IP{}
+	}
+	a16 := p.ip.As16()
+	var off uint8
+	var bits uint8 = 128
+	if p.ip.Is4() {
+		off = 12
+		bits = 32
+	}
+	for b := p.bits; b < bits; b++ {
+		byteNum, bitInByte := b/8, 7-(b%8)
+		a16[off+byteNum] |= 1 << uint(bitInByte)
+	}
+	if p.ip.Is4() {
+		return IPFrom16(a16)
+	} else {
+		return IPv6Raw(a16) // doesn't unmap
+	}
+}
+
+// IPRange represents an inclusive range of IP addresses
+// from the same address family.
+//
+// The From() and To() IPs are inclusive bounds, both included in the
+// range.
+//
+// To be valid, the From() and To() values must be non-zero, have matching
+// address families (IPv4 vs IPv6), and From() must be less than or equal to To().
+// IPv6 zones are stripped out and ignored.
+// An invalid range may be ignored.
+type IPRange struct {
+	// from is the initial IP address in the range.
+	from IP
+
+	// to is the final IP address in the range.
+	to IP
+}
+
+// IPRangeFrom returns an IPRange from from to to.
+// It does not allocate.
+func IPRangeFrom(from, to IP) IPRange {
+	return IPRange{
+		from: from.withoutZone(),
+		to:   to.withoutZone(),
+	}
+}
+
+// From returns the lower bound of r.
+func (r IPRange) From() IP { return r.from }
+
+// To returns the upper bound of r.
+func (r IPRange) To() IP { return r.to }
+
+// ParseIPRange parses a range out of two IPs separated by a hyphen.
+//
+// It returns an error if the range is not valid.
+func ParseIPRange(s string) (IPRange, error) {
+	var r IPRange
+	h := strings.IndexByte(s, '-')
+	if h == -1 {
+		return r, fmt.Errorf("no hyphen in range %q", s)
+	}
+	from, to := s[:h], s[h+1:]
+	var err error
+	r.from, err = ParseIP(from)
+	if err != nil {
+		return r, fmt.Errorf("invalid From IP %q in range %q", from, s)
+	}
+	r.from = r.from.withoutZone()
+	r.to, err = ParseIP(to)
+	if err != nil {
+		return r, fmt.Errorf("invalid To IP %q in range %q", to, s)
+	}
+	r.to = r.to.withoutZone()
+	if !r.IsValid() {
+		return r, fmt.Errorf("range %v to %v not valid", r.from, r.to)
+	}
+	return r, nil
+}
+
+// MustParseIPRange calls ParseIPRange(s) and panics on error.
+// It is intended for use in tests with hard-coded strings.
+func MustParseIPRange(s string) IPRange {
+	r, err := ParseIPRange(s)
+	if err != nil {
+		panic(err)
+	}
+	return r
+}
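+
+// An illustrative sketch of IPRange parsing and membership (added for
+// clarity; not part of the upstream netaddr source):
+//
+//	r := MustParseIPRange("10.0.0.1-10.0.0.9")
+//	fmt.Println(r.Contains(MustParseIP("10.0.0.5"))) // true
+//	fmt.Println(r.Contains(MustParseIP("10.0.1.0"))) // false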
+
+// String returns a string representation of the range.
+//
+// For a valid range, the form is "From-To" with a single hyphen
+// separating the IPs, the same format recognized by
+// ParseIPRange.
+func (r IPRange) String() string {
+	if r.IsValid() {
+		return fmt.Sprintf("%s-%s", r.from, r.to)
+	}
+	if r.from.IsZero() || r.to.IsZero() {
+		return "zero IPRange"
+	}
+	return "invalid IPRange"
+}
+
+// AppendTo appends a text encoding of r,
+// as generated by MarshalText,
+// to b and returns the extended buffer.
+func (r IPRange) AppendTo(b []byte) []byte {
+	if r.IsZero() {
+		return b
+	}
+	b = r.from.AppendTo(b)
+	b = append(b, '-')
+	b = r.to.AppendTo(b)
+	return b
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String, with one exception:
+// If r is the zero value, the encoding is the empty string.
+func (r IPRange) MarshalText() ([]byte, error) {
+	if r.IsZero() {
+		return []byte(""), nil
+	}
+	var max int
+	if r.from.z == z4 {
+		max = len("255.255.255.255-255.255.255.255")
+	} else {
+		max = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff-ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")
+	}
+	b := make([]byte, 0, max)
+	return r.AppendTo(b), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The IP range is expected in a form accepted by ParseIPRange.
+// It returns an error if *r is not the IPRange zero value.
+func (r *IPRange) UnmarshalText(text []byte) error {
+	if *r != (IPRange{}) {
+		return errors.New("refusing to Unmarshal into non-zero IPRange")
+	}
+	if len(text) == 0 {
+		return nil
+	}
+	var err error
+	*r, err = ParseIPRange(string(text))
+	return err
+}
+
+// IsZero reports whether r is the zero value of the IPRange type.
+func (r IPRange) IsZero() bool {
+	return r == IPRange{}
+}
+
+// IsValid reports whether r.From() and r.To() are both non-zero and
+// obey the documented requirements: address families match, and From
+// is less than or equal to To.
+func (r IPRange) IsValid() bool {
+	return !r.from.IsZero() &&
+		r.from.z == r.to.z &&
+		!r.to.Less(r.from)
+}
+
+// Valid reports whether r.From() and r.To() are both non-zero and
+// obey the documented requirements: address families match, and From
+// is less than or equal to To.
+//
+// Deprecated: use the correctly named and identical IsValid method instead.
+func (r IPRange) Valid() bool { return r.IsValid() }
+
+// Contains reports whether the range r includes addr.
+//
+// An invalid range always reports false.
+//
+// If addr has an IPv6 zone, Contains returns false,
+// because IPRanges strip zones.
+func (r IPRange) Contains(addr IP) bool {
+	return r.IsValid() && !addr.hasZone() && r.contains(addr)
+}
+
+// contains is like Contains, but without the validity check.
+// addr must not have a zone.
+func (r IPRange) contains(addr IP) bool {
+	return r.from.Compare(addr) <= 0 && r.to.Compare(addr) >= 0
+}
+
+// less reports whether r is "before" other. It is before if r.From()
+// is before other.From(). If they're equal, then the larger range
+// (higher To()) comes first.
+func (r IPRange) less(other IPRange) bool {
+	if cmp := r.from.Compare(other.from); cmp != 0 {
+		return cmp < 0
+	}
+	return other.to.Less(r.to)
+}
+
+// entirelyBefore returns whether r lies entirely before other in IP
+// space.
+func (r IPRange) entirelyBefore(other IPRange) bool {
+	return r.to.Less(other.from)
+}
+
+// coveredBy returns whether r is entirely contained within
+// other.
+func (r IPRange) coveredBy(other IPRange) bool {
+	return other.from.lessOrEq(r.from) && r.to.lessOrEq(other.to)
+}
+
+// inMiddleOf returns whether r is inside other, but not touching the
+// edges of other.
+func (r IPRange) inMiddleOf(other IPRange) bool {
+	return other.from.Less(r.from) && r.to.Less(other.to)
+}
+
+// overlapsStartOf returns whether r entirely overlaps the start of
+// other, but not all of other.
+func (r IPRange) overlapsStartOf(other IPRange) bool {
+	return r.from.lessOrEq(other.from) && r.to.Less(other.to)
+}
+
+// overlapsEndOf returns whether r entirely overlaps the end of
+// other, but not all of other.
+func (r IPRange) overlapsEndOf(other IPRange) bool {
+	return other.from.Less(r.from) && other.to.lessOrEq(r.to)
+}
+
+// mergeIPRanges returns the minimum and sorted set of IP ranges that
+// cover rr.
+func mergeIPRanges(rr []IPRange) (out []IPRange, valid bool) {
+	// Always return a copy of r, to avoid aliasing slice memory in
+	// the caller.
+	switch len(rr) {
+	case 0:
+		return nil, true
+	case 1:
+		return []IPRange{rr[0]}, true
+	}
+
+	sort.Slice(rr, func(i, j int) bool { return rr[i].less(rr[j]) })
+	out = make([]IPRange, 1, len(rr))
+	out[0] = rr[0]
+	for _, r := range rr[1:] {
+		prev := &out[len(out)-1]
+		switch {
+		case !r.IsValid():
+			// Invalid ranges make no sense to merge, refuse to
+			// perform.
+			return nil, false
+		case prev.to.Next() == r.from:
+			// prev and r touch, merge them.
+			//
+			//   prev     r
+			// f------tf-----t
+			prev.to = r.to
+		case prev.to.Less(r.from):
+			// No overlap and not adjacent (per previous case), no
+			// merging possible.
+			//
+			//   prev       r
+			// f------t  f-----t
+			out = append(out, r)
+		case prev.to.Less(r.to):
+			// Partial overlap, update prev
+			//
+			//   prev
+			// f------t
+			//     f-----t
+			//        r
+			prev.to = r.to
+		default:
+			// r entirely contained in prev, nothing to do.
+			//
+			//    prev
+			// f--------t
+			//  f-----t
+			//     r
+		}
+	}
+	return out, true
+}
+
+// Overlaps reports whether r and o overlap at all.
+//
+// If r and o are of different address families or either are invalid,
+// it reports false.
+func (r IPRange) Overlaps(o IPRange) bool {
+	return r.IsValid() &&
+		o.IsValid() &&
+		r.from.Compare(o.to) <= 0 &&
+		o.from.Compare(r.to) <= 0
+}
+
+// prefixMaker returns an address-family-corrected IPPrefix from a and bits,
+// where the input bits is always in the IPv6-mapped form for IPv4 addresses.
+type prefixMaker func(a uint128, bits uint8) IPPrefix
+
+// Prefixes returns the set of IPPrefix entries that covers r.
+//
+// If either of r's bounds are invalid, in the wrong order, or if
+// they're of different address families, then Prefixes returns nil.
+//
+// Prefixes necessarily allocates. See AppendPrefixes for a version that uses
+// memory you provide.
+func (r IPRange) Prefixes() []IPPrefix {
+	return r.AppendPrefixes(nil)
+}
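+
+// An illustrative sketch of Prefixes (added for clarity; not part of the
+// upstream netaddr source):
+//
+//	whole := MustParseIPRange("10.0.0.0-10.0.0.255")
+//	fmt.Println(whole.Prefixes()) // [10.0.0.0/24]
+//
+//	split := MustParseIPRange("10.0.0.1-10.0.0.2")
+//	fmt.Println(split.Prefixes()) // [10.0.0.1/32 10.0.0.2/32]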
+
+// AppendPrefixes is an append version of IPRange.Prefixes. It appends
+// the IPPrefix entries that cover r to dst.
+func (r IPRange) AppendPrefixes(dst []IPPrefix) []IPPrefix {
+	if !r.IsValid() {
+		return nil
+	}
+	return appendRangePrefixes(dst, r.prefixFrom128AndBits, r.from.addr, r.to.addr)
+}
+
+func (r IPRange) prefixFrom128AndBits(a uint128, bits uint8) IPPrefix {
+	ip := IP{addr: a, z: r.from.z}
+	if r.from.Is4() {
+		bits -= 12 * 8
+	}
+	return IPPrefix{ip, bits}
+}
+
+// aZeroBSet is whether, after the common bits, a is all zero bits and
+// b is all set (one) bits.
+func comparePrefixes(a, b uint128) (common uint8, aZeroBSet bool) {
+	common = a.commonPrefixLen(b)
+
+	// See whether a and b, after their common shared bits, end
+	// in all zero bits or all one bits, respectively.
+	if common == 128 {
+		return common, true
+	}
+
+	m := mask6[common]
+	return common, (a.xor(a.and(m)).isZero() &&
+		b.or(m) == uint128{^uint64(0), ^uint64(0)})
+}
+
+// Prefix returns r as an IPPrefix, if it can be presented exactly as such.
+// If r is not valid or is not exactly equal to one prefix, ok is false.
+func (r IPRange) Prefix() (p IPPrefix, ok bool) {
+	if !r.IsValid() {
+		return
+	}
+	if common, ok := comparePrefixes(r.from.addr, r.to.addr); ok {
+		return r.prefixFrom128AndBits(r.from.addr, common), true
+	}
+	return
+}
+
+func appendRangePrefixes(dst []IPPrefix, makePrefix prefixMaker, a, b uint128) []IPPrefix {
+	common, ok := comparePrefixes(a, b)
+	if ok {
+		// a to b represents a whole range, like 10.50.0.0/16.
+		// (a being 10.50.0.0 and b being 10.50.255.255)
+		return append(dst, makePrefix(a, common))
+	}
+	// Otherwise recursively do both halves.
+	dst = appendRangePrefixes(dst, makePrefix, a, a.bitsSetFrom(common+1))
+	dst = appendRangePrefixes(dst, makePrefix, b.bitsClearedFrom(common+1), b)
+	return dst
+}
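
For context on the vendored netaddr range logic above: mergeIPRanges sorts the input and then walks it once, either extending the previous range (touching or overlapping cases) or starting a new one, and AppendPrefixes recursively splits a range at the first bit where its two bounds diverge until each piece is exactly one CIDR block. A minimal sketch of the resulting behaviour through the package's public API, assuming the usual inet.af/netaddr constructors (MustParseIP, IPRangeFrom):

package main

import (
	"fmt"

	"inet.af/netaddr"
)

func main() {
	// 10.0.0.0-10.0.0.255 is exactly one /24, so Prefix reports ok.
	r := netaddr.IPRangeFrom(netaddr.MustParseIP("10.0.0.0"), netaddr.MustParseIP("10.0.0.255"))
	if p, ok := r.Prefix(); ok {
		fmt.Println(p) // 10.0.0.0/24
	}

	// An unaligned range decomposes into the minimal set of prefixes.
	r2 := netaddr.IPRangeFrom(netaddr.MustParseIP("10.0.0.1"), netaddr.MustParseIP("10.0.0.6"))
	fmt.Println(r2.Prefixes()) // [10.0.0.1/32 10.0.0.2/31 10.0.0.4/31 10.0.0.6/32]
}
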
diff --git a/vendor/inet.af/netaddr/uint128.go b/vendor/inet.af/netaddr/uint128.go
new file mode 100644
index 0000000000..2ba93f31be
--- /dev/null
+++ b/vendor/inet.af/netaddr/uint128.go
@@ -0,0 +1,82 @@
+// Copyright 2020 The Inet.Af AUTHORS. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package netaddr
+
+import "math/bits"
+
+// uint128 represents a uint128 using two uint64s.
+//
+// When the methods below mention a bit number, bit 0 is the most
+// significant bit (in hi) and bit 127 is the lowest (lo&1).
+type uint128 struct {
+	hi uint64
+	lo uint64
+}
+
+// isZero reports whether u == 0.
+//
+// It's faster than u == (uint128{}) because the compiler (as of Go
+// 1.15/1.16b1) doesn't do this trick itself and instead inserts a
+// branch in the code it generates for the equality comparison.
+func (u uint128) isZero() bool { return u.hi|u.lo == 0 }
+
+// and returns the bitwise AND of u and m (u&m).
+func (u uint128) and(m uint128) uint128 {
+	return uint128{u.hi & m.hi, u.lo & m.lo}
+}
+
+// xor returns the bitwise XOR of u and m (u^m).
+func (u uint128) xor(m uint128) uint128 {
+	return uint128{u.hi ^ m.hi, u.lo ^ m.lo}
+}
+
+// or returns the bitwise OR of u and m (u|m).
+func (u uint128) or(m uint128) uint128 {
+	return uint128{u.hi | m.hi, u.lo | m.lo}
+}
+
+// not returns the bitwise NOT of u.
+func (u uint128) not() uint128 {
+	return uint128{^u.hi, ^u.lo}
+}
+
+// subOne returns u - 1.
+func (u uint128) subOne() uint128 {
+	lo, borrow := bits.Sub64(u.lo, 1, 0)
+	return uint128{u.hi - borrow, lo}
+}
+
+// addOne returns u + 1.
+func (u uint128) addOne() uint128 {
+	lo, carry := bits.Add64(u.lo, 1, 0)
+	return uint128{u.hi + carry, lo}
+}
+
+func u64CommonPrefixLen(a, b uint64) uint8 {
+	return uint8(bits.LeadingZeros64(a ^ b))
+}
+
+func (u uint128) commonPrefixLen(v uint128) (n uint8) {
+	if n = u64CommonPrefixLen(u.hi, v.hi); n == 64 {
+		n += u64CommonPrefixLen(u.lo, v.lo)
+	}
+	return
+}
+
+func (u *uint128) halves() [2]*uint64 {
+	return [2]*uint64{&u.hi, &u.lo}
+}
+
+// bitsSetFrom returns a copy of u with the given bit
+// and all subsequent ones set.
+func (u uint128) bitsSetFrom(bit uint8) uint128 {
+	return u.or(mask6[bit].not())
+}
+
+// bitsClearedFrom returns a copy of u with the given bit
+// and all subsequent ones cleared.
+func (u uint128) bitsClearedFrom(bit uint8) uint128 {
+	return u.and(mask6[bit])
+}
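
The uint128 helper above treats bit 0 as the most significant bit of hi and bit 127 as the least significant bit of lo, so commonPrefixLen counts matching bits from the top and only looks at the low words when the high words are identical. A standalone sketch of that numbering (the vendored type itself is unexported), using only math/bits:

package main

import (
	"fmt"
	"math/bits"
)

// commonPrefixLen128 mirrors uint128.commonPrefixLen above: compare the
// high words first and fall through to the low words only when they match.
func commonPrefixLen128(aHi, aLo, bHi, bLo uint64) uint8 {
	n := uint8(bits.LeadingZeros64(aHi ^ bHi))
	if n == 64 {
		n += uint8(bits.LeadingZeros64(aLo ^ bLo))
	}
	return n
}

func main() {
	// The IPv4-mapped addresses ::ffff:10.0.0.0 and ::ffff:10.0.255.255
	// agree on their top 112 bits, i.e. a /16 in IPv4 terms (96 + 16).
	fmt.Println(commonPrefixLen128(0, 0x0000_ffff_0a00_0000, 0, 0x0000_ffff_0a00_ffff)) // 112
}
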
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto
index 9c6b728fa5..d0b190fd56 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto
@@ -231,7 +231,7 @@ message CustomResourceDefinitionSpec {
   // in the OpenAPI schema should be preserved when persisting to storage.
   // apiVersion, kind, metadata and known fields inside metadata are always preserved.
   // This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`.
-  // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details.
+  // See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details.
   // +optional
   optional bool preserveUnknownFields = 10;
 }
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go
index 223601bc45..285058d77a 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go
@@ -66,7 +66,7 @@ type CustomResourceDefinitionSpec struct {
 	// in the OpenAPI schema should be preserved when persisting to storage.
 	// apiVersion, kind, metadata and known fields inside metadata are always preserved.
 	// This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`.
-	// See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details.
+	// See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details.
 	// +optional
 	PreserveUnknownFields bool `json:"preserveUnknownFields,omitempty" protobuf:"varint,10,opt,name=preserveUnknownFields"`
 }
diff --git a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go
index 16df95fd14..5622c1a1be 100644
--- a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go
+++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go
@@ -53,17 +53,12 @@ import (
 	"k8s.io/klog/v2"
 
 	generatorargs "k8s.io/code-generator/cmd/deepcopy-gen/args"
-	"k8s.io/code-generator/pkg/util"
 )
 
 func main() {
 	klog.InitFlags(nil)
 	genericArgs, customArgs := generatorargs.NewDefaults()
 
-	// Override defaults.
-	// TODO: move this out of deepcopy-gen
-	genericArgs.GoHeaderFilePath = util.BoilerplatePath()
-
 	genericArgs.AddFlags(pflag.CommandLine)
 	customArgs.AddFlags(pflag.CommandLine)
 	flag.Set("logtostderr", "true")
diff --git a/vendor/k8s.io/code-generator/pkg/util/build.go b/vendor/k8s.io/code-generator/pkg/util/build.go
deleted file mode 100644
index 72ae683d8d..0000000000
--- a/vendor/k8s.io/code-generator/pkg/util/build.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
-	gobuild "go/build"
-	"os"
-	"path/filepath"
-	"reflect"
-	"strings"
-
-	"golang.org/x/tools/go/packages"
-)
-
-type empty struct{}
-
-// CurrentPackage returns the go package of the current directory, or "" if it cannot
-// be derived from the GOPATH.
-func CurrentPackage() string {
-	for _, root := range gobuild.Default.SrcDirs() {
-		if pkg, ok := hasSubdir(root, "."); ok {
-			return pkg
-		}
-	}
-	return ""
-}
-
-func hasSubdir(root, dir string) (rel string, ok bool) {
-	// ensure a tailing separator to properly compare on word-boundaries
-	const sep = string(filepath.Separator)
-	root = filepath.Clean(root)
-	if !strings.HasSuffix(root, sep) {
-		root += sep
-	}
-
-	// check whether root dir starts with root
-	dir = filepath.Clean(dir)
-	if !strings.HasPrefix(dir, root) {
-		return "", false
-	}
-
-	// cut off root
-	return filepath.ToSlash(dir[len(root):]), true
-}
-
-// BoilerplatePath returns the path to the boilerplate file in code-generator,
-// or "" if the default boilerplate.go.txt file cannot be located.
-func BoilerplatePath() string {
-	// set up paths to check
-	paths := []string{
-		// works when run from root of $GOPATH containing k8s.io/code-generator
-		filepath.Join(reflect.TypeOf(empty{}).PkgPath(), "/../../hack/boilerplate.go.txt"),
-		// works when run from root of module vendoring k8s.io/code-generator
-		"vendor/k8s.io/code-generator/hack/boilerplate.go.txt",
-		// works when run from root of $GOPATH containing k8s.io/kubernetes
-		"k8s.io/kubernetes/vendor/k8s.io/code-generator/hack/boilerplate.go.txt",
-	}
-
-	// see if we can locate the module directory and add that to the list
-	config := packages.Config{Mode: packages.NeedModule}
-	if loadedPackages, err := packages.Load(&config, "k8s.io/code-generator/pkg/util"); err == nil {
-		for _, loadedPackage := range loadedPackages {
-			if loadedPackage.Module != nil && loadedPackage.Module.Dir != "" {
-				paths = append(paths, filepath.Join(loadedPackage.Module.Dir, "hack/boilerplate.go.txt"))
-			}
-		}
-	}
-
-	// try all paths and return the first that exists
-	for _, path := range paths {
-		if _, err := os.Stat(path); err == nil {
-			return path
-		}
-	}
-	// cannot be located, invoker will have to explicitly specify boilerplate file
-	return ""
-}
-
-// Vendorless trims vendor prefix from a package path to make it canonical
-func Vendorless(p string) string {
-	if pos := strings.LastIndex(p, "/vendor/"); pos != -1 {
-		return p[pos+len("/vendor/"):]
-	}
-	return p
-}
diff --git a/vendor/k8s.io/code-generator/pkg/util/plural_exceptions.go b/vendor/k8s.io/code-generator/pkg/util/plural_exceptions.go
deleted file mode 100644
index 73c648d5b5..0000000000
--- a/vendor/k8s.io/code-generator/pkg/util/plural_exceptions.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
-	"fmt"
-	"strings"
-)
-
-// PluralExceptionListToMapOrDie converts the list in "Type:PluralType" to map[string]string.
-// This is used for pluralizer.
-// If the format is wrong, this function will panic.
-func PluralExceptionListToMapOrDie(pluralExceptions []string) map[string]string {
-	pluralExceptionMap := make(map[string]string, len(pluralExceptions))
-	for i := range pluralExceptions {
-		parts := strings.Split(pluralExceptions[i], ":")
-		if len(parts) != 2 {
-			panic(fmt.Sprintf("invalid plural exception definition: %s", pluralExceptions[i]))
-		}
-		pluralExceptionMap[parts[0]] = parts[1]
-	}
-	return pluralExceptionMap
-}
diff --git a/vendor/knative.dev/pkg/apis/duck/v1/destination.go b/vendor/knative.dev/pkg/apis/duck/v1/destination.go
index 15638f4018..8e067a99b5 100644
--- a/vendor/knative.dev/pkg/apis/duck/v1/destination.go
+++ b/vendor/knative.dev/pkg/apis/duck/v1/destination.go
@@ -18,6 +18,8 @@ package v1
 
 import (
 	"context"
+	"crypto/x509"
+	"encoding/pem"
 
 	"knative.dev/pkg/apis"
 )
@@ -53,6 +55,7 @@ func (d *Destination) Validate(ctx context.Context) *apis.FieldError {
 func ValidateDestination(ctx context.Context, dest Destination) *apis.FieldError {
 	ref := dest.Ref
 	uri := dest.URI
+	caCerts := dest.CACerts
 	if ref == nil && uri == nil {
 		return apis.ErrGeneric("expected at least one, got none", "ref", "uri")
 	}
@@ -67,6 +70,9 @@ func ValidateDestination(ctx context.Context, dest Destination) *apis.FieldError
 	if ref != nil && uri == nil {
 		return ref.Validate(ctx).ViaField("ref")
 	}
+	if caCerts != nil {
+		return validateCACerts(caCerts)
+	}
 	return nil
 }
 
@@ -88,3 +94,20 @@ func (d *Destination) SetDefaults(ctx context.Context) {
 		d.Ref.Namespace = apis.ParentMeta(ctx).Namespace
 	}
 }
+
+func validateCACerts(CACert *string) *apis.FieldError {
+	// Check the object.
+	var errs *apis.FieldError
+
+	block, rest := pem.Decode([]byte(*CACert)) // pem.Decode returns the remaining input, not an error
+	if rest != nil && block == nil {
+		errs = errs.Also(apis.ErrInvalidValue("CA Cert provided is invalid", "caCert"))
+		return errs
+	}
+	if block.Type != "CERTIFICATE" {
+		errs = errs.Also(apis.ErrInvalidValue("CA Cert provided is not a certificate", "caCert"))
+	} else if _, err := x509.ParseCertificate(block.Bytes); err != nil {
+		errs = errs.Also(apis.ErrInvalidValue("CA Cert provided is invalid", "caCert"))
+	}
+	return errs
+}
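
The new CACerts validation above accepts the destination only if the string decodes as a PEM block of type CERTIFICATE whose bytes parse as an X.509 certificate; pem.Decode's second return value is the remaining input rather than an error. A standard-library sketch of the same check (the helper name here is illustrative, not part of knative.dev/pkg):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// checkCACert mirrors the validation added above: the value must be a
// PEM "CERTIFICATE" block that parses as an X.509 certificate.
func checkCACert(caCert string) error {
	block, _ := pem.Decode([]byte(caCert)) // second return is the rest of the input, not an error
	if block == nil {
		return fmt.Errorf("CA Cert provided is invalid")
	}
	if block.Type != "CERTIFICATE" {
		return fmt.Errorf("CA Cert provided is not a certificate")
	}
	if _, err := x509.ParseCertificate(block.Bytes); err != nil {
		return fmt.Errorf("CA Cert provided is invalid: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(checkCACert("not a pem block")) // CA Cert provided is invalid
}
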
diff --git a/vendor/knative.dev/pkg/apis/duck/v1/kresource_type.go b/vendor/knative.dev/pkg/apis/duck/v1/kresource_type.go
index 491c13f1f6..1f6ee8264e 100644
--- a/vendor/knative.dev/pkg/apis/duck/v1/kresource_type.go
+++ b/vendor/knative.dev/pkg/apis/duck/v1/kresource_type.go
@@ -23,6 +23,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+
 	"knative.dev/pkg/apis/duck/ducktypes"
 
 	"knative.dev/pkg/apis"
@@ -41,6 +42,7 @@ type KRShaped interface {
 // Asserts KResource conformance with KRShaped
 var _ KRShaped = (*KResource)(nil)
 
+// +genduck
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // KResource is a skeleton type wrapping Conditions in the manner we expect
@@ -54,6 +56,11 @@ type KResource struct {
 	Status Status `json:"status"`
 }
 
+// GetFullType implements duck.Implementable
+func (*KResource) GetFullType() ducktypes.Populatable {
+	return &KResource{}
+}
+
 // Populate implements duck.Populatable
 func (t *KResource) Populate() {
 	t.Status.ObservedGeneration = 42
diff --git a/vendor/knative.dev/pkg/environment/client_config.go b/vendor/knative.dev/pkg/environment/client_config.go
index 04d4220b0a..aef33927ef 100644
--- a/vendor/knative.dev/pkg/environment/client_config.go
+++ b/vendor/knative.dev/pkg/environment/client_config.go
@@ -19,8 +19,10 @@ package environment
 import (
 	"flag"
 	"fmt"
+	"log"
 	"math"
 	"os"
+	"strconv"
 
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
@@ -45,9 +47,19 @@ func (c *ClientConfig) InitFlags(fs *flag.FlagSet) {
 	fs.StringVar(&c.Kubeconfig, "kubeconfig", os.Getenv("KUBECONFIG"),
 		"Path to a kubeconfig. Only required if out-of-cluster.")
 
-	fs.IntVar(&c.Burst, "kube-api-burst", 0, "Maximum burst for throttle.")
+	fs.IntVar(&c.Burst, "kube-api-burst", int(envVarOrDefault("KUBE_API_BURST", 0)), "Maximum burst for throttle.")
 
-	fs.Float64Var(&c.QPS, "kube-api-qps", 0, "Maximum QPS to the server from the client.")
+	fs.Float64Var(&c.QPS, "kube-api-qps", envVarOrDefault("KUBE_API_QPS", 0.0), "Maximum QPS to the server from the client.")
+}
+
+func envVarOrDefault(key string, val float64) float64 {
+	var err error
+	if v := os.Getenv(key); v != "" {
+		if val, err = strconv.ParseFloat(v, 64); err != nil {
+			log.Fatal(err)
+		}
+	}
+	return val
 }
 
 func (c *ClientConfig) GetRESTConfig() (*rest.Config, error) {
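
With the change above, KUBE_API_BURST and KUBE_API_QPS seed the defaults of the --kube-api-burst and --kube-api-qps flags, while explicit flags still override them (an unparseable value aborts via log.Fatal). A small sketch of how that plays out, assuming the exported Burst and QPS fields shown in the vendored file:

package main

import (
	"flag"
	"fmt"
	"os"

	"knative.dev/pkg/environment"
)

func main() {
	// Environment variables now provide the flag defaults.
	os.Setenv("KUBE_API_BURST", "50")
	os.Setenv("KUBE_API_QPS", "25.5")

	cfg := new(environment.ClientConfig)
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	cfg.InitFlags(fs)

	// No flags passed, so the env-derived defaults stick.
	_ = fs.Parse(nil)
	fmt.Println(cfg.Burst, cfg.QPS) // 50 25.5
}
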
diff --git a/vendor/knative.dev/pkg/injection/clients.go b/vendor/knative.dev/pkg/injection/clients.go
index 92e9912185..b71ef1d943 100644
--- a/vendor/knative.dev/pkg/injection/clients.go
+++ b/vendor/knative.dev/pkg/injection/clients.go
@@ -62,22 +62,3 @@ func (i *impl) FetchAllClients(ctx context.Context) []interface{} {
 	}
 	return clients
 }
-
-// DynamicClientInjector holds the type of a callback that attaches a particular
-// client type to a context.
-type DynamicClientInjector func(context.Context) context.Context
-
-func (i *impl) RegisterDynamicClient(ci DynamicClientInjector) {
-	i.m.Lock()
-	defer i.m.Unlock()
-
-	i.dynamicClients = append(i.dynamicClients, ci)
-}
-
-func (i *impl) GetDynamicClients() []DynamicClientInjector {
-	i.m.RLock()
-	defer i.m.RUnlock()
-
-	// Copy the slice before returning.
-	return append(i.dynamicClients[:0:0], i.dynamicClients...)
-}
diff --git a/vendor/knative.dev/pkg/injection/clients/dynamicclient/dynamicclient.go b/vendor/knative.dev/pkg/injection/clients/dynamicclient/dynamicclient.go
deleted file mode 100644
index 2eece5c555..0000000000
--- a/vendor/knative.dev/pkg/injection/clients/dynamicclient/dynamicclient.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package dynamicclient
-
-import (
-	"context"
-
-	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/rest"
-
-	"knative.dev/pkg/injection"
-	"knative.dev/pkg/logging"
-)
-
-func init() {
-	injection.Default.RegisterClient(withClient)
-}
-
-// Key is used as the key for associating information
-// with a context.Context.
-type Key struct{}
-
-func withClient(ctx context.Context, cfg *rest.Config) context.Context {
-	return context.WithValue(ctx, Key{}, dynamic.NewForConfigOrDie(cfg))
-}
-
-// Get extracts the Dynamic client from the context.
-func Get(ctx context.Context) dynamic.Interface {
-	untyped := ctx.Value(Key{})
-	if untyped == nil {
-		logging.FromContext(ctx).Panic(
-			"Unable to fetch k8s.io/client-go/dynamic.Interface from context.")
-	}
-	return untyped.(dynamic.Interface)
-}
diff --git a/vendor/knative.dev/pkg/injection/informers.go b/vendor/knative.dev/pkg/injection/informers.go
index ce5d481e88..9356f8d7f9 100644
--- a/vendor/knative.dev/pkg/injection/informers.go
+++ b/vendor/knative.dev/pkg/injection/informers.go
@@ -28,10 +28,6 @@ import (
 // informer type to a context.
 type InformerInjector func(context.Context) (context.Context, controller.Informer)
 
-// DynamicInformerInjector holds the type of a callback that attaches a particular
-// informer type (backed by a Dynamic) to a context.
-type DynamicInformerInjector func(context.Context) context.Context
-
 // FilteredInformersInjector holds the type of a callback that attaches a set of particular
 // filtered informers type to a context.
 type FilteredInformersInjector func(context.Context) (context.Context, []controller.Informer)
@@ -43,13 +39,6 @@ func (i *impl) RegisterInformer(ii InformerInjector) {
 	i.informers = append(i.informers, ii)
 }
 
-func (i *impl) RegisterDynamicInformer(ii DynamicInformerInjector) {
-	i.m.Lock()
-	defer i.m.Unlock()
-
-	i.dynamicInformers = append(i.dynamicInformers, ii)
-}
-
 func (i *impl) RegisterFilteredInformers(fii FilteredInformersInjector) {
 	i.m.Lock()
 	defer i.m.Unlock()
@@ -65,14 +54,6 @@ func (i *impl) GetInformers() []InformerInjector {
 	return append(i.informers[:0:0], i.informers...)
 }
 
-func (i *impl) GetDynamicInformers() []DynamicInformerInjector {
-	i.m.RLock()
-	defer i.m.RUnlock()
-
-	// Copy the slice before returning.
-	return append(i.dynamicInformers[:0:0], i.dynamicInformers...)
-}
-
 func (i *impl) GetFilteredInformers() []FilteredInformersInjector {
 	i.m.RLock()
 	defer i.m.RUnlock()
@@ -81,22 +62,6 @@ func (i *impl) GetFilteredInformers() []FilteredInformersInjector {
 	return append(i.filteredInformers[:0:0], i.filteredInformers...)
 }
 
-func (i *impl) SetupDynamic(ctx context.Context) context.Context {
-	// Based on the reconcilers we have linked, build up a set of clients and inject
-	// them onto the context.
-	for _, ci := range i.GetDynamicClients() {
-		ctx = ci(ctx)
-	}
-
-	// Based on the reconcilers we have linked, build up a set of informers
-	// and inject them onto the context.
-	for _, ii := range i.GetDynamicInformers() {
-		ctx = ii(ctx)
-	}
-
-	return ctx
-}
-
 func (i *impl) SetupInformers(ctx context.Context, cfg *rest.Config) (context.Context, []controller.Informer) {
 	// Based on the reconcilers we have linked, build up a set of clients and inject
 	// them onto the context.
diff --git a/vendor/knative.dev/pkg/injection/interface.go b/vendor/knative.dev/pkg/injection/interface.go
index 158864015f..c6d5715ad5 100644
--- a/vendor/knative.dev/pkg/injection/interface.go
+++ b/vendor/knative.dev/pkg/injection/interface.go
@@ -78,29 +78,6 @@ type Interface interface {
 	SetupInformers(context.Context, *rest.Config) (context.Context, []controller.Informer)
 }
 
-// DynamicInterface is the interface for interacting with dynamicclient-based injection
-// implementations, such as Dynamic below.
-type DynamicInterface interface {
-	// RegisterDynamicClient registers a new injector callback for associating
-	// a new dynamicclient-based client with a context.
-	RegisterDynamicClient(DynamicClientInjector)
-
-	// GetDynamicClients fetches all of the registered dynamicclient-based client injectors.
-	GetDynamicClients() []DynamicClientInjector
-
-	// RegisterDynamicInformer registers a new injector callback for associating
-	// a new dynamicclient-based informer with a context.
-	RegisterDynamicInformer(DynamicInformerInjector)
-
-	// GetDynamicInformers fetches all of the registered dynamicclient-based informer injectors.
-	GetDynamicInformers() []DynamicInformerInjector
-
-	// SetupDynamic runs all of the injectors against a context, starting with
-	// the clients and the given stream.  A context infused with the various elements
-	// is returned.
-	SetupDynamic(context.Context) context.Context
-}
-
 type ControllerConstructor func(context.Context, configmap.Watcher) *controller.Impl
 
 // NamedControllerConstructor is a ControllerConstructor with an associated name.
@@ -120,10 +97,6 @@ var (
 	// are being run for real.
 	Default Interface = &impl{}
 
-	// Dynamic is the injection interface to use when bootstrapping a version
-	// of things based on the prototype dynamicclient-based reconciler framework.
-	Dynamic DynamicInterface = &impl{}
-
 	// Fake is the injection interface with which informers should register
 	// to make themselves available to the controller process when it is being
 	// unit tested.
@@ -134,11 +107,9 @@ type impl struct {
 	m sync.RWMutex
 
 	clients           []ClientInjector
-	dynamicClients    []DynamicClientInjector
 	clientFetchers    []ClientFetcher
 	factories         []InformerFactoryInjector
 	informers         []InformerInjector
-	dynamicInformers  []DynamicInformerInjector
 	filteredInformers []FilteredInformersInjector
 	ducks             []DuckFactoryInjector
 }
diff --git a/vendor/knative.dev/pkg/version/version.go b/vendor/knative.dev/pkg/version/version.go
index dbf7daa69a..3a9a4a8cda 100644
--- a/vendor/knative.dev/pkg/version/version.go
+++ b/vendor/knative.dev/pkg/version/version.go
@@ -33,7 +33,7 @@ const (
 	// NOTE: If you are changing this line, please also update the minimum kubernetes
 	// version listed here:
 	// https://github.com/knative/docs/blob/mkdocs/docs/snippets/prerequisites.md
-	defaultMinimumVersion = "v1.24.0"
+	defaultMinimumVersion = "v1.25.0"
 )
 
 func getMinimumVersion() string {
diff --git a/vendor/knative.dev/pkg/webhook/webhook.go b/vendor/knative.dev/pkg/webhook/webhook.go
index 779d388d22..92dd01ae35 100644
--- a/vendor/knative.dev/pkg/webhook/webhook.go
+++ b/vendor/knative.dev/pkg/webhook/webhook.go
@@ -22,11 +22,14 @@ import (
 	"errors"
 	"fmt"
 	"html"
+	"log"
+	"net"
 	"net/http"
 	"time"
 
 	// Injection stuff
 
+	"knative.dev/pkg/controller"
 	kubeinformerfactory "knative.dev/pkg/injection/clients/namespacedkube/informers/factory"
 	"knative.dev/pkg/network/handlers"
 
@@ -67,6 +70,10 @@ type Options struct {
 	// GracePeriod is how long to wait after failing readiness probes
 	// before shutting down.
 	GracePeriod time.Duration
+
+	// ControllerOptions encapsulates options for creating a new controller,
+	// including throttling and stats behavior.
+	ControllerOptions *controller.ControllerOptions
 }
 
 // Operation is the verb being operated on
@@ -94,6 +101,9 @@ type Webhook struct {
 
 	// The TLS configuration to use for serving (or nil for non-TLS)
 	tlsConfig *tls.Config
+
+	// testListener is only used in testing so we don't get port conflicts
+	testListener net.Listener
 }
 
 // New constructs a Webhook
@@ -196,7 +206,6 @@ func New(
 		default:
 			return nil, fmt.Errorf("unknown webhook controller type:  %T", controller)
 		}
-
 	}
 
 	return
@@ -209,6 +218,15 @@ func (wh *Webhook) InformersHaveSynced() {
 	wh.Logger.Info("Informers have been synced, unblocking admission webhooks.")
 }
 
+type zapWrapper struct {
+	logger *zap.SugaredLogger
+}
+
+func (z *zapWrapper) Write(p []byte) (n int, err error) {
+	z.logger.Errorw(string(p))
+	return len(p), nil
+}
+
 // Run implements the admission controller run loop.
 func (wh *Webhook) Run(stop <-chan struct{}) error {
 	logger := wh.Logger
@@ -220,24 +238,34 @@ func (wh *Webhook) Run(stop <-chan struct{}) error {
 	}
 
 	server := &http.Server{
+		ErrorLog:          log.New(&zapWrapper{logger}, "", 0),
 		Handler:           drainer,
 		Addr:              fmt.Sprint(":", wh.Options.Port),
 		TLSConfig:         wh.tlsConfig,
 		ReadHeaderTimeout: time.Minute, //https://medium.com/a-journey-with-go/go-understand-and-mitigate-slowloris-attack-711c1b1403f6
 	}
 
+	var serve = server.ListenAndServe
+
+	if server.TLSConfig != nil && wh.testListener != nil {
+		serve = func() error {
+			return server.ServeTLS(wh.testListener, "", "")
+		}
+	} else if server.TLSConfig != nil {
+		serve = func() error {
+			return server.ListenAndServeTLS("", "")
+		}
+	} else if wh.testListener != nil {
+		serve = func() error {
+			return server.Serve(wh.testListener)
+		}
+	}
+
 	eg, ctx := errgroup.WithContext(ctx)
 	eg.Go(func() error {
-		if server.TLSConfig != nil {
-			if err := server.ListenAndServeTLS("", ""); err != nil && !errors.Is(err, http.ErrServerClosed) {
-				logger.Errorw("ListenAndServeTLS for admission webhook returned error", zap.Error(err))
-				return err
-			}
-		} else {
-			if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
-				logger.Errorw("ListenAndServe for admission webhook returned error", zap.Error(err))
-				return err
-			}
+		if err := serve(); err != nil && !errors.Is(err, http.ErrServerClosed) {
+			logger.Errorw("ListenAndServe for admission webhook returned error", zap.Error(err))
+			return err
 		}
 		return nil
 	})
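
The zapWrapper added above is a small io.Writer adapter: wiring it into http.Server.ErrorLog routes the server's internal error messages through the webhook's structured zap logger instead of the default stderr logger, and the new serve indirection picks ServeTLS/Serve on the injected testListener so tests avoid port conflicts. A standalone sketch of the logging pattern (type and variable names here are illustrative):

package main

import (
	"log"
	"net/http"

	"go.uber.org/zap"
)

// zapWriter adapts a zap.SugaredLogger to io.Writer so it can back the
// standard library *log.Logger used by http.Server.ErrorLog.
type zapWriter struct{ logger *zap.SugaredLogger }

func (z *zapWriter) Write(p []byte) (int, error) {
	z.logger.Errorw(string(p))
	return len(p), nil
}

func main() {
	logger := zap.NewExample().Sugar()
	srv := &http.Server{
		Addr:     ":8443",
		ErrorLog: log.New(&zapWriter{logger}, "", 0),
	}
	_ = srv // srv.ListenAndServeTLS(...) would now report internal errors via zap.
}
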
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 08db2c5ebf..bc2e3e5bda 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -25,7 +25,7 @@ cloud.google.com/go/firestore/internal
 ## explicit; go 1.19
 cloud.google.com/go/iam
 cloud.google.com/go/iam/apiv1/iampb
-# cloud.google.com/go/kms v1.15.0
+# cloud.google.com/go/kms v1.15.1
 ## explicit; go 1.19
 cloud.google.com/go/kms/apiv1
 cloud.google.com/go/kms/apiv1/kmspb
@@ -68,7 +68,7 @@ github.com/Antonboom/nilnil/pkg/analyzer
 ## explicit
 github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry
 github.com/Azure/azure-sdk-for-go/version
-# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0
+# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1
 ## explicit; go 1.18
 github.com/Azure/azure-sdk-for-go/sdk/azcore
 github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud
@@ -156,6 +156,41 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/public
 ## explicit; go 1.16
 github.com/BurntSushi/toml
 github.com/BurntSushi/toml/internal
+# github.com/DataDog/appsec-internal-go v1.0.0
+## explicit; go 1.18
+github.com/DataDog/appsec-internal-go/httpsec
+github.com/DataDog/appsec-internal-go/netip
+# github.com/DataDog/datadog-agent/pkg/obfuscate v0.45.0-rc.1
+## explicit; go 1.12
+github.com/DataDog/datadog-agent/pkg/obfuscate
+# github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.46.0-rc.4
+## explicit; go 1.18
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state
+# github.com/DataDog/datadog-go/v5 v5.3.0
+## explicit; go 1.13
+github.com/DataDog/datadog-go/v5/statsd
+# github.com/DataDog/go-libddwaf v1.4.1
+## explicit; go 1.18
+github.com/DataDog/go-libddwaf
+github.com/DataDog/go-libddwaf/internal/noopfree
+# github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork
+## explicit; go 1.16
+github.com/DataDog/go-tuf/client
+github.com/DataDog/go-tuf/data
+github.com/DataDog/go-tuf/internal/roles
+github.com/DataDog/go-tuf/internal/sets
+github.com/DataDog/go-tuf/pkg/keys
+github.com/DataDog/go-tuf/pkg/targets
+github.com/DataDog/go-tuf/util
+github.com/DataDog/go-tuf/verify
+# github.com/DataDog/sketches-go v1.2.1
+## explicit; go 1.15
+github.com/DataDog/sketches-go/ddsketch
+github.com/DataDog/sketches-go/ddsketch/encoding
+github.com/DataDog/sketches-go/ddsketch/mapping
+github.com/DataDog/sketches-go/ddsketch/pb/sketchpb
+github.com/DataDog/sketches-go/ddsketch/stat
+github.com/DataDog/sketches-go/ddsketch/store
 # github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24
 ## explicit; go 1.13
 github.com/Djarvur/go-err113
@@ -178,7 +213,7 @@ github.com/Microsoft/go-winio/pkg/guid
 ## explicit; go 1.20
 github.com/OpenPeeDeeP/depguard/v2
 github.com/OpenPeeDeeP/depguard/v2/internal/utils
-# github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903
+# github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95
 ## explicit; go 1.13
 github.com/ProtonMail/go-crypto/bitcurves
 github.com/ProtonMail/go-crypto/brainpool
@@ -259,7 +294,7 @@ github.com/ashanbrown/forbidigo/forbidigo
 # github.com/ashanbrown/makezero v1.1.1
 ## explicit; go 1.12
 github.com/ashanbrown/makezero/makezero
-# github.com/aws/aws-sdk-go v1.44.317
+# github.com/aws/aws-sdk-go v1.44.318
 ## explicit; go 1.11
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/auth/bearer
@@ -449,10 +484,18 @@ github.com/breml/bidichk/pkg/bidichk
 # github.com/breml/errchkjson v0.3.1
 ## explicit; go 1.17
 github.com/breml/errchkjson
-# github.com/buildkite/agent/v3 v3.49.0
+# github.com/buildkite/agent/v3 v3.52.1
 ## explicit; go 1.18
 github.com/buildkite/agent/v3/api
+github.com/buildkite/agent/v3/env
+github.com/buildkite/agent/v3/internal/ordered
+github.com/buildkite/agent/v3/internal/pipeline
 github.com/buildkite/agent/v3/logger
+github.com/buildkite/agent/v3/tracetools
+github.com/buildkite/agent/v3/version
+# github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251
+## explicit
+github.com/buildkite/interpolate
 # github.com/butuzov/ireturn v0.2.0
 ## explicit; go 1.15
 github.com/butuzov/ireturn/analyzer
@@ -562,13 +605,16 @@ github.com/daixiang0/gci/pkg/utils
 # github.com/davecgh/go-spew v1.1.1
 ## explicit
 github.com/davecgh/go-spew/spew
+# github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0
+## explicit; go 1.17
+github.com/decred/dcrd/dcrec/secp256k1/v4
 # github.com/denis-tingaikin/go-header v0.4.3
 ## explicit; go 1.17
 github.com/denis-tingaikin/go-header
-# github.com/digitorus/pkcs7 v0.0.0-20221212123742-001c36b64ec3
+# github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352
 ## explicit; go 1.13
 github.com/digitorus/pkcs7
-# github.com/digitorus/timestamp v0.0.0-20221019182153-ef3b63b79b31
+# github.com/digitorus/timestamp v0.0.0-20230821155606-d1ad5ca9624c
 ## explicit; go 1.16
 github.com/digitorus/timestamp
 # github.com/dimchansky/utfbom v1.1.1
@@ -590,6 +636,9 @@ github.com/docker/docker/pkg/homedir
 ## explicit; go 1.18
 github.com/docker/docker-credential-helpers/client
 github.com/docker/docker-credential-helpers/credentials
+# github.com/dustin/go-humanize v1.0.1
+## explicit; go 1.16
+github.com/dustin/go-humanize
 # github.com/eapache/go-resiliency v1.3.0
 ## explicit; go 1.13
 github.com/eapache/go-resiliency/breaker
@@ -599,6 +648,13 @@ github.com/eapache/go-xerial-snappy
 # github.com/eapache/queue v1.1.0
 ## explicit
 github.com/eapache/queue
+# github.com/ebitengine/purego v0.4.0-alpha.4.0.20230519103000-ee8dcecc618f
+## explicit; go 1.18
+github.com/ebitengine/purego
+github.com/ebitengine/purego/internal/abi
+github.com/ebitengine/purego/internal/cgo
+github.com/ebitengine/purego/internal/fakecgo
+github.com/ebitengine/purego/internal/strings
 # github.com/emicklei/go-restful/v3 v3.10.2
 ## explicit; go 1.13
 github.com/emicklei/go-restful/v3
@@ -727,7 +783,7 @@ github.com/go-playground/locales/currency
 # github.com/go-playground/universal-translator v0.18.1
 ## explicit; go 1.18
 github.com/go-playground/universal-translator
-# github.com/go-playground/validator/v10 v10.14.1
+# github.com/go-playground/validator/v10 v10.15.1
 ## explicit; go 1.18
 github.com/go-playground/validator/v10
 # github.com/go-toolsmith/astcast v1.1.0
@@ -764,6 +820,17 @@ github.com/gobwas/glob/syntax/ast
 github.com/gobwas/glob/syntax/lexer
 github.com/gobwas/glob/util/runes
 github.com/gobwas/glob/util/strings
+# github.com/goccy/go-json v0.10.2
+## explicit; go 1.12
+github.com/goccy/go-json
+github.com/goccy/go-json/internal/decoder
+github.com/goccy/go-json/internal/encoder
+github.com/goccy/go-json/internal/encoder/vm
+github.com/goccy/go-json/internal/encoder/vm_color
+github.com/goccy/go-json/internal/encoder/vm_color_indent
+github.com/goccy/go-json/internal/encoder/vm_indent
+github.com/goccy/go-json/internal/errors
+github.com/goccy/go-json/internal/runtime
 # github.com/gofrs/flock v0.8.1
 ## explicit
 github.com/gofrs/flock
@@ -917,9 +984,9 @@ github.com/google/go-containerregistry/pkg/authn/k8schain
 # github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa
 ## explicit; go 1.18
 github.com/google/go-containerregistry/pkg/authn/kubernetes
-# github.com/google/go-github/v50 v50.2.0
+# github.com/google/go-github/v53 v53.2.0
 ## explicit; go 1.17
-github.com/google/go-github/v50/github
+github.com/google/go-github/v53/github
 # github.com/google/go-licenses v1.6.0
 ## explicit; go 1.16
 github.com/google/go-licenses/internal/third_party/pkgsite/derrors
@@ -943,7 +1010,7 @@ github.com/google/licenseclassifier/stringclassifier
 github.com/google/licenseclassifier/stringclassifier/internal/pq
 github.com/google/licenseclassifier/stringclassifier/searchset
 github.com/google/licenseclassifier/stringclassifier/searchset/tokenizer
-# github.com/google/s2a-go v0.1.4
+# github.com/google/s2a-go v0.1.5
 ## explicit; go 1.16
 github.com/google/s2a-go
 github.com/google/s2a-go/fallback
@@ -964,6 +1031,7 @@ github.com/google/s2a-go/internal/v2
 github.com/google/s2a-go/internal/v2/certverifier
 github.com/google/s2a-go/internal/v2/remotesigner
 github.com/google/s2a-go/internal/v2/tlsconfigstore
+github.com/google/s2a-go/retry
 github.com/google/s2a-go/stream
 # github.com/google/uuid v1.3.0
 ## explicit
@@ -1002,7 +1070,7 @@ github.com/gostaticanalysis/nilerr
 ## explicit; go 1.20
 github.com/grafeas/grafeas/go/utils/intoto
 github.com/grafeas/grafeas/proto/v1/grafeas_go_proto
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0
 ## explicit; go 1.17
 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
 github.com/grpc-ecosystem/grpc-gateway/v2/runtime
@@ -1037,7 +1105,7 @@ github.com/hashicorp/go-uuid
 # github.com/hashicorp/go-version v1.6.0
 ## explicit
 github.com/hashicorp/go-version
-# github.com/hashicorp/golang-lru v0.6.0
+# github.com/hashicorp/golang-lru v1.0.2
 ## explicit; go 1.12
 github.com/hashicorp/golang-lru
 github.com/hashicorp/golang-lru/simplelru
@@ -1205,6 +1273,35 @@ github.com/leonklingele/grouper/pkg/analyzer/globals
 github.com/leonklingele/grouper/pkg/analyzer/imports
 github.com/leonklingele/grouper/pkg/analyzer/types
 github.com/leonklingele/grouper/pkg/analyzer/vars
+# github.com/lestrrat-go/blackmagic v1.0.1
+## explicit; go 1.16
+github.com/lestrrat-go/blackmagic
+# github.com/lestrrat-go/httpcc v1.0.1
+## explicit; go 1.16
+github.com/lestrrat-go/httpcc
+# github.com/lestrrat-go/httprc v1.0.4
+## explicit; go 1.17
+github.com/lestrrat-go/httprc
+# github.com/lestrrat-go/iter v1.0.2
+## explicit; go 1.13
+github.com/lestrrat-go/iter/arrayiter
+github.com/lestrrat-go/iter/mapiter
+# github.com/lestrrat-go/jwx/v2 v2.0.11
+## explicit; go 1.16
+github.com/lestrrat-go/jwx/v2/cert
+github.com/lestrrat-go/jwx/v2/internal/base64
+github.com/lestrrat-go/jwx/v2/internal/ecutil
+github.com/lestrrat-go/jwx/v2/internal/iter
+github.com/lestrrat-go/jwx/v2/internal/json
+github.com/lestrrat-go/jwx/v2/internal/keyconv
+github.com/lestrrat-go/jwx/v2/internal/pool
+github.com/lestrrat-go/jwx/v2/jwa
+github.com/lestrrat-go/jwx/v2/jwk
+github.com/lestrrat-go/jwx/v2/jws
+github.com/lestrrat-go/jwx/v2/x25519
+# github.com/lestrrat-go/option v1.0.1
+## explicit; go 1.16
+github.com/lestrrat-go/option
 # github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf
 ## explicit; go 1.18
 github.com/letsencrypt/boulder/core
@@ -1239,10 +1336,10 @@ github.com/matoous/godox
 # github.com/mattn/go-colorable v0.1.13
 ## explicit; go 1.15
 github.com/mattn/go-colorable
-# github.com/mattn/go-isatty v0.0.17
+# github.com/mattn/go-isatty v0.0.19
 ## explicit; go 1.15
 github.com/mattn/go-isatty
-# github.com/mattn/go-runewidth v0.0.13
+# github.com/mattn/go-runewidth v0.0.14
 ## explicit; go 1.9
 github.com/mattn/go-runewidth
 # github.com/matttproud/golang_protobuf_extensions v1.0.4
@@ -1312,6 +1409,9 @@ github.com/nunnatsa/ginkgolinter/version
 # github.com/oklog/ulid v1.3.1
 ## explicit
 github.com/oklog/ulid
+# github.com/oleiade/reflections v1.0.1
+## explicit; go 1.15
+github.com/oleiade/reflections
 # github.com/olekukonko/tablewriter v0.0.5
 ## explicit; go 1.12
 github.com/olekukonko/tablewriter
@@ -1330,6 +1430,11 @@ github.com/opentracing/opentracing-go/log
 # github.com/openzipkin/zipkin-go v0.3.0
 ## explicit; go 1.14
 github.com/openzipkin/zipkin-go/model
+# github.com/outcaste-io/ristretto v0.2.1
+## explicit; go 1.12
+github.com/outcaste-io/ristretto
+github.com/outcaste-io/ristretto/z
+github.com/outcaste-io/ristretto/z/simd
 # github.com/pborman/uuid v1.2.1
 ## explicit
 github.com/pborman/uuid
@@ -1340,6 +1445,9 @@ github.com/pelletier/go-toml/v2/internal/characters
 github.com/pelletier/go-toml/v2/internal/danger
 github.com/pelletier/go-toml/v2/internal/tracker
 github.com/pelletier/go-toml/v2/unstable
+# github.com/philhofer/fwd v1.1.2
+## explicit; go 1.15
+github.com/philhofer/fwd
 # github.com/pierrec/lz4/v4 v4.1.18
 ## explicit; go 1.14
 github.com/pierrec/lz4/v4
@@ -1368,7 +1476,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint
 # github.com/prometheus/client_model v0.4.0
 ## explicit; go 1.18
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.42.0
+# github.com/prometheus/common v0.44.0
 ## explicit; go 1.18
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
@@ -1382,6 +1490,9 @@ github.com/prometheus/procfs/internal/util
 ## explicit; go 1.13
 github.com/prometheus/statsd_exporter/pkg/mapper
 github.com/prometheus/statsd_exporter/pkg/mapper/fsm
+# github.com/puzpuzpuz/xsync/v2 v2.4.1
+## explicit; go 1.18
+github.com/puzpuzpuz/xsync/v2
 # github.com/quasilyte/go-ruleguard v0.4.0
 ## explicit; go 1.19
 github.com/quasilyte/go-ruleguard/internal/goenv
@@ -1443,6 +1554,7 @@ github.com/sassoftware/relic/lib/x509tools
 ## explicit; go 1.20
 github.com/secure-systems-lab/go-securesystemslib/cjson
 github.com/secure-systems-lab/go-securesystemslib/dsse
+github.com/secure-systems-lab/go-securesystemslib/encrypted
 github.com/secure-systems-lab/go-securesystemslib/signerverifier
 # github.com/securego/gosec/v2 v2.17.0
 ## explicit; go 1.20
@@ -1451,6 +1563,15 @@ github.com/securego/gosec/v2/analyzers
 github.com/securego/gosec/v2/cwe
 github.com/securego/gosec/v2/issue
 github.com/securego/gosec/v2/rules
+# github.com/segmentio/asm v1.2.0
+## explicit; go 1.18
+github.com/segmentio/asm/base64
+github.com/segmentio/asm/cpu
+github.com/segmentio/asm/cpu/arm
+github.com/segmentio/asm/cpu/arm64
+github.com/segmentio/asm/cpu/cpuid
+github.com/segmentio/asm/cpu/x86
+github.com/segmentio/asm/internal/unsafebytes
 # github.com/segmentio/ksuid v1.0.4
 ## explicit; go 1.12
 github.com/segmentio/ksuid
@@ -1463,14 +1584,13 @@ github.com/shazow/go-diff/difflib
 # github.com/shibumi/go-pathspec v1.3.0
 ## explicit; go 1.17
 github.com/shibumi/go-pathspec
-# github.com/sigstore/cosign/v2 v2.1.1
-## explicit; go 1.19
+# github.com/sigstore/cosign/v2 v2.2.0
+## explicit; go 1.20
 github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio
 github.com/sigstore/cosign/v2/cmd/cosign/cli/options
 github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/privacy
 github.com/sigstore/cosign/v2/internal/pkg/cosign
 github.com/sigstore/cosign/v2/internal/pkg/cosign/fulcio/fulcioroots
-github.com/sigstore/cosign/v2/internal/pkg/now
 github.com/sigstore/cosign/v2/internal/pkg/oci/remote
 github.com/sigstore/cosign/v2/internal/ui
 github.com/sigstore/cosign/v2/pkg/blob
@@ -1503,7 +1623,7 @@ github.com/sigstore/cosign/v2/pkg/providers/google
 github.com/sigstore/cosign/v2/pkg/providers/spiffe
 github.com/sigstore/cosign/v2/pkg/signature
 github.com/sigstore/cosign/v2/pkg/types
-# github.com/sigstore/fulcio v1.3.1
+# github.com/sigstore/fulcio v1.4.0
 ## explicit; go 1.20
 github.com/sigstore/fulcio/pkg/api
 # github.com/sigstore/rekor v1.2.2
@@ -1558,7 +1678,7 @@ github.com/sigstore/sigstore/pkg/signature/kms/gcp
 # github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.2
 ## explicit; go 1.19
 github.com/sigstore/sigstore/pkg/signature/kms/hashivault
-# github.com/sigstore/timestamp-authority v1.1.1
+# github.com/sigstore/timestamp-authority v1.1.2
 ## explicit; go 1.20
 github.com/sigstore/timestamp-authority/pkg/verification
 # github.com/sirupsen/logrus v1.9.3
@@ -1669,11 +1789,12 @@ github.com/t-yuki/gocover-cobertura
 # github.com/tdakkota/asciicheck v0.2.0
 ## explicit; go 1.18
 github.com/tdakkota/asciicheck
-# github.com/tektoncd/pipeline v0.50.1
+# github.com/tektoncd/pipeline v0.51.0
 ## explicit; go 1.19
 github.com/tektoncd/pipeline/pkg/apis/config
 github.com/tektoncd/pipeline/pkg/apis/config/resolver
 github.com/tektoncd/pipeline/pkg/apis/pipeline
+github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum
 github.com/tektoncd/pipeline/pkg/apis/pipeline/pod
 github.com/tektoncd/pipeline/pkg/apis/pipeline/v1
 github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1
@@ -1761,8 +1882,8 @@ github.com/tektoncd/pipeline/pkg/spire/config
 github.com/tektoncd/pipeline/pkg/substitution
 github.com/tektoncd/pipeline/test
 github.com/tektoncd/pipeline/test/diff
-# github.com/tektoncd/plumbing v0.0.0-20221102182345-5dbcfda657d7
-## explicit; go 1.13
+# github.com/tektoncd/plumbing v0.0.0-20230907180608-5625252a2de1
+## explicit; go 1.19
 github.com/tektoncd/plumbing
 github.com/tektoncd/plumbing/scripts
 # github.com/tetafro/godot v1.4.14
@@ -1771,8 +1892,8 @@ github.com/tetafro/godot
 # github.com/thales-e-security/pool v0.0.2
 ## explicit; go 1.12
 github.com/thales-e-security/pool
-# github.com/theupdateframework/go-tuf v0.5.2
-## explicit; go 1.18
+# github.com/theupdateframework/go-tuf v0.6.1
+## explicit; go 1.20
 github.com/theupdateframework/go-tuf
 github.com/theupdateframework/go-tuf/client
 github.com/theupdateframework/go-tuf/client/leveldbstore
@@ -1799,6 +1920,9 @@ github.com/timonwong/loggercheck/internal/checkers/printf
 github.com/timonwong/loggercheck/internal/rules
 github.com/timonwong/loggercheck/internal/sets
 github.com/timonwong/loggercheck/internal/stringutil
+# github.com/tinylib/msgp v1.1.8
+## explicit; go 1.15
+github.com/tinylib/msgp/msgp
 # github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399
 ## explicit
 github.com/titanous/rocacheck
@@ -1831,7 +1955,7 @@ github.com/uudashr/gocognit
 # github.com/vbatts/tar-split v0.11.3
 ## explicit; go 1.15
 github.com/vbatts/tar-split/archive/tar
-# github.com/xanzy/go-gitlab v0.86.0
+# github.com/xanzy/go-gitlab v0.90.0
 ## explicit; go 1.18
 github.com/xanzy/go-gitlab
 # github.com/xanzy/ssh-agent v0.3.3
@@ -1952,8 +2076,8 @@ go.opentelemetry.io/otel/metric/embedded
 # go.opentelemetry.io/otel/trace v1.16.0
 ## explicit; go 1.19
 go.opentelemetry.io/otel/trace
-# go.step.sm/crypto v0.32.2
-## explicit; go 1.18
+# go.step.sm/crypto v0.35.0
+## explicit; go 1.20
 go.step.sm/crypto/fingerprint
 go.step.sm/crypto/internal/bcrypt_pbkdf
 go.step.sm/crypto/internal/emoji
@@ -1989,6 +2113,12 @@ go.uber.org/zap/internal/pool
 go.uber.org/zap/internal/ztest
 go.uber.org/zap/zapcore
 go.uber.org/zap/zaptest
+# go4.org/intern v0.0.0-20211027215823-ae77deb06f29
+## explicit; go 1.13
+go4.org/intern
+# go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760
+## explicit; go 1.11
+go4.org/unsafe/assume-no-moving-gc
 # gocloud.dev v0.33.0
 ## explicit; go 1.19
 gocloud.dev/aws
@@ -2104,6 +2234,7 @@ golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
+golang.org/x/sys/windows/registry
 # golang.org/x/term v0.11.0
 ## explicit; go 1.17
 golang.org/x/term
@@ -2207,7 +2338,7 @@ golang.org/x/xerrors/internal
 # gomodules.xyz/jsonpatch/v2 v2.2.0
 ## explicit; go 1.12
 gomodules.xyz/jsonpatch/v2
-# google.golang.org/api v0.134.0
+# google.golang.org/api v0.138.0
 ## explicit; go 1.19
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport
@@ -2242,7 +2373,7 @@ google.golang.org/appengine/internal/socket
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/socket
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20230731193218-e0aa005b6bdf
+# google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/cloud/location
 google.golang.org/genproto/googleapis/type/date
@@ -2250,12 +2381,12 @@ google.golang.org/genproto/googleapis/type/expr
 google.golang.org/genproto/googleapis/type/latlng
 google.golang.org/genproto/internal
 google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/genproto/googleapis/api v0.0.0-20230731193218-e0aa005b6bdf
+# google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20230731193218-e0aa005b6bdf
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/errdetails
@@ -2363,6 +2494,37 @@ google.golang.org/protobuf/types/known/fieldmaskpb
 google.golang.org/protobuf/types/known/structpb
 google.golang.org/protobuf/types/known/timestamppb
 google.golang.org/protobuf/types/known/wrapperspb
+# gopkg.in/DataDog/dd-trace-go.v1 v1.53.0
+## explicit; go 1.18
+gopkg.in/DataDog/dd-trace-go.v1/ddtrace
+gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext
+gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal
+gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer
+gopkg.in/DataDog/dd-trace-go.v1/internal
+gopkg.in/DataDog/dd-trace-go.v1/internal/appsec
+gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo
+gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation
+gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/grpcsec
+gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/httpsec
+gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/sharedsec
+gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig
+gopkg.in/DataDog/dd-trace-go.v1/internal/hostname
+gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/azure
+gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/cachedfetch
+gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/ec2
+gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/ecs
+gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/gce
+gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/httputils
+gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/validate
+gopkg.in/DataDog/dd-trace-go.v1/internal/log
+gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema
+gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer
+gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo
+gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig
+gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames
+gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry
+gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof
+gopkg.in/DataDog/dd-trace-go.v1/internal/version
 # gopkg.in/inf.v0 v0.9.1
 ## explicit
 gopkg.in/inf.v0
@@ -2465,6 +2627,9 @@ honnef.co/go/tools/staticcheck/fakereflect
 honnef.co/go/tools/staticcheck/fakexml
 honnef.co/go/tools/stylecheck
 honnef.co/go/tools/unused
+# inet.af/netaddr v0.0.0-20220811202034-502d2d690317
+## explicit; go 1.12
+inet.af/netaddr
 # k8s.io/api v0.27.3
 ## explicit; go 1.20
 k8s.io/api/admission/v1
@@ -2520,7 +2685,7 @@ k8s.io/api/scheduling/v1beta1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.25.4
+# k8s.io/apiextensions-apiserver v0.26.5
 ## explicit; go 1.19
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
@@ -2883,11 +3048,10 @@ k8s.io/client-go/util/homedir
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/code-generator v0.25.9
+# k8s.io/code-generator v0.26.5
 ## explicit; go 1.19
 k8s.io/code-generator/cmd/deepcopy-gen
 k8s.io/code-generator/cmd/deepcopy-gen/args
-k8s.io/code-generator/pkg/util
 # k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9
 ## explicit; go 1.13
 k8s.io/gengo/args
@@ -2930,7 +3094,7 @@ k8s.io/utils/net
 k8s.io/utils/pointer
 k8s.io/utils/strings/slices
 k8s.io/utils/trace
-# knative.dev/pkg v0.0.0-20230518105712-dfb4bf04635d
+# knative.dev/pkg v0.0.0-20230718152110-aef227e72ead
 ## explicit; go 1.18
 knative.dev/pkg/apis
 knative.dev/pkg/apis/duck
@@ -2957,7 +3121,6 @@ knative.dev/pkg/depcheck
 knative.dev/pkg/environment
 knative.dev/pkg/hash
 knative.dev/pkg/injection
-knative.dev/pkg/injection/clients/dynamicclient
 knative.dev/pkg/injection/clients/namespacedkube/informers/factory
 knative.dev/pkg/injection/sharedmain
 knative.dev/pkg/kmap