From 2970bd2076c8a96b26f87b02e83d235aad8d42af Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Apr 2024 23:13:29 +0000 Subject: [PATCH 01/10] Bump go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp (#186) --- impl/go.mod | 13 ++++++------- impl/go.sum | 34 ++++++++++++++-------------------- 2 files changed, 20 insertions(+), 27 deletions(-) diff --git a/impl/go.mod b/impl/go.mod index 5a718523..0647d759 100644 --- a/impl/go.mod +++ b/impl/go.mod @@ -34,7 +34,7 @@ require ( go.opentelemetry.io/contrib/instrumentation/runtime v0.49.0 go.opentelemetry.io/otel v1.25.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 go.opentelemetry.io/otel/sdk v1.25.0 go.opentelemetry.io/otel/sdk/metric v1.25.0 go.opentelemetry.io/otel/trace v1.25.0 @@ -57,7 +57,7 @@ require ( github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/bytedance/sonic v1.11.3 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect github.com/chenzhuoyu/iasm v0.9.1 // indirect github.com/cloudflare/circl v1.3.7 // indirect @@ -76,7 +76,6 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.19.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect @@ -130,7 +129,7 @@ require ( github.com/swaggo/swag v1.8.12 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 // indirect go.opentelemetry.io/otel/metric v1.25.0 // indirect go.opentelemetry.io/proto/otlp v1.1.0 // indirect go.uber.org/atomic v1.9.0 // indirect @@ -144,9 +143,9 @@ require ( golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.17.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/grpc v1.61.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/grpc v1.63.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/impl/go.sum b/impl/go.sum index d4625ed9..20e1b4b3 100644 --- a/impl/go.sum +++ b/impl/go.sum @@ -100,8 +100,8 @@ github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1 github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= github.com/bytedance/sonic v1.11.3 h1:jRN+yEjakWh8aK5FzrciUHG8OFXK+4/KrAX/ysEtHAA= github.com/bytedance/sonic v1.11.3/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= -github.com/cenkalti/backoff/v4 
v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= @@ -225,9 +225,6 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -236,7 +233,6 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -552,10 +548,10 @@ go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 h1:mM8nKi6/iFQ0iqst80wDHU2ge198Ye/TfN0WBS5U24Y= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0/go.mod h1:0PrIIzDteLSmNyxqcGYRL4mDIo8OTuBAOI/Bn1URxac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1 h1:o8iWeVFa1BcLtVEV0LzrCxV2/55tB3xLxADr6Kyoey4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1/go.mod h1:SEVfdK4IoBnbT2FXNM/k8yC08MrfbhWk3U4ljM8B3HE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1 h1:cfuy3bXmLJS7M1RZmAL6SuhGtKUp2KEsrm00OlAXkq4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1/go.mod h1:22jr92C6KwlwItJmQzfixzQM3oyyuYLCfHiMY+rpsPU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 h1:dT33yIHtmsqpixFsSQPwNeY5drM9wTcoL8h0FWF4oGM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0/go.mod h1:h95q0LBGh7hlAC08X2DhSeyIG02YQ0UyioTCVAqRPmc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp 
v1.25.0 h1:Mbi5PKN7u322woPa85d7ebZ+SOvEoPvoiBu+ryHWgfA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0/go.mod h1:e7ciERRhZaOZXVjx5MiL8TK5+Xv7G5Gv5PA2ZDEJdL8= go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA= go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= go.opentelemetry.io/otel/sdk v1.25.0 h1:PDryEJPC8YJZQSyLY5eqLeafHtG+X7FWnf3aXMtxbqo= @@ -667,24 +663,22 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 h1:YJ5pD9rF8o9Qtta0Cmy9rdBwkSjrTCT6XTiUQVOtIos= -google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY= -google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM= -google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= -google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= From db4db5c9444f0359abab7346d23fea00fb8310f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Apr 2024 23:13:49 +0000 Subject: [PATCH 02/10] Bump go.opentelemetry.io/contrib/instrumentation/runtime in /impl (#185) --- impl/go.mod | 2 +- impl/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/impl/go.mod b/impl/go.mod index 0647d759..647d136a 100644 --- a/impl/go.mod +++ b/impl/go.mod @@ -31,7 +31,7 @@ require ( github.com/tv42/zbase32 v0.0.0-20220222190657-f76a9fc892fa go.etcd.io/bbolt v1.3.9 go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.50.0 - go.opentelemetry.io/contrib/instrumentation/runtime v0.49.0 + go.opentelemetry.io/contrib/instrumentation/runtime v0.50.0 go.opentelemetry.io/otel v1.25.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 diff --git a/impl/go.sum b/impl/go.sum index 20e1b4b3..7005ecd4 100644 --- a/impl/go.sum +++ b/impl/go.sum @@ -540,8 +540,8 @@ go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.50.0 h1:LLz31zcmHs6aB8bi2we+tzO0tr5oW7yZp3x06qR5YoI= go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.50.0/go.mod h1:UdPyzt6g4yEwcz9QjnCC1HB2yqadJgpFo9m5ddGweU0= -go.opentelemetry.io/contrib/instrumentation/runtime v0.49.0 h1:dg9y+7ArpumB6zwImJv47RHfdgOGQ1EMkzP5vLkEnTU= -go.opentelemetry.io/contrib/instrumentation/runtime v0.49.0/go.mod h1:Ul4MtXqu/hJBM+v7a6dCF0nHwckPMLpIpLeCi4+zfdw= +go.opentelemetry.io/contrib/instrumentation/runtime v0.50.0 h1:6dck47miguAOny5MeqX1G8idd+HpzDFt86U33d7aW2I= +go.opentelemetry.io/contrib/instrumentation/runtime v0.50.0/go.mod h1:rdPhRwNd2sHiRmwJAGs8xcwitqmP/j8pvl9X5jloYjU= go.opentelemetry.io/contrib/propagators/b3 v1.25.0 h1:QU8UEKyPqgr/8vCC9LlDmkPnfFmiWAUF9GtJdcLz+BU= go.opentelemetry.io/contrib/propagators/b3 v1.25.0/go.mod h1:qonC7wyvtX1E6cEpAR+bJmhcGr6IVRGc/f6ZTpvi7jA= go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= From 09e4939a5920dbf632cd36e3b3408ce9fbe1b8bd Mon Sep 17 00:00:00 2001 From: Gabe <7622243+decentralgabe@users.noreply.github.com> Date: Fri, 19 Apr 2024 16:37:25 -0700 Subject: [PATCH 03/10] renames + better logging around republishing (#191) * renames * update swagger and config * add failure tracking * migration * fix query * update migration to make it simpler * lints --- .github/workflows/dependabot-auto.yml | 28 +++ impl/cmd/cli/identity.go | 4 +- impl/concurrencytest/main.go | 2 +- impl/config/config.go | 22 +- impl/config/config.toml | 2 - impl/docs/swagger.yaml | 12 +- impl/integrationtest/main.go | 2 +- impl/internal/did/client_test.go | 2 +- impl/internal/did/did.go | 2 +- impl/pkg/dht/{pkarr.go => dns.go} | 14 +- impl/pkg/dht/{pkarr_test.go => dns_test.go} | 12 +- impl/pkg/{pkarr => dht}/record.go | 55 
+++-- impl/pkg/{pkarr => dht}/record_test.go | 29 ++- impl/pkg/server/{pkarr.go => dht.go} | 48 ++-- .../pkg/server/{pkarr_test.go => dht_test.go} | 44 ++-- impl/pkg/server/server.go | 24 +- impl/pkg/service/{pkarr.go => dht.go} | 229 ++++++++++-------- .../service/{pkarr_test.go => dht_test.go} | 63 +++-- impl/pkg/storage/db/bolt/bolt.go | 158 ++++++++++-- impl/pkg/storage/db/bolt/bolt_test.go | 29 ++- impl/pkg/storage/db/bolt/{pkarr.go => dht.go} | 18 +- .../00001_create_dht_records_table.sql | 17 ++ .../00001_create_pkarr_records_table.sql | 11 - impl/pkg/storage/db/postgres/models.go | 7 +- impl/pkg/storage/db/postgres/postgres.go | 100 ++++++-- impl/pkg/storage/db/postgres/postgres_test.go | 18 +- impl/pkg/storage/db/postgres/queries.sql.go | 77 +++++- .../storage/db/postgres/queries/queries.sql | 21 +- impl/pkg/storage/storage.go | 13 +- impl/pkg/telemetry/telemetry.go | 2 +- impl/sqlc.yaml | 10 + sqlc.yaml | 10 - 32 files changed, 708 insertions(+), 377 deletions(-) create mode 100644 .github/workflows/dependabot-auto.yml rename impl/pkg/dht/{pkarr.go => dns.go} (72%) rename impl/pkg/dht/{pkarr_test.go => dns_test.go} (91%) rename impl/pkg/{pkarr => dht}/record.go (57%) rename impl/pkg/{pkarr => dht}/record_test.go (55%) rename impl/pkg/server/{pkarr.go => dht.go} (69%) rename impl/pkg/server/{pkarr_test.go => dht_test.go} (89%) rename impl/pkg/service/{pkarr.go => dht.go} (54%) rename impl/pkg/service/{pkarr_test.go => dht_test.go} (71%) rename impl/pkg/storage/db/bolt/{pkarr.go => dht.go} (67%) create mode 100644 impl/pkg/storage/db/postgres/migrations/00001_create_dht_records_table.sql delete mode 100644 impl/pkg/storage/db/postgres/migrations/00001_create_pkarr_records_table.sql create mode 100644 impl/sqlc.yaml delete mode 100644 sqlc.yaml diff --git a/.github/workflows/dependabot-auto.yml b/.github/workflows/dependabot-auto.yml new file mode 100644 index 00000000..0f657b9f --- /dev/null +++ b/.github/workflows/dependabot-auto.yml @@ -0,0 +1,28 @@ +name: Dependabot auto-approve and merge +on: pull_request + +permissions: + contents: write + pull-requests: write + +jobs: + approve: + runs-on: ubuntu-latest + if: github.actor == 'dependabot[bot]' + steps: + - name: Dependabot metadata + id: metadata + uses: dependabot/fetch-metadata@v2 + with: + github-token: "${{ secrets.GITHUB_TOKEN }}" + - name: Approve a PR + run: gh pr review --approve "$PR_URL" + env: + PR_URL: ${{github.event.pull_request.html_url}} + GH_TOKEN: ${{secrets.GITHUB_TOKEN}} + - name: Enable auto-merge for Dependabot PRs + if: contains(steps.metadata.outputs.dependency-names, 'my-dependency') && steps.metadata.outputs.update-type == 'version-update:semver-patch' + run: gh pr merge --auto --merge "$PR_URL" + env: + PR_URL: ${{github.event.pull_request.html_url}} + GH_TOKEN: ${{secrets.GITHUB_TOKEN}} \ No newline at end of file diff --git a/impl/cmd/cli/identity.go b/impl/cmd/cli/identity.go index e1516645..ec4a099c 100644 --- a/impl/cmd/cli/identity.go +++ b/impl/cmd/cli/identity.go @@ -103,7 +103,7 @@ var identityAddCmd = &cobra.Command{ Answer: rrds, } // generate put request - putReq, err := dht.CreatePkarrPublishRequest(privKey, msg) + putReq, err := dht.CreateDNSPublishRequest(privKey, msg) if err != nil { logrus.WithError(err).Error("failed to create put request") return err @@ -170,7 +170,7 @@ var identityGetCmd = &cobra.Command{ return err } - msg, err := dht.ParsePkarrGetResponse(*gotResp) + msg, err := dht.ParseDNSGetResponse(*gotResp) if err != nil { logrus.WithError(err).Error("failed to parse get 
response") return err diff --git a/impl/concurrencytest/main.go b/impl/concurrencytest/main.go index 345b67e5..9f3fbc27 100644 --- a/impl/concurrencytest/main.go +++ b/impl/concurrencytest/main.go @@ -119,7 +119,7 @@ func generateDIDPutRequest() (string, []byte, error) { return "", nil, err } - bep44Put, err := dht.CreatePkarrPublishRequest(sk, *packet) + bep44Put, err := dht.CreateDNSPublishRequest(sk, *packet) if err != nil { return "", nil, err } diff --git a/impl/config/config.go b/impl/config/config.go index 2075d04e..b2f9d5a2 100644 --- a/impl/config/config.go +++ b/impl/config/config.go @@ -41,10 +41,9 @@ func (e EnvironmentVariable) String() string { } type Config struct { - Log LogConfig `toml:"log"` - ServerConfig ServerConfig `toml:"server"` - DHTConfig DHTServiceConfig `toml:"dht"` - PkarrConfig PkarrServiceConfig `toml:"pkarr"` + Log LogConfig `toml:"log"` + ServerConfig ServerConfig `toml:"server"` + DHTConfig DHTServiceConfig `toml:"dht"` } type ServerConfig struct { @@ -57,13 +56,10 @@ type ServerConfig struct { } type DHTServiceConfig struct { - BootstrapPeers []string `toml:"bootstrap_peers"` -} - -type PkarrServiceConfig struct { - RepublishCRON string `toml:"republish_cron"` - CacheTTLSeconds int `toml:"cache_ttl_seconds"` - CacheSizeLimitMB int `toml:"cache_size_limit_mb"` + BootstrapPeers []string `toml:"bootstrap_peers"` + RepublishCRON string `toml:"republish_cron"` + CacheTTLSeconds int `toml:"cache_ttl_seconds"` + CacheSizeLimitMB int `toml:"cache_size_limit_mb"` } type LogConfig struct { @@ -81,9 +77,7 @@ func GetDefaultConfig() Config { Telemetry: false, }, DHTConfig: DHTServiceConfig{ - BootstrapPeers: GetDefaultBootstrapPeers(), - }, - PkarrConfig: PkarrServiceConfig{ + BootstrapPeers: GetDefaultBootstrapPeers(), RepublishCRON: "0 */3 * * *", CacheTTLSeconds: 600, CacheSizeLimitMB: 1000, diff --git a/impl/config/config.toml b/impl/config/config.toml index 65cf88ac..b48239a6 100644 --- a/impl/config/config.toml +++ b/impl/config/config.toml @@ -9,8 +9,6 @@ telemetry = false [dht] bootstrap_peers = ["router.magnets.im:6881", "router.bittorrent.com:6881", "dht.transmissionbt.com:6881", "router.utorrent.com:6881", "router.nuh.dev:6881"] - -[pkarr] republish_cron = "0 */3 * * *" # every 3 hours cache_ttl_seconds = 600 # 10 minutes cache_size_limit_mb = 1000 # 1000 MB \ No newline at end of file diff --git a/impl/docs/swagger.yaml b/impl/docs/swagger.yaml index b8389710..95ee37d7 100644 --- a/impl/docs/swagger.yaml +++ b/impl/docs/swagger.yaml @@ -20,7 +20,7 @@ paths: get: consumes: - application/octet-stream - description: GetRecord a Pkarr record from the DHT + description: GetRecord a BEP44 DNS record from the DHT parameters: - description: ID to get in: path @@ -48,13 +48,13 @@ paths: description: Internal server error schema: type: string - summary: GetRecord a Pkarr record from the DHT + summary: GetRecord a BEP44 DNS record from the DHT tags: - - Pkarr + - DHT put: consumes: - application/octet-stream - description: PutRecord a Pkarr record into the DHT + description: PutRecord a BEP44 DNS record into the DHT parameters: - description: ID of the record to put in: path @@ -80,9 +80,9 @@ paths: description: Internal server error schema: type: string - summary: PutRecord a Pkarr record into the DHT + summary: PutRecord a BEP44 DNS record into the DHT tags: - - Pkarr + - DHT /health: get: consumes: diff --git a/impl/integrationtest/main.go b/impl/integrationtest/main.go index 94949f5d..18ed29c7 100644 --- a/impl/integrationtest/main.go +++ 
b/impl/integrationtest/main.go @@ -103,7 +103,7 @@ func generateDIDPutRequest() (string, []byte, error) { return "", nil, err } - bep44Put, err := dht.CreatePkarrPublishRequest(sk, *packet) + bep44Put, err := dht.CreateDNSPublishRequest(sk, *packet) if err != nil { return "", nil, err } diff --git a/impl/internal/did/client_test.go b/impl/internal/did/client_test.go index 42ceecd3..cea9c9e7 100644 --- a/impl/internal/did/client_test.go +++ b/impl/internal/did/client_test.go @@ -27,7 +27,7 @@ func TestClient(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - bep44Put, err := dht.CreatePkarrPublishRequest(sk, *packet) + bep44Put, err := dht.CreateDNSPublishRequest(sk, *packet) assert.NoError(t, err) assert.NotEmpty(t, bep44Put) diff --git a/impl/internal/did/did.go b/impl/internal/did/did.go index c8d4709b..5750cd2b 100644 --- a/impl/internal/did/did.go +++ b/impl/internal/did/did.go @@ -314,7 +314,7 @@ func (d DHT) ToDNSPacket(doc did.Document, types []TypeIndex, gateways []Authori keyType := keyTypeForJWK(*vm.PublicKeyJWK) if keyType < 0 { - return nil, fmt.Errorf("+unsupported key type given alg: %s", vm.PublicKeyJWK.ALG) + return nil, fmt.Errorf("unsupported key type given alg: %s", vm.PublicKeyJWK.ALG) } // convert the public key to a base64url encoded string diff --git a/impl/pkg/dht/pkarr.go b/impl/pkg/dht/dns.go similarity index 72% rename from impl/pkg/dht/pkarr.go rename to impl/pkg/dht/dns.go index 2265c5c3..eb374213 100644 --- a/impl/pkg/dht/pkarr.go +++ b/impl/pkg/dht/dns.go @@ -12,13 +12,13 @@ import ( "github.com/TBD54566975/did-dht-method/internal/dht" ) -// CreatePkarrPublishRequest creates a put request for the given records. Requires a public/private keypair and the records to put. -// The records are expected to be a DNS message packet, such as: +// CreateDNSPublishRequest creates a put request for the given records. Requires a public/private keypair and +// the records to put. The records are expected to be a DNS message packet, such as: // // dns.Msg{ // MsgHdr: dns.MsgHdr{ // Id: 0, -// Response: true, +// BEP44Response: true, // Authoritative: true, // }, // Answer: dns.RR{ @@ -30,11 +30,11 @@ import ( // Ttl: 7200, // }, // Txt: []string{ -// "hello pkarr", +// "hello mainline", // }, // } // } -func CreatePkarrPublishRequest(privateKey ed25519.PrivateKey, msg dns.Msg) (*bep44.Put, error) { +func CreateDNSPublishRequest(privateKey ed25519.PrivateKey, msg dns.Msg) (*bep44.Put, error) { packed, err := msg.Pack() if err != nil { return nil, util.LoggingErrorMsg(err, "failed to pack records") @@ -49,9 +49,9 @@ func CreatePkarrPublishRequest(privateKey ed25519.PrivateKey, msg dns.Msg) (*bep return put, nil } -// ParsePkarrGetResponse parses the response from a get request. +// ParseDNSGetResponse parses the response from a get request. // The response is expected to be a slice of DNS resource records. 
-func ParsePkarrGetResponse(response dht.FullGetResult) (*dns.Msg, error) { +func ParseDNSGetResponse(response dht.FullGetResult) (*dns.Msg, error) { var payload string if err := bencode.Unmarshal(response.V, &payload); err != nil { return nil, util.LoggingErrorMsg(err, "failed to unmarshal payload value") diff --git a/impl/pkg/dht/pkarr_test.go b/impl/pkg/dht/dns_test.go similarity index 91% rename from impl/pkg/dht/pkarr_test.go rename to impl/pkg/dht/dns_test.go index f83992b1..431e4a0c 100644 --- a/impl/pkg/dht/pkarr_test.go +++ b/impl/pkg/dht/dns_test.go @@ -16,7 +16,7 @@ import ( "github.com/TBD54566975/did-dht-method/internal/util" ) -func TestGetPutPkarrDHT(t *testing.T) { +func TestGetPutDNSDHT(t *testing.T) { dht := NewTestDHT(t) defer dht.Close() @@ -31,7 +31,7 @@ func TestGetPutPkarrDHT(t *testing.T) { Ttl: 7200, }, Txt: []string{ - "hello pkarr", + "hello mainline", }, } msg := dns.Msg{ @@ -42,7 +42,7 @@ func TestGetPutPkarrDHT(t *testing.T) { }, Answer: []dns.RR{&txtRecord}, } - put, err := CreatePkarrPublishRequest(privKey, msg) + put, err := CreateDNSPublishRequest(privKey, msg) require.NoError(t, err) id, err := dht.Put(context.Background(), *put) @@ -53,7 +53,7 @@ func TestGetPutPkarrDHT(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, got) - gotMsg, err := ParsePkarrGetResponse(*got) + gotMsg, err := ParseDNSGetResponse(*got) require.NoError(t, err) require.NotEmpty(t, gotMsg.Answer) @@ -103,7 +103,7 @@ func TestGetPutDIDDHT(t *testing.T) { didDocPacket, err := didID.ToDNSPacket(*doc, nil, nil) require.NoError(t, err) - putReq, err := CreatePkarrPublishRequest(privKey, *didDocPacket) + putReq, err := CreateDNSPublishRequest(privKey, *didDocPacket) require.NoError(t, err) gotID, err := dht.Put(context.Background(), *putReq) @@ -114,7 +114,7 @@ func TestGetPutDIDDHT(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, got) - gotMsg, err := ParsePkarrGetResponse(*got) + gotMsg, err := ParseDNSGetResponse(*got) require.NoError(t, err) require.NotEmpty(t, gotMsg.Answer) diff --git a/impl/pkg/pkarr/record.go b/impl/pkg/dht/record.go similarity index 57% rename from impl/pkg/pkarr/record.go rename to impl/pkg/dht/record.go index 862624c4..ea87e9be 100644 --- a/impl/pkg/pkarr/record.go +++ b/impl/pkg/dht/record.go @@ -1,4 +1,4 @@ -package pkarr +package dht import ( "bytes" @@ -14,40 +14,47 @@ import ( "github.com/tv42/zbase32" ) -type Response struct { +type BEP44Response struct { V []byte `validate:"required"` Seq int64 `validate:"required"` Sig [64]byte `validate:"required"` } // Equals returns true if the response is equal to the other response -func (r Response) Equals(other Response) bool { +func (r BEP44Response) Equals(other BEP44Response) bool { return r.Seq == other.Seq && bytes.Equal(r.V, other.V) && r.Sig == other.Sig } -type Record struct { +// BEP44Record represents a record in the DHT +type BEP44Record struct { Value []byte `json:"v" validate:"required"` Key [32]byte `json:"k" validate:"required"` Signature [64]byte `json:"sig" validate:"required"` SequenceNumber int64 `json:"seq" validate:"required"` } -// NewRecord returns a new Record with the given key, value, signature, and sequence number -func NewRecord(k []byte, v []byte, sig []byte, seq int64) (*Record, error) { - record := Record{SequenceNumber: seq} +// FailedRecord represents a record that failed to be written to the DHT +type FailedRecord struct { + ID string `json:"id"` + Count int `json:"count"` +} + +// NewBEP44Record returns a new BEP44Record with the given key, value, signature, 
and sequence number +func NewBEP44Record(k []byte, v []byte, sig []byte, seq int64) (*BEP44Record, error) { + record := BEP44Record{SequenceNumber: seq} if len(k) != 32 { - return nil, errors.New("incorrect key length for pkarr record") + return nil, errors.New("incorrect key length for bep44 record") } record.Key = [32]byte(k) if len(v) > 1000 { - return nil, errors.New("pkarr record value too long") + return nil, errors.New("bep44 record value too long") } record.Value = v if len(sig) != 64 { - return nil, errors.New("incorrect sig length for pkarr record") + return nil, errors.New("incorrect sig length for bep44 record") } record.Signature = [64]byte(sig) @@ -59,7 +66,7 @@ func NewRecord(k []byte, v []byte, sig []byte, seq int64) (*Record, error) { } // IsValid returns an error if the request is invalid; also validates the signature -func (r Record) IsValid() error { +func (r BEP44Record) IsValid() error { if err := util.IsValidStruct(r); err != nil { return err } @@ -67,7 +74,7 @@ func (r Record) IsValid() error { // validate the signature bv, err := bencode.Marshal(r.Value) if err != nil { - return fmt.Errorf("error bencoding pkarr record: %v", err) + return fmt.Errorf("error bencoding bep44 record: %v", err) } if !bep44.Verify(r.Key[:], nil, r.SequenceNumber, bv, r.Signature[:]) { @@ -76,17 +83,17 @@ func (r Record) IsValid() error { return nil } -// Response returns the record as a Response -func (r Record) Response() Response { - return Response{ +// Response returns the record as a BEP44Response +func (r BEP44Record) Response() BEP44Response { + return BEP44Response{ V: r.Value, Seq: r.SequenceNumber, Sig: r.Signature, } } -// BEP44 returns the record as a BEP44 Put message -func (r Record) BEP44() bep44.Put { +// Put returns the record as a bep44.Put message +func (r BEP44Record) Put() bep44.Put { return bep44.Put{ V: r.Value, K: &r.Key, @@ -96,18 +103,18 @@ func (r Record) BEP44() bep44.Put { } // String returns a string representation of the record -func (r Record) String() string { +func (r BEP44Record) String() string { e := base64.RawURLEncoding - return fmt.Sprintf("pkarr.Record{K=%s V=%s Sig=%s Seq=%d}", zbase32.EncodeToString(r.Key[:]), e.EncodeToString(r.Value), e.EncodeToString(r.Signature[:]), r.SequenceNumber) + return fmt.Sprintf("dht.BEP44Record{K=%s V=%s Sig=%s Seq=%d}", zbase32.EncodeToString(r.Key[:]), e.EncodeToString(r.Value), e.EncodeToString(r.Signature[:]), r.SequenceNumber) } // ID returns the base32 encoded key as a string -func (r Record) ID() string { +func (r BEP44Record) ID() string { return zbase32.EncodeToString(r.Key[:]) } // Hash returns the SHA256 hash of the record as a string -func (r Record) Hash() (string, error) { +func (r BEP44Record) Hash() (string, error) { recordBytes, err := json.Marshal(r) if err != nil { return "", err @@ -115,9 +122,9 @@ func (r Record) Hash() (string, error) { return string(sha256.New().Sum(recordBytes)), nil } -// RecordFromBEP44 returns a Record from a BEP44 Put message -func RecordFromBEP44(putMsg *bep44.Put) Record { - return Record{ +// RecordFromBEP44 returns a BEP44Record from a bep44.Put message +func RecordFromBEP44(putMsg *bep44.Put) BEP44Record { + return BEP44Record{ Key: *putMsg.K, Value: putMsg.V.([]byte), Signature: putMsg.Sig, diff --git a/impl/pkg/pkarr/record_test.go b/impl/pkg/dht/record_test.go similarity index 55% rename from impl/pkg/pkarr/record_test.go rename to impl/pkg/dht/record_test.go index 614a292d..4be14fce 100644 --- a/impl/pkg/pkarr/record_test.go +++ b/impl/pkg/dht/record_test.go @@ 
-1,4 +1,4 @@ -package pkarr_test +package dht_test import ( "strings" @@ -9,13 +9,12 @@ import ( "github.com/TBD54566975/did-dht-method/internal/did" "github.com/TBD54566975/did-dht-method/pkg/dht" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" ) func TestNewRecord(t *testing.T) { // validate incorrect key length is rejected - r, err := pkarr.NewRecord([]byte("aaaaaaaaaaa"), nil, nil, 0) - assert.EqualError(t, err, "incorrect key length for pkarr record") + r, err := dht.NewBEP44Record([]byte("aaaaaaaaaaa"), nil, nil, 0) + assert.EqualError(t, err, "incorrect key length for bep44 record") assert.Nil(t, r) // create a did doc as a packet to store @@ -27,30 +26,30 @@ func TestNewRecord(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) - r, err = pkarr.NewRecord(putMsg.K[:], []byte(strings.Repeat("a", 1001)), putMsg.Sig[:], putMsg.Seq) - assert.EqualError(t, err, "pkarr record value too long") + r, err = dht.NewBEP44Record(putMsg.K[:], []byte(strings.Repeat("a", 1001)), putMsg.Sig[:], putMsg.Seq) + assert.EqualError(t, err, "bep44 record value too long") assert.Nil(t, r) - r, err = pkarr.NewRecord(putMsg.K[:], putMsg.V.([]byte), []byte(strings.Repeat("a", 65)), putMsg.Seq) - assert.EqualError(t, err, "incorrect sig length for pkarr record") + r, err = dht.NewBEP44Record(putMsg.K[:], putMsg.V.([]byte), []byte(strings.Repeat("a", 65)), putMsg.Seq) + assert.EqualError(t, err, "incorrect sig length for bep44 record") assert.Nil(t, r) - r, err = pkarr.NewRecord(putMsg.K[:], putMsg.V.([]byte), putMsg.Sig[:], 0) - assert.EqualError(t, err, "Key: 'Record.SequenceNumber' Error:Field validation for 'SequenceNumber' failed on the 'required' tag") + r, err = dht.NewBEP44Record(putMsg.K[:], putMsg.V.([]byte), putMsg.Sig[:], 0) + assert.EqualError(t, err, "Key: 'BEP44Record.SequenceNumber' Error:Field validation for 'SequenceNumber' failed on the 'required' tag") assert.Nil(t, r) - r, err = pkarr.NewRecord(putMsg.K[:], putMsg.V.([]byte), putMsg.Sig[:], 1) + r, err = dht.NewBEP44Record(putMsg.K[:], putMsg.V.([]byte), putMsg.Sig[:], 1) assert.EqualError(t, err, "signature is invalid") assert.Nil(t, r) - r, err = pkarr.NewRecord(putMsg.K[:], putMsg.V.([]byte), putMsg.Sig[:], putMsg.Seq) + r, err = dht.NewBEP44Record(putMsg.K[:], putMsg.V.([]byte), putMsg.Sig[:], putMsg.Seq) assert.NoError(t, err) - bep := r.BEP44() + bep := r.Put() assert.Equal(t, putMsg.K, bep.K) assert.Equal(t, putMsg.V, bep.V) assert.Equal(t, putMsg.Sig, bep.Sig) @@ -61,7 +60,7 @@ func TestNewRecord(t *testing.T) { assert.Equal(t, r.SequenceNumber, resp.Seq) assert.Equal(t, r.Signature, resp.Sig) - r2 := pkarr.RecordFromBEP44(putMsg) + r2 := dht.RecordFromBEP44(putMsg) assert.Equal(t, r.Key, r2.Key) assert.Equal(t, r.Value, r2.Value) assert.Equal(t, r.Signature, r2.Signature) diff --git a/impl/pkg/server/pkarr.go b/impl/pkg/server/dht.go similarity index 69% rename from impl/pkg/server/pkarr.go rename to impl/pkg/server/dht.go index d5c88903..966909c0 100644 --- a/impl/pkg/server/pkarr.go +++ b/impl/pkg/server/dht.go @@ -11,26 +11,26 @@ import ( "github.com/gin-gonic/gin" "github.com/TBD54566975/did-dht-method/internal/util" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" + "github.com/TBD54566975/did-dht-method/pkg/dht" "github.com/TBD54566975/did-dht-method/pkg/service" "github.com/TBD54566975/did-dht-method/pkg/telemetry" ) -// PkarrRouter is 
the router for the Pkarr API -type PkarrRouter struct { - service *service.PkarrService +// DHTRouter is the router for the DHT API +type DHTRouter struct { + service *service.DHTService } -// NewPkarrRouter returns a new instance of the Relay router -func NewPkarrRouter(service *service.PkarrService) (*PkarrRouter, error) { - return &PkarrRouter{service: service}, nil +// NewDHTRouter returns a new instance of the DHT router +func NewDHTRouter(service *service.DHTService) (*DHTRouter, error) { + return &DHTRouter{service: service}, nil } // GetRecord godoc // -// @Summary GetRecord a Pkarr record from the DHT -// @Description GetRecord a Pkarr record from the DHT -// @Tags Pkarr +// @Summary GetRecord a BEP44 DNS record from the DHT +// @Description GetRecord a BEP44 DNS record from the DHT +// @Tags DHT // @Accept octet-stream // @Produce octet-stream // @Param id path string true "ID to get" @@ -39,8 +39,8 @@ func NewPkarrRouter(service *service.PkarrService) (*PkarrRouter, error) { // @Failure 404 {string} string "Not found" // @Failure 500 {string} string "Internal server error" // @Router /{id} [get] -func (r *PkarrRouter) GetRecord(c *gin.Context) { - ctx, span := telemetry.GetTracer().Start(c, "PkarrHTTP.GetRecord") +func (r *DHTRouter) GetRecord(c *gin.Context) { + ctx, span := telemetry.GetTracer().Start(c, "DHTHTTP.GetRecord") defer span.End() id := GetParam(c, IDParam) @@ -60,23 +60,22 @@ func (r *PkarrRouter) GetRecord(c *gin.Context) { return } - resp, err := r.service.GetPkarr(ctx, *id) + resp, err := r.service.GetDHT(ctx, *id) if err != nil { // TODO(gabe): provide a more maintainable way to handle custom errors if strings.Contains(err.Error(), "spam") { LoggingRespondErrMsg(c, fmt.Sprintf("too many requests for bad key %s", *id), http.StatusTooManyRequests) return } - LoggingRespondErrWithMsg(c, err, "failed to get pkarr record", http.StatusInternalServerError) + LoggingRespondErrWithMsg(c, err, "failed to get dht record", http.StatusInternalServerError) return } if resp == nil { - LoggingRespondErrMsg(c, "pkarr record not found", http.StatusNotFound) + LoggingRespondErrMsg(c, "dht record not found", http.StatusNotFound) return } // Convert int64 to uint64 since binary.PutUint64 expects a uint64 value - // according to https://github.com/Nuhvi/pkarr/blob/main/design/relays.md#get var seqBuf [8]byte binary.BigEndian.PutUint64(seqBuf[:], uint64(resp.Seq)) // sig:seq:v @@ -86,9 +85,9 @@ func (r *PkarrRouter) GetRecord(c *gin.Context) { // PutRecord godoc // -// @Summary PutRecord a Pkarr record into the DHT -// @Description PutRecord a Pkarr record into the DHT -// @Tags Pkarr +// @Summary PutRecord a BEP44 DNS record into the DHT +// @Description PutRecord a BEP44 DNS record into the DHT +// @Tags DHT // @Accept octet-stream // @Param id path string true "ID of the record to put" // @Param request body []byte true "64 bytes sig, 8 bytes u64 big-endian seq, 0-1000 bytes of v." 
@@ -96,8 +95,8 @@ func (r *PkarrRouter) GetRecord(c *gin.Context) { // @Failure 400 {string} string "Bad request" // @Failure 500 {string} string "Internal server error" // @Router /{id} [put] -func (r *PkarrRouter) PutRecord(c *gin.Context) { - ctx, span := telemetry.GetTracer().Start(c, "PkarrHTTP.PutRecord") +func (r *DHTRouter) PutRecord(c *gin.Context) { + ctx, span := telemetry.GetTracer().Start(c, "DHTHTTP.PutRecord") defer span.End() id := GetParam(c, IDParam) @@ -129,18 +128,17 @@ func (r *PkarrRouter) PutRecord(c *gin.Context) { } // transform the request into a service request by extracting the fields - // according to https://github.com/Nuhvi/pkarr/blob/main/design/relays.md#put value := body[72:] sig := body[:64] seq := int64(binary.BigEndian.Uint64(body[64:72])) - request, err := pkarr.NewRecord(key, value, sig, seq) + request, err := dht.NewBEP44Record(key, value, sig, seq) if err != nil { LoggingRespondErrWithMsg(c, err, "error parsing request", http.StatusBadRequest) return } - if err = r.service.PublishPkarr(ctx, *id, *request); err != nil { - LoggingRespondErrWithMsg(c, err, "failed to publish pkarr record", http.StatusInternalServerError) + if err = r.service.PublishDHT(ctx, *id, *request); err != nil { + LoggingRespondErrWithMsg(c, err, "failed to publish dht record", http.StatusInternalServerError) return } diff --git a/impl/pkg/server/pkarr_test.go b/impl/pkg/server/dht_test.go similarity index 89% rename from impl/pkg/server/pkarr_test.go rename to impl/pkg/server/dht_test.go index fe43655c..64e068d3 100644 --- a/impl/pkg/server/pkarr_test.go +++ b/impl/pkg/server/dht_test.go @@ -19,13 +19,13 @@ import ( "github.com/TBD54566975/did-dht-method/pkg/storage" ) -func TestPkarrRouter(t *testing.T) { - pkarrSvc := testPkarrService(t) - pkarrRouter, err := NewPkarrRouter(&pkarrSvc) +func TestDHTRouter(t *testing.T) { + dhtSvc := testDHTService(t) + dhtRouter, err := NewDHTRouter(&dhtSvc) require.NoError(t, err) - require.NotEmpty(t, pkarrRouter) + require.NotEmpty(t, dhtRouter) - defer pkarrSvc.Close() + defer dhtSvc.Close() t.Run("test put record", func(t *testing.T) { didID, reqData := generateDIDPutRequest(t) @@ -36,7 +36,7 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/%s", testServerURL, suffix), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.True(t, is2xxResponse(w.Code), "unexpected %s", w.Result().Status) }) @@ -49,14 +49,14 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/%s", testServerURL, suffix), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.True(t, is2xxResponse(w.Code), "unexpected %s", w.Result().Status) w = httptest.NewRecorder() req = httptest.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", testServerURL, suffix), nil) c = newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.GetRecord(c) + dhtRouter.GetRecord(c) assert.True(t, is2xxResponse(w.Code), "unexpected %s", w.Result().Status) resp, err := io.ReadAll(w.Body) @@ -74,14 +74,14 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/%s", testServerURL, suffix), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.PutRecord(c) + 
dhtRouter.PutRecord(c) assert.True(t, is2xxResponse(w.Code), "unexpected %s", w.Result().Status) w = httptest.NewRecorder() req = httptest.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", testServerURL, suffix), nil) c = newRequestContextWithParams(w, req, map[string]string{}) - pkarrRouter.GetRecord(c) + dhtRouter.GetRecord(c) assert.Equal(t, http.StatusBadRequest, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) @@ -93,7 +93,7 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/", testServerURL), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.Equal(t, http.StatusBadRequest, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) @@ -106,7 +106,7 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/%s", testServerURL, suffix), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.Equal(t, http.StatusInternalServerError, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) @@ -121,7 +121,7 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/%s", testServerURL, suffix), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.Equal(t, http.StatusBadRequest, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) @@ -134,7 +134,7 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/%s", testServerURL, suffix), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.Equal(t, http.StatusBadRequest, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) @@ -143,7 +143,7 @@ func TestPkarrRouter(t *testing.T) { suffix := "uqaj3fcr9db6jg6o9pjs53iuftyj45r46aubogfaceqjbo6pp9sy" req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", testServerURL, suffix), nil) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.GetRecord(c) + dhtRouter.GetRecord(c) assert.Equal(t, http.StatusNotFound, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) @@ -152,18 +152,18 @@ func TestPkarrRouter(t *testing.T) { suffix := "cz13drbfxy3ih6xun4mw3cyiexrtfcs9gyp46o4469e93y36zhsy" req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", testServerURL, suffix), nil) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.GetRecord(c) + dhtRouter.GetRecord(c) assert.Equal(t, http.StatusNotFound, w.Result().StatusCode, "unexpected %s", w.Result().Status) w = httptest.NewRecorder() req = httptest.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", testServerURL, suffix), nil) c = newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.GetRecord(c) + dhtRouter.GetRecord(c) assert.Equal(t, http.StatusTooManyRequests, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) } -func testPkarrService(t *testing.T) service.PkarrService { +func testDHTService(t *testing.T) service.DHTService { defaultConfig := config.GetDefaultConfig() db, err := storage.NewStorage(defaultConfig.ServerConfig.StorageURI) @@ -171,11 +171,11 @@ func testPkarrService(t *testing.T) 
service.PkarrService { require.NotEmpty(t, db) dht := dht.NewTestDHT(t) - pkarrService, err := service.NewPkarrService(&defaultConfig, db, dht) + dhtService, err := service.NewDHTService(&defaultConfig, db, dht) require.NoError(t, err) - require.NotEmpty(t, pkarrService) + require.NotEmpty(t, dhtService) - return *pkarrService + return *dhtService } func generateDIDPutRequest(t *testing.T) (string, []byte) { @@ -188,7 +188,7 @@ func generateDIDPutRequest(t *testing.T) (string, []byte) { assert.NoError(t, err) assert.NotEmpty(t, packet) - bep44Put, err := dht.CreatePkarrPublishRequest(sk, *packet) + bep44Put, err := dht.CreateDNSPublishRequest(sk, *packet) assert.NoError(t, err) assert.NotEmpty(t, bep44Put) diff --git a/impl/pkg/server/server.go b/impl/pkg/server/server.go index fd6e406e..e6093408 100644 --- a/impl/pkg/server/server.go +++ b/impl/pkg/server/server.go @@ -31,7 +31,7 @@ type Server struct { shutdown chan os.Signal cfg *config.Config - svc *service.PkarrService + svc *service.DHTService } // NewServer returns a new instance of Server with the given db and host. @@ -51,9 +51,9 @@ func NewServer(cfg *config.Config, shutdown chan os.Signal, d *dht.DHT) (*Server logrus.WithField("record_count", recordCnt).Info("storage instantiated with record count") } - pkarrService, err := service.NewPkarrService(cfg, db, d) + dhtService, err := service.NewDHTService(cfg, db, d) if err != nil { - return nil, util.LoggingErrorMsg(err, "could not instantiate pkarr service") + return nil, util.LoggingErrorMsg(err, "could not instantiate the dht service") } handler.GET("/health", Health) @@ -63,8 +63,8 @@ func NewServer(cfg *config.Config, shutdown chan os.Signal, d *dht.DHT) (*Server handler.GET("/swagger/*any", ginswagger.WrapHandler(swaggerfiles.Handler, ginswagger.URL("/swagger.yaml"))) // root relay API - if err = PkarrAPI(&handler.RouterGroup, pkarrService); err != nil { - return nil, util.LoggingErrorMsg(err, "could not setup pkarr API") + if err = DHTAPI(&handler.RouterGroup, dhtService); err != nil { + return nil, util.LoggingErrorMsg(err, "could not setup the dht API") } return &Server{ Server: &http.Server{ @@ -76,7 +76,7 @@ func NewServer(cfg *config.Config, shutdown chan os.Signal, d *dht.DHT) (*Server MaxHeaderBytes: 1 << 20, }, cfg: cfg, - svc: pkarrService, + svc: dhtService, handler: handler, shutdown: shutdown, }, nil @@ -105,14 +105,14 @@ func setupHandler(env config.Environment) *gin.Engine { return handler } -// PkarrAPI sets up the relay API routes according to https://github.com/Nuhvi/pkarr/blob/main/design/relays.md -func PkarrAPI(rg *gin.RouterGroup, service *service.PkarrService) error { - relayRouter, err := NewPkarrRouter(service) +// DHTAPI sets up the relay API routes according to the spec https://did-dht.com/#gateway-api +func DHTAPI(rg *gin.RouterGroup, service *service.DHTService) error { + dhtRouter, err := NewDHTRouter(service) if err != nil { - return util.LoggingErrorMsg(err, "could not instantiate relay router") + return util.LoggingErrorMsg(err, "could not instantiate dht router") } - rg.PUT("/:id", relayRouter.PutRecord) - rg.GET("/:id", relayRouter.GetRecord) + rg.PUT("/:id", dhtRouter.PutRecord) + rg.GET("/:id", dhtRouter.GetRecord) return nil } diff --git a/impl/pkg/service/pkarr.go b/impl/pkg/service/dht.go similarity index 54% rename from impl/pkg/service/pkarr.go rename to impl/pkg/service/dht.go index 1b17ac68..b3c6d64d 100644 --- a/impl/pkg/service/pkarr.go +++ b/impl/pkg/service/dht.go @@ -3,7 +3,6 @@ package service import ( "context" "sync" - 
"sync/atomic" "time" ssiutil "github.com/TBD54566975/ssi-sdk/util" @@ -12,22 +11,20 @@ import ( "github.com/goccy/go-json" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/tv42/zbase32" "github.com/TBD54566975/did-dht-method/internal/util" "github.com/TBD54566975/did-dht-method/config" dhtint "github.com/TBD54566975/did-dht-method/internal/dht" "github.com/TBD54566975/did-dht-method/pkg/dht" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" "github.com/TBD54566975/did-dht-method/pkg/storage" "github.com/TBD54566975/did-dht-method/pkg/telemetry" ) -const recordSizeLimit = 1000 +const recordSizeLimitBytes = 1000 -// PkarrService is the Pkarr service responsible for managing the Pkarr DHT and reading/writing records -type PkarrService struct { +// DHTService is the service responsible for managing BEP44 DNS records in the DHT and reading/writing records +type DHTService struct { cfg *config.Config db storage.Storage dht *dht.DHT @@ -36,17 +33,17 @@ type PkarrService struct { scheduler *dhtint.Scheduler } -// NewPkarrService returns a new instance of the Pkarr service -func NewPkarrService(cfg *config.Config, db storage.Storage, d *dht.DHT) (*PkarrService, error) { +// NewDHTService returns a new instance of the DHT service +func NewDHTService(cfg *config.Config, db storage.Storage, d *dht.DHT) (*DHTService, error) { if cfg == nil { return nil, ssiutil.LoggingNewError("config is required") } // create and start get cache - cacheTTL := time.Duration(cfg.PkarrConfig.CacheTTLSeconds) * time.Second + cacheTTL := time.Duration(cfg.DHTConfig.CacheTTLSeconds) * time.Second cacheConfig := bigcache.DefaultConfig(cacheTTL) - cacheConfig.MaxEntrySize = recordSizeLimit - cacheConfig.HardMaxCacheSize = cfg.PkarrConfig.CacheSizeLimitMB + cacheConfig.MaxEntrySize = recordSizeLimitBytes + cacheConfig.HardMaxCacheSize = cfg.DHTConfig.CacheSizeLimitMB cacheConfig.CleanWindow = cacheTTL / 2 cache, err := bigcache.New(context.Background(), cacheConfig) if err != nil { @@ -63,7 +60,7 @@ func NewPkarrService(cfg *config.Config, db storage.Storage, d *dht.DHT) (*Pkarr // start scheduler for republishing scheduler := dhtint.NewScheduler() - svc := PkarrService{ + svc := DHTService{ cfg: cfg, db: db, dht: d, @@ -71,15 +68,15 @@ func NewPkarrService(cfg *config.Config, db storage.Storage, d *dht.DHT) (*Pkarr badGetCache: badGetCache, scheduler: &scheduler, } - if err = scheduler.Schedule(cfg.PkarrConfig.RepublishCRON, svc.republish); err != nil { + if err = scheduler.Schedule(cfg.DHTConfig.RepublishCRON, svc.republish); err != nil { return nil, ssiutil.LoggingErrorMsg(err, "failed to start republisher") } return &svc, nil } -// PublishPkarr stores the record in the db, publishes the given Pkarr record to the DHT, and returns the z-base-32 encoded ID -func (s *PkarrService) PublishPkarr(ctx context.Context, id string, record pkarr.Record) error { - ctx, span := telemetry.GetTracer().Start(ctx, "PkarrService.PublishPkarr") +// PublishDHT stores the record in the db, publishes the given DNS record to the DHT, and returns the z-base-32 encoded ID +func (s *DHTService) PublishDHT(ctx context.Context, id string, record dht.BEP44Record) error { + ctx, span := telemetry.GetTracer().Start(ctx, "DHTService.PublishDHT") defer span.End() // make sure the key is valid @@ -93,9 +90,9 @@ func (s *PkarrService) PublishPkarr(ctx context.Context, id string, record pkarr // check if the message is already in the cache if got, err := s.cache.Get(id); err == nil { - var resp pkarr.Response + var resp dht.BEP44Response 
if err = json.Unmarshal(got, &resp); err == nil && record.Response().Equals(resp) { - logrus.WithContext(ctx).WithField("record_id", id).Debug("resolved pkarr record from cache with matching response") + logrus.WithContext(ctx).WithField("record_id", id).Debug("resolved dht record from cache with matching response") return nil } } @@ -111,7 +108,7 @@ func (s *PkarrService) PublishPkarr(ctx context.Context, id string, record pkarr if err = s.cache.Set(id, recordBytes); err != nil { return err } - logrus.WithContext(ctx).WithField("record", id).Debug("added pkarr record to cache and db") + logrus.WithContext(ctx).WithField("record", id).Debug("added dht record to cache and db") // return here and put it in the DHT asynchronously go func() { @@ -119,19 +116,19 @@ func (s *PkarrService) PublishPkarr(ctx context.Context, id string, record pkarr putCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - if _, err = s.dht.Put(putCtx, record.BEP44()); err != nil { + if _, err = s.dht.Put(putCtx, record.Put()); err != nil { logrus.WithContext(ctx).WithError(err).Errorf("error from dht.Put for record: %s", id) } else { - logrus.WithContext(ctx).WithField("record", id).Debug("put pkarr record to DHT") + logrus.WithContext(ctx).WithField("record", id).Debug("put record to DHT") } }() return nil } -// GetPkarr returns the full Pkarr record (including sig data) for the given z-base-32 encoded ID -func (s *PkarrService) GetPkarr(ctx context.Context, id string) (*pkarr.Response, error) { - ctx, span := telemetry.GetTracer().Start(ctx, "PkarrService.GetPkarr") +// GetDHT returns the full DNS record (including sig data) for the given z-base-32 encoded ID +func (s *DHTService) GetDHT(ctx context.Context, id string) (*dht.BEP44Response, error) { + ctx, span := telemetry.GetTracer().Start(ctx, "DHTService.GetDHT") defer span.End() // make sure the key is valid @@ -146,12 +143,12 @@ func (s *PkarrService) GetPkarr(ctx context.Context, id string) (*pkarr.Response // first do a cache lookup if got, err := s.cache.Get(id); err == nil { - var resp pkarr.Response + var resp dht.BEP44Response if err = json.Unmarshal(got, &resp); err == nil { - logrus.WithContext(ctx).WithField("record_id", id).Info("resolved pkarr record from cache") + logrus.WithContext(ctx).WithField("record_id", id).Info("resolved record from cache") return &resp, nil } - logrus.WithContext(ctx).WithError(err).WithField("record", id).Warn("failed to get pkarr record from cache, falling back to dht") + logrus.WithContext(ctx).WithError(err).WithField("record", id).Warn("failed to get record from cache, falling back to dht") } // next do a dht lookup with a timeout of 10 seconds @@ -163,17 +160,12 @@ func (s *PkarrService) GetPkarr(ctx context.Context, id string) (*pkarr.Response if errors.Is(err, context.DeadlineExceeded) { logrus.WithContext(ctx).WithField("record", id).Warn("dht lookup timed out, attempting to resolve from storage") } else { - logrus.WithContext(ctx).WithError(err).WithField("record", id).Warn("failed to get pkarr record from dht, attempting to resolve from storage") + logrus.WithContext(ctx).WithError(err).WithField("record", id).Warn("failed to get record from dht, attempting to resolve from storage") } - rawID, err := util.Z32Decode(id) - if err != nil { - return nil, err - } - - record, err := s.db.ReadRecord(ctx, rawID) + record, err := s.db.ReadRecord(ctx, id) if err != nil || record == nil { - logrus.WithContext(ctx).WithError(err).WithField("record", id).Error("failed to resolve pkarr 
record from storage; adding to badGetCache") + logrus.WithContext(ctx).WithError(err).WithField("record", id).Error("failed to resolve record from storage; adding to badGetCache") // add the key to the badGetCache to prevent spamming the DHT if err = s.badGetCache.Set(id, []byte{0}); err != nil { @@ -183,11 +175,11 @@ func (s *PkarrService) GetPkarr(ctx context.Context, id string) (*pkarr.Response return nil, err } - logrus.WithContext(ctx).WithField("record", id).Info("resolved pkarr record from storage") + logrus.WithContext(ctx).WithField("record", id).Info("resolved record from storage") resp := record.Response() // add the record back to the cache for future lookups if err = s.addRecordToCache(id, record.Response()); err != nil { - logrus.WithError(err).WithField("record", id).Error("failed to set pkarr record in cache") + logrus.WithError(err).WithField("record", id).Error("failed to set record in cache") } return &resp, err @@ -202,7 +194,7 @@ func (s *PkarrService) GetPkarr(ctx context.Context, id string) (*pkarr.Response if err = bencode.Unmarshal(bBytes, &payload); err != nil { return nil, ssiutil.LoggingCtxErrorMsg(ctx, err, "failed to unmarshal bencoded payload") } - resp := pkarr.Response{ + resp := dht.BEP44Response{ V: []byte(payload), Seq: got.Seq, Sig: got.Sig, @@ -210,15 +202,15 @@ func (s *PkarrService) GetPkarr(ctx context.Context, id string) (*pkarr.Response // add the record to cache, do it here to avoid duplicate calculations if err = s.addRecordToCache(id, resp); err != nil { - logrus.WithContext(ctx).WithError(err).Errorf("failed to set pkarr record[%s] in cache", id) + logrus.WithContext(ctx).WithField("record", id).WithError(err).Error("failed to set record in cache") } else { - logrus.WithContext(ctx).WithField("record", id).Info("added pkarr record back to cache") + logrus.WithContext(ctx).WithField("record", id).Info("added record back to cache") } return &resp, nil } -func (s *PkarrService) addRecordToCache(id string, resp pkarr.Response) error { +func (s *DHTService) addRecordToCache(id string, resp dht.BEP44Response) error { recordBytes, err := json.Marshal(resp) if err != nil { return err @@ -229,34 +221,47 @@ func (s *PkarrService) addRecordToCache(id string, resp pkarr.Response) error { return nil } +// failedRecord is a struct to keep track of records that failed to be republished +type failedRecord struct { + record dht.BEP44Record + failureCnt int +} + // TODO(gabe) make this more efficient. 
create a publish schedule based on each individual record, not all records -func (s *PkarrService) republish() { - ctx, span := telemetry.GetTracer().Start(context.Background(), "PkarrService.republish") +func (s *DHTService) republish() { + ctx, span := telemetry.GetTracer().Start(context.Background(), "DHTService.republish") defer span.End() recordCnt, err := s.db.RecordCount(ctx) if err != nil { logrus.WithContext(ctx).WithError(err).Error("failed to get record count before republishing") return - } else { - logrus.WithContext(ctx).WithField("record_count", recordCnt).Info("republishing records") } + logrus.WithContext(ctx).WithField("record_count", recordCnt).Info("republishing records") + + // republish all records in the db and handle failed records up to 3 times + failedRecords := s.republishRecords(ctx) + s.handleFailedRecords(ctx, failedRecords) +} +// republishRecords republishes all records in the db to the DHT and returns a list of failed records +func (s *DHTService) republishRecords(ctx context.Context) []failedRecord { var nextPageToken []byte - var recordsBatch []pkarr.Record - var seenRecords, batchCnt, successCnt, errCnt int32 = 0, 1, 0, 0 + var seenRecords, batchCnt int32 + var failedRecords []failedRecord for { - recordsBatch, nextPageToken, err = s.db.ListRecords(ctx, nextPageToken, 1000) + recordsBatch, nextPageToken, err := s.db.ListRecords(ctx, nextPageToken, 1000) if err != nil { logrus.WithContext(ctx).WithError(err).Error("failed to list record(s) for republishing") - return + return failedRecords } + batchSize := len(recordsBatch) seenRecords += int32(batchSize) if batchSize == 0 { logrus.WithContext(ctx).Info("no records to republish") - return + return failedRecords } logrus.WithContext(ctx).WithFields(logrus.Fields{ @@ -266,68 +271,98 @@ func (s *PkarrService) republish() { }).Infof("republishing batch [%d] of [%d] records", batchCnt, batchSize) batchCnt++ - var wg sync.WaitGroup - wg.Add(batchSize) + failedRecords = append(failedRecords, s.republishBatch(ctx, recordsBatch)...) 
- var batchErrCnt, batchSuccessCnt int32 = 0, 0 - for _, record := range recordsBatch { - go func(ctx context.Context, record pkarr.Record) { - defer wg.Done() + if nextPageToken == nil { + break + } + } - recordID := zbase32.EncodeToString(record.Key[:]) - logrus.WithContext(ctx).Debugf("republishing record: %s", recordID) + successRate := float64(seenRecords-int32(len(failedRecords))) / float64(seenRecords) * 100 + logrus.WithContext(ctx).WithFields(logrus.Fields{ + "success": seenRecords - int32(len(failedRecords)), + "errors": len(failedRecords), + "total": seenRecords, + }).Infof("republishing complete with [%d] batches of [%d] total records with a [%.2f] percent success rate", batchCnt, seenRecords, successRate) - putCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() + return failedRecords +} - if _, putErr := s.dht.Put(putCtx, record.BEP44()); putErr != nil { - logrus.WithContext(putCtx).WithError(putErr).Debugf("failed to republish record: %s", recordID) - atomic.AddInt32(&batchErrCnt, 1) - } else { - atomic.AddInt32(&batchSuccessCnt, 1) - } - }(ctx, record) - } +// republishBatch republishes a batch of records to the DHT and returns a list of failed records +func (s *DHTService) republishBatch(ctx context.Context, recordsBatch []dht.BEP44Record) []failedRecord { + var wg sync.WaitGroup + var failedRecords []failedRecord - // Wait for all goroutines in this batch to finish before moving on to the next batch - wg.Wait() + for _, record := range recordsBatch { + wg.Add(1) + go func(ctx context.Context, record dht.BEP44Record) { + defer wg.Done() - // Update the success and error counts - atomic.AddInt32(&successCnt, batchSuccessCnt) - atomic.AddInt32(&errCnt, batchErrCnt) + id := record.ID() + logrus.WithContext(ctx).WithField("record_id", id).Debug("republishing record") - successRate := float64(batchSuccessCnt) / float64(batchSize) + putCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() - logrus.WithContext(ctx).WithFields(logrus.Fields{ - "batch_number": batchCnt, - "success": successCnt, - "errors": errCnt, - }).Infof("batch [%d] completed with a [%.2f] percent success rate", batchCnt, successRate*100) - - if successRate < 0.8 { - logrus.WithContext(ctx).WithFields(logrus.Fields{ - "batch_number": batchCnt, - "success": successCnt, - "errors": errCnt, - }).Errorf("batch [%d] failed to meet success rate threshold; exiting republishing early", batchCnt) - break - } + if _, putErr := s.dht.Put(putCtx, record.Put()); putErr != nil { + logrus.WithContext(putCtx).WithField("record_id", id).WithError(putErr).Debug("failed to republish record") + failedRecords = append(failedRecords, failedRecord{ + record: record, + failureCnt: 1, + }) + } + }(ctx, record) + } - if nextPageToken == nil { + wg.Wait() + return failedRecords +} + +// handleFailedRecords attempts to republish failed records up to 3 times +func (s *DHTService) handleFailedRecords(ctx context.Context, failedRecords []failedRecord) { + for i := 0; i < 3; i++ { + var remainingFailedRecords []failedRecord + for _, fr := range failedRecords { + id := fr.record.ID() + putCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + if _, putErr := s.dht.Put(putCtx, fr.record.Put()); putErr != nil { + logrus.WithContext(putCtx).WithField("record_id", id).WithError(putErr).Debugf("failed to re-republish [%s], attempt: %d", id, i+1) + fr.failureCnt++ + if fr.failureCnt <= 3 { + remainingFailedRecords = append(remainingFailedRecords, fr) + } else { + 
logrus.WithContext(ctx).WithField("record_id", id).Errorf("record failed to republish after 3 attempts") + } + } + } + failedRecords = remainingFailedRecords + if len(failedRecords) == 0 { + logrus.WithContext(ctx).Info("all failed records successfully republished") break } + if i == 2 { + logrus.WithContext(ctx).WithField("failed_records", failedRecords).Error("failed to republish all records after 3 attempts") + for _, fr := range failedRecords { + id := fr.record.ID() + if err := s.db.WriteFailedRecord(ctx, id); err != nil { + logrus.WithContext(ctx).WithField("record_id", id).WithError(err).Warn("failed to write failed record to db") + } + } + } } - successRate := float64(successCnt) / float64(seenRecords) - logrus.WithContext(ctx).WithFields(logrus.Fields{ - "success": seenRecords - errCnt, - "errors": errCnt, - "total": seenRecords, - }).Infof("republishing complete with [%d] batches of [%d] total records with an [%.2f] percent success rate", batchCnt, seenRecords, successRate*100) + failedRecordCnt, err := s.db.FailedRecordCount(ctx) + if err != nil { + logrus.WithContext(ctx).WithError(err).Error("failed to get failed record count") + return + } + logrus.WithContext(ctx).WithField("failed_record_count", failedRecordCnt).Warn("total failed records") } -// Close closes the Pkarr service gracefully -func (s *PkarrService) Close() { +// Close closes the Mainline service gracefully +func (s *DHTService) Close() { if s == nil { return } diff --git a/impl/pkg/service/pkarr_test.go b/impl/pkg/service/dht_test.go similarity index 71% rename from impl/pkg/service/pkarr_test.go rename to impl/pkg/service/dht_test.go index 8d83d90f..f2affb02 100644 --- a/impl/pkg/service/pkarr_test.go +++ b/impl/pkg/service/dht_test.go @@ -13,27 +13,26 @@ import ( "github.com/TBD54566975/did-dht-method/config" "github.com/TBD54566975/did-dht-method/internal/did" "github.com/TBD54566975/did-dht-method/pkg/dht" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" "github.com/TBD54566975/did-dht-method/pkg/storage" ) -func TestPkarrService(t *testing.T) { - svc := newPkarrService(t, "a") +func TestDHTService(t *testing.T) { + svc := newDHTService(t, "a") t.Run("test put bad record", func(t *testing.T) { - err := svc.PublishPkarr(context.Background(), "", pkarr.Record{}) + err := svc.PublishDHT(context.Background(), "", dht.BEP44Record{}) assert.Error(t, err) assert.Contains(t, err.Error(), "validation for 'Value' failed on the 'required' tag") }) t.Run("test get non existent record", func(t *testing.T) { - got, err := svc.GetPkarr(context.Background(), "test") + got, err := svc.GetDHT(context.Background(), "test") assert.NoError(t, err) assert.Nil(t, got) }) t.Run("test get record with invalid ID", func(t *testing.T) { - got, err := svc.GetPkarr(context.Background(), "---") + got, err := svc.GetDHT(context.Background(), "---") assert.ErrorContains(t, err, "illegal z-base-32 data at input byte 0") assert.Nil(t, got) }) @@ -49,18 +48,18 @@ func TestPkarrService(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) suffix, err := d.Suffix() require.NoError(t, err) - err = svc.PublishPkarr(context.Background(), suffix, pkarr.RecordFromBEP44(putMsg)) + err = svc.PublishDHT(context.Background(), suffix, dht.RecordFromBEP44(putMsg)) assert.NoError(t, err) // invalidate the signature putMsg.Sig[0] = 0 - err = 
svc.PublishPkarr(context.Background(), suffix, pkarr.RecordFromBEP44(putMsg)) + err = svc.PublishDHT(context.Background(), suffix, dht.RecordFromBEP44(putMsg)) assert.Error(t, err) assert.Contains(t, err.Error(), "signature is invalid") }) @@ -76,16 +75,16 @@ func TestPkarrService(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) suffix, err := d.Suffix() require.NoError(t, err) - err = svc.PublishPkarr(context.Background(), suffix, pkarr.RecordFromBEP44(putMsg)) + err = svc.PublishDHT(context.Background(), suffix, dht.RecordFromBEP44(putMsg)) assert.NoError(t, err) - got, err := svc.GetPkarr(context.Background(), suffix) + got, err := svc.GetDHT(context.Background(), suffix) assert.NoError(t, err) assert.NotEmpty(t, got) assert.Equal(t, putMsg.V, got.V) @@ -104,20 +103,20 @@ func TestPkarrService(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) suffix, err := d.Suffix() require.NoError(t, err) - err = svc.PublishPkarr(context.Background(), suffix, pkarr.RecordFromBEP44(putMsg)) + err = svc.PublishDHT(context.Background(), suffix, dht.RecordFromBEP44(putMsg)) require.NoError(t, err) // remove it from the cache so the get tests the uncached lookup path err = svc.cache.Delete(suffix) require.NoError(t, err) - got, err := svc.GetPkarr(context.Background(), suffix) + got, err := svc.GetDHT(context.Background(), suffix) assert.NoError(t, err) assert.NotEmpty(t, got) assert.Equal(t, putMsg.V, got.V) @@ -126,12 +125,12 @@ func TestPkarrService(t *testing.T) { }) t.Run("test get record with invalid ID", func(t *testing.T) { - got, err := svc.GetPkarr(context.Background(), "uqaj3fcr9db6jg6o9pjs53iuftyj45r46aubogfaceqjbo6pp9sy") + got, err := svc.GetDHT(context.Background(), "uqaj3fcr9db6jg6o9pjs53iuftyj45r46aubogfaceqjbo6pp9sy") assert.NoError(t, err) assert.Empty(t, got) // try it again to make sure the cache is working - got, err = svc.GetPkarr(context.Background(), "uqaj3fcr9db6jg6o9pjs53iuftyj45r46aubogfaceqjbo6pp9sy") + got, err = svc.GetDHT(context.Background(), "uqaj3fcr9db6jg6o9pjs53iuftyj45r46aubogfaceqjbo6pp9sy") assert.ErrorContains(t, err, "rate limited to prevent spam") assert.Empty(t, got) }) @@ -140,7 +139,7 @@ func TestPkarrService(t *testing.T) { } func TestDHT(t *testing.T) { - svc1 := newPkarrService(t, "b") + svc1 := newDHTService(t, "b") // create and publish a record to service1 sk, doc, err := did.GenerateDIDDHT(did.CreateDIDDHTOpts{}) @@ -150,16 +149,16 @@ func TestDHT(t *testing.T) { packet, err := d.ToDNSPacket(*doc, nil, nil) require.NoError(t, err) require.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) suffix, err := d.Suffix() require.NoError(t, err) - err = svc1.PublishPkarr(context.Background(), suffix, pkarr.RecordFromBEP44(putMsg)) + err = svc1.PublishDHT(context.Background(), suffix, dht.RecordFromBEP44(putMsg)) require.NoError(t, err) // make sure we can get it back - got, err := svc1.GetPkarr(context.Background(), suffix) + got, err := svc1.GetDHT(context.Background(), suffix) require.NoError(t, err) require.NotEmpty(t, got) assert.Equal(t, putMsg.V, 
got.V) @@ -167,10 +166,10 @@ func TestDHT(t *testing.T) { assert.Equal(t, putMsg.Seq, got.Seq) // create service2 with service1 as a bootstrap peer - svc2 := newPkarrService(t, "c", anacrolixdht.NewAddr(svc1.dht.Addr())) + svc2 := newDHTService(t, "c", anacrolixdht.NewAddr(svc1.dht.Addr())) // get the record via service2 - gotFrom2, err := svc2.GetPkarr(context.Background(), suffix) + gotFrom2, err := svc2.GetDHT(context.Background(), suffix) require.NoError(t, err) require.NotEmpty(t, gotFrom2) assert.Equal(t, putMsg.V, gotFrom2.V) @@ -184,20 +183,20 @@ func TestDHT(t *testing.T) { } func TestNoConfig(t *testing.T) { - svc, err := NewPkarrService(nil, nil, nil) + svc, err := NewDHTService(nil, nil, nil) assert.EqualError(t, err, "config is required") assert.Empty(t, svc) - svc, err = NewPkarrService(&config.Config{ - PkarrConfig: config.PkarrServiceConfig{ + svc, err = NewDHTService(&config.Config{ + DHTConfig: config.DHTServiceConfig{ CacheSizeLimitMB: -1, }, }, nil, nil) assert.EqualError(t, err, "failed to instantiate cache: HardMaxCacheSize must be >= 0") assert.Nil(t, svc) - svc, err = NewPkarrService(&config.Config{ - PkarrConfig: config.PkarrServiceConfig{ + svc, err = NewDHTService(&config.Config{ + DHTConfig: config.DHTServiceConfig{ RepublishCRON: "not a real cron expression", }, }, nil, nil) @@ -207,7 +206,7 @@ func TestNoConfig(t *testing.T) { t.Cleanup(func() { svc.Close() }) } -func newPkarrService(t *testing.T, id string, bootstrapPeers ...anacrolixdht.Addr) PkarrService { +func newDHTService(t *testing.T, id string, bootstrapPeers ...anacrolixdht.Addr) DHTService { defaultConfig := config.GetDefaultConfig() db, err := storage.NewStorage(fmt.Sprintf("bolt://diddht-test-%s.db", id)) @@ -217,9 +216,9 @@ func newPkarrService(t *testing.T, id string, bootstrapPeers ...anacrolixdht.Add t.Cleanup(func() { os.Remove(fmt.Sprintf("diddht-test-%s.db", id)) }) d := dht.NewTestDHT(t, bootstrapPeers...) 
- pkarrService, err := NewPkarrService(&defaultConfig, db, d) + dhtService, err := NewDHTService(&defaultConfig, db, d) require.NoError(t, err) - require.NotEmpty(t, pkarrService) + require.NotEmpty(t, dhtService) - return *pkarrService + return *dhtService } diff --git a/impl/pkg/storage/db/bolt/bolt.go b/impl/pkg/storage/db/bolt/bolt.go index 7141d409..ef4272df 100644 --- a/impl/pkg/storage/db/bolt/bolt.go +++ b/impl/pkg/storage/db/bolt/bolt.go @@ -1,7 +1,10 @@ package bolt import ( + "bytes" "context" + "encoding/binary" + "fmt" "time" "github.com/goccy/go-json" @@ -10,12 +13,14 @@ import ( "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" + "github.com/TBD54566975/did-dht-method/pkg/dht" "github.com/TBD54566975/did-dht-method/pkg/telemetry" ) const ( - pkarrNamespace = "pkarr" + dhtNamespace = "dht" + oldDHTNamespace = "pkarr" + failedNamespace = "failed" ) type Bolt struct { @@ -36,12 +41,54 @@ func NewBolt(path string) (*Bolt, error) { return nil, err } + // Perform the migration + go migrate(db) + return &Bolt{db: db}, nil } +func migrate(db *bolt.DB) { + // Perform the migration within a write transaction + err := db.Update(func(tx *bolt.Tx) error { + // Create the new namespace bucket + newBucket, err := tx.CreateBucketIfNotExists([]byte(dhtNamespace)) + if err != nil { + return fmt.Errorf("failed to create new namespace bucket: %v", err) + } + + // Get the old namespace bucket + oldBucket := tx.Bucket([]byte(oldDHTNamespace)) + if oldBucket == nil { + // If the old namespace bucket doesn't exist, there's nothing to migrate + return nil + } + + // Iterate over the key-value pairs in the old namespace bucket + err = oldBucket.ForEach(func(k, v []byte) error { + // Copy each key-value pair to the new namespace bucket + err = newBucket.Put(k, v) + if err != nil { + return fmt.Errorf("failed to copy key-value pair to new namespace: %v", err) + } + return nil + }) + if err != nil { + return err + } + + return nil + }) + + if err != nil { + logrus.WithError(err).Error("failed to migrate records") + } else { + logrus.Info("migration completed successfully") + } +} + // WriteRecord writes the given record to the storage // TODO: don't overwrite existing records, store unique seq numbers -func (b *Bolt) WriteRecord(ctx context.Context, record pkarr.Record) error { +func (b *Bolt) WriteRecord(ctx context.Context, record dht.BEP44Record) error { ctx, span := telemetry.GetTracer().Start(ctx, "bolt.WriteRecord") defer span.End() @@ -51,15 +98,27 @@ func (b *Bolt) WriteRecord(ctx context.Context, record pkarr.Record) error { return err } - return b.write(ctx, pkarrNamespace, encoded.K, recordBytes) + // write to both the old and new namespaces for now + errOld := b.write(ctx, oldDHTNamespace, record.ID(), recordBytes) + errNew := b.write(ctx, dhtNamespace, record.ID(), recordBytes) + if errOld == nil && errNew == nil { + return nil + } + if errOld != nil && errNew != nil { + return errors.New(fmt.Sprintf("old: %v, new: %v", errOld, errNew)) + } + if errOld != nil { + return errOld + } + return errNew } // ReadRecord reads the record with the given id from the storage -func (b *Bolt) ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, error) { +func (b *Bolt) ReadRecord(ctx context.Context, id string) (*dht.BEP44Record, error) { ctx, span := telemetry.GetTracer().Start(ctx, "bolt.ReadRecord") defer span.End() - recordBytes, err := b.read(ctx, pkarrNamespace, encoding.EncodeToString(id)) + recordBytes, err := b.read(ctx, 
oldDHTNamespace, id) if err != nil { return nil, err } @@ -67,7 +126,7 @@ func (b *Bolt) ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, error) return nil, nil } - var b64record base64PkarrRecord + var b64record base64BEP44Record if err = json.Unmarshal(recordBytes, &b64record); err != nil { return nil, err } @@ -81,18 +140,18 @@ func (b *Bolt) ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, error) } // ListRecords lists all records in the storage -func (b *Bolt) ListRecords(ctx context.Context, nextPageToken []byte, pagesize int) ([]pkarr.Record, []byte, error) { +func (b *Bolt) ListRecords(ctx context.Context, nextPageToken []byte, pageSize int) ([]dht.BEP44Record, []byte, error) { ctx, span := telemetry.GetTracer().Start(ctx, "bolt.ListRecords") defer span.End() - boltRecords, err := b.readSeveral(ctx, pkarrNamespace, nextPageToken, pagesize) + boltRecords, err := b.readSeveral(ctx, oldDHTNamespace, nextPageToken, pageSize) if err != nil { return nil, nil, err } - var records []pkarr.Record + var records []dht.BEP44Record for _, recordBytes := range boltRecords { - var encodedRecord base64PkarrRecord + var encodedRecord base64BEP44Record if err = json.Unmarshal(recordBytes.value, &encodedRecord); err != nil { return nil, nil, err } @@ -105,7 +164,7 @@ func (b *Bolt) ListRecords(ctx context.Context, nextPageToken []byte, pagesize i records = append(records, *record) } - if len(boltRecords) == pagesize { + if len(boltRecords) == pageSize { nextPageToken = boltRecords[len(boltRecords)-1].key } else { nextPageToken = nil @@ -202,16 +261,85 @@ func (b *Bolt) readSeveral(ctx context.Context, namespace string, after []byte, return result, err } -// RecordCount returns the number of records in the storage for the pkarr namespace +// RecordCount returns the number of records in the storage for the mainline namespace func (b *Bolt) RecordCount(ctx context.Context) (int, error) { _, span := telemetry.GetTracer().Start(ctx, "bolt.RecordCount") defer span.End() var count int err := b.db.View(func(tx *bolt.Tx) error { - bucket := tx.Bucket([]byte(pkarrNamespace)) + bucket := tx.Bucket([]byte(oldDHTNamespace)) + if bucket == nil { + logrus.WithContext(ctx).WithField("namespace", oldDHTNamespace).Warn("namespace does not exist") + return nil + } + count = bucket.Stats().KeyN + return nil + }) + return count, err +} + +func (b *Bolt) WriteFailedRecord(ctx context.Context, id string) error { + _, span := telemetry.GetTracer().Start(ctx, "bolt.WriteFailedRecord") + defer span.End() + + return b.db.Update(func(tx *bolt.Tx) error { + bucket, err := tx.CreateBucketIfNotExists([]byte(failedNamespace)) + if err != nil { + return err + } + + var count int32 = 1 + v := bucket.Get([]byte(id)) + if v != nil { + if err = json.Unmarshal(v, &count); err != nil { + return err + } + count++ + } + + buf := new(bytes.Buffer) + if err = binary.Write(buf, binary.LittleEndian, count); err != nil { + return err + } + return bucket.Put([]byte(id), buf.Bytes()) + }) +} + +func (b *Bolt) ListFailedRecords(ctx context.Context) ([]dht.FailedRecord, error) { + _, span := telemetry.GetTracer().Start(ctx, "bolt.ListFailedRecords") + defer span.End() + + var result []dht.FailedRecord + err := b.db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte(failedNamespace)) + if bucket == nil { + logrus.WithField("namespace", failedNamespace).Warn("namespace does not exist") + return nil + } + + cursor := bucket.Cursor() + for k, v := cursor.First(); k != nil; k, v = cursor.Next() { + var count int + if err := 
binary.Read(bytes.NewReader(v), binary.LittleEndian, &count); err != nil { + return err + } + result = append(result, dht.FailedRecord{ID: string(k), Count: count}) + } + return nil + }) + return result, err +} + +func (b *Bolt) FailedRecordCount(ctx context.Context) (int, error) { + _, span := telemetry.GetTracer().Start(ctx, "bolt.FailedRecordCount") + defer span.End() + + var count int + err := b.db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte(failedNamespace)) if bucket == nil { - logrus.WithContext(ctx).WithField("namespace", pkarrNamespace).Warn("namespace does not exist") + logrus.WithField("namespace", failedNamespace).Warn("namespace does not exist") return nil } count = bucket.Stats().KeyN diff --git a/impl/pkg/storage/db/bolt/bolt_test.go b/impl/pkg/storage/db/bolt/bolt_test.go index 8a015869..7d9ee485 100644 --- a/impl/pkg/storage/db/bolt/bolt_test.go +++ b/impl/pkg/storage/db/bolt/bolt_test.go @@ -7,12 +7,11 @@ import ( "github.com/goccy/go-json" - "github.com/TBD54566975/did-dht-method/internal/did" - "github.com/TBD54566975/did-dht-method/pkg/dht" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/TBD54566975/did-dht-method/internal/did" + "github.com/TBD54566975/did-dht-method/pkg/dht" ) func TestBoltDB_ReadWrite(t *testing.T) { @@ -40,17 +39,17 @@ func TestBoltDB_ReadWrite(t *testing.T) { assert.NoError(t, err) assert.EqualValues(t, players1, players1Result) - // get a value from a dhtNamespace that doesn't exist + // get a value from a oldDHTNamespace that doesn't exist res, err := db.read(ctx, "bad", "worse") assert.NoError(t, err) assert.Empty(t, res) - // get a value that doesn't exist in the dhtNamespace + // get a value that doesn't exist in the oldDHTNamespace noValue, err := db.read(ctx, namespace, "Porsche") assert.NoError(t, err) assert.Empty(t, noValue) - // create a second value in the dhtNamespace + // create a second value in the oldDHTNamespace team2 := "McLaren" players2 := []string{"Lando Norris", "Daniel Ricciardo"} p2Bytes, err := json.Marshal(players2) @@ -59,7 +58,7 @@ func TestBoltDB_ReadWrite(t *testing.T) { err = db.write(ctx, namespace, team2, p2Bytes) assert.NoError(t, err) - // get all values from the dhtNamespace + // get all values from the oldDHTNamespace gotAll, err := db.readAll(namespace) assert.NoError(t, err) assert.True(t, len(gotAll) == 2) @@ -123,16 +122,16 @@ func TestReadWrite(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) - r := pkarr.RecordFromBEP44(putMsg) + r := dht.RecordFromBEP44(putMsg) err = db.WriteRecord(ctx, r) require.NoError(t, err) - r2, err := db.ReadRecord(ctx, r.Key[:]) + r2, err := db.ReadRecord(ctx, r.ID()) require.NoError(t, err) assert.Equal(t, r.Key, r2.Key) @@ -168,12 +167,12 @@ func TestDBPagination(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) // create record - record := pkarr.RecordFromBEP44(putMsg) + record := dht.RecordFromBEP44(putMsg) err = db.WriteRecord(ctx, record) assert.NoError(t, err) @@ -189,12 +188,12 @@ func TestDBPagination(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := 
dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) // create eleventhRecord - eleventhRecord := pkarr.RecordFromBEP44(putMsg) + eleventhRecord := dht.RecordFromBEP44(putMsg) err = db.WriteRecord(ctx, eleventhRecord) assert.NoError(t, err) diff --git a/impl/pkg/storage/db/bolt/pkarr.go b/impl/pkg/storage/db/bolt/dht.go similarity index 67% rename from impl/pkg/storage/db/bolt/pkarr.go rename to impl/pkg/storage/db/bolt/dht.go index 97f30040..917094d1 100644 --- a/impl/pkg/storage/db/bolt/pkarr.go +++ b/impl/pkg/storage/db/bolt/dht.go @@ -6,14 +6,14 @@ import ( "github.com/TBD54566975/ssi-sdk/util" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" + "github.com/TBD54566975/did-dht-method/pkg/dht" ) var ( encoding = base64.RawURLEncoding ) -type base64PkarrRecord struct { +type base64BEP44Record struct { // Up to an 1000 byte base64URL encoded string V string `json:"v" validate:"required"` // 32 byte base64URL encoded string @@ -23,8 +23,8 @@ type base64PkarrRecord struct { Seq int64 `json:"seq" validate:"required"` } -func encodeRecord(r pkarr.Record) base64PkarrRecord { - return base64PkarrRecord{ +func encodeRecord(r dht.BEP44Record) base64BEP44Record { + return base64BEP44Record{ V: encoding.EncodeToString(r.Value[:]), K: encoding.EncodeToString(r.Key[:]), Sig: encoding.EncodeToString(r.Signature[:]), @@ -32,23 +32,23 @@ func encodeRecord(r pkarr.Record) base64PkarrRecord { } } -func (b base64PkarrRecord) Decode() (*pkarr.Record, error) { +func (b base64BEP44Record) Decode() (*dht.BEP44Record, error) { v, err := encoding.DecodeString(b.V) if err != nil { - return nil, fmt.Errorf("error parsing pkarr value field: %v", err) + return nil, fmt.Errorf("error parsing bep44 value field: %v", err) } k, err := encoding.DecodeString(b.K) if err != nil { - return nil, fmt.Errorf("error parsing pkarr key field: %v", err) + return nil, fmt.Errorf("error parsing bep44 key field: %v", err) } sig, err := encoding.DecodeString(b.Sig) if err != nil { - return nil, fmt.Errorf("error parsing pkarr sig field: %v", err) + return nil, fmt.Errorf("error parsing bep44 sig field: %v", err) } - record, err := pkarr.NewRecord(k, v, sig, b.Seq) + record, err := dht.NewBEP44Record(k, v, sig, b.Seq) if err != nil { // TODO: do something useful if this happens return nil, util.LoggingErrorMsg(err, "error loading record from database, skipping") diff --git a/impl/pkg/storage/db/postgres/migrations/00001_create_dht_records_table.sql b/impl/pkg/storage/db/postgres/migrations/00001_create_dht_records_table.sql new file mode 100644 index 00000000..f2ee580e --- /dev/null +++ b/impl/pkg/storage/db/postgres/migrations/00001_create_dht_records_table.sql @@ -0,0 +1,17 @@ +-- +goose Up +CREATE TABLE dht_records ( + id SERIAL PRIMARY KEY, + key BYTEA UNIQUE NOT NULL, + value BYTEA NOT NULL, + sig BYTEA NOT NULL, + seq BIGINT NOT NULL +); + +CREATE TABLE failed_records ( + id BYTEA PRIMARY KEY, + failure_count INTEGER NOT NULL +); + +-- +goose Down +DROP TABLE failed_records; +DROP TABLE dht_records; \ No newline at end of file diff --git a/impl/pkg/storage/db/postgres/migrations/00001_create_pkarr_records_table.sql b/impl/pkg/storage/db/postgres/migrations/00001_create_pkarr_records_table.sql deleted file mode 100644 index 05273773..00000000 --- a/impl/pkg/storage/db/postgres/migrations/00001_create_pkarr_records_table.sql +++ /dev/null @@ -1,11 +0,0 @@ --- +goose Up -CREATE TABLE pkarr_records ( - id SERIAL PRIMARY KEY, - key 
BYTEA UNIQUE NOT NULL, - value BYTEA NOT NULL, - sig BYTEA NOT NULL, - seq BIGINT NOT NULL -); - --- +goose Down -DROP TABLE pkarr_records; \ No newline at end of file diff --git a/impl/pkg/storage/db/postgres/models.go b/impl/pkg/storage/db/postgres/models.go index 03958671..a4d02233 100644 --- a/impl/pkg/storage/db/postgres/models.go +++ b/impl/pkg/storage/db/postgres/models.go @@ -4,10 +4,15 @@ package postgres -type PkarrRecord struct { +type DhtRecord struct { ID int32 Key []byte Value []byte Sig []byte Seq int64 } + +type FailedRecord struct { + ID []byte + FailureCount int32 +} diff --git a/impl/pkg/storage/db/postgres/postgres.go b/impl/pkg/storage/db/postgres/postgres.go index bd41e684..365622b4 100644 --- a/impl/pkg/storage/db/postgres/postgres.go +++ b/impl/pkg/storage/db/postgres/postgres.go @@ -10,8 +10,9 @@ import ( _ "github.com/jackc/pgx/v5/stdlib" "github.com/pressly/goose/v3" "github.com/sirupsen/logrus" + "github.com/tv42/zbase32" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" + "github.com/TBD54566975/did-dht-method/pkg/dht" "github.com/TBD54566975/did-dht-method/pkg/telemetry" ) @@ -61,7 +62,7 @@ func (p Postgres) connect(ctx context.Context) (*Queries, *pgx.Conn, error) { return New(conn), conn, nil } -func (p Postgres) WriteRecord(ctx context.Context, record pkarr.Record) error { +func (p Postgres) WriteRecord(ctx context.Context, record dht.BEP44Record) error { ctx, span := telemetry.GetTracer().Start(ctx, "postgres.WriteRecord") defer span.End() @@ -84,7 +85,7 @@ func (p Postgres) WriteRecord(ctx context.Context, record pkarr.Record) error { return nil } -func (p Postgres) ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, error) { +func (p Postgres) ReadRecord(ctx context.Context, id string) (*dht.BEP44Record, error) { ctx, span := telemetry.GetTracer().Start(ctx, "postgres.ReadRecord") defer span.End() @@ -94,7 +95,11 @@ func (p Postgres) ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, err } defer db.Close(ctx) - row, err := queries.ReadRecord(ctx, id) + decodedID, err := zbase32.DecodeString(id) + if err != nil { + return nil, err + } + row, err := queries.ReadRecord(ctx, decodedID) if err != nil { return nil, err } @@ -107,7 +112,7 @@ func (p Postgres) ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, err return record, nil } -func (p Postgres) ListRecords(ctx context.Context, nextPageToken []byte, limit int) ([]pkarr.Record, []byte, error) { +func (p Postgres) ListRecords(ctx context.Context, nextPageToken []byte, limit int) ([]dht.BEP44Record, []byte, error) { ctx, span := telemetry.GetTracer().Start(ctx, "postgres.ListRecords") defer span.End() @@ -117,7 +122,7 @@ func (p Postgres) ListRecords(ctx context.Context, nextPageToken []byte, limit i } defer db.Close(ctx) - var rows []PkarrRecord + var rows []DhtRecord if nextPageToken == nil { rows, err = queries.ListRecordsFirstPage(ctx, int32(limit)) } else { @@ -130,9 +135,9 @@ func (p Postgres) ListRecords(ctx context.Context, nextPageToken []byte, limit i return nil, nil, err } - var records []pkarr.Record + var records []dht.BEP44Record for _, row := range rows { - record, err := pkarr.NewRecord(row.Key, row.Value, row.Sig, row.Seq) + record, err := dht.NewBEP44Record(row.Key, row.Value, row.Sig, row.Seq) if err != nil { // TODO: do something useful if this happens logrus.WithContext(ctx).WithError(err).WithField("record_id", row.ID).Warn("error loading record from database, skipping") @@ -151,13 +156,8 @@ func (p Postgres) ListRecords(ctx context.Context, nextPageToken 
[]byte, limit i return records, nextPageToken, nil } -func (p Postgres) Close() error { - // no-op, postgres connection is closed after each request - return nil -} - -func (row PkarrRecord) Record() (*pkarr.Record, error) { - return pkarr.NewRecord(row.Key, row.Value, row.Sig, row.Seq) +func (row DhtRecord) Record() (*dht.BEP44Record, error) { + return dht.NewBEP44Record(row.Key, row.Value, row.Sig, row.Seq) } func (p Postgres) RecordCount(ctx context.Context) (int, error) { @@ -177,3 +177,73 @@ func (p Postgres) RecordCount(ctx context.Context) (int, error) { return int(count), nil } + +func (p Postgres) WriteFailedRecord(ctx context.Context, id string) error { + ctx, span := telemetry.GetTracer().Start(ctx, "postgres.WriteFailedRecord") + defer span.End() + + queries, db, err := p.connect(ctx) + if err != nil { + return err + } + defer db.Close(ctx) + + err = queries.WriteFailedRecord(ctx, WriteFailedRecordParams{ + ID: []byte(id), + FailureCount: 1, + }) + if err != nil { + return err + } + + return nil +} + +func (p Postgres) ListFailedRecords(ctx context.Context) ([]dht.FailedRecord, error) { + ctx, span := telemetry.GetTracer().Start(ctx, "postgres.ListFailedRecords") + defer span.End() + + queries, db, err := p.connect(ctx) + if err != nil { + return nil, err + } + defer db.Close(ctx) + + rows, err := queries.ListFailedRecords(ctx) + if err != nil { + return nil, err + } + + var failedRecords []dht.FailedRecord + for _, row := range rows { + failedRecords = append(failedRecords, dht.FailedRecord{ + ID: string(row.ID), + Count: int(row.FailureCount), + }) + } + + return failedRecords, nil +} + +func (p Postgres) FailedRecordCount(ctx context.Context) (int, error) { + ctx, span := telemetry.GetTracer().Start(ctx, "postgres.FailedRecordCount") + defer span.End() + + queries, db, err := p.connect(ctx) + if err != nil { + return 0, err + } + defer db.Close(ctx) + + count, err := queries.FailedRecordCount(ctx) + if err != nil { + return 0, err + } + + return int(count), nil +} + +func (p Postgres) Close() error { + // no-op, postgres connection is closed after each request + return nil +} diff --git a/impl/pkg/storage/db/postgres/postgres_test.go b/impl/pkg/storage/db/postgres/postgres_test.go index 9bee06bb..028146d0 100644 --- a/impl/pkg/storage/db/postgres/postgres_test.go +++ b/impl/pkg/storage/db/postgres/postgres_test.go @@ -3,7 +3,6 @@ package postgres_test import ( "context" "net/url" - "os" "testing" "github.com/stretchr/testify/assert" @@ -11,13 +10,12 @@ import ( "github.com/TBD54566975/did-dht-method/internal/did" "github.com/TBD54566975/did-dht-method/pkg/dht" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" "github.com/TBD54566975/did-dht-method/pkg/storage" "github.com/TBD54566975/did-dht-method/pkg/storage/db/postgres" ) func getTestDB(t *testing.T) storage.Storage { - uri := os.Getenv("TEST_DB") + uri := "postgres://postgres:a@127.0.0.1:5432/postgres" // os.Getenv("TEST_DB") if uri == "" { t.SkipNow() } @@ -50,16 +48,16 @@ func TestReadWrite(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) - r := pkarr.RecordFromBEP44(putMsg) + r := dht.RecordFromBEP44(putMsg) err = db.WriteRecord(ctx, r) require.NoError(t, err) - r2, err := db.ReadRecord(ctx, r.Key[:]) + r2, err := db.ReadRecord(ctx, r.ID()) require.NoError(t, err) assert.Equal(t, r.Key, r2.Key) @@ -95,12 +93,12 @@ func 
TestDBPagination(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) // create record - record := pkarr.RecordFromBEP44(putMsg) + record := dht.RecordFromBEP44(putMsg) err = db.WriteRecord(ctx, record) assert.NoError(t, err) @@ -116,12 +114,12 @@ func TestDBPagination(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) // create eleventhRecord - eleventhRecord := pkarr.RecordFromBEP44(putMsg) + eleventhRecord := dht.RecordFromBEP44(putMsg) err = db.WriteRecord(ctx, eleventhRecord) assert.NoError(t, err) diff --git a/impl/pkg/storage/db/postgres/queries.sql.go b/impl/pkg/storage/db/postgres/queries.sql.go index 3687e39a..9fdfde12 100644 --- a/impl/pkg/storage/db/postgres/queries.sql.go +++ b/impl/pkg/storage/db/postgres/queries.sql.go @@ -9,8 +9,43 @@ import ( "context" ) +const failedRecordCount = `-- name: FailedRecordCount :one +SELECT count(*) AS exact_count FROM failed_records +` + +func (q *Queries) FailedRecordCount(ctx context.Context) (int64, error) { + row := q.db.QueryRow(ctx, failedRecordCount) + var exact_count int64 + err := row.Scan(&exact_count) + return exact_count, err +} + +const listFailedRecords = `-- name: ListFailedRecords :many +SELECT id, failure_count FROM failed_records +` + +func (q *Queries) ListFailedRecords(ctx context.Context) ([]FailedRecord, error) { + rows, err := q.db.Query(ctx, listFailedRecords) + if err != nil { + return nil, err + } + defer rows.Close() + var items []FailedRecord + for rows.Next() { + var i FailedRecord + if err := rows.Scan(&i.ID, &i.FailureCount); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const listRecords = `-- name: ListRecords :many -SELECT id, key, value, sig, seq FROM pkarr_records WHERE id > (SELECT id FROM pkarr_records WHERE pkarr_records.key = $1) ORDER BY id ASC LIMIT $2 +SELECT id, key, value, sig, seq FROM dht_records WHERE id > (SELECT id FROM dht_records WHERE dht_records.key = $1) ORDER BY id ASC LIMIT $2 ` type ListRecordsParams struct { @@ -18,15 +53,15 @@ type ListRecordsParams struct { Limit int32 } -func (q *Queries) ListRecords(ctx context.Context, arg ListRecordsParams) ([]PkarrRecord, error) { +func (q *Queries) ListRecords(ctx context.Context, arg ListRecordsParams) ([]DhtRecord, error) { rows, err := q.db.Query(ctx, listRecords, arg.Key, arg.Limit) if err != nil { return nil, err } defer rows.Close() - var items []PkarrRecord + var items []DhtRecord for rows.Next() { - var i PkarrRecord + var i DhtRecord if err := rows.Scan( &i.ID, &i.Key, @@ -45,18 +80,18 @@ func (q *Queries) ListRecords(ctx context.Context, arg ListRecordsParams) ([]Pka } const listRecordsFirstPage = `-- name: ListRecordsFirstPage :many -SELECT id, key, value, sig, seq FROM pkarr_records ORDER BY id ASC LIMIT $1 +SELECT id, key, value, sig, seq FROM dht_records ORDER BY id ASC LIMIT $1 ` -func (q *Queries) ListRecordsFirstPage(ctx context.Context, limit int32) ([]PkarrRecord, error) { +func (q *Queries) ListRecordsFirstPage(ctx context.Context, limit int32) ([]DhtRecord, error) { rows, err := q.db.Query(ctx, listRecordsFirstPage, limit) if err != nil { return nil, err } 
defer rows.Close() - var items []PkarrRecord + var items []DhtRecord for rows.Next() { - var i PkarrRecord + var i DhtRecord if err := rows.Scan( &i.ID, &i.Key, @@ -75,12 +110,12 @@ func (q *Queries) ListRecordsFirstPage(ctx context.Context, limit int32) ([]Pkar } const readRecord = `-- name: ReadRecord :one -SELECT id, key, value, sig, seq FROM pkarr_records WHERE key = $1 LIMIT 1 +SELECT id, key, value, sig, seq FROM dht_records WHERE key = $1 LIMIT 1 ` -func (q *Queries) ReadRecord(ctx context.Context, key []byte) (PkarrRecord, error) { +func (q *Queries) ReadRecord(ctx context.Context, key []byte) (DhtRecord, error) { row := q.db.QueryRow(ctx, readRecord, key) - var i PkarrRecord + var i DhtRecord err := row.Scan( &i.ID, &i.Key, @@ -92,7 +127,7 @@ func (q *Queries) ReadRecord(ctx context.Context, key []byte) (PkarrRecord, erro } const recordCount = `-- name: RecordCount :one -SELECT count(*) AS exact_count FROM pkarr_records +SELECT count(*) AS exact_count FROM dht_records ` func (q *Queries) RecordCount(ctx context.Context) (int64, error) { @@ -102,8 +137,24 @@ func (q *Queries) RecordCount(ctx context.Context) (int64, error) { return exact_count, err } +const writeFailedRecord = `-- name: WriteFailedRecord :exec +INSERT INTO failed_records(id, failure_count) +VALUES($1, $2) +ON CONFLICT (id) DO UPDATE SET failure_count = failed_records.failure_count + 1 +` + +type WriteFailedRecordParams struct { + ID []byte + FailureCount int32 +} + +func (q *Queries) WriteFailedRecord(ctx context.Context, arg WriteFailedRecordParams) error { + _, err := q.db.Exec(ctx, writeFailedRecord, arg.ID, arg.FailureCount) + return err +} + const writeRecord = `-- name: WriteRecord :exec -INSERT INTO pkarr_records(key, value, sig, seq) VALUES($1, $2, $3, $4) +INSERT INTO dht_records(key, value, sig, seq) VALUES($1, $2, $3, $4) ` type WriteRecordParams struct { diff --git a/impl/pkg/storage/db/postgres/queries/queries.sql b/impl/pkg/storage/db/postgres/queries/queries.sql index 8ae96ea3..74153618 100644 --- a/impl/pkg/storage/db/postgres/queries/queries.sql +++ b/impl/pkg/storage/db/postgres/queries/queries.sql @@ -1,14 +1,25 @@ -- name: WriteRecord :exec -INSERT INTO pkarr_records(key, value, sig, seq) VALUES($1, $2, $3, $4); +INSERT INTO dht_records(key, value, sig, seq) VALUES($1, $2, $3, $4); -- name: ReadRecord :one -SELECT * FROM pkarr_records WHERE key = $1 LIMIT 1; +SELECT * FROM dht_records WHERE key = $1 LIMIT 1; -- name: ListRecords :many -SELECT * FROM pkarr_records WHERE id > (SELECT id FROM pkarr_records WHERE pkarr_records.key = $1) ORDER BY id ASC LIMIT $2; +SELECT * FROM dht_records WHERE id > (SELECT id FROM dht_records WHERE dht_records.key = $1) ORDER BY id ASC LIMIT $2; -- name: ListRecordsFirstPage :many -SELECT * FROM pkarr_records ORDER BY id ASC LIMIT $1; +SELECT * FROM dht_records ORDER BY id ASC LIMIT $1; -- name: RecordCount :one -SELECT count(*) AS exact_count FROM pkarr_records; +SELECT count(*) AS exact_count FROM dht_records; + +-- name: WriteFailedRecord :exec +INSERT INTO failed_records(id, failure_count) +VALUES($1, $2) +ON CONFLICT (id) DO UPDATE SET failure_count = failed_records.failure_count + 1; + +-- name: ListFailedRecords :many +SELECT * FROM failed_records; + +-- name: FailedRecordCount :one +SELECT count(*) AS exact_count FROM failed_records; \ No newline at end of file diff --git a/impl/pkg/storage/storage.go b/impl/pkg/storage/storage.go index c8e460f0..daf4b2d2 100644 --- a/impl/pkg/storage/storage.go +++ b/impl/pkg/storage/storage.go @@ -8,16 +8,21 @@ import ( 
"github.com/sirupsen/logrus" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" + "github.com/TBD54566975/did-dht-method/pkg/dht" "github.com/TBD54566975/did-dht-method/pkg/storage/db/bolt" "github.com/TBD54566975/did-dht-method/pkg/storage/db/postgres" ) type Storage interface { - WriteRecord(ctx context.Context, record pkarr.Record) error - ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, error) - ListRecords(ctx context.Context, nextPageToken []byte, pageSize int) (records []pkarr.Record, nextPage []byte, err error) + WriteRecord(ctx context.Context, record dht.BEP44Record) error + ReadRecord(ctx context.Context, id string) (*dht.BEP44Record, error) + ListRecords(ctx context.Context, nextPageToken []byte, pageSize int) (records []dht.BEP44Record, nextPage []byte, err error) RecordCount(ctx context.Context) (int, error) + + WriteFailedRecord(ctx context.Context, id string) error + ListFailedRecords(ctx context.Context) ([]dht.FailedRecord, error) + FailedRecordCount(ctx context.Context) (int, error) + Close() error } diff --git a/impl/pkg/telemetry/telemetry.go b/impl/pkg/telemetry/telemetry.go index ec1d4247..a4417746 100644 --- a/impl/pkg/telemetry/telemetry.go +++ b/impl/pkg/telemetry/telemetry.go @@ -57,7 +57,7 @@ func SetupTelemetry(ctx context.Context) error { otel.SetMeterProvider(meterProvider) // setup memory metrics - err = runtime.Start(runtime.WithMeterProvider(meterProvider), runtime.WithMinimumReadMemStatsInterval(time.Second*15)) + err = runtime.Start(runtime.WithMeterProvider(meterProvider), runtime.WithMinimumReadMemStatsInterval(15*time.Second)) if err != nil { return err } diff --git a/impl/sqlc.yaml b/impl/sqlc.yaml new file mode 100644 index 00000000..b36ecbd9 --- /dev/null +++ b/impl/sqlc.yaml @@ -0,0 +1,10 @@ +version: "2" +sql: + - engine: "postgresql" + queries: "pkg/storage/db/postgres/queries" + schema: "pkg/storage/db/postgres/migrations" + gen: + go: + package: "postgres" + out: "pkg/storage/db/postgres" + sql_package: "pgx/v5" \ No newline at end of file diff --git a/sqlc.yaml b/sqlc.yaml deleted file mode 100644 index 763be2bd..00000000 --- a/sqlc.yaml +++ /dev/null @@ -1,10 +0,0 @@ -version: "2" -sql: - - engine: "postgresql" - queries: "impl/pkg/storage/db/postgres/queries" - schema: "impl/pkg/storage/db/postgres/migrations" - gen: - go: - package: "postgres" - out: "impl/pkg/storage/db/postgres" - sql_package: "pgx/v5" \ No newline at end of file From 185df6fe8cca18d4e343608c9bd623fb32d4eda7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Apr 2024 23:41:20 +0000 Subject: [PATCH 04/10] Bump go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp (#184) --- impl/go.mod | 2 +- impl/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/impl/go.mod b/impl/go.mod index 647d136a..7354d1b0 100644 --- a/impl/go.mod +++ b/impl/go.mod @@ -33,7 +33,7 @@ require ( go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.50.0 go.opentelemetry.io/contrib/instrumentation/runtime v0.50.0 go.opentelemetry.io/otel v1.25.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 go.opentelemetry.io/otel/sdk v1.25.0 go.opentelemetry.io/otel/sdk/metric v1.25.0 diff --git a/impl/go.sum b/impl/go.sum index 7005ecd4..ea551dcc 100644 --- a/impl/go.sum +++ b/impl/go.sum @@ -546,8 
+546,8 @@ go.opentelemetry.io/contrib/propagators/b3 v1.25.0 h1:QU8UEKyPqgr/8vCC9LlDmkPnfF go.opentelemetry.io/contrib/propagators/b3 v1.25.0/go.mod h1:qonC7wyvtX1E6cEpAR+bJmhcGr6IVRGc/f6ZTpvi7jA= go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 h1:mM8nKi6/iFQ0iqst80wDHU2ge198Ye/TfN0WBS5U24Y= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0/go.mod h1:0PrIIzDteLSmNyxqcGYRL4mDIo8OTuBAOI/Bn1URxac= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0 h1:Wc4hZuYXhVqq+TfRXLXlmNIL/awOanGx8ssq3ciDQxc= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0/go.mod h1:BydOvapRqVEc0DVz27qWBX2jq45Ca5TI9mhZBDIdweY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 h1:dT33yIHtmsqpixFsSQPwNeY5drM9wTcoL8h0FWF4oGM= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0/go.mod h1:h95q0LBGh7hlAC08X2DhSeyIG02YQ0UyioTCVAqRPmc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 h1:Mbi5PKN7u322woPa85d7ebZ+SOvEoPvoiBu+ryHWgfA= From 27501889d81b694aa15955e5abff059c32c7446a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Apr 2024 16:47:34 -0700 Subject: [PATCH 05/10] Bump github.com/anacrolix/torrent from 1.52.5 to 1.55.0 in /impl (#187) Bumps [github.com/anacrolix/torrent](https://github.com/anacrolix/torrent) from 1.52.5 to 1.55.0. - [Commits](https://github.com/anacrolix/torrent/compare/v1.52.5...v1.55.0) --- updated-dependencies: - dependency-name: github.com/anacrolix/torrent dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Gabe <7622243+decentralgabe@users.noreply.github.com> --- impl/go.mod | 8 ++++---- impl/go.sum | 20 ++++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/impl/go.mod b/impl/go.mod index 7354d1b0..18a6dfa4 100644 --- a/impl/go.mod +++ b/impl/go.mod @@ -8,7 +8,7 @@ require ( github.com/allegro/bigcache/v3 v3.1.0 github.com/anacrolix/dht/v2 v2.21.1 github.com/anacrolix/log v0.15.2 - github.com/anacrolix/torrent v1.52.5 + github.com/anacrolix/torrent v1.55.0 github.com/gin-contrib/cors v1.7.1 github.com/gin-gonic/gin v1.9.1 github.com/go-co-op/gocron v1.37.0 @@ -46,13 +46,13 @@ require ( github.com/KyleBanks/depth v1.2.1 // indirect github.com/alecthomas/atomic v0.1.0-alpha2 // indirect github.com/anacrolix/chansync v0.3.0 // indirect - github.com/anacrolix/generics v0.0.0-20230428105757-683593396d68 // indirect + github.com/anacrolix/generics v0.0.0-20230911070922-5dd7545c6b13 // indirect github.com/anacrolix/missinggo v1.3.0 // indirect github.com/anacrolix/missinggo/perf v1.0.0 // indirect - github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9 // indirect + github.com/anacrolix/missinggo/v2 v2.7.3 // indirect github.com/anacrolix/multiless v0.3.1-0.20221221005021-2d12701f83f7 // indirect github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496 // indirect - github.com/anacrolix/sync v0.4.0 // indirect + github.com/anacrolix/sync v0.5.1 // indirect github.com/benbjohnson/immutable v0.4.1-0.20221220213129-8932b999621d // indirect github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect diff --git a/impl/go.sum b/impl/go.sum index ea551dcc..b432b7e2 100644 --- a/impl/go.sum +++ b/impl/go.sum @@ -45,10 +45,10 @@ github.com/anacrolix/dht/v2 v2.21.1/go.mod h1:SDGC+sEs1pnO2sJGYuhvIis7T8749dDHNf github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= -github.com/anacrolix/envpprof v1.2.1 h1:25TJe6t/i0AfzzldiGFKCpD+s+dk8lONBcacJZB2rdE= -github.com/anacrolix/envpprof v1.2.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= -github.com/anacrolix/generics v0.0.0-20230428105757-683593396d68 h1:fyXlBfnlFzZSFckJ8QLb2lfmWfY++4RiUnae7ZMuv0A= -github.com/anacrolix/generics v0.0.0-20230428105757-683593396d68/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= +github.com/anacrolix/envpprof v1.3.0 h1:WJt9bpuT7A/CDCxPOv/eeZqHWlle/Y0keJUvc6tcJDk= +github.com/anacrolix/envpprof v1.3.0/go.mod h1:7QIG4CaX1uexQ3tqd5+BRa/9e2D02Wcertl6Yh0jCB0= +github.com/anacrolix/generics v0.0.0-20230911070922-5dd7545c6b13 h1:qwOprPTDMM3BASJRf84mmZnTXRsPGGJ8xoHKQS7m3so= +github.com/anacrolix/generics v0.0.0-20230911070922-5dd7545c6b13/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= github.com/anacrolix/log v0.6.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= github.com/anacrolix/log v0.15.2 h1:LTSf5Wm6Q4GNWPFMBP7NPYV6UBVZzZLKckL+/Lj72Oo= @@ -62,21 +62,21 @@ github.com/anacrolix/missinggo/perf v1.0.0 h1:7ZOGYziGEBytW49+KmYGTaNfnwUqP1HBsy github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ= 
github.com/anacrolix/missinggo/v2 v2.2.0/go.mod h1:o0jgJoYOyaoYQ4E2ZMISVa9c88BbUBVQQW4QeRkNCGY= github.com/anacrolix/missinggo/v2 v2.5.1/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA= -github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9 h1:W/oGeHhYwxueeiDjQfmK9G+X9M2xJgfTtow62v0TWAs= -github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9/go.mod h1:mIEtp9pgaXqt8VQ3NQxFOod/eQ1H0D1XsZzKUQfwtac= +github.com/anacrolix/missinggo/v2 v2.7.3 h1:Ee//CmZBMadeNiYB/hHo9ly2PFOEZ4Fhsbnug3rDAIE= +github.com/anacrolix/missinggo/v2 v2.7.3/go.mod h1:mIEtp9pgaXqt8VQ3NQxFOod/eQ1H0D1XsZzKUQfwtac= github.com/anacrolix/multiless v0.3.1-0.20221221005021-2d12701f83f7 h1:lOtCD+LzoD1g7bowhYJNR++uV+FyY5bTZXKwnPex9S8= github.com/anacrolix/multiless v0.3.1-0.20221221005021-2d12701f83f7/go.mod h1:zJv1JF9AqdZiHwxqPgjuOZDGWER6nyE48WBCi/OOrMM= github.com/anacrolix/stm v0.2.0/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg= github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496 h1:aMiRi2kOOd+nG64suAmFMVnNK2E6GsnLif7ia9tI3cA= github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496/go.mod h1:DBm8/1OXm4A4RZ6Xa9u/eOsjeAXCaoRYvd2JzlskXeM= github.com/anacrolix/sync v0.3.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= -github.com/anacrolix/sync v0.4.0 h1:T+MdO/u87ir/ijWsTFsPYw5jVm0SMm4kVpg8t4KF38o= -github.com/anacrolix/sync v0.4.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= +github.com/anacrolix/sync v0.5.1 h1:FbGju6GqSjzVoTgcXTUKkF041lnZkG5P0C3T5RL3SGc= +github.com/anacrolix/sync v0.5.1/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= -github.com/anacrolix/torrent v1.52.5 h1:jWowdx+EU6zFVfBwmnL0d3H4J6vTFEGOrHI35YdfIT8= -github.com/anacrolix/torrent v1.52.5/go.mod h1:CcM8oPMYye5J42cSqJrmUpqwRFgSsJQ1jCEHwygqnqQ= +github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8= +github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE= github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI= github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 h1:goHVqTbFX3AIo0tzGr14pgfAW2ZfPChKO21Z9MGf/gk= From d7034054c078b1464939ad4684fcee88da6350e0 Mon Sep 17 00:00:00 2001 From: Gabe <7622243+decentralgabe@users.noreply.github.com> Date: Fri, 19 Apr 2024 17:13:26 -0700 Subject: [PATCH 06/10] remove db migration (#192) * migration * update logging * consistent logging --- impl/pkg/service/dht.go | 24 ++++++------ impl/pkg/storage/db/bolt/bolt.go | 67 +++----------------------------- 2 files changed, 17 insertions(+), 74 deletions(-) diff --git a/impl/pkg/service/dht.go b/impl/pkg/service/dht.go index b3c6d64d..566db4a5 100644 --- a/impl/pkg/service/dht.go +++ b/impl/pkg/service/dht.go @@ -108,7 +108,7 @@ func (s *DHTService) PublishDHT(ctx context.Context, id string, record dht.BEP44 if err = s.cache.Set(id, recordBytes); err != nil { return err } - logrus.WithContext(ctx).WithField("record", id).Debug("added dht record to cache and db") + logrus.WithContext(ctx).WithField("record_id", id).Debug("added dht record to cache and db") // return here and put it in the DHT 
asynchronously go func() { @@ -117,9 +117,9 @@ func (s *DHTService) PublishDHT(ctx context.Context, id string, record dht.BEP44 defer cancel() if _, err = s.dht.Put(putCtx, record.Put()); err != nil { - logrus.WithContext(ctx).WithError(err).Errorf("error from dht.Put for record: %s", id) + logrus.WithContext(ctx).WithField("record_id", id).WithError(err).Warnf("error from dht.Put for record: %s", id) } else { - logrus.WithContext(ctx).WithField("record", id).Debug("put record to DHT") + logrus.WithContext(ctx).WithField("record_id", id).Debug("put record to DHT") } }() @@ -148,7 +148,7 @@ func (s *DHTService) GetDHT(ctx context.Context, id string) (*dht.BEP44Response, logrus.WithContext(ctx).WithField("record_id", id).Info("resolved record from cache") return &resp, nil } - logrus.WithContext(ctx).WithError(err).WithField("record", id).Warn("failed to get record from cache, falling back to dht") + logrus.WithContext(ctx).WithError(err).WithField("record_id", id).Warn("failed to get record from cache, falling back to dht") } // next do a dht lookup with a timeout of 10 seconds @@ -158,28 +158,28 @@ func (s *DHTService) GetDHT(ctx context.Context, id string) (*dht.BEP44Response, got, err := s.dht.GetFull(getCtx, id) if err != nil { if errors.Is(err, context.DeadlineExceeded) { - logrus.WithContext(ctx).WithField("record", id).Warn("dht lookup timed out, attempting to resolve from storage") + logrus.WithContext(ctx).WithField("record_id", id).Warn("dht lookup timed out, attempting to resolve from storage") } else { - logrus.WithContext(ctx).WithError(err).WithField("record", id).Warn("failed to get record from dht, attempting to resolve from storage") + logrus.WithContext(ctx).WithError(err).WithField("record_id", id).Warn("failed to get record from dht, attempting to resolve from storage") } record, err := s.db.ReadRecord(ctx, id) if err != nil || record == nil { - logrus.WithContext(ctx).WithError(err).WithField("record", id).Error("failed to resolve record from storage; adding to badGetCache") + logrus.WithContext(ctx).WithError(err).WithField("record_id", id).Error("failed to resolve record from storage; adding to badGetCache") // add the key to the badGetCache to prevent spamming the DHT if err = s.badGetCache.Set(id, []byte{0}); err != nil { - logrus.WithContext(ctx).WithError(err).WithField("record", id).Error("failed to set key in badGetCache") + logrus.WithContext(ctx).WithError(err).WithField("record_id", id).Error("failed to set key in badGetCache") } return nil, err } - logrus.WithContext(ctx).WithField("record", id).Info("resolved record from storage") + logrus.WithContext(ctx).WithField("record_id", id).Info("resolved record from storage") resp := record.Response() // add the record back to the cache for future lookups if err = s.addRecordToCache(id, record.Response()); err != nil { - logrus.WithError(err).WithField("record", id).Error("failed to set record in cache") + logrus.WithError(err).WithField("record_id", id).Error("failed to set record in cache") } return &resp, err @@ -202,9 +202,9 @@ func (s *DHTService) GetDHT(ctx context.Context, id string) (*dht.BEP44Response, // add the record to cache, do it here to avoid duplicate calculations if err = s.addRecordToCache(id, resp); err != nil { - logrus.WithContext(ctx).WithField("record", id).WithError(err).Error("failed to set record in cache") + logrus.WithContext(ctx).WithField("record_id", id).WithError(err).Error("failed to set record in cache") } else { - logrus.WithContext(ctx).WithField("record", id).Info("added 
record back to cache") + logrus.WithContext(ctx).WithField("record_id", id).Info("added record back to cache") } return &resp, nil diff --git a/impl/pkg/storage/db/bolt/bolt.go b/impl/pkg/storage/db/bolt/bolt.go index ef4272df..608c466d 100644 --- a/impl/pkg/storage/db/bolt/bolt.go +++ b/impl/pkg/storage/db/bolt/bolt.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "encoding/binary" - "fmt" "time" "github.com/goccy/go-json" @@ -19,7 +18,6 @@ import ( const ( dhtNamespace = "dht" - oldDHTNamespace = "pkarr" failedNamespace = "failed" ) @@ -40,52 +38,9 @@ func NewBolt(path string) (*Bolt, error) { if err != nil { return nil, err } - - // Perform the migration - go migrate(db) - return &Bolt{db: db}, nil } -func migrate(db *bolt.DB) { - // Perform the migration within a write transaction - err := db.Update(func(tx *bolt.Tx) error { - // Create the new namespace bucket - newBucket, err := tx.CreateBucketIfNotExists([]byte(dhtNamespace)) - if err != nil { - return fmt.Errorf("failed to create new namespace bucket: %v", err) - } - - // Get the old namespace bucket - oldBucket := tx.Bucket([]byte(oldDHTNamespace)) - if oldBucket == nil { - // If the old namespace bucket doesn't exist, there's nothing to migrate - return nil - } - - // Iterate over the key-value pairs in the old namespace bucket - err = oldBucket.ForEach(func(k, v []byte) error { - // Copy each key-value pair to the new namespace bucket - err = newBucket.Put(k, v) - if err != nil { - return fmt.Errorf("failed to copy key-value pair to new namespace: %v", err) - } - return nil - }) - if err != nil { - return err - } - - return nil - }) - - if err != nil { - logrus.WithError(err).Error("failed to migrate records") - } else { - logrus.Info("migration completed successfully") - } -} - // WriteRecord writes the given record to the storage // TODO: don't overwrite existing records, store unique seq numbers func (b *Bolt) WriteRecord(ctx context.Context, record dht.BEP44Record) error { @@ -98,19 +53,7 @@ func (b *Bolt) WriteRecord(ctx context.Context, record dht.BEP44Record) error { return err } - // write to both the old and new namespaces for now - errOld := b.write(ctx, oldDHTNamespace, record.ID(), recordBytes) - errNew := b.write(ctx, dhtNamespace, record.ID(), recordBytes) - if errOld == nil && errNew == nil { - return nil - } - if errOld != nil && errNew != nil { - return errors.New(fmt.Sprintf("old: %v, new: %v", errOld, errNew)) - } - if errOld != nil { - return errOld - } - return errNew + return b.write(ctx, dhtNamespace, record.ID(), recordBytes) } // ReadRecord reads the record with the given id from the storage @@ -118,7 +61,7 @@ func (b *Bolt) ReadRecord(ctx context.Context, id string) (*dht.BEP44Record, err ctx, span := telemetry.GetTracer().Start(ctx, "bolt.ReadRecord") defer span.End() - recordBytes, err := b.read(ctx, oldDHTNamespace, id) + recordBytes, err := b.read(ctx, dhtNamespace, id) if err != nil { return nil, err } @@ -144,7 +87,7 @@ func (b *Bolt) ListRecords(ctx context.Context, nextPageToken []byte, pageSize i ctx, span := telemetry.GetTracer().Start(ctx, "bolt.ListRecords") defer span.End() - boltRecords, err := b.readSeveral(ctx, oldDHTNamespace, nextPageToken, pageSize) + boltRecords, err := b.readSeveral(ctx, dhtNamespace, nextPageToken, pageSize) if err != nil { return nil, nil, err } @@ -268,9 +211,9 @@ func (b *Bolt) RecordCount(ctx context.Context) (int, error) { var count int err := b.db.View(func(tx *bolt.Tx) error { - bucket := tx.Bucket([]byte(oldDHTNamespace)) + bucket := 
tx.Bucket([]byte(dhtNamespace)) if bucket == nil { - logrus.WithContext(ctx).WithField("namespace", oldDHTNamespace).Warn("namespace does not exist") + logrus.WithContext(ctx).WithField("namespace", dhtNamespace).Warn("namespace does not exist") return nil } count = bucket.Stats().KeyN From 09dd0374482137f1e1963d2c03fb6eeea0198e93 Mon Sep 17 00:00:00 2001 From: Gabe <7622243+decentralgabe@users.noreply.github.com> Date: Fri, 19 Apr 2024 18:35:46 -0700 Subject: [PATCH 07/10] revert (#193) --- impl/pkg/storage/db/bolt/bolt.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/impl/pkg/storage/db/bolt/bolt.go b/impl/pkg/storage/db/bolt/bolt.go index 608c466d..2f25a39a 100644 --- a/impl/pkg/storage/db/bolt/bolt.go +++ b/impl/pkg/storage/db/bolt/bolt.go @@ -17,7 +17,7 @@ import ( ) const ( - dhtNamespace = "dht" + dhtNamespace = "pkarr" failedNamespace = "failed" ) From 7f13c88df0d707b63400137530e68a10713f10f5 Mon Sep 17 00:00:00 2001 From: Gabe <7622243+decentralgabe@users.noreply.github.com> Date: Mon, 22 Apr 2024 13:22:51 -0700 Subject: [PATCH 08/10] fix republish and migration (#195) * fix republish and migration * update cron --- impl/pkg/service/dht.go | 94 ++++++++++++++++++-------------- impl/pkg/storage/db/bolt/bolt.go | 70 +++++++++++++++++++++++- 2 files changed, 120 insertions(+), 44 deletions(-) diff --git a/impl/pkg/service/dht.go b/impl/pkg/service/dht.go index 566db4a5..489495e5 100644 --- a/impl/pkg/service/dht.go +++ b/impl/pkg/service/dht.go @@ -165,11 +165,11 @@ func (s *DHTService) GetDHT(ctx context.Context, id string) (*dht.BEP44Response, record, err := s.db.ReadRecord(ctx, id) if err != nil || record == nil { - logrus.WithContext(ctx).WithError(err).WithField("record_id", id).Error("failed to resolve record from storage; adding to badGetCache") + logrus.WithContext(ctx).WithError(err).WithField("record_id", id).Error("failed to resolve record from storage; adding to bad get cache") // add the key to the badGetCache to prevent spamming the DHT if err = s.badGetCache.Set(id, []byte{0}); err != nil { - logrus.WithContext(ctx).WithError(err).WithField("record_id", id).Error("failed to set key in badGetCache") + logrus.WithContext(ctx).WithError(err).WithField("record_id", id).Error("failed to set key in bad get cache") } return nil, err @@ -228,6 +228,7 @@ type failedRecord struct { } // TODO(gabe) make this more efficient. 
create a publish schedule based on each individual record, not all records +// republish republishes all records in the db func (s *DHTService) republish() { ctx, span := telemetry.GetTracer().Start(context.Background(), "DHTService.republish") defer span.End() @@ -241,43 +242,53 @@ func (s *DHTService) republish() { // republish all records in the db and handle failed records up to 3 times failedRecords := s.republishRecords(ctx) + + // handle failed records + logrus.WithContext(ctx).WithField("failed_record_count", len(failedRecords)).Info("handling failed records") s.handleFailedRecords(ctx, failedRecords) } -// republishRecords republishes all records in the db to the DHT and returns a list of failed records +// republishRecords republishes all records in the db and returns a list of failed records to be retried func (s *DHTService) republishRecords(ctx context.Context) []failedRecord { var nextPageToken []byte var seenRecords, batchCnt int32 var failedRecords []failedRecord + var recordsBatch []dht.BEP44Record + var err error + + var wg sync.WaitGroup for { - recordsBatch, nextPageToken, err := s.db.ListRecords(ctx, nextPageToken, 1000) + recordsBatch, nextPageToken, err = s.db.ListRecords(ctx, nextPageToken, 1000) if err != nil { logrus.WithContext(ctx).WithError(err).Error("failed to list record(s) for republishing") - return failedRecords + continue } batchSize := len(recordsBatch) seenRecords += int32(batchSize) if batchSize == 0 { logrus.WithContext(ctx).Info("no records to republish") - return failedRecords + break } logrus.WithContext(ctx).WithFields(logrus.Fields{ "record_count": batchSize, "batch_number": batchCnt, "total_seen": seenRecords, - }).Infof("republishing batch [%d] of [%d] records", batchCnt, batchSize) + }).Debugf("republishing batch [%d] of [%d] records", batchCnt, batchSize) batchCnt++ - failedRecords = append(failedRecords, s.republishBatch(ctx, recordsBatch)...) + batchFailedRecords := s.republishBatch(ctx, &wg, recordsBatch) + failedRecords = append(failedRecords, batchFailedRecords...) 
if nextPageToken == nil { break } } + wg.Wait() + successRate := float64(seenRecords-int32(len(failedRecords))) / float64(seenRecords) * 100 logrus.WithContext(ctx).WithFields(logrus.Fields{ "success": seenRecords - int32(len(failedRecords)), @@ -288,67 +299,66 @@ func (s *DHTService) republishRecords(ctx context.Context) []failedRecord { return failedRecords } -// republishBatch republishes a batch of records to the DHT and returns a list of failed records -func (s *DHTService) republishBatch(ctx context.Context, recordsBatch []dht.BEP44Record) []failedRecord { - var wg sync.WaitGroup +// republishBatch republishes a batch of records and returns a list of failed records to be retried +func (s *DHTService) republishBatch(ctx context.Context, wg *sync.WaitGroup, recordsBatch []dht.BEP44Record) []failedRecord { + failedRecordsChan := make(chan failedRecord, len(recordsBatch)) var failedRecords []failedRecord for _, record := range recordsBatch { wg.Add(1) - go func(ctx context.Context, record dht.BEP44Record) { + go func(record dht.BEP44Record) { defer wg.Done() id := record.ID() - logrus.WithContext(ctx).WithField("record_id", id).Debug("republishing record") - putCtx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() - if _, putErr := s.dht.Put(putCtx, record.Put()); putErr != nil { - logrus.WithContext(putCtx).WithField("record_id", id).WithError(putErr).Debug("failed to republish record") - failedRecords = append(failedRecords, failedRecord{ + _, putErr := s.dht.Put(putCtx, record.Put()) + if putErr != nil { + if errors.Is(putErr, context.DeadlineExceeded) { + logrus.WithContext(putCtx).WithField("record_id", id).Info("republish timeout exceeded") + } else { + logrus.WithContext(putCtx).WithField("record_id", id).WithError(putErr).Info("failed to republish record") + } + failedRecordsChan <- failedRecord{ record: record, failureCnt: 1, - }) + } } - }(ctx, record) + }(record) } wg.Wait() + close(failedRecordsChan) + + for fr := range failedRecordsChan { + failedRecords = append(failedRecords, fr) + } return failedRecords } // handleFailedRecords attempts to republish failed records up to 3 times func (s *DHTService) handleFailedRecords(ctx context.Context, failedRecords []failedRecord) { - for i := 0; i < 3; i++ { - var remainingFailedRecords []failedRecord - for _, fr := range failedRecords { + for _, fr := range failedRecords { + retryCount := 0 + for retryCount < 3 { id := fr.record.ID() putCtx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() if _, putErr := s.dht.Put(putCtx, fr.record.Put()); putErr != nil { - logrus.WithContext(putCtx).WithField("record_id", id).WithError(putErr).Debugf("failed to re-republish [%s], attempt: %d", id, i+1) - fr.failureCnt++ - if fr.failureCnt <= 3 { - remainingFailedRecords = append(remainingFailedRecords, fr) - } else { - logrus.WithContext(ctx).WithField("record_id", id).Errorf("record failed to republish after 3 attempts") - } + logrus.WithContext(putCtx).WithField("record_id", id).WithError(putErr).Debugf("failed to re-republish [%s], attempt: %d", id, retryCount+1) + retryCount++ + } else { + break } } - failedRecords = remainingFailedRecords - if len(failedRecords) == 0 { - logrus.WithContext(ctx).Info("all failed records successfully republished") - break - } - if i == 2 { - logrus.WithContext(ctx).WithField("failed_records", failedRecords).Error("failed to republish all records after 3 attempts") - for _, fr := range failedRecords { - id := fr.record.ID() - if err := s.db.WriteFailedRecord(ctx, id); err != nil 
{ - logrus.WithContext(ctx).WithField("record_id", id).WithError(err).Warn("failed to write failed record to db") - } + + if retryCount == 3 { + id := fr.record.ID() + logrus.WithContext(ctx).WithField("record_id", id).Error("record failed to republish after 3 attempts") + if err := s.db.WriteFailedRecord(ctx, id); err != nil { + logrus.WithContext(ctx).WithField("record_id", id).WithError(err).Warn("failed to write failed record to db") } } } @@ -376,7 +386,7 @@ func (s *DHTService) Close() { } if s.badGetCache != nil { if err := s.badGetCache.Close(); err != nil { - logrus.WithError(err).Error("failed to close badGetCache") + logrus.WithError(err).Error("failed to close bad get cache") } } if err := s.db.Close(); err != nil { diff --git a/impl/pkg/storage/db/bolt/bolt.go b/impl/pkg/storage/db/bolt/bolt.go index 2f25a39a..c4c4c22a 100644 --- a/impl/pkg/storage/db/bolt/bolt.go +++ b/impl/pkg/storage/db/bolt/bolt.go @@ -17,7 +17,8 @@ import ( ) const ( - dhtNamespace = "pkarr" + dhtNamespace = "dht" + oldDHTNamespace = "pkarr" failedNamespace = "failed" ) @@ -38,7 +39,72 @@ func NewBolt(path string) (*Bolt, error) { if err != nil { return nil, err } - return &Bolt{db: db}, nil + + b := Bolt{db: db} + b.migrate(context.Background()) + return &b, nil +} + +func (b *Bolt) migrate(ctx context.Context) { + _, span := telemetry.GetTracer().Start(ctx, "bolt.migrate") + defer span.End() + + // Delete new namespace + err := b.db.Update(func(tx *bolt.Tx) error { + return tx.DeleteBucket([]byte(dhtNamespace)) + }) + if err != nil { + logrus.WithContext(ctx).WithError(err).Error("error deleting new namespace") + } + + // Migrate old namespace to new namespace + var nextPageToken []byte + var migratedCount int + var failedCount int + for { + pageSize := 1000 + oldRecords, err := b.readSeveral(ctx, oldDHTNamespace, nextPageToken, pageSize) + if err != nil { + logrus.WithContext(ctx).WithError(err).Error("error reading old namespace") + return + } + + for _, oldRecord := range oldRecords { + key := string(oldRecord.key) + var encodedRecord base64BEP44Record + if err = json.Unmarshal(oldRecord.value, &encodedRecord); err != nil { + logrus.WithContext(ctx).WithError(err).Errorf("error decoding[%s]", key) + continue + } + record, err := encodedRecord.Decode() + if err != nil { + logrus.WithContext(ctx).WithError(err).Errorf("error decoding[%s]", key) + continue + } + if err = b.write(ctx, dhtNamespace, record.ID(), oldRecord.value); err != nil { + logrus.WithContext(ctx).WithError(err).Errorf("error writing[%s] to new namespace", key) + failedCount++ + } else { + migratedCount++ + } + } + + if len(oldRecords) == pageSize { + nextPageToken = oldRecords[len(oldRecords)-1].key + } else { + break + } + } + + logrus.WithContext(ctx).Infof("migrated %d records, failed %d records", migratedCount, failedCount) + if failedCount == 0 { + err = b.db.Update(func(tx *bolt.Tx) error { + return tx.DeleteBucket([]byte(oldDHTNamespace)) + }) + if err != nil { + logrus.WithContext(ctx).WithError(err).Error("error deleting old namespace") + } + } } // WriteRecord writes the given record to the storage From 83b3addb3bd864fda728cd8045fcd3a2cc66c5e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 13:32:41 -0700 Subject: [PATCH 09/10] Bump github.com/miekg/dns from 1.1.58 to 1.1.59 in /impl (#194) Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.58 to 1.1.59. 
- [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release) - [Commits](https://github.com/miekg/dns/compare/v1.1.58...v1.1.59) --- updated-dependencies: - dependency-name: github.com/miekg/dns dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Gabe <7622243+decentralgabe@users.noreply.github.com> --- impl/go.mod | 6 +++--- impl/go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/impl/go.mod b/impl/go.mod index 18a6dfa4..73c05114 100644 --- a/impl/go.mod +++ b/impl/go.mod @@ -17,7 +17,7 @@ require ( github.com/joho/godotenv v1.5.1 github.com/lestrrat-go/jwx/v2 v2.0.21 github.com/magefile/mage v1.15.0 - github.com/miekg/dns v1.1.58 + github.com/miekg/dns v1.1.59 github.com/mitchellh/go-homedir v1.1.0 github.com/mr-tron/base58 v1.2.0 github.com/pkg/errors v0.9.1 @@ -137,12 +137,12 @@ require ( golang.org/x/arch v0.7.0 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20231108232855-2478ac86f678 // indirect - golang.org/x/mod v0.14.0 // indirect + golang.org/x/mod v0.16.0 // indirect golang.org/x/net v0.23.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.19.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/grpc v1.63.0 // indirect diff --git a/impl/go.sum b/impl/go.sum index b432b7e2..9f52b4ab 100644 --- a/impl/go.sum +++ b/impl/go.sum @@ -352,8 +352,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= +github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -582,8 +582,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -653,8 +653,8 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= From 26ea0ad7acc293c997fe0f7ae0d747001a47873a Mon Sep 17 00:00:00 2001 From: Gabe <7622243+decentralgabe@users.noreply.github.com> Date: Mon, 22 Apr 2024 13:52:39 -0700 Subject: [PATCH 10/10] remove migration and fix typos (#197) --- impl/pkg/storage/db/bolt/bolt.go | 68 +------------------------------- spec/spec.md | 34 ++++++++-------- 2 files changed, 19 insertions(+), 83 deletions(-) diff --git a/impl/pkg/storage/db/bolt/bolt.go b/impl/pkg/storage/db/bolt/bolt.go index c4c4c22a..608c466d 100644 --- a/impl/pkg/storage/db/bolt/bolt.go +++ b/impl/pkg/storage/db/bolt/bolt.go @@ -18,7 +18,6 @@ import ( const ( dhtNamespace = "dht" - oldDHTNamespace = "pkarr" failedNamespace = "failed" ) @@ -39,72 +38,7 @@ func NewBolt(path string) (*Bolt, error) { if err != nil { return nil, err } - - b := Bolt{db: db} - b.migrate(context.Background()) - return &b, nil -} - -func (b *Bolt) migrate(ctx context.Context) { - _, span := telemetry.GetTracer().Start(ctx, "bolt.migrate") - defer span.End() - - // Delete new namespace - err := b.db.Update(func(tx *bolt.Tx) error { - return tx.DeleteBucket([]byte(dhtNamespace)) - }) - if err != nil { - logrus.WithContext(ctx).WithError(err).Error("error deleting new namespace") - } - - // Migrate old namespace to new namespace - var nextPageToken []byte - var migratedCount int - var failedCount int - for { - pageSize := 1000 - oldRecords, err := b.readSeveral(ctx, oldDHTNamespace, nextPageToken, pageSize) - if err != nil { - logrus.WithContext(ctx).WithError(err).Error("error reading old namespace") - return - } - - for _, oldRecord := range oldRecords { - key := string(oldRecord.key) - var encodedRecord base64BEP44Record - if err = json.Unmarshal(oldRecord.value, &encodedRecord); err != nil { - logrus.WithContext(ctx).WithError(err).Errorf("error decoding[%s]", key) - continue - } - record, err := encodedRecord.Decode() - if err != nil { - logrus.WithContext(ctx).WithError(err).Errorf("error decoding[%s]", key) - continue - } - if err = b.write(ctx, dhtNamespace, record.ID(), oldRecord.value); err != nil { - logrus.WithContext(ctx).WithError(err).Errorf("error writing[%s] to new namespace", key) - failedCount++ - } else { - migratedCount++ - } - } - - if 
len(oldRecords) == pageSize { - nextPageToken = oldRecords[len(oldRecords)-1].key - } else { - break - } - } - - logrus.WithContext(ctx).Infof("migrated %d records, failed %d records", migratedCount, failedCount) - if failedCount == 0 { - err = b.db.Update(func(tx *bolt.Tx) error { - return tx.DeleteBucket([]byte(oldDHTNamespace)) - }) - if err != nil { - logrus.WithContext(ctx).WithError(err).Error("error deleting old namespace") - } - } + return &Bolt{db: db}, nil } // WriteRecord writes the given record to the storage diff --git a/spec/spec.md b/spec/spec.md index 9e9a3f5f..dca8a5e1 100644 --- a/spec/spec.md +++ b/spec/spec.md @@ -9,7 +9,7 @@ The DID DHT Method Specification 1.0 **Draft Created:** October 20, 2023 -**Latest Update:** April 3, 2024 +**Latest Update:** April 22, 2024 **Editors:** ~ [Gabe Cohen](https://github.com/decentralgabe) @@ -74,13 +74,15 @@ specification and shall be registered with the [[spec:DID-Spec-Registries]]. ## Conformance -The keywords MAY, MUST, MUST NOT, RECOMMENDED, SHOULD, and SHOULD NOT in this document are to be interpreted as described in [BCP 14](https://www.rfc-editor.org/info/bcp14) [[spec:RFC2119]] [[spec:RFC8174]] when, and only when, they appear in all capitals, as shown here. +The keywords MAY, MUST, MUST NOT, RECOMMENDED, SHOULD, and SHOULD NOT in this document are to be interpreted as +described in [BCP 14](https://www.rfc-editor.org/info/bcp14) [[spec:RFC2119]] [[spec:RFC8174]] when, and only when, +they appear in all capitals, as shown here. ## Terminology [[def:Decentralized Identifier, Decentralized Identifier, DID, DIDs, DID Document, DID Documents]] ~ A [W3C specification](https://www.w3.org/TR/did-core/) describing an _identifier that enables verifiable, -decentralized digital identity_. A DID identifier is associated with a JSON document containing cryptograhpic keys, +decentralized digital identity_. A DID identifier is associated with a JSON document containing cryptographic keys, services, and other properties outlined in the specification. [[def:DID Suffix, Suffix]] @@ -167,19 +169,19 @@ to the initial publisher. Consequently, DHT records, including DID DHT Documents implies that trust in a specific [[ref:Mainline]] or [[ref:Gateway]] server for providing unaltered messages is unnecessary. Instead, all clients can, and should, verify messages themselves. This approach significantly mitigates risks associated with other DID methods, where a compromised server or [DID resolver](https://www.w3.org/TR/did-core/#choosing-did-resolvers) might tamper with a [[ref:DID Document]] -which would be undecetable by a client. +which would be undetectable by a client. Currently, [[ref:Mainline]] exclusively supports the [[ref:Ed25519]] key type. In turn, [[ref:Ed25519]] is required by DID DHT and is used to uniquely identify DID DHT Documents. DID DHT identifiers are formed by concatenating the `did:dht:` prefix with a [[ref:z-base-32]] encoded Identity Key, which acts as its [[ref:suffix]]. Identity Keys ****MUST**** have the identifier `0` as both its Verification Method `id` and JWK `kid` [[spec:RFC7517]]. Identity Keys ****MUST**** have the [Verification Relationships](#verification-relationships) -_Authentication_, _Assertion_, _Capabilitiy Invocation_, and _Capability Delegation_. +_Authentication_, _Assertion_, _Capability Invocation_, and _Capability Delegation_. While the system requires at least one [[ref:Ed25519]], a DID DHT Document can include any number of additional keys. 
Additional key types ****MUST**** be registered in the [Key Type Index](registry/index.html##key-type-index). As a unique consequence of the requirement of the Identity Key, DID DHT Documents are able to be partially-resolved without contacting -[[ref:Maineline]] or [[ref:Gateway]] servers, though it is ****RECOMMENDED**** that deterministic resolution is only used as a fallback mechanism. +[[ref:Mainline]] or [[ref:Gateway]] servers, though it is ****RECOMMENDED**** that deterministic resolution is only used as a fallback mechanism. Similarly, the requirement of an Identity Key enables [interoperability with other DID methods](#interoperability-with-other-did-methods). ### DIDs as DNS Records @@ -214,7 +216,7 @@ each `key` or `service` as attributes. - The DNS packet ****MUST**** set the _Authoritative Answer_ flag since this is always an _Authoritative_ packet. - `TXT` records ****MAY**** exceed 255 characters as per [[spec:RFC1035]]. Records exceeding 255 characters are -represented as multiple strings, which upon DID Document reconstructin, can be concatenated to a single value. +represented as multiple strings, which upon DID Document reconstruction, can be concatenated to a single value. #### Root Record @@ -279,7 +281,7 @@ A [DID controller](https://www.w3.org/TR/did-core/#did-controller) ****MAY**** b - The [Controller](https://www.w3.org/TR/did-core/#did-controller) record's **type** is `TXT`, indicating a Text record. -- The [Controller](https://www.w3.org/TR/did-core/#did-controller) record's **data** is represented as a comma-separatedlist of controller DID identifiers. +- The [Controller](https://www.w3.org/TR/did-core/#did-controller) record's **data** is represented as a comma-separated list of controller DID identifiers. To ensure that the DID controller is authorized to make changes to the DID Document, the controller for the [[ref:Identity Key]] Verification Method ****MUST**** be contained within the controller property. @@ -314,13 +316,13 @@ as a `_kN._did.` record where `N` is the zero-indexed positional index of a give - Each [Verification Method](https://www.w3.org/TR/did-core/#verification-methods) record's **rdata** is represented by the form `id=M;t=N;k=O;a=P` where `M` is the Verification Method's `id`, `N` is the index of the key's type from the -[key type index](registry/index.html#key-type-index), `N` is the unpadded base64URL [[spec:RFC4648]] representation of +[key type index](registry/index.html#key-type-index), `O` is the unpadded base64URL [[spec:RFC4648]] representation of the public key, and `P` is the `JWK` `alg` identifier of the key. - Verification Method `id`s ****MAY**** be omitted. If omitted, they can be computed according to the rules specified in the section on [representing keys](#representing-keys) when reconstructing the DID Document. - - `alg` identifiers ****MAY**** be ommitted. If omimtted, they are assigned to the default value specified in the + - `alg` identifiers ****MAY**** be omitted. If omitted, they are assigned to the default value specified in the [key type index](registry/index.html#key-type-index). - The [[ref:Identity Key]] ****MUST**** always be at index `_k0` with `id` `0`. @@ -950,21 +952,21 @@ access-token-based approach. ### DID Resolution -The process for resoloving a DID DHT Document via a [[ref:Gateway]] is outlined in the [read section above](#read). +The process for resolving a DID DHT Document via a [[ref:Gateway]] is outlined in the [read section above](#read). 
However, we provide additional guidance for [DID Resolvers](https://www.w3.org/TR/did-core/#dfn-did-resolvers) supplying [DID Document Metadata](https://www.w3.org/TR/did-core/#did-document-metadata) and [DID Resolution Metadata](https://www.w3.org/TR/did-core/#did-resolution-metadata) as follows: #### DID Document Metadata -* The metadata's [`versionId` property](https://www.w3.org/TR/did-core/#dfn-versionid) ****MUST**** be set to the +* The metadata [`versionId` property](https://www.w3.org/TR/did-core/#dfn-versionid) ****MUST**** be set to the [[ref:DID Document]] packet's current [[ref:sequence number]]. -* The metadata's [`created` property](https://www.w3.org/TR/did-core/#dfn-created) ****MUST**** be set to +* The metadata [`created` property](https://www.w3.org/TR/did-core/#dfn-created) ****MUST**** be set to [XML Datetime](https://www.w3.org/TR/xmlschema11-2/#dateTime) representation of the earliest known sequence number for the DID. -* The metadata's [`updated` property](https://www.w3.org/TR/did-core/#dfn-updated) ****MUST**** be set to the +* The metadata [`updated` property](https://www.w3.org/TR/did-core/#dfn-updated) ****MUST**** be set to the [XML Datetime](https://www.w3.org/TR/xmlschema11-2/#dateTime) representation of the last known sequence number for the DID. @@ -973,10 +975,10 @@ for the DID. #### DID Resolution Metadata -* The metadata's `types` property ****MUST**** be set to an array of strings representing type values if +* The metadata `types` property ****MUST**** be set to an array of strings representing type values if [type data](#type-indexing) is present in the [[ref:DID Document]]'s packet. -* The metadata's `gateway` property ****MUST**** be set to a string representing the [[ref:Gateway]]'s URI +* The metadata `gateway` property ****MUST**** be set to a string representing the [[ref:Gateway]]'s URI from which the DID was resolved. This is useful in cases where a [DID Resolvers](https://www.w3.org/TR/did-core/#dfn-did-resolvers) performs resolution against an [Authoritative Gateway](#designating-authoritative-gateways).
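---

Editor's note: the resolution-metadata guidance touched by the final patch above describes how `versionId`, `created`, and `updated` are derived from BEP-44 sequence numbers, and how `types` and `gateway` appear in resolution metadata. Below is a minimal, hedged Go sketch of that mapping. The struct names, field names, helper functions, and the example gateway URL are illustrative assumptions and are not the types used in `impl/`; it also assumes sequence numbers are Unix timestamps in seconds, which is a common publisher convention rather than something the spec guarantees.

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// didDocumentMetadata mirrors the DID Document Metadata properties named in the
// spec diff above. Names are illustrative, not the repository's actual types.
type didDocumentMetadata struct {
	VersionID string `json:"versionId"`
	Created   string `json:"created"`
	Updated   string `json:"updated"`
}

// didResolutionMetadata mirrors the DID Resolution Metadata properties.
type didResolutionMetadata struct {
	Types   []string `json:"types,omitempty"`
	Gateway string   `json:"gateway,omitempty"`
}

// toXMLDatetime renders a sequence number as an RFC 3339 / XML-dateTime string,
// assuming (as hedged above) that the sequence number is a Unix timestamp.
func toXMLDatetime(seq int64) string {
	return time.Unix(seq, 0).UTC().Format(time.RFC3339)
}

// buildMetadata assembles both metadata objects from the earliest and latest
// known sequence numbers, any indexed types, and the resolving gateway URI.
func buildMetadata(earliestSeq, latestSeq int64, types []string, gateway string) (didDocumentMetadata, didResolutionMetadata) {
	docMeta := didDocumentMetadata{
		VersionID: strconv.FormatInt(latestSeq, 10), // current sequence number
		Created:   toXMLDatetime(earliestSeq),       // earliest known sequence number
		Updated:   toXMLDatetime(latestSeq),         // latest known sequence number
	}
	resMeta := didResolutionMetadata{
		Types:   types,   // populated only when type data is indexed
		Gateway: gateway, // the gateway the DID was resolved against
	}
	return docMeta, resMeta
}

func main() {
	// Hypothetical values purely for illustration.
	docMeta, resMeta := buildMetadata(1700000000, 1713830400, []string{"7"}, "https://gateway.example.com")
	fmt.Printf("document metadata:   %+v\n", docMeta)
	fmt.Printf("resolution metadata: %+v\n", resMeta)
}
```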