diff --git a/.github/workflows/dependabot-auto.yml b/.github/workflows/dependabot-auto.yml new file mode 100644 index 00000000..0f657b9f --- /dev/null +++ b/.github/workflows/dependabot-auto.yml @@ -0,0 +1,28 @@ +name: Dependabot auto-approve and merge +on: pull_request + +permissions: + contents: write + pull-requests: write + +jobs: + approve: + runs-on: ubuntu-latest + if: github.actor == 'dependabot[bot]' + steps: + - name: Dependabot metadata + id: metadata + uses: dependabot/fetch-metadata@v2 + with: + github-token: "${{ secrets.GITHUB_TOKEN }}" + - name: Approve a PR + run: gh pr review --approve "$PR_URL" + env: + PR_URL: ${{github.event.pull_request.html_url}} + GH_TOKEN: ${{secrets.GITHUB_TOKEN}} + - name: Enable auto-merge for Dependabot PRs + if: steps.metadata.outputs.update-type == 'version-update:semver-patch' + run: gh pr merge --auto --merge "$PR_URL" + env: + PR_URL: ${{github.event.pull_request.html_url}} + GH_TOKEN: ${{secrets.GITHUB_TOKEN}} \ No newline at end of file diff --git a/impl/cmd/cli/identity.go b/impl/cmd/cli/identity.go index e1516645..ec4a099c 100644 --- a/impl/cmd/cli/identity.go +++ b/impl/cmd/cli/identity.go @@ -103,7 +103,7 @@ var identityAddCmd = &cobra.Command{ Answer: rrds, } // generate put request - putReq, err := dht.CreatePkarrPublishRequest(privKey, msg) + putReq, err := dht.CreateDNSPublishRequest(privKey, msg) if err != nil { logrus.WithError(err).Error("failed to create put request") return err @@ -170,7 +170,7 @@ var identityGetCmd = &cobra.Command{ return err } - msg, err := dht.ParsePkarrGetResponse(*gotResp) + msg, err := dht.ParseDNSGetResponse(*gotResp) if err != nil { logrus.WithError(err).Error("failed to parse get response") return err diff --git a/impl/concurrencytest/main.go b/impl/concurrencytest/main.go index 345b67e5..9f3fbc27 100644 --- a/impl/concurrencytest/main.go +++ b/impl/concurrencytest/main.go @@ -119,7 +119,7 @@ func 
generateDIDPutRequest() (string, []byte, error) { return "", nil, err } - bep44Put, err := dht.CreatePkarrPublishRequest(sk, *packet) + bep44Put, err := dht.CreateDNSPublishRequest(sk, *packet) if err != nil { return "", nil, err } diff --git a/impl/config/config.go b/impl/config/config.go index 2075d04e..b2f9d5a2 100644 --- a/impl/config/config.go +++ b/impl/config/config.go @@ -41,10 +41,9 @@ func (e EnvironmentVariable) String() string { } type Config struct { - Log LogConfig `toml:"log"` - ServerConfig ServerConfig `toml:"server"` - DHTConfig DHTServiceConfig `toml:"dht"` - PkarrConfig PkarrServiceConfig `toml:"pkarr"` + Log LogConfig `toml:"log"` + ServerConfig ServerConfig `toml:"server"` + DHTConfig DHTServiceConfig `toml:"dht"` } type ServerConfig struct { @@ -57,13 +56,10 @@ type ServerConfig struct { } type DHTServiceConfig struct { - BootstrapPeers []string `toml:"bootstrap_peers"` -} - -type PkarrServiceConfig struct { - RepublishCRON string `toml:"republish_cron"` - CacheTTLSeconds int `toml:"cache_ttl_seconds"` - CacheSizeLimitMB int `toml:"cache_size_limit_mb"` + BootstrapPeers []string `toml:"bootstrap_peers"` + RepublishCRON string `toml:"republish_cron"` + CacheTTLSeconds int `toml:"cache_ttl_seconds"` + CacheSizeLimitMB int `toml:"cache_size_limit_mb"` } type LogConfig struct { @@ -81,9 +77,7 @@ func GetDefaultConfig() Config { Telemetry: false, }, DHTConfig: DHTServiceConfig{ - BootstrapPeers: GetDefaultBootstrapPeers(), - }, - PkarrConfig: PkarrServiceConfig{ + BootstrapPeers: GetDefaultBootstrapPeers(), RepublishCRON: "0 */3 * * *", CacheTTLSeconds: 600, CacheSizeLimitMB: 1000, diff --git a/impl/config/config.toml b/impl/config/config.toml index 65cf88ac..b48239a6 100644 --- a/impl/config/config.toml +++ b/impl/config/config.toml @@ -9,8 +9,6 @@ telemetry = false [dht] bootstrap_peers = ["router.magnets.im:6881", "router.bittorrent.com:6881", "dht.transmissionbt.com:6881", "router.utorrent.com:6881", "router.nuh.dev:6881"] - -[pkarr] 
republish_cron = "0 */3 * * *" # every 3 hours cache_ttl_seconds = 600 # 10 minutes cache_size_limit_mb = 1000 # 1000 MB \ No newline at end of file diff --git a/impl/docs/swagger.yaml b/impl/docs/swagger.yaml index b8389710..95ee37d7 100644 --- a/impl/docs/swagger.yaml +++ b/impl/docs/swagger.yaml @@ -20,7 +20,7 @@ paths: get: consumes: - application/octet-stream - description: GetRecord a Pkarr record from the DHT + description: GetRecord a BEP44 DNS record from the DHT parameters: - description: ID to get in: path @@ -48,13 +48,13 @@ paths: description: Internal server error schema: type: string - summary: GetRecord a Pkarr record from the DHT + summary: GetRecord a BEP44 DNS record from the DHT tags: - - Pkarr + - DHT put: consumes: - application/octet-stream - description: PutRecord a Pkarr record into the DHT + description: PutRecord a BEP44 DNS record into the DHT parameters: - description: ID of the record to put in: path @@ -80,9 +80,9 @@ paths: description: Internal server error schema: type: string - summary: PutRecord a Pkarr record into the DHT + summary: PutRecord a BEP44 DNS record into the DHT tags: - - Pkarr + - DHT /health: get: consumes: diff --git a/impl/go.mod b/impl/go.mod index 5a718523..73c05114 100644 --- a/impl/go.mod +++ b/impl/go.mod @@ -8,7 +8,7 @@ require ( github.com/allegro/bigcache/v3 v3.1.0 github.com/anacrolix/dht/v2 v2.21.1 github.com/anacrolix/log v0.15.2 - github.com/anacrolix/torrent v1.52.5 + github.com/anacrolix/torrent v1.55.0 github.com/gin-contrib/cors v1.7.1 github.com/gin-gonic/gin v1.9.1 github.com/go-co-op/gocron v1.37.0 @@ -17,7 +17,7 @@ require ( github.com/joho/godotenv v1.5.1 github.com/lestrrat-go/jwx/v2 v2.0.21 github.com/magefile/mage v1.15.0 - github.com/miekg/dns v1.1.58 + github.com/miekg/dns v1.1.59 github.com/mitchellh/go-homedir v1.1.0 github.com/mr-tron/base58 v1.2.0 github.com/pkg/errors v0.9.1 @@ -31,10 +31,10 @@ require ( github.com/tv42/zbase32 v0.0.0-20220222190657-f76a9fc892fa go.etcd.io/bbolt 
v1.3.9 go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.50.0 - go.opentelemetry.io/contrib/instrumentation/runtime v0.49.0 + go.opentelemetry.io/contrib/instrumentation/runtime v0.50.0 go.opentelemetry.io/otel v1.25.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 go.opentelemetry.io/otel/sdk v1.25.0 go.opentelemetry.io/otel/sdk/metric v1.25.0 go.opentelemetry.io/otel/trace v1.25.0 @@ -46,18 +46,18 @@ require ( github.com/KyleBanks/depth v1.2.1 // indirect github.com/alecthomas/atomic v0.1.0-alpha2 // indirect github.com/anacrolix/chansync v0.3.0 // indirect - github.com/anacrolix/generics v0.0.0-20230428105757-683593396d68 // indirect + github.com/anacrolix/generics v0.0.0-20230911070922-5dd7545c6b13 // indirect github.com/anacrolix/missinggo v1.3.0 // indirect github.com/anacrolix/missinggo/perf v1.0.0 // indirect - github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9 // indirect + github.com/anacrolix/missinggo/v2 v2.7.3 // indirect github.com/anacrolix/multiless v0.3.1-0.20221221005021-2d12701f83f7 // indirect github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496 // indirect - github.com/anacrolix/sync v0.4.0 // indirect + github.com/anacrolix/sync v0.5.1 // indirect github.com/benbjohnson/immutable v0.4.1-0.20221220213129-8932b999621d // indirect github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/bytedance/sonic v1.11.3 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect github.com/chenzhuoyu/iasm v0.9.1 // indirect 
github.com/cloudflare/circl v1.3.7 // indirect @@ -76,7 +76,6 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.19.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect @@ -130,7 +129,7 @@ require ( github.com/swaggo/swag v1.8.12 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 // indirect go.opentelemetry.io/otel/metric v1.25.0 // indirect go.opentelemetry.io/proto/otlp v1.1.0 // indirect go.uber.org/atomic v1.9.0 // indirect @@ -138,15 +137,15 @@ require ( golang.org/x/arch v0.7.0 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20231108232855-2478ac86f678 // indirect - golang.org/x/mod v0.14.0 // indirect + golang.org/x/mod v0.16.0 // indirect golang.org/x/net v0.23.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.17.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/grpc v1.61.1 // indirect + golang.org/x/tools v0.19.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/grpc v1.63.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/impl/go.sum 
b/impl/go.sum index d4625ed9..9f52b4ab 100644 --- a/impl/go.sum +++ b/impl/go.sum @@ -45,10 +45,10 @@ github.com/anacrolix/dht/v2 v2.21.1/go.mod h1:SDGC+sEs1pnO2sJGYuhvIis7T8749dDHNf github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= -github.com/anacrolix/envpprof v1.2.1 h1:25TJe6t/i0AfzzldiGFKCpD+s+dk8lONBcacJZB2rdE= -github.com/anacrolix/envpprof v1.2.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= -github.com/anacrolix/generics v0.0.0-20230428105757-683593396d68 h1:fyXlBfnlFzZSFckJ8QLb2lfmWfY++4RiUnae7ZMuv0A= -github.com/anacrolix/generics v0.0.0-20230428105757-683593396d68/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= +github.com/anacrolix/envpprof v1.3.0 h1:WJt9bpuT7A/CDCxPOv/eeZqHWlle/Y0keJUvc6tcJDk= +github.com/anacrolix/envpprof v1.3.0/go.mod h1:7QIG4CaX1uexQ3tqd5+BRa/9e2D02Wcertl6Yh0jCB0= +github.com/anacrolix/generics v0.0.0-20230911070922-5dd7545c6b13 h1:qwOprPTDMM3BASJRf84mmZnTXRsPGGJ8xoHKQS7m3so= +github.com/anacrolix/generics v0.0.0-20230911070922-5dd7545c6b13/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= github.com/anacrolix/log v0.6.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= github.com/anacrolix/log v0.15.2 h1:LTSf5Wm6Q4GNWPFMBP7NPYV6UBVZzZLKckL+/Lj72Oo= @@ -62,21 +62,21 @@ github.com/anacrolix/missinggo/perf v1.0.0 h1:7ZOGYziGEBytW49+KmYGTaNfnwUqP1HBsy github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ= github.com/anacrolix/missinggo/v2 v2.2.0/go.mod h1:o0jgJoYOyaoYQ4E2ZMISVa9c88BbUBVQQW4QeRkNCGY= github.com/anacrolix/missinggo/v2 v2.5.1/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA= -github.com/anacrolix/missinggo/v2 
v2.7.2-0.20230527121029-a582b4f397b9 h1:W/oGeHhYwxueeiDjQfmK9G+X9M2xJgfTtow62v0TWAs= -github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9/go.mod h1:mIEtp9pgaXqt8VQ3NQxFOod/eQ1H0D1XsZzKUQfwtac= +github.com/anacrolix/missinggo/v2 v2.7.3 h1:Ee//CmZBMadeNiYB/hHo9ly2PFOEZ4Fhsbnug3rDAIE= +github.com/anacrolix/missinggo/v2 v2.7.3/go.mod h1:mIEtp9pgaXqt8VQ3NQxFOod/eQ1H0D1XsZzKUQfwtac= github.com/anacrolix/multiless v0.3.1-0.20221221005021-2d12701f83f7 h1:lOtCD+LzoD1g7bowhYJNR++uV+FyY5bTZXKwnPex9S8= github.com/anacrolix/multiless v0.3.1-0.20221221005021-2d12701f83f7/go.mod h1:zJv1JF9AqdZiHwxqPgjuOZDGWER6nyE48WBCi/OOrMM= github.com/anacrolix/stm v0.2.0/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg= github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496 h1:aMiRi2kOOd+nG64suAmFMVnNK2E6GsnLif7ia9tI3cA= github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496/go.mod h1:DBm8/1OXm4A4RZ6Xa9u/eOsjeAXCaoRYvd2JzlskXeM= github.com/anacrolix/sync v0.3.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= -github.com/anacrolix/sync v0.4.0 h1:T+MdO/u87ir/ijWsTFsPYw5jVm0SMm4kVpg8t4KF38o= -github.com/anacrolix/sync v0.4.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= +github.com/anacrolix/sync v0.5.1 h1:FbGju6GqSjzVoTgcXTUKkF041lnZkG5P0C3T5RL3SGc= +github.com/anacrolix/sync v0.5.1/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= -github.com/anacrolix/torrent v1.52.5 h1:jWowdx+EU6zFVfBwmnL0d3H4J6vTFEGOrHI35YdfIT8= -github.com/anacrolix/torrent v1.52.5/go.mod h1:CcM8oPMYye5J42cSqJrmUpqwRFgSsJQ1jCEHwygqnqQ= +github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8= +github.com/anacrolix/torrent v1.55.0/go.mod 
h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE= github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI= github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 h1:goHVqTbFX3AIo0tzGr14pgfAW2ZfPChKO21Z9MGf/gk= @@ -100,8 +100,8 @@ github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1 github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= github.com/bytedance/sonic v1.11.3 h1:jRN+yEjakWh8aK5FzrciUHG8OFXK+4/KrAX/ysEtHAA= github.com/bytedance/sonic v1.11.3/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= @@ -225,9 +225,6 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= 
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -236,7 +233,6 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -356,8 +352,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= +github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -544,18 +540,18 @@ go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.50.0 h1:LLz31zcmHs6aB8bi2we+tzO0tr5oW7yZp3x06qR5YoI= go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.50.0/go.mod h1:UdPyzt6g4yEwcz9QjnCC1HB2yqadJgpFo9m5ddGweU0= -go.opentelemetry.io/contrib/instrumentation/runtime v0.49.0 h1:dg9y+7ArpumB6zwImJv47RHfdgOGQ1EMkzP5vLkEnTU= -go.opentelemetry.io/contrib/instrumentation/runtime v0.49.0/go.mod h1:Ul4MtXqu/hJBM+v7a6dCF0nHwckPMLpIpLeCi4+zfdw= +go.opentelemetry.io/contrib/instrumentation/runtime v0.50.0 h1:6dck47miguAOny5MeqX1G8idd+HpzDFt86U33d7aW2I= +go.opentelemetry.io/contrib/instrumentation/runtime v0.50.0/go.mod h1:rdPhRwNd2sHiRmwJAGs8xcwitqmP/j8pvl9X5jloYjU= go.opentelemetry.io/contrib/propagators/b3 v1.25.0 h1:QU8UEKyPqgr/8vCC9LlDmkPnfFmiWAUF9GtJdcLz+BU= go.opentelemetry.io/contrib/propagators/b3 v1.25.0/go.mod h1:qonC7wyvtX1E6cEpAR+bJmhcGr6IVRGc/f6ZTpvi7jA= go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 h1:mM8nKi6/iFQ0iqst80wDHU2ge198Ye/TfN0WBS5U24Y= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0/go.mod h1:0PrIIzDteLSmNyxqcGYRL4mDIo8OTuBAOI/Bn1URxac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1 h1:o8iWeVFa1BcLtVEV0LzrCxV2/55tB3xLxADr6Kyoey4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1/go.mod h1:SEVfdK4IoBnbT2FXNM/k8yC08MrfbhWk3U4ljM8B3HE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1 
h1:cfuy3bXmLJS7M1RZmAL6SuhGtKUp2KEsrm00OlAXkq4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1/go.mod h1:22jr92C6KwlwItJmQzfixzQM3oyyuYLCfHiMY+rpsPU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0 h1:Wc4hZuYXhVqq+TfRXLXlmNIL/awOanGx8ssq3ciDQxc= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0/go.mod h1:BydOvapRqVEc0DVz27qWBX2jq45Ca5TI9mhZBDIdweY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 h1:dT33yIHtmsqpixFsSQPwNeY5drM9wTcoL8h0FWF4oGM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0/go.mod h1:h95q0LBGh7hlAC08X2DhSeyIG02YQ0UyioTCVAqRPmc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 h1:Mbi5PKN7u322woPa85d7ebZ+SOvEoPvoiBu+ryHWgfA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0/go.mod h1:e7ciERRhZaOZXVjx5MiL8TK5+Xv7G5Gv5PA2ZDEJdL8= go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA= go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= go.opentelemetry.io/otel/sdk v1.25.0 h1:PDryEJPC8YJZQSyLY5eqLeafHtG+X7FWnf3aXMtxbqo= @@ -586,8 +582,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -657,8 +653,8 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= @@ -667,24 +663,22 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 h1:YJ5pD9rF8o9Qtta0Cmy9rdBwkSjrTCT6XTiUQVOtIos= -google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY= 
-google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM= -google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= -google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= diff --git a/impl/integrationtest/main.go b/impl/integrationtest/main.go index 94949f5d..18ed29c7 100644 --- a/impl/integrationtest/main.go +++ b/impl/integrationtest/main.go @@ -103,7 +103,7 @@ func generateDIDPutRequest() (string, []byte, error) { return "", nil, err } - bep44Put, err := dht.CreatePkarrPublishRequest(sk, *packet) + bep44Put, err := dht.CreateDNSPublishRequest(sk, *packet) if err != nil { return "", nil, err } diff --git a/impl/internal/did/client_test.go b/impl/internal/did/client_test.go index 37cdcedd..e052ed7e 100644 --- a/impl/internal/did/client_test.go +++ b/impl/internal/did/client_test.go @@ -28,7 +28,7 @@ func TestClient(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - bep44Put, err := dht.CreatePkarrPublishRequest(sk, *packet) + bep44Put, err := dht.CreateDNSPublishRequest(sk, *packet) assert.NoError(t, err) assert.NotEmpty(t, bep44Put) diff --git a/impl/internal/did/did.go b/impl/internal/did/did.go index c8d4709b..5750cd2b 100644 --- a/impl/internal/did/did.go +++ b/impl/internal/did/did.go @@ -314,7 +314,7 @@ func (d DHT) 
ToDNSPacket(doc did.Document, types []TypeIndex, gateways []Authori keyType := keyTypeForJWK(*vm.PublicKeyJWK) if keyType < 0 { - return nil, fmt.Errorf("+unsupported key type given alg: %s", vm.PublicKeyJWK.ALG) + return nil, fmt.Errorf("unsupported key type given alg: %s", vm.PublicKeyJWK.ALG) } // convert the public key to a base64url encoded string diff --git a/impl/pkg/dht/pkarr.go b/impl/pkg/dht/dns.go similarity index 72% rename from impl/pkg/dht/pkarr.go rename to impl/pkg/dht/dns.go index 2265c5c3..eb374213 100644 --- a/impl/pkg/dht/pkarr.go +++ b/impl/pkg/dht/dns.go @@ -12,13 +12,13 @@ import ( "github.com/TBD54566975/did-dht-method/internal/dht" ) -// CreatePkarrPublishRequest creates a put request for the given records. Requires a public/private keypair and the records to put. -// The records are expected to be a DNS message packet, such as: +// CreateDNSPublishRequest creates a put request for the given records. Requires a public/private keypair and +// the records to put. The records are expected to be a DNS message packet, such as: // // dns.Msg{ // MsgHdr: dns.MsgHdr{ // Id: 0, -// Response: true, +// BEP44Response: true, // Authoritative: true, // }, // Answer: dns.RR{ @@ -30,11 +30,11 @@ import ( // Ttl: 7200, // }, // Txt: []string{ -// "hello pkarr", +// "hello mainline", // }, // } // } -func CreatePkarrPublishRequest(privateKey ed25519.PrivateKey, msg dns.Msg) (*bep44.Put, error) { +func CreateDNSPublishRequest(privateKey ed25519.PrivateKey, msg dns.Msg) (*bep44.Put, error) { packed, err := msg.Pack() if err != nil { return nil, util.LoggingErrorMsg(err, "failed to pack records") @@ -49,9 +49,9 @@ func CreatePkarrPublishRequest(privateKey ed25519.PrivateKey, msg dns.Msg) (*bep return put, nil } -// ParsePkarrGetResponse parses the response from a get request. +// ParseDNSGetResponse parses the response from a get request. // The response is expected to be a slice of DNS resource records. 
-func ParsePkarrGetResponse(response dht.FullGetResult) (*dns.Msg, error) { +func ParseDNSGetResponse(response dht.FullGetResult) (*dns.Msg, error) { var payload string if err := bencode.Unmarshal(response.V, &payload); err != nil { return nil, util.LoggingErrorMsg(err, "failed to unmarshal payload value") diff --git a/impl/pkg/dht/pkarr_test.go b/impl/pkg/dht/dns_test.go similarity index 91% rename from impl/pkg/dht/pkarr_test.go rename to impl/pkg/dht/dns_test.go index f83992b1..431e4a0c 100644 --- a/impl/pkg/dht/pkarr_test.go +++ b/impl/pkg/dht/dns_test.go @@ -16,7 +16,7 @@ import ( "github.com/TBD54566975/did-dht-method/internal/util" ) -func TestGetPutPkarrDHT(t *testing.T) { +func TestGetPutDNSDHT(t *testing.T) { dht := NewTestDHT(t) defer dht.Close() @@ -31,7 +31,7 @@ func TestGetPutPkarrDHT(t *testing.T) { Ttl: 7200, }, Txt: []string{ - "hello pkarr", + "hello mainline", }, } msg := dns.Msg{ @@ -42,7 +42,7 @@ func TestGetPutPkarrDHT(t *testing.T) { }, Answer: []dns.RR{&txtRecord}, } - put, err := CreatePkarrPublishRequest(privKey, msg) + put, err := CreateDNSPublishRequest(privKey, msg) require.NoError(t, err) id, err := dht.Put(context.Background(), *put) @@ -53,7 +53,7 @@ func TestGetPutPkarrDHT(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, got) - gotMsg, err := ParsePkarrGetResponse(*got) + gotMsg, err := ParseDNSGetResponse(*got) require.NoError(t, err) require.NotEmpty(t, gotMsg.Answer) @@ -103,7 +103,7 @@ func TestGetPutDIDDHT(t *testing.T) { didDocPacket, err := didID.ToDNSPacket(*doc, nil, nil) require.NoError(t, err) - putReq, err := CreatePkarrPublishRequest(privKey, *didDocPacket) + putReq, err := CreateDNSPublishRequest(privKey, *didDocPacket) require.NoError(t, err) gotID, err := dht.Put(context.Background(), *putReq) @@ -114,7 +114,7 @@ func TestGetPutDIDDHT(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, got) - gotMsg, err := ParsePkarrGetResponse(*got) + gotMsg, err := ParseDNSGetResponse(*got) require.NoError(t, 
err) require.NotEmpty(t, gotMsg.Answer) diff --git a/impl/pkg/pkarr/record.go b/impl/pkg/dht/record.go similarity index 57% rename from impl/pkg/pkarr/record.go rename to impl/pkg/dht/record.go index 862624c4..ea87e9be 100644 --- a/impl/pkg/pkarr/record.go +++ b/impl/pkg/dht/record.go @@ -1,4 +1,4 @@ -package pkarr +package dht import ( "bytes" @@ -14,40 +14,47 @@ import ( "github.com/tv42/zbase32" ) -type Response struct { +type BEP44Response struct { V []byte `validate:"required"` Seq int64 `validate:"required"` Sig [64]byte `validate:"required"` } // Equals returns true if the response is equal to the other response -func (r Response) Equals(other Response) bool { +func (r BEP44Response) Equals(other BEP44Response) bool { return r.Seq == other.Seq && bytes.Equal(r.V, other.V) && r.Sig == other.Sig } -type Record struct { +// BEP44Record represents a record in the DHT +type BEP44Record struct { Value []byte `json:"v" validate:"required"` Key [32]byte `json:"k" validate:"required"` Signature [64]byte `json:"sig" validate:"required"` SequenceNumber int64 `json:"seq" validate:"required"` } -// NewRecord returns a new Record with the given key, value, signature, and sequence number -func NewRecord(k []byte, v []byte, sig []byte, seq int64) (*Record, error) { - record := Record{SequenceNumber: seq} +// FailedRecord represents a record that failed to be written to the DHT +type FailedRecord struct { + ID string `json:"id"` + Count int `json:"count"` +} + +// NewBEP44Record returns a new BEP44Record with the given key, value, signature, and sequence number +func NewBEP44Record(k []byte, v []byte, sig []byte, seq int64) (*BEP44Record, error) { + record := BEP44Record{SequenceNumber: seq} if len(k) != 32 { - return nil, errors.New("incorrect key length for pkarr record") + return nil, errors.New("incorrect key length for bep44 record") } record.Key = [32]byte(k) if len(v) > 1000 { - return nil, errors.New("pkarr record value too long") + return nil, errors.New("bep44 
record value too long") } record.Value = v if len(sig) != 64 { - return nil, errors.New("incorrect sig length for pkarr record") + return nil, errors.New("incorrect sig length for bep44 record") } record.Signature = [64]byte(sig) @@ -59,7 +66,7 @@ func NewRecord(k []byte, v []byte, sig []byte, seq int64) (*Record, error) { } // IsValid returns an error if the request is invalid; also validates the signature -func (r Record) IsValid() error { +func (r BEP44Record) IsValid() error { if err := util.IsValidStruct(r); err != nil { return err } @@ -67,7 +74,7 @@ func (r Record) IsValid() error { // validate the signature bv, err := bencode.Marshal(r.Value) if err != nil { - return fmt.Errorf("error bencoding pkarr record: %v", err) + return fmt.Errorf("error bencoding bep44 record: %v", err) } if !bep44.Verify(r.Key[:], nil, r.SequenceNumber, bv, r.Signature[:]) { @@ -76,17 +83,17 @@ func (r Record) IsValid() error { return nil } -// Response returns the record as a Response -func (r Record) Response() Response { - return Response{ +// Response returns the record as a BEP44Response +func (r BEP44Record) Response() BEP44Response { + return BEP44Response{ V: r.Value, Seq: r.SequenceNumber, Sig: r.Signature, } } -// BEP44 returns the record as a BEP44 Put message -func (r Record) BEP44() bep44.Put { +// Put returns the record as a bep44.Put message +func (r BEP44Record) Put() bep44.Put { return bep44.Put{ V: r.Value, K: &r.Key, @@ -96,18 +103,18 @@ func (r Record) BEP44() bep44.Put { } // String returns a string representation of the record -func (r Record) String() string { +func (r BEP44Record) String() string { e := base64.RawURLEncoding - return fmt.Sprintf("pkarr.Record{K=%s V=%s Sig=%s Seq=%d}", zbase32.EncodeToString(r.Key[:]), e.EncodeToString(r.Value), e.EncodeToString(r.Signature[:]), r.SequenceNumber) + return fmt.Sprintf("dht.BEP44Record{K=%s V=%s Sig=%s Seq=%d}", zbase32.EncodeToString(r.Key[:]), e.EncodeToString(r.Value), e.EncodeToString(r.Signature[:]), 
r.SequenceNumber) } // ID returns the base32 encoded key as a string -func (r Record) ID() string { +func (r BEP44Record) ID() string { return zbase32.EncodeToString(r.Key[:]) } // Hash returns the SHA256 hash of the record as a string -func (r Record) Hash() (string, error) { +func (r BEP44Record) Hash() (string, error) { recordBytes, err := json.Marshal(r) if err != nil { return "", err @@ -115,9 +122,9 @@ func (r Record) Hash() (string, error) { return string(sha256.New().Sum(recordBytes)), nil } -// RecordFromBEP44 returns a Record from a BEP44 Put message -func RecordFromBEP44(putMsg *bep44.Put) Record { - return Record{ +// RecordFromBEP44 returns a BEP44Record from a bep44.Put message +func RecordFromBEP44(putMsg *bep44.Put) BEP44Record { + return BEP44Record{ Key: *putMsg.K, Value: putMsg.V.([]byte), Signature: putMsg.Sig, diff --git a/impl/pkg/pkarr/record_test.go b/impl/pkg/dht/record_test.go similarity index 55% rename from impl/pkg/pkarr/record_test.go rename to impl/pkg/dht/record_test.go index 614a292d..4be14fce 100644 --- a/impl/pkg/pkarr/record_test.go +++ b/impl/pkg/dht/record_test.go @@ -1,4 +1,4 @@ -package pkarr_test +package dht_test import ( "strings" @@ -9,13 +9,12 @@ import ( "github.com/TBD54566975/did-dht-method/internal/did" "github.com/TBD54566975/did-dht-method/pkg/dht" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" ) func TestNewRecord(t *testing.T) { // validate incorrect key length is rejected - r, err := pkarr.NewRecord([]byte("aaaaaaaaaaa"), nil, nil, 0) - assert.EqualError(t, err, "incorrect key length for pkarr record") + r, err := dht.NewBEP44Record([]byte("aaaaaaaaaaa"), nil, nil, 0) + assert.EqualError(t, err, "incorrect key length for bep44 record") assert.Nil(t, r) // create a did doc as a packet to store @@ -27,30 +26,30 @@ func TestNewRecord(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, 
*packet) require.NoError(t, err) require.NotEmpty(t, putMsg) - r, err = pkarr.NewRecord(putMsg.K[:], []byte(strings.Repeat("a", 1001)), putMsg.Sig[:], putMsg.Seq) - assert.EqualError(t, err, "pkarr record value too long") + r, err = dht.NewBEP44Record(putMsg.K[:], []byte(strings.Repeat("a", 1001)), putMsg.Sig[:], putMsg.Seq) + assert.EqualError(t, err, "bep44 record value too long") assert.Nil(t, r) - r, err = pkarr.NewRecord(putMsg.K[:], putMsg.V.([]byte), []byte(strings.Repeat("a", 65)), putMsg.Seq) - assert.EqualError(t, err, "incorrect sig length for pkarr record") + r, err = dht.NewBEP44Record(putMsg.K[:], putMsg.V.([]byte), []byte(strings.Repeat("a", 65)), putMsg.Seq) + assert.EqualError(t, err, "incorrect sig length for bep44 record") assert.Nil(t, r) - r, err = pkarr.NewRecord(putMsg.K[:], putMsg.V.([]byte), putMsg.Sig[:], 0) - assert.EqualError(t, err, "Key: 'Record.SequenceNumber' Error:Field validation for 'SequenceNumber' failed on the 'required' tag") + r, err = dht.NewBEP44Record(putMsg.K[:], putMsg.V.([]byte), putMsg.Sig[:], 0) + assert.EqualError(t, err, "Key: 'BEP44Record.SequenceNumber' Error:Field validation for 'SequenceNumber' failed on the 'required' tag") assert.Nil(t, r) - r, err = pkarr.NewRecord(putMsg.K[:], putMsg.V.([]byte), putMsg.Sig[:], 1) + r, err = dht.NewBEP44Record(putMsg.K[:], putMsg.V.([]byte), putMsg.Sig[:], 1) assert.EqualError(t, err, "signature is invalid") assert.Nil(t, r) - r, err = pkarr.NewRecord(putMsg.K[:], putMsg.V.([]byte), putMsg.Sig[:], putMsg.Seq) + r, err = dht.NewBEP44Record(putMsg.K[:], putMsg.V.([]byte), putMsg.Sig[:], putMsg.Seq) assert.NoError(t, err) - bep := r.BEP44() + bep := r.Put() assert.Equal(t, putMsg.K, bep.K) assert.Equal(t, putMsg.V, bep.V) assert.Equal(t, putMsg.Sig, bep.Sig) @@ -61,7 +60,7 @@ func TestNewRecord(t *testing.T) { assert.Equal(t, r.SequenceNumber, resp.Seq) assert.Equal(t, r.Signature, resp.Sig) - r2 := pkarr.RecordFromBEP44(putMsg) + r2 := dht.RecordFromBEP44(putMsg) 
assert.Equal(t, r.Key, r2.Key) assert.Equal(t, r.Value, r2.Value) assert.Equal(t, r.Signature, r2.Signature) diff --git a/impl/pkg/server/pkarr.go b/impl/pkg/server/dht.go similarity index 69% rename from impl/pkg/server/pkarr.go rename to impl/pkg/server/dht.go index d5c88903..966909c0 100644 --- a/impl/pkg/server/pkarr.go +++ b/impl/pkg/server/dht.go @@ -11,26 +11,26 @@ import ( "github.com/gin-gonic/gin" "github.com/TBD54566975/did-dht-method/internal/util" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" + "github.com/TBD54566975/did-dht-method/pkg/dht" "github.com/TBD54566975/did-dht-method/pkg/service" "github.com/TBD54566975/did-dht-method/pkg/telemetry" ) -// PkarrRouter is the router for the Pkarr API -type PkarrRouter struct { - service *service.PkarrService +// DHTRouter is the router for the DHT API +type DHTRouter struct { + service *service.DHTService } -// NewPkarrRouter returns a new instance of the Relay router -func NewPkarrRouter(service *service.PkarrService) (*PkarrRouter, error) { - return &PkarrRouter{service: service}, nil +// NewDHTRouter returns a new instance of the DHT router +func NewDHTRouter(service *service.DHTService) (*DHTRouter, error) { + return &DHTRouter{service: service}, nil } // GetRecord godoc // -// @Summary GetRecord a Pkarr record from the DHT -// @Description GetRecord a Pkarr record from the DHT -// @Tags Pkarr +// @Summary GetRecord a BEP44 DNS record from the DHT +// @Description GetRecord a BEP44 DNS record from the DHT +// @Tags DHT // @Accept octet-stream // @Produce octet-stream // @Param id path string true "ID to get" @@ -39,8 +39,8 @@ func NewPkarrRouter(service *service.PkarrService) (*PkarrRouter, error) { // @Failure 404 {string} string "Not found" // @Failure 500 {string} string "Internal server error" // @Router /{id} [get] -func (r *PkarrRouter) GetRecord(c *gin.Context) { - ctx, span := telemetry.GetTracer().Start(c, "PkarrHTTP.GetRecord") +func (r *DHTRouter) GetRecord(c *gin.Context) { + ctx, span 
:= telemetry.GetTracer().Start(c, "DHTHTTP.GetRecord") defer span.End() id := GetParam(c, IDParam) @@ -60,23 +60,22 @@ func (r *PkarrRouter) GetRecord(c *gin.Context) { return } - resp, err := r.service.GetPkarr(ctx, *id) + resp, err := r.service.GetDHT(ctx, *id) if err != nil { // TODO(gabe): provide a more maintainable way to handle custom errors if strings.Contains(err.Error(), "spam") { LoggingRespondErrMsg(c, fmt.Sprintf("too many requests for bad key %s", *id), http.StatusTooManyRequests) return } - LoggingRespondErrWithMsg(c, err, "failed to get pkarr record", http.StatusInternalServerError) + LoggingRespondErrWithMsg(c, err, "failed to get dht record", http.StatusInternalServerError) return } if resp == nil { - LoggingRespondErrMsg(c, "pkarr record not found", http.StatusNotFound) + LoggingRespondErrMsg(c, "dht record not found", http.StatusNotFound) return } // Convert int64 to uint64 since binary.PutUint64 expects a uint64 value - // according to https://github.com/Nuhvi/pkarr/blob/main/design/relays.md#get var seqBuf [8]byte binary.BigEndian.PutUint64(seqBuf[:], uint64(resp.Seq)) // sig:seq:v @@ -86,9 +85,9 @@ func (r *PkarrRouter) GetRecord(c *gin.Context) { // PutRecord godoc // -// @Summary PutRecord a Pkarr record into the DHT -// @Description PutRecord a Pkarr record into the DHT -// @Tags Pkarr +// @Summary PutRecord a BEP44 DNS record into the DHT +// @Description PutRecord a BEP44 DNS record into the DHT +// @Tags DHT // @Accept octet-stream // @Param id path string true "ID of the record to put" // @Param request body []byte true "64 bytes sig, 8 bytes u64 big-endian seq, 0-1000 bytes of v." 
@@ -96,8 +95,8 @@ func (r *PkarrRouter) GetRecord(c *gin.Context) { // @Failure 400 {string} string "Bad request" // @Failure 500 {string} string "Internal server error" // @Router /{id} [put] -func (r *PkarrRouter) PutRecord(c *gin.Context) { - ctx, span := telemetry.GetTracer().Start(c, "PkarrHTTP.PutRecord") +func (r *DHTRouter) PutRecord(c *gin.Context) { + ctx, span := telemetry.GetTracer().Start(c, "DHTHTTP.PutRecord") defer span.End() id := GetParam(c, IDParam) @@ -129,18 +128,17 @@ func (r *PkarrRouter) PutRecord(c *gin.Context) { } // transform the request into a service request by extracting the fields - // according to https://github.com/Nuhvi/pkarr/blob/main/design/relays.md#put value := body[72:] sig := body[:64] seq := int64(binary.BigEndian.Uint64(body[64:72])) - request, err := pkarr.NewRecord(key, value, sig, seq) + request, err := dht.NewBEP44Record(key, value, sig, seq) if err != nil { LoggingRespondErrWithMsg(c, err, "error parsing request", http.StatusBadRequest) return } - if err = r.service.PublishPkarr(ctx, *id, *request); err != nil { - LoggingRespondErrWithMsg(c, err, "failed to publish pkarr record", http.StatusInternalServerError) + if err = r.service.PublishDHT(ctx, *id, *request); err != nil { + LoggingRespondErrWithMsg(c, err, "failed to publish dht record", http.StatusInternalServerError) return } diff --git a/impl/pkg/server/pkarr_test.go b/impl/pkg/server/dht_test.go similarity index 89% rename from impl/pkg/server/pkarr_test.go rename to impl/pkg/server/dht_test.go index fe43655c..64e068d3 100644 --- a/impl/pkg/server/pkarr_test.go +++ b/impl/pkg/server/dht_test.go @@ -19,13 +19,13 @@ import ( "github.com/TBD54566975/did-dht-method/pkg/storage" ) -func TestPkarrRouter(t *testing.T) { - pkarrSvc := testPkarrService(t) - pkarrRouter, err := NewPkarrRouter(&pkarrSvc) +func TestDHTRouter(t *testing.T) { + dhtSvc := testDHTService(t) + dhtRouter, err := NewDHTRouter(&dhtSvc) require.NoError(t, err) - require.NotEmpty(t, pkarrRouter) + 
require.NotEmpty(t, dhtRouter) - defer pkarrSvc.Close() + defer dhtSvc.Close() t.Run("test put record", func(t *testing.T) { didID, reqData := generateDIDPutRequest(t) @@ -36,7 +36,7 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/%s", testServerURL, suffix), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.True(t, is2xxResponse(w.Code), "unexpected %s", w.Result().Status) }) @@ -49,14 +49,14 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/%s", testServerURL, suffix), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.True(t, is2xxResponse(w.Code), "unexpected %s", w.Result().Status) w = httptest.NewRecorder() req = httptest.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", testServerURL, suffix), nil) c = newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.GetRecord(c) + dhtRouter.GetRecord(c) assert.True(t, is2xxResponse(w.Code), "unexpected %s", w.Result().Status) resp, err := io.ReadAll(w.Body) @@ -74,14 +74,14 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/%s", testServerURL, suffix), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.True(t, is2xxResponse(w.Code), "unexpected %s", w.Result().Status) w = httptest.NewRecorder() req = httptest.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", testServerURL, suffix), nil) c = newRequestContextWithParams(w, req, map[string]string{}) - pkarrRouter.GetRecord(c) + dhtRouter.GetRecord(c) assert.Equal(t, http.StatusBadRequest, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) @@ -93,7 +93,7 @@ func 
TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/", testServerURL), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.Equal(t, http.StatusBadRequest, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) @@ -106,7 +106,7 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/%s", testServerURL, suffix), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.Equal(t, http.StatusInternalServerError, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) @@ -121,7 +121,7 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/%s", testServerURL, suffix), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.Equal(t, http.StatusBadRequest, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) @@ -134,7 +134,7 @@ func TestPkarrRouter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("%s/%s", testServerURL, suffix), bytes.NewReader(reqData)) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.PutRecord(c) + dhtRouter.PutRecord(c) assert.Equal(t, http.StatusBadRequest, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) @@ -143,7 +143,7 @@ func TestPkarrRouter(t *testing.T) { suffix := "uqaj3fcr9db6jg6o9pjs53iuftyj45r46aubogfaceqjbo6pp9sy" req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", testServerURL, suffix), nil) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.GetRecord(c) + dhtRouter.GetRecord(c) assert.Equal(t, http.StatusNotFound, w.Result().StatusCode, "unexpected %s", 
w.Result().Status) }) @@ -152,18 +152,18 @@ func TestPkarrRouter(t *testing.T) { suffix := "cz13drbfxy3ih6xun4mw3cyiexrtfcs9gyp46o4469e93y36zhsy" req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", testServerURL, suffix), nil) c := newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.GetRecord(c) + dhtRouter.GetRecord(c) assert.Equal(t, http.StatusNotFound, w.Result().StatusCode, "unexpected %s", w.Result().Status) w = httptest.NewRecorder() req = httptest.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", testServerURL, suffix), nil) c = newRequestContextWithParams(w, req, map[string]string{IDParam: suffix}) - pkarrRouter.GetRecord(c) + dhtRouter.GetRecord(c) assert.Equal(t, http.StatusTooManyRequests, w.Result().StatusCode, "unexpected %s", w.Result().Status) }) } -func testPkarrService(t *testing.T) service.PkarrService { +func testDHTService(t *testing.T) service.DHTService { defaultConfig := config.GetDefaultConfig() db, err := storage.NewStorage(defaultConfig.ServerConfig.StorageURI) @@ -171,11 +171,11 @@ func testPkarrService(t *testing.T) service.PkarrService { require.NotEmpty(t, db) dht := dht.NewTestDHT(t) - pkarrService, err := service.NewPkarrService(&defaultConfig, db, dht) + dhtService, err := service.NewDHTService(&defaultConfig, db, dht) require.NoError(t, err) - require.NotEmpty(t, pkarrService) + require.NotEmpty(t, dhtService) - return *pkarrService + return *dhtService } func generateDIDPutRequest(t *testing.T) (string, []byte) { @@ -188,7 +188,7 @@ func generateDIDPutRequest(t *testing.T) (string, []byte) { assert.NoError(t, err) assert.NotEmpty(t, packet) - bep44Put, err := dht.CreatePkarrPublishRequest(sk, *packet) + bep44Put, err := dht.CreateDNSPublishRequest(sk, *packet) assert.NoError(t, err) assert.NotEmpty(t, bep44Put) diff --git a/impl/pkg/server/server.go b/impl/pkg/server/server.go index fd6e406e..e6093408 100644 --- a/impl/pkg/server/server.go +++ b/impl/pkg/server/server.go @@ -31,7 +31,7 
@@ type Server struct { shutdown chan os.Signal cfg *config.Config - svc *service.PkarrService + svc *service.DHTService } // NewServer returns a new instance of Server with the given db and host. @@ -51,9 +51,9 @@ func NewServer(cfg *config.Config, shutdown chan os.Signal, d *dht.DHT) (*Server logrus.WithField("record_count", recordCnt).Info("storage instantiated with record count") } - pkarrService, err := service.NewPkarrService(cfg, db, d) + dhtService, err := service.NewDHTService(cfg, db, d) if err != nil { - return nil, util.LoggingErrorMsg(err, "could not instantiate pkarr service") + return nil, util.LoggingErrorMsg(err, "could not instantiate the dht service") } handler.GET("/health", Health) @@ -63,8 +63,8 @@ func NewServer(cfg *config.Config, shutdown chan os.Signal, d *dht.DHT) (*Server handler.GET("/swagger/*any", ginswagger.WrapHandler(swaggerfiles.Handler, ginswagger.URL("/swagger.yaml"))) // root relay API - if err = PkarrAPI(&handler.RouterGroup, pkarrService); err != nil { - return nil, util.LoggingErrorMsg(err, "could not setup pkarr API") + if err = DHTAPI(&handler.RouterGroup, dhtService); err != nil { + return nil, util.LoggingErrorMsg(err, "could not setup the dht API") } return &Server{ Server: &http.Server{ @@ -76,7 +76,7 @@ func NewServer(cfg *config.Config, shutdown chan os.Signal, d *dht.DHT) (*Server MaxHeaderBytes: 1 << 20, }, cfg: cfg, - svc: pkarrService, + svc: dhtService, handler: handler, shutdown: shutdown, }, nil @@ -105,14 +105,14 @@ func setupHandler(env config.Environment) *gin.Engine { return handler } -// PkarrAPI sets up the relay API routes according to https://github.com/Nuhvi/pkarr/blob/main/design/relays.md -func PkarrAPI(rg *gin.RouterGroup, service *service.PkarrService) error { - relayRouter, err := NewPkarrRouter(service) +// DHTAPI sets up the relay API routes according to the spec https://did-dht.com/#gateway-api +func DHTAPI(rg *gin.RouterGroup, service *service.DHTService) error { + dhtRouter, err := 
NewDHTRouter(service) if err != nil { - return util.LoggingErrorMsg(err, "could not instantiate relay router") + return util.LoggingErrorMsg(err, "could not instantiate dht router") } - rg.PUT("/:id", relayRouter.PutRecord) - rg.GET("/:id", relayRouter.GetRecord) + rg.PUT("/:id", dhtRouter.PutRecord) + rg.GET("/:id", dhtRouter.GetRecord) return nil } diff --git a/impl/pkg/service/dht.go b/impl/pkg/service/dht.go new file mode 100644 index 00000000..489495e5 --- /dev/null +++ b/impl/pkg/service/dht.go @@ -0,0 +1,398 @@ +package service + +import ( + "context" + "sync" + "time" + + ssiutil "github.com/TBD54566975/ssi-sdk/util" + "github.com/allegro/bigcache/v3" + "github.com/anacrolix/torrent/bencode" + "github.com/goccy/go-json" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/TBD54566975/did-dht-method/internal/util" + + "github.com/TBD54566975/did-dht-method/config" + dhtint "github.com/TBD54566975/did-dht-method/internal/dht" + "github.com/TBD54566975/did-dht-method/pkg/dht" + "github.com/TBD54566975/did-dht-method/pkg/storage" + "github.com/TBD54566975/did-dht-method/pkg/telemetry" +) + +const recordSizeLimitBytes = 1000 + +// DHTService is the service responsible for managing BEP44 DNS records in the DHT and reading/writing records +type DHTService struct { + cfg *config.Config + db storage.Storage + dht *dht.DHT + cache *bigcache.BigCache + badGetCache *bigcache.BigCache + scheduler *dhtint.Scheduler +} + +// NewDHTService returns a new instance of the DHT service +func NewDHTService(cfg *config.Config, db storage.Storage, d *dht.DHT) (*DHTService, error) { + if cfg == nil { + return nil, ssiutil.LoggingNewError("config is required") + } + + // create and start get cache + cacheTTL := time.Duration(cfg.DHTConfig.CacheTTLSeconds) * time.Second + cacheConfig := bigcache.DefaultConfig(cacheTTL) + cacheConfig.MaxEntrySize = recordSizeLimitBytes + cacheConfig.HardMaxCacheSize = cfg.DHTConfig.CacheSizeLimitMB + cacheConfig.CleanWindow = 
cacheTTL / 2 + cache, err := bigcache.New(context.Background(), cacheConfig) + if err != nil { + return nil, ssiutil.LoggingErrorMsg(err, "failed to instantiate cache") + } + + // create a new cache for bad gets to prevent spamming the DHT + cacheConfig.LifeWindow = 60 * time.Second + cacheConfig.CleanWindow = 30 * time.Second + badGetCache, err := bigcache.New(context.Background(), cacheConfig) + if err != nil { + return nil, ssiutil.LoggingErrorMsg(err, "failed to instantiate badGetCache") + } + + // start scheduler for republishing + scheduler := dhtint.NewScheduler() + svc := DHTService{ + cfg: cfg, + db: db, + dht: d, + cache: cache, + badGetCache: badGetCache, + scheduler: &scheduler, + } + if err = scheduler.Schedule(cfg.DHTConfig.RepublishCRON, svc.republish); err != nil { + return nil, ssiutil.LoggingErrorMsg(err, "failed to start republisher") + } + return &svc, nil +} + +// PublishDHT stores the record in the db, publishes the given DNS record to the DHT, and returns the z-base-32 encoded ID +func (s *DHTService) PublishDHT(ctx context.Context, id string, record dht.BEP44Record) error { + ctx, span := telemetry.GetTracer().Start(ctx, "DHTService.PublishDHT") + defer span.End() + + // make sure the key is valid + if _, err := util.Z32Decode(id); err != nil { + return ssiutil.LoggingCtxErrorMsgf(ctx, err, "failed to decode z-base-32 encoded ID: %s", id) + } + + if err := record.IsValid(); err != nil { + return err + } + + // check if the message is already in the cache + if got, err := s.cache.Get(id); err == nil { + var resp dht.BEP44Response + if err = json.Unmarshal(got, &resp); err == nil && record.Response().Equals(resp) { + logrus.WithContext(ctx).WithField("record_id", id).Debug("resolved dht record from cache with matching response") + return nil + } + } + + // write to db and cache + if err := s.db.WriteRecord(ctx, record); err != nil { + return err + } + recordBytes, err := json.Marshal(record.Response()) + if err != nil { + return err + } + if 
err = s.cache.Set(id, recordBytes); err != nil { + return err + } + logrus.WithContext(ctx).WithField("record_id", id).Debug("added dht record to cache and db") + + // return here and put it in the DHT asynchronously + go func() { + // Create a new context with a timeout so that the parent context does not cancel the put + putCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + if _, err = s.dht.Put(putCtx, record.Put()); err != nil { + logrus.WithContext(ctx).WithField("record_id", id).WithError(err).Warnf("error from dht.Put for record: %s", id) + } else { + logrus.WithContext(ctx).WithField("record_id", id).Debug("put record to DHT") + } + }() + + return nil +} + +// GetDHT returns the full DNS record (including sig data) for the given z-base-32 encoded ID +func (s *DHTService) GetDHT(ctx context.Context, id string) (*dht.BEP44Response, error) { + ctx, span := telemetry.GetTracer().Start(ctx, "DHTService.GetDHT") + defer span.End() + + // make sure the key is valid + if _, err := util.Z32Decode(id); err != nil { + return nil, ssiutil.LoggingCtxErrorMsgf(ctx, err, "failed to decode z-base-32 encoded ID: %s", id) + } + + // if the key is in the badGetCache, return an error + if _, err := s.badGetCache.Get(id); err == nil { + return nil, ssiutil.LoggingCtxErrorMsgf(ctx, err, "bad key [%s] rate limited to prevent spam", id) + } + + // first do a cache lookup + if got, err := s.cache.Get(id); err == nil { + var resp dht.BEP44Response + if err = json.Unmarshal(got, &resp); err == nil { + logrus.WithContext(ctx).WithField("record_id", id).Info("resolved record from cache") + return &resp, nil + } + logrus.WithContext(ctx).WithError(err).WithField("record_id", id).Warn("failed to get record from cache, falling back to dht") + } + + // next do a dht lookup with a timeout of 10 seconds + getCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + got, err := s.dht.GetFull(getCtx, id) + if err != nil { + if 
errors.Is(err, context.DeadlineExceeded) { + logrus.WithContext(ctx).WithField("record_id", id).Warn("dht lookup timed out, attempting to resolve from storage") + } else { + logrus.WithContext(ctx).WithError(err).WithField("record_id", id).Warn("failed to get record from dht, attempting to resolve from storage") + } + + record, err := s.db.ReadRecord(ctx, id) + if err != nil || record == nil { + logrus.WithContext(ctx).WithError(err).WithField("record_id", id).Error("failed to resolve record from storage; adding to bad get cache") + + // add the key to the badGetCache to prevent spamming the DHT + if err = s.badGetCache.Set(id, []byte{0}); err != nil { + logrus.WithContext(ctx).WithError(err).WithField("record_id", id).Error("failed to set key in bad get cache") + } + + return nil, err + } + + logrus.WithContext(ctx).WithField("record_id", id).Info("resolved record from storage") + resp := record.Response() + // add the record back to the cache for future lookups + if err = s.addRecordToCache(id, record.Response()); err != nil { + logrus.WithError(err).WithField("record_id", id).Error("failed to set record in cache") + } + + return &resp, err + } + + // prepare the record for return + bBytes, err := got.V.MarshalBencode() + if err != nil { + return nil, err + } + var payload string + if err = bencode.Unmarshal(bBytes, &payload); err != nil { + return nil, ssiutil.LoggingCtxErrorMsg(ctx, err, "failed to unmarshal bencoded payload") + } + resp := dht.BEP44Response{ + V: []byte(payload), + Seq: got.Seq, + Sig: got.Sig, + } + + // add the record to cache, do it here to avoid duplicate calculations + if err = s.addRecordToCache(id, resp); err != nil { + logrus.WithContext(ctx).WithField("record_id", id).WithError(err).Error("failed to set record in cache") + } else { + logrus.WithContext(ctx).WithField("record_id", id).Info("added record back to cache") + } + + return &resp, nil +} + +func (s *DHTService) addRecordToCache(id string, resp dht.BEP44Response) error { + 
recordBytes, err := json.Marshal(resp) + if err != nil { + return err + } + if err = s.cache.Set(id, recordBytes); err != nil { + return err + } + return nil +} + +// failedRecord is a struct to keep track of records that failed to be republished +type failedRecord struct { + record dht.BEP44Record + failureCnt int +} + +// TODO(gabe) make this more efficient. create a publish schedule based on each individual record, not all records +// republish republishes all records in the db +func (s *DHTService) republish() { + ctx, span := telemetry.GetTracer().Start(context.Background(), "DHTService.republish") + defer span.End() + + recordCnt, err := s.db.RecordCount(ctx) + if err != nil { + logrus.WithContext(ctx).WithError(err).Error("failed to get record count before republishing") + return + } + logrus.WithContext(ctx).WithField("record_count", recordCnt).Info("republishing records") + + // republish all records in the db and handle failed records up to 3 times + failedRecords := s.republishRecords(ctx) + + // handle failed records + logrus.WithContext(ctx).WithField("failed_record_count", len(failedRecords)).Info("handling failed records") + s.handleFailedRecords(ctx, failedRecords) +} + +// republishRecords republishes all records in the db and returns a list of failed records to be retried +func (s *DHTService) republishRecords(ctx context.Context) []failedRecord { + var nextPageToken []byte + var seenRecords, batchCnt int32 + var failedRecords []failedRecord + var recordsBatch []dht.BEP44Record + var err error + + var wg sync.WaitGroup + + for { + recordsBatch, nextPageToken, err = s.db.ListRecords(ctx, nextPageToken, 1000) + if err != nil { + logrus.WithContext(ctx).WithError(err).Error("failed to list record(s) for republishing") + continue + } + + batchSize := len(recordsBatch) + seenRecords += int32(batchSize) + if batchSize == 0 { + logrus.WithContext(ctx).Info("no records to republish") + break + } + + logrus.WithContext(ctx).WithFields(logrus.Fields{ + 
"record_count": batchSize, + "batch_number": batchCnt, + "total_seen": seenRecords, + }).Debugf("republishing batch [%d] of [%d] records", batchCnt, batchSize) + batchCnt++ + + batchFailedRecords := s.republishBatch(ctx, &wg, recordsBatch) + failedRecords = append(failedRecords, batchFailedRecords...) + + if nextPageToken == nil { + break + } + } + + wg.Wait() + + successRate := float64(seenRecords-int32(len(failedRecords))) / float64(seenRecords) * 100 + logrus.WithContext(ctx).WithFields(logrus.Fields{ + "success": seenRecords - int32(len(failedRecords)), + "errors": len(failedRecords), + "total": seenRecords, + }).Infof("republishing complete with [%d] batches of [%d] total records with a [%.2f] percent success rate", batchCnt, seenRecords, successRate) + + return failedRecords +} + +// republishBatch republishes a batch of records and returns a list of failed records to be retried +func (s *DHTService) republishBatch(ctx context.Context, wg *sync.WaitGroup, recordsBatch []dht.BEP44Record) []failedRecord { + failedRecordsChan := make(chan failedRecord, len(recordsBatch)) + var failedRecords []failedRecord + + for _, record := range recordsBatch { + wg.Add(1) + go func(record dht.BEP44Record) { + defer wg.Done() + + id := record.ID() + putCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + _, putErr := s.dht.Put(putCtx, record.Put()) + if putErr != nil { + if errors.Is(putErr, context.DeadlineExceeded) { + logrus.WithContext(putCtx).WithField("record_id", id).Info("republish timeout exceeded") + } else { + logrus.WithContext(putCtx).WithField("record_id", id).WithError(putErr).Info("failed to republish record") + } + failedRecordsChan <- failedRecord{ + record: record, + failureCnt: 1, + } + } + }(record) + } + + wg.Wait() + close(failedRecordsChan) + + for fr := range failedRecordsChan { + failedRecords = append(failedRecords, fr) + } + return failedRecords +} + +// handleFailedRecords attempts to republish failed records up to 3 times 
+func (s *DHTService) handleFailedRecords(ctx context.Context, failedRecords []failedRecord) { + for _, fr := range failedRecords { + retryCount := 0 + for retryCount < 3 { + id := fr.record.ID() + putCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + if _, putErr := s.dht.Put(putCtx, fr.record.Put()); putErr != nil { + logrus.WithContext(putCtx).WithField("record_id", id).WithError(putErr).Debugf("failed to re-republish [%s], attempt: %d", id, retryCount+1) + retryCount++ + } else { + break + } + } + + if retryCount == 3 { + id := fr.record.ID() + logrus.WithContext(ctx).WithField("record_id", id).Error("record failed to republish after 3 attempts") + if err := s.db.WriteFailedRecord(ctx, id); err != nil { + logrus.WithContext(ctx).WithField("record_id", id).WithError(err).Warn("failed to write failed record to db") + } + } + } + + failedRecordCnt, err := s.db.FailedRecordCount(ctx) + if err != nil { + logrus.WithContext(ctx).WithError(err).Error("failed to get failed record count") + return + } + logrus.WithContext(ctx).WithField("failed_record_count", failedRecordCnt).Warn("total failed records") +} + +// Close closes the Mainline service gracefully +func (s *DHTService) Close() { + if s == nil { + return + } + if s.scheduler != nil { + s.scheduler.Stop() + } + if s.cache != nil { + if err := s.cache.Close(); err != nil { + logrus.WithError(err).Error("failed to close cache") + } + } + if s.badGetCache != nil { + if err := s.badGetCache.Close(); err != nil { + logrus.WithError(err).Error("failed to close bad get cache") + } + } + if err := s.db.Close(); err != nil { + logrus.WithError(err).Error("failed to close db") + } + if s.dht != nil { + s.dht.Close() + } +} diff --git a/impl/pkg/service/pkarr_test.go b/impl/pkg/service/dht_test.go similarity index 71% rename from impl/pkg/service/pkarr_test.go rename to impl/pkg/service/dht_test.go index 8d83d90f..f2affb02 100644 --- a/impl/pkg/service/pkarr_test.go +++ 
b/impl/pkg/service/dht_test.go @@ -13,27 +13,26 @@ import ( "github.com/TBD54566975/did-dht-method/config" "github.com/TBD54566975/did-dht-method/internal/did" "github.com/TBD54566975/did-dht-method/pkg/dht" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" "github.com/TBD54566975/did-dht-method/pkg/storage" ) -func TestPkarrService(t *testing.T) { - svc := newPkarrService(t, "a") +func TestDHTService(t *testing.T) { + svc := newDHTService(t, "a") t.Run("test put bad record", func(t *testing.T) { - err := svc.PublishPkarr(context.Background(), "", pkarr.Record{}) + err := svc.PublishDHT(context.Background(), "", dht.BEP44Record{}) assert.Error(t, err) assert.Contains(t, err.Error(), "validation for 'Value' failed on the 'required' tag") }) t.Run("test get non existent record", func(t *testing.T) { - got, err := svc.GetPkarr(context.Background(), "test") + got, err := svc.GetDHT(context.Background(), "test") assert.NoError(t, err) assert.Nil(t, got) }) t.Run("test get record with invalid ID", func(t *testing.T) { - got, err := svc.GetPkarr(context.Background(), "---") + got, err := svc.GetDHT(context.Background(), "---") assert.ErrorContains(t, err, "illegal z-base-32 data at input byte 0") assert.Nil(t, got) }) @@ -49,18 +48,18 @@ func TestPkarrService(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) suffix, err := d.Suffix() require.NoError(t, err) - err = svc.PublishPkarr(context.Background(), suffix, pkarr.RecordFromBEP44(putMsg)) + err = svc.PublishDHT(context.Background(), suffix, dht.RecordFromBEP44(putMsg)) assert.NoError(t, err) // invalidate the signature putMsg.Sig[0] = 0 - err = svc.PublishPkarr(context.Background(), suffix, pkarr.RecordFromBEP44(putMsg)) + err = svc.PublishDHT(context.Background(), suffix, dht.RecordFromBEP44(putMsg)) assert.Error(t, err) 
assert.Contains(t, err.Error(), "signature is invalid") }) @@ -76,16 +75,16 @@ func TestPkarrService(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) suffix, err := d.Suffix() require.NoError(t, err) - err = svc.PublishPkarr(context.Background(), suffix, pkarr.RecordFromBEP44(putMsg)) + err = svc.PublishDHT(context.Background(), suffix, dht.RecordFromBEP44(putMsg)) assert.NoError(t, err) - got, err := svc.GetPkarr(context.Background(), suffix) + got, err := svc.GetDHT(context.Background(), suffix) assert.NoError(t, err) assert.NotEmpty(t, got) assert.Equal(t, putMsg.V, got.V) @@ -104,20 +103,20 @@ func TestPkarrService(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) suffix, err := d.Suffix() require.NoError(t, err) - err = svc.PublishPkarr(context.Background(), suffix, pkarr.RecordFromBEP44(putMsg)) + err = svc.PublishDHT(context.Background(), suffix, dht.RecordFromBEP44(putMsg)) require.NoError(t, err) // remove it from the cache so the get tests the uncached lookup path err = svc.cache.Delete(suffix) require.NoError(t, err) - got, err := svc.GetPkarr(context.Background(), suffix) + got, err := svc.GetDHT(context.Background(), suffix) assert.NoError(t, err) assert.NotEmpty(t, got) assert.Equal(t, putMsg.V, got.V) @@ -126,12 +125,12 @@ func TestPkarrService(t *testing.T) { }) t.Run("test get record with invalid ID", func(t *testing.T) { - got, err := svc.GetPkarr(context.Background(), "uqaj3fcr9db6jg6o9pjs53iuftyj45r46aubogfaceqjbo6pp9sy") + got, err := svc.GetDHT(context.Background(), "uqaj3fcr9db6jg6o9pjs53iuftyj45r46aubogfaceqjbo6pp9sy") assert.NoError(t, err) assert.Empty(t, got) // try it 
again to make sure the cache is working - got, err = svc.GetPkarr(context.Background(), "uqaj3fcr9db6jg6o9pjs53iuftyj45r46aubogfaceqjbo6pp9sy") + got, err = svc.GetDHT(context.Background(), "uqaj3fcr9db6jg6o9pjs53iuftyj45r46aubogfaceqjbo6pp9sy") assert.ErrorContains(t, err, "rate limited to prevent spam") assert.Empty(t, got) }) @@ -140,7 +139,7 @@ func TestPkarrService(t *testing.T) { } func TestDHT(t *testing.T) { - svc1 := newPkarrService(t, "b") + svc1 := newDHTService(t, "b") // create and publish a record to service1 sk, doc, err := did.GenerateDIDDHT(did.CreateDIDDHTOpts{}) @@ -150,16 +149,16 @@ func TestDHT(t *testing.T) { packet, err := d.ToDNSPacket(*doc, nil, nil) require.NoError(t, err) require.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) suffix, err := d.Suffix() require.NoError(t, err) - err = svc1.PublishPkarr(context.Background(), suffix, pkarr.RecordFromBEP44(putMsg)) + err = svc1.PublishDHT(context.Background(), suffix, dht.RecordFromBEP44(putMsg)) require.NoError(t, err) // make sure we can get it back - got, err := svc1.GetPkarr(context.Background(), suffix) + got, err := svc1.GetDHT(context.Background(), suffix) require.NoError(t, err) require.NotEmpty(t, got) assert.Equal(t, putMsg.V, got.V) @@ -167,10 +166,10 @@ func TestDHT(t *testing.T) { assert.Equal(t, putMsg.Seq, got.Seq) // create service2 with service1 as a bootstrap peer - svc2 := newPkarrService(t, "c", anacrolixdht.NewAddr(svc1.dht.Addr())) + svc2 := newDHTService(t, "c", anacrolixdht.NewAddr(svc1.dht.Addr())) // get the record via service2 - gotFrom2, err := svc2.GetPkarr(context.Background(), suffix) + gotFrom2, err := svc2.GetDHT(context.Background(), suffix) require.NoError(t, err) require.NotEmpty(t, gotFrom2) assert.Equal(t, putMsg.V, gotFrom2.V) @@ -184,20 +183,20 @@ func TestDHT(t *testing.T) { } func TestNoConfig(t *testing.T) { - 
svc, err := NewPkarrService(nil, nil, nil) + svc, err := NewDHTService(nil, nil, nil) assert.EqualError(t, err, "config is required") assert.Empty(t, svc) - svc, err = NewPkarrService(&config.Config{ - PkarrConfig: config.PkarrServiceConfig{ + svc, err = NewDHTService(&config.Config{ + DHTConfig: config.DHTServiceConfig{ CacheSizeLimitMB: -1, }, }, nil, nil) assert.EqualError(t, err, "failed to instantiate cache: HardMaxCacheSize must be >= 0") assert.Nil(t, svc) - svc, err = NewPkarrService(&config.Config{ - PkarrConfig: config.PkarrServiceConfig{ + svc, err = NewDHTService(&config.Config{ + DHTConfig: config.DHTServiceConfig{ RepublishCRON: "not a real cron expression", }, }, nil, nil) @@ -207,7 +206,7 @@ func TestNoConfig(t *testing.T) { t.Cleanup(func() { svc.Close() }) } -func newPkarrService(t *testing.T, id string, bootstrapPeers ...anacrolixdht.Addr) PkarrService { +func newDHTService(t *testing.T, id string, bootstrapPeers ...anacrolixdht.Addr) DHTService { defaultConfig := config.GetDefaultConfig() db, err := storage.NewStorage(fmt.Sprintf("bolt://diddht-test-%s.db", id)) @@ -217,9 +216,9 @@ func newPkarrService(t *testing.T, id string, bootstrapPeers ...anacrolixdht.Add t.Cleanup(func() { os.Remove(fmt.Sprintf("diddht-test-%s.db", id)) }) d := dht.NewTestDHT(t, bootstrapPeers...) 
- pkarrService, err := NewPkarrService(&defaultConfig, db, d) + dhtService, err := NewDHTService(&defaultConfig, db, d) require.NoError(t, err) - require.NotEmpty(t, pkarrService) + require.NotEmpty(t, dhtService) - return *pkarrService + return *dhtService } diff --git a/impl/pkg/service/pkarr.go b/impl/pkg/service/pkarr.go index 0dcdadcd..e69de29b 100644 --- a/impl/pkg/service/pkarr.go +++ b/impl/pkg/service/pkarr.go @@ -1,353 +0,0 @@ -package service - -import ( - "context" - "sync" - "sync/atomic" - "time" - - ssiutil "github.com/TBD54566975/ssi-sdk/util" - "github.com/allegro/bigcache/v3" - "github.com/anacrolix/torrent/bencode" - "github.com/goccy/go-json" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/tv42/zbase32" - - "github.com/TBD54566975/did-dht-method/internal/util" - - "github.com/TBD54566975/did-dht-method/config" - dhtint "github.com/TBD54566975/did-dht-method/internal/dht" - "github.com/TBD54566975/did-dht-method/pkg/dht" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" - "github.com/TBD54566975/did-dht-method/pkg/storage" - "github.com/TBD54566975/did-dht-method/pkg/telemetry" -) - -const recordSizeLimit = 1000 - -// PkarrService is the Pkarr service responsible for managing the Pkarr DHT and reading/writing records -type PkarrService struct { - cfg *config.Config - db storage.Storage - dht *dht.DHT - cache *bigcache.BigCache - badGetCache *bigcache.BigCache - scheduler *dhtint.Scheduler -} - -// NewPkarrService returns a new instance of the Pkarr service -func NewPkarrService(cfg *config.Config, db storage.Storage, d *dht.DHT) (*PkarrService, error) { - if cfg == nil { - return nil, ssiutil.LoggingNewError("config is required") - } - - // create and start get cache - cacheTTL := time.Duration(cfg.PkarrConfig.CacheTTLSeconds) * time.Second - cacheConfig := bigcache.DefaultConfig(cacheTTL) - cacheConfig.MaxEntrySize = recordSizeLimit - cacheConfig.HardMaxCacheSize = cfg.PkarrConfig.CacheSizeLimitMB - 
cacheConfig.CleanWindow = cacheTTL / 2 - cache, err := bigcache.New(context.Background(), cacheConfig) - if err != nil { - return nil, ssiutil.LoggingErrorMsg(err, "failed to instantiate cache") - } - - // create a new cache for bad gets to prevent spamming the DHT - cacheConfig.LifeWindow = 60 * time.Second - cacheConfig.CleanWindow = 30 * time.Second - badGetCache, err := bigcache.New(context.Background(), cacheConfig) - if err != nil { - return nil, ssiutil.LoggingErrorMsg(err, "failed to instantiate badGetCache") - } - - // start scheduler for republishing - scheduler := dhtint.NewScheduler() - svc := PkarrService{ - cfg: cfg, - db: db, - dht: d, - cache: cache, - badGetCache: badGetCache, - scheduler: &scheduler, - } - if err = scheduler.Schedule(cfg.PkarrConfig.RepublishCRON, svc.republish); err != nil { - return nil, ssiutil.LoggingErrorMsg(err, "failed to start republisher") - } - return &svc, nil -} - -// PublishPkarr stores the record in the db, publishes the given Pkarr record to the DHT, and returns the z-base-32 encoded ID -func (s *PkarrService) PublishPkarr(ctx context.Context, id string, record pkarr.Record) error { - ctx, span := telemetry.GetTracer().Start(ctx, "PkarrService.PublishPkarr") - defer span.End() - - // make sure the key is valid - if _, err := util.Z32Decode(id); err != nil { - return ssiutil.LoggingCtxErrorMsgf(ctx, err, "failed to decode z-base-32 encoded ID: %s", id) - } - - if err := record.IsValid(); err != nil { - return err - } - - // check if the message is already in the cache - if got, err := s.cache.Get(id); err == nil { - var resp pkarr.Response - if err = json.Unmarshal(got, &resp); err == nil && record.Response().Equals(resp) { - logrus.WithContext(ctx).WithField("record_id", id).Debug("resolved pkarr record from cache with matching response") - return nil - } - } - - // write to db and cache - if err := s.db.WriteRecord(ctx, record); err != nil { - return err - } - recordBytes, err := json.Marshal(record.Response()) - 
if err != nil { - return err - } - if err = s.cache.Set(id, recordBytes); err != nil { - return err - } - logrus.WithContext(ctx).WithField("record", id).Debug("added pkarr record to cache and db") - - // return here and put it in the DHT asynchronously - go func() { - // Create a new context with a timeout so that the parent context does not cancel the put - putCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - if _, err = s.dht.Put(putCtx, record.BEP44()); err != nil { - logrus.WithContext(ctx).WithError(err).Errorf("error from dht.Put for record: %s", id) - } else { - logrus.WithContext(ctx).WithField("record", id).Debug("put pkarr record to DHT") - } - }() - - return nil -} - -// GetPkarr returns the full Pkarr record (including sig data) for the given z-base-32 encoded ID -func (s *PkarrService) GetPkarr(ctx context.Context, id string) (*pkarr.Response, error) { - ctx, span := telemetry.GetTracer().Start(ctx, "PkarrService.GetPkarr") - defer span.End() - - // make sure the key is valid - if _, err := util.Z32Decode(id); err != nil { - return nil, ssiutil.LoggingCtxErrorMsgf(ctx, err, "failed to decode z-base-32 encoded ID: %s", id) - } - - // if the key is in the badGetCache, return an error - if _, err := s.badGetCache.Get(id); err == nil { - return nil, ssiutil.LoggingCtxErrorMsgf(ctx, err, "bad key [%s] rate limited to prevent spam", id) - } - - // first do a cache lookup - if got, err := s.cache.Get(id); err == nil { - var resp pkarr.Response - if err = json.Unmarshal(got, &resp); err == nil { - logrus.WithContext(ctx).WithField("record_id", id).Info("resolved pkarr record from cache") - return &resp, nil - } - logrus.WithContext(ctx).WithError(err).WithField("record", id).Warn("failed to get pkarr record from cache, falling back to dht") - } - - // next do a dht lookup with a timeout of 10 seconds - getCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - got, err := s.dht.GetFull(getCtx, 
id) - if err != nil { - if errors.Is(err, context.DeadlineExceeded) { - logrus.WithContext(ctx).WithField("record", id).Warn("dht lookup timed out, attempting to resolve from storage") - } else { - logrus.WithContext(ctx).WithError(err).WithField("record", id).Warn("failed to get pkarr record from dht, attempting to resolve from storage") - } - - rawID, err := util.Z32Decode(id) - if err != nil { - return nil, err - } - - record, err := s.db.ReadRecord(ctx, rawID) - if err != nil || record == nil { - logrus.WithContext(ctx).WithError(err).WithField("record", id).Error("failed to resolve pkarr record from storage; adding to badGetCache") - - // add the key to the badGetCache to prevent spamming the DHT - if err = s.badGetCache.Set(id, []byte{0}); err != nil { - logrus.WithContext(ctx).WithError(err).WithField("record", id).Error("failed to set key in badGetCache") - } - - return nil, err - } - - logrus.WithContext(ctx).WithField("record", id).Info("resolved pkarr record from storage") - resp := record.Response() - // add the record back to the cache for future lookups - if err = s.addRecordToCache(id, record.Response()); err != nil { - logrus.WithError(err).WithField("record", id).Error("failed to set pkarr record in cache") - } - - return &resp, err - } - - // prepare the record for return - bBytes, err := got.V.MarshalBencode() - if err != nil { - return nil, err - } - var payload string - if err = bencode.Unmarshal(bBytes, &payload); err != nil { - return nil, ssiutil.LoggingCtxErrorMsg(ctx, err, "failed to unmarshal bencoded payload") - } - resp := pkarr.Response{ - V: []byte(payload), - Seq: got.Seq, - Sig: got.Sig, - } - - // add the record to cache, do it here to avoid duplicate calculations - if err = s.addRecordToCache(id, resp); err != nil { - logrus.WithContext(ctx).WithError(err).Errorf("failed to set pkarr record[%s] in cache", id) - } else { - logrus.WithContext(ctx).WithField("record", id).Info("added pkarr record back to cache") - } - - return &resp, 
nil -} - -func (s *PkarrService) addRecordToCache(id string, resp pkarr.Response) error { - recordBytes, err := json.Marshal(resp) - if err != nil { - return err - } - if err = s.cache.Set(id, recordBytes); err != nil { - return err - } - return nil -} - -// TODO(gabe) make this more efficient. create a publish schedule based on each individual record, not all records -func (s *PkarrService) republish() { - ctx, span := telemetry.GetTracer().Start(context.Background(), "PkarrService.republish") - defer span.End() - - recordCnt, err := s.db.RecordCount(ctx) - if err != nil { - logrus.WithContext(ctx).WithError(err).Error("failed to get record count before republishing") - return - } else { - logrus.WithContext(ctx).WithField("record_count", recordCnt).Info("republishing records") - } - - var nextPageToken []byte - var recordsBatch []pkarr.Record - var seenRecords, batchCnt, successCnt, errCnt int32 = 0, 1, 0, 0 - - for { - recordsBatch, nextPageToken, err = s.db.ListRecords(ctx, nextPageToken, 1000) - if err != nil { - logrus.WithContext(ctx).WithError(err).Error("failed to list record(s) for republishing") - return - } - batchSize := len(recordsBatch) - seenRecords += int32(batchSize) - if batchSize == 0 { - logrus.WithContext(ctx).Info("no records to republish") - return - } - - logrus.WithContext(ctx).WithFields(logrus.Fields{ - "record_count": batchSize, - "batch_number": batchCnt, - "total_seen": seenRecords, - }).Infof("republishing batch [%d] of [%d] records", batchCnt, batchSize) - batchCnt++ - - var wg sync.WaitGroup - wg.Add(batchSize) - - var batchErrCnt, batchSuccessCnt int32 = 0, 0 - for _, record := range recordsBatch { - go func(ctx context.Context, record pkarr.Record) { - defer wg.Done() - - recordID := zbase32.EncodeToString(record.Key[:]) - logrus.WithContext(ctx).Debugf("republishing record: %s", recordID) - - putCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - if _, putErr := s.dht.Put(putCtx, record.BEP44()); putErr 
!= nil { - logrus.WithContext(putCtx).WithError(putErr).Warnf("failed to republish record: %s", recordID) - atomic.AddInt32(&batchErrCnt, 1) - } else { - atomic.AddInt32(&batchSuccessCnt, 1) - } - }(ctx, record) - } - - // Wait for all goroutines in this batch to finish before moving on to the next batch - wg.Wait() - - // Update the success and error counts - atomic.AddInt32(&successCnt, batchSuccessCnt) - atomic.AddInt32(&errCnt, batchErrCnt) - - successRate := float64(batchSuccessCnt) / float64(batchSize) - - logrus.WithContext(ctx).WithFields(logrus.Fields{ - "batch_number": batchCnt, - "success": successCnt, - "errors": errCnt, - }).Infof("batch [%d] completed with a [%.2f] percent success rate", batchCnt, successRate*100) - - if successRate < 0.8 { - logrus.WithContext(ctx).WithFields(logrus.Fields{ - "batch_number": batchCnt, - "success": successCnt, - "errors": errCnt, - }).Errorf("batch [%d] failed to meet success rate threshold; exiting republishing early", batchCnt) - break - } - - if nextPageToken == nil { - break - } - } - - successRate := float64(successCnt) / float64(seenRecords) - logrus.WithContext(ctx).WithFields(logrus.Fields{ - "success": seenRecords - errCnt, - "errors": errCnt, - "total": seenRecords, - }).Infof("republishing complete with [%d] batches of [%d] total records with an [%.2f] percent success rate", batchCnt, seenRecords, successRate*100) -} - -// Close closes the Pkarr service gracefully -func (s *PkarrService) Close() { - if s == nil { - return - } - if s.scheduler != nil { - s.scheduler.Stop() - } - if s.cache != nil { - if err := s.cache.Close(); err != nil { - logrus.WithError(err).Error("failed to close cache") - } - } - if s.badGetCache != nil { - if err := s.badGetCache.Close(); err != nil { - logrus.WithError(err).Error("failed to close badGetCache") - } - } - if err := s.db.Close(); err != nil { - logrus.WithError(err).Error("failed to close db") - } - if s.dht != nil { - s.dht.Close() - } -} diff --git 
a/impl/pkg/storage/db/bolt/bolt.go b/impl/pkg/storage/db/bolt/bolt.go index 7141d409..608c466d 100644 --- a/impl/pkg/storage/db/bolt/bolt.go +++ b/impl/pkg/storage/db/bolt/bolt.go @@ -1,7 +1,9 @@ package bolt import ( + "bytes" "context" + "encoding/binary" "time" "github.com/goccy/go-json" @@ -10,12 +12,13 @@ import ( "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" + "github.com/TBD54566975/did-dht-method/pkg/dht" "github.com/TBD54566975/did-dht-method/pkg/telemetry" ) const ( - pkarrNamespace = "pkarr" + dhtNamespace = "dht" + failedNamespace = "failed" ) type Bolt struct { @@ -35,13 +38,12 @@ func NewBolt(path string) (*Bolt, error) { if err != nil { return nil, err } - return &Bolt{db: db}, nil } // WriteRecord writes the given record to the storage // TODO: don't overwrite existing records, store unique seq numbers -func (b *Bolt) WriteRecord(ctx context.Context, record pkarr.Record) error { +func (b *Bolt) WriteRecord(ctx context.Context, record dht.BEP44Record) error { ctx, span := telemetry.GetTracer().Start(ctx, "bolt.WriteRecord") defer span.End() @@ -51,15 +53,15 @@ func (b *Bolt) WriteRecord(ctx context.Context, record pkarr.Record) error { return err } - return b.write(ctx, pkarrNamespace, encoded.K, recordBytes) + return b.write(ctx, dhtNamespace, record.ID(), recordBytes) } // ReadRecord reads the record with the given id from the storage -func (b *Bolt) ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, error) { +func (b *Bolt) ReadRecord(ctx context.Context, id string) (*dht.BEP44Record, error) { ctx, span := telemetry.GetTracer().Start(ctx, "bolt.ReadRecord") defer span.End() - recordBytes, err := b.read(ctx, pkarrNamespace, encoding.EncodeToString(id)) + recordBytes, err := b.read(ctx, dhtNamespace, id) if err != nil { return nil, err } @@ -67,7 +69,7 @@ func (b *Bolt) ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, error) return nil, nil } - var b64record 
base64PkarrRecord + var b64record base64BEP44Record if err = json.Unmarshal(recordBytes, &b64record); err != nil { return nil, err } @@ -81,18 +83,18 @@ func (b *Bolt) ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, error) } // ListRecords lists all records in the storage -func (b *Bolt) ListRecords(ctx context.Context, nextPageToken []byte, pagesize int) ([]pkarr.Record, []byte, error) { +func (b *Bolt) ListRecords(ctx context.Context, nextPageToken []byte, pageSize int) ([]dht.BEP44Record, []byte, error) { ctx, span := telemetry.GetTracer().Start(ctx, "bolt.ListRecords") defer span.End() - boltRecords, err := b.readSeveral(ctx, pkarrNamespace, nextPageToken, pagesize) + boltRecords, err := b.readSeveral(ctx, dhtNamespace, nextPageToken, pageSize) if err != nil { return nil, nil, err } - var records []pkarr.Record + var records []dht.BEP44Record for _, recordBytes := range boltRecords { - var encodedRecord base64PkarrRecord + var encodedRecord base64BEP44Record if err = json.Unmarshal(recordBytes.value, &encodedRecord); err != nil { return nil, nil, err } @@ -105,7 +107,7 @@ func (b *Bolt) ListRecords(ctx context.Context, nextPageToken []byte, pagesize i records = append(records, *record) } - if len(boltRecords) == pagesize { + if len(boltRecords) == pageSize { nextPageToken = boltRecords[len(boltRecords)-1].key } else { nextPageToken = nil @@ -202,16 +204,85 @@ func (b *Bolt) readSeveral(ctx context.Context, namespace string, after []byte, return result, err } -// RecordCount returns the number of records in the storage for the pkarr namespace +// RecordCount returns the number of records in the storage for the mainline namespace func (b *Bolt) RecordCount(ctx context.Context) (int, error) { _, span := telemetry.GetTracer().Start(ctx, "bolt.RecordCount") defer span.End() var count int err := b.db.View(func(tx *bolt.Tx) error { - bucket := tx.Bucket([]byte(pkarrNamespace)) + bucket := tx.Bucket([]byte(dhtNamespace)) + if bucket == nil { + 
logrus.WithContext(ctx).WithField("namespace", dhtNamespace).Warn("namespace does not exist") + return nil + } + count = bucket.Stats().KeyN + return nil + }) + return count, err +} + +func (b *Bolt) WriteFailedRecord(ctx context.Context, id string) error { + _, span := telemetry.GetTracer().Start(ctx, "bolt.WriteFailedRecord") + defer span.End() + + return b.db.Update(func(tx *bolt.Tx) error { + bucket, err := tx.CreateBucketIfNotExists([]byte(failedNamespace)) + if err != nil { + return err + } + + var count int32 = 1 + v := bucket.Get([]byte(id)) + if v != nil { + if err = json.Unmarshal(v, &count); err != nil { + return err + } + count++ + } + + buf := new(bytes.Buffer) + if err = binary.Write(buf, binary.LittleEndian, count); err != nil { + return err + } + return bucket.Put([]byte(id), buf.Bytes()) + }) +} + +func (b *Bolt) ListFailedRecords(ctx context.Context) ([]dht.FailedRecord, error) { + _, span := telemetry.GetTracer().Start(ctx, "bolt.ListFailedRecords") + defer span.End() + + var result []dht.FailedRecord + err := b.db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte(failedNamespace)) + if bucket == nil { + logrus.WithField("namespace", failedNamespace).Warn("namespace does not exist") + return nil + } + + cursor := bucket.Cursor() + for k, v := cursor.First(); k != nil; k, v = cursor.Next() { + var count int + if err := binary.Read(bytes.NewReader(v), binary.LittleEndian, &count); err != nil { + return err + } + result = append(result, dht.FailedRecord{ID: string(k), Count: count}) + } + return nil + }) + return result, err +} + +func (b *Bolt) FailedRecordCount(ctx context.Context) (int, error) { + _, span := telemetry.GetTracer().Start(ctx, "bolt.FailedRecordCount") + defer span.End() + + var count int + err := b.db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte(failedNamespace)) if bucket == nil { - logrus.WithContext(ctx).WithField("namespace", pkarrNamespace).Warn("namespace does not exist") + 
logrus.WithField("namespace", failedNamespace).Warn("namespace does not exist") return nil } count = bucket.Stats().KeyN diff --git a/impl/pkg/storage/db/bolt/bolt_test.go b/impl/pkg/storage/db/bolt/bolt_test.go index 8a015869..7d9ee485 100644 --- a/impl/pkg/storage/db/bolt/bolt_test.go +++ b/impl/pkg/storage/db/bolt/bolt_test.go @@ -7,12 +7,11 @@ import ( "github.com/goccy/go-json" - "github.com/TBD54566975/did-dht-method/internal/did" - "github.com/TBD54566975/did-dht-method/pkg/dht" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/TBD54566975/did-dht-method/internal/did" + "github.com/TBD54566975/did-dht-method/pkg/dht" ) func TestBoltDB_ReadWrite(t *testing.T) { @@ -40,17 +39,17 @@ func TestBoltDB_ReadWrite(t *testing.T) { assert.NoError(t, err) assert.EqualValues(t, players1, players1Result) - // get a value from a dhtNamespace that doesn't exist + // get a value from a oldDHTNamespace that doesn't exist res, err := db.read(ctx, "bad", "worse") assert.NoError(t, err) assert.Empty(t, res) - // get a value that doesn't exist in the dhtNamespace + // get a value that doesn't exist in the oldDHTNamespace noValue, err := db.read(ctx, namespace, "Porsche") assert.NoError(t, err) assert.Empty(t, noValue) - // create a second value in the dhtNamespace + // create a second value in the oldDHTNamespace team2 := "McLaren" players2 := []string{"Lando Norris", "Daniel Ricciardo"} p2Bytes, err := json.Marshal(players2) @@ -59,7 +58,7 @@ func TestBoltDB_ReadWrite(t *testing.T) { err = db.write(ctx, namespace, team2, p2Bytes) assert.NoError(t, err) - // get all values from the dhtNamespace + // get all values from the oldDHTNamespace gotAll, err := db.readAll(namespace) assert.NoError(t, err) assert.True(t, len(gotAll) == 2) @@ -123,16 +122,16 @@ func TestReadWrite(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, 
*packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) - r := pkarr.RecordFromBEP44(putMsg) + r := dht.RecordFromBEP44(putMsg) err = db.WriteRecord(ctx, r) require.NoError(t, err) - r2, err := db.ReadRecord(ctx, r.Key[:]) + r2, err := db.ReadRecord(ctx, r.ID()) require.NoError(t, err) assert.Equal(t, r.Key, r2.Key) @@ -168,12 +167,12 @@ func TestDBPagination(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) // create record - record := pkarr.RecordFromBEP44(putMsg) + record := dht.RecordFromBEP44(putMsg) err = db.WriteRecord(ctx, record) assert.NoError(t, err) @@ -189,12 +188,12 @@ func TestDBPagination(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) // create eleventhRecord - eleventhRecord := pkarr.RecordFromBEP44(putMsg) + eleventhRecord := dht.RecordFromBEP44(putMsg) err = db.WriteRecord(ctx, eleventhRecord) assert.NoError(t, err) diff --git a/impl/pkg/storage/db/bolt/pkarr.go b/impl/pkg/storage/db/bolt/dht.go similarity index 67% rename from impl/pkg/storage/db/bolt/pkarr.go rename to impl/pkg/storage/db/bolt/dht.go index 97f30040..917094d1 100644 --- a/impl/pkg/storage/db/bolt/pkarr.go +++ b/impl/pkg/storage/db/bolt/dht.go @@ -6,14 +6,14 @@ import ( "github.com/TBD54566975/ssi-sdk/util" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" + "github.com/TBD54566975/did-dht-method/pkg/dht" ) var ( encoding = base64.RawURLEncoding ) -type base64PkarrRecord struct { +type base64BEP44Record struct { // Up to an 1000 byte base64URL encoded string V string `json:"v" validate:"required"` // 32 byte base64URL encoded string @@ -23,8 +23,8 
@@ type base64PkarrRecord struct { Seq int64 `json:"seq" validate:"required"` } -func encodeRecord(r pkarr.Record) base64PkarrRecord { - return base64PkarrRecord{ +func encodeRecord(r dht.BEP44Record) base64BEP44Record { + return base64BEP44Record{ V: encoding.EncodeToString(r.Value[:]), K: encoding.EncodeToString(r.Key[:]), Sig: encoding.EncodeToString(r.Signature[:]), @@ -32,23 +32,23 @@ func encodeRecord(r pkarr.Record) base64PkarrRecord { } } -func (b base64PkarrRecord) Decode() (*pkarr.Record, error) { +func (b base64BEP44Record) Decode() (*dht.BEP44Record, error) { v, err := encoding.DecodeString(b.V) if err != nil { - return nil, fmt.Errorf("error parsing pkarr value field: %v", err) + return nil, fmt.Errorf("error parsing bep44 value field: %v", err) } k, err := encoding.DecodeString(b.K) if err != nil { - return nil, fmt.Errorf("error parsing pkarr key field: %v", err) + return nil, fmt.Errorf("error parsing bep44 key field: %v", err) } sig, err := encoding.DecodeString(b.Sig) if err != nil { - return nil, fmt.Errorf("error parsing pkarr sig field: %v", err) + return nil, fmt.Errorf("error parsing bep44 sig field: %v", err) } - record, err := pkarr.NewRecord(k, v, sig, b.Seq) + record, err := dht.NewBEP44Record(k, v, sig, b.Seq) if err != nil { // TODO: do something useful if this happens return nil, util.LoggingErrorMsg(err, "error loading record from database, skipping") diff --git a/impl/pkg/storage/db/postgres/migrations/00001_create_dht_records_table.sql b/impl/pkg/storage/db/postgres/migrations/00001_create_dht_records_table.sql new file mode 100644 index 00000000..f2ee580e --- /dev/null +++ b/impl/pkg/storage/db/postgres/migrations/00001_create_dht_records_table.sql @@ -0,0 +1,17 @@ +-- +goose Up +CREATE TABLE dht_records ( + id SERIAL PRIMARY KEY, + key BYTEA UNIQUE NOT NULL, + value BYTEA NOT NULL, + sig BYTEA NOT NULL, + seq BIGINT NOT NULL +); + +CREATE TABLE failed_records ( + id BYTEA PRIMARY KEY, + failure_count INTEGER NOT NULL +); + +-- 
+goose Down +DROP TABLE failed_records; +DROP TABLE dht_records; \ No newline at end of file diff --git a/impl/pkg/storage/db/postgres/migrations/00001_create_pkarr_records_table.sql b/impl/pkg/storage/db/postgres/migrations/00001_create_pkarr_records_table.sql deleted file mode 100644 index 05273773..00000000 --- a/impl/pkg/storage/db/postgres/migrations/00001_create_pkarr_records_table.sql +++ /dev/null @@ -1,11 +0,0 @@ --- +goose Up -CREATE TABLE pkarr_records ( - id SERIAL PRIMARY KEY, - key BYTEA UNIQUE NOT NULL, - value BYTEA NOT NULL, - sig BYTEA NOT NULL, - seq BIGINT NOT NULL -); - --- +goose Down -DROP TABLE pkarr_records; \ No newline at end of file diff --git a/impl/pkg/storage/db/postgres/models.go b/impl/pkg/storage/db/postgres/models.go index 03958671..a4d02233 100644 --- a/impl/pkg/storage/db/postgres/models.go +++ b/impl/pkg/storage/db/postgres/models.go @@ -4,10 +4,15 @@ package postgres -type PkarrRecord struct { +type DhtRecord struct { ID int32 Key []byte Value []byte Sig []byte Seq int64 } + +type FailedRecord struct { + ID []byte + FailureCount int32 +} diff --git a/impl/pkg/storage/db/postgres/postgres.go b/impl/pkg/storage/db/postgres/postgres.go index bd41e684..365622b4 100644 --- a/impl/pkg/storage/db/postgres/postgres.go +++ b/impl/pkg/storage/db/postgres/postgres.go @@ -10,8 +10,9 @@ import ( _ "github.com/jackc/pgx/v5/stdlib" "github.com/pressly/goose/v3" "github.com/sirupsen/logrus" + "github.com/tv42/zbase32" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" + "github.com/TBD54566975/did-dht-method/pkg/dht" "github.com/TBD54566975/did-dht-method/pkg/telemetry" ) @@ -61,7 +62,7 @@ func (p Postgres) connect(ctx context.Context) (*Queries, *pgx.Conn, error) { return New(conn), conn, nil } -func (p Postgres) WriteRecord(ctx context.Context, record pkarr.Record) error { +func (p Postgres) WriteRecord(ctx context.Context, record dht.BEP44Record) error { ctx, span := telemetry.GetTracer().Start(ctx, "postgres.WriteRecord") defer 
span.End() @@ -84,7 +85,7 @@ func (p Postgres) WriteRecord(ctx context.Context, record pkarr.Record) error { return nil } -func (p Postgres) ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, error) { +func (p Postgres) ReadRecord(ctx context.Context, id string) (*dht.BEP44Record, error) { ctx, span := telemetry.GetTracer().Start(ctx, "postgres.ReadRecord") defer span.End() @@ -94,7 +95,11 @@ func (p Postgres) ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, err } defer db.Close(ctx) - row, err := queries.ReadRecord(ctx, id) + decodedID, err := zbase32.DecodeString(id) + if err != nil { + return nil, err + } + row, err := queries.ReadRecord(ctx, decodedID) if err != nil { return nil, err } @@ -107,7 +112,7 @@ func (p Postgres) ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, err return record, nil } -func (p Postgres) ListRecords(ctx context.Context, nextPageToken []byte, limit int) ([]pkarr.Record, []byte, error) { +func (p Postgres) ListRecords(ctx context.Context, nextPageToken []byte, limit int) ([]dht.BEP44Record, []byte, error) { ctx, span := telemetry.GetTracer().Start(ctx, "postgres.ListRecords") defer span.End() @@ -117,7 +122,7 @@ func (p Postgres) ListRecords(ctx context.Context, nextPageToken []byte, limit i } defer db.Close(ctx) - var rows []PkarrRecord + var rows []DhtRecord if nextPageToken == nil { rows, err = queries.ListRecordsFirstPage(ctx, int32(limit)) } else { @@ -130,9 +135,9 @@ func (p Postgres) ListRecords(ctx context.Context, nextPageToken []byte, limit i return nil, nil, err } - var records []pkarr.Record + var records []dht.BEP44Record for _, row := range rows { - record, err := pkarr.NewRecord(row.Key, row.Value, row.Sig, row.Seq) + record, err := dht.NewBEP44Record(row.Key, row.Value, row.Sig, row.Seq) if err != nil { // TODO: do something useful if this happens logrus.WithContext(ctx).WithError(err).WithField("record_id", row.ID).Warn("error loading record from database, skipping") @@ -151,13 +156,8 @@ func 
(p Postgres) ListRecords(ctx context.Context, nextPageToken []byte, limit i return records, nextPageToken, nil } -func (p Postgres) Close() error { - // no-op, postgres connection is closed after each request - return nil -} - -func (row PkarrRecord) Record() (*pkarr.Record, error) { - return pkarr.NewRecord(row.Key, row.Value, row.Sig, row.Seq) +func (row DhtRecord) Record() (*dht.BEP44Record, error) { + return dht.NewBEP44Record(row.Key, row.Value, row.Sig, row.Seq) } func (p Postgres) RecordCount(ctx context.Context) (int, error) { @@ -177,3 +177,73 @@ func (p Postgres) RecordCount(ctx context.Context) (int, error) { return int(count), nil } + +func (p Postgres) WriteFailedRecord(ctx context.Context, id string) error { + ctx, span := telemetry.GetTracer().Start(ctx, "postgres.WriteFailedRecord") + defer span.End() + + queries, db, err := p.connect(ctx) + if err != nil { + return err + } + defer db.Close(ctx) + + err = queries.WriteFailedRecord(ctx, WriteFailedRecordParams{ + ID: []byte(id), + FailureCount: 1, + }) + if err != nil { + return err + } + + return nil +} + +func (p Postgres) ListFailedRecords(ctx context.Context) ([]dht.FailedRecord, error) { + ctx, span := telemetry.GetTracer().Start(ctx, "postgres.ListFailedRecords") + defer span.End() + + queries, db, err := p.connect(ctx) + if err != nil { + return nil, err + } + defer db.Close(ctx) + + rows, err := queries.ListFailedRecords(ctx) + if err != nil { + return nil, err + } + + var failedRecords []dht.FailedRecord + for _, row := range rows { + failedRecords = append(failedRecords, dht.FailedRecord{ + ID: string(row.ID), + Count: int(row.FailureCount), + }) + } + + return failedRecords, nil +} + +func (p Postgres) FailedRecordCount(ctx context.Context) (int, error) { + ctx, span := telemetry.GetTracer().Start(ctx, "postgres.FailedRecordCount") + defer span.End() + + queries, db, err := p.connect(ctx) + if err != nil { + return 0, err + } + defer db.Close(ctx) + + count, err := 
queries.FailedRecordCount(ctx) + if err != nil { + return 0, err + } + + return int(count), nil +} + +func (p Postgres) Close() error { + // no-op, postgres connection is closed after each request + return nil +} diff --git a/impl/pkg/storage/db/postgres/postgres_test.go b/impl/pkg/storage/db/postgres/postgres_test.go index 9bee06bb..028146d0 100644 --- a/impl/pkg/storage/db/postgres/postgres_test.go +++ b/impl/pkg/storage/db/postgres/postgres_test.go @@ -11,13 +11,12 @@ import ( "github.com/TBD54566975/did-dht-method/internal/did" "github.com/TBD54566975/did-dht-method/pkg/dht" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" "github.com/TBD54566975/did-dht-method/pkg/storage" "github.com/TBD54566975/did-dht-method/pkg/storage/db/postgres" ) func getTestDB(t *testing.T) storage.Storage { uri := os.Getenv("TEST_DB") if uri == "" { t.SkipNow() } @@ -50,16 +49,16 @@ func TestReadWrite(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) - r := pkarr.RecordFromBEP44(putMsg) + r := dht.RecordFromBEP44(putMsg) err = db.WriteRecord(ctx, r) require.NoError(t, err) - r2, err := db.ReadRecord(ctx, r.Key[:]) + r2, err := db.ReadRecord(ctx, r.ID()) require.NoError(t, err) assert.Equal(t, r.Key, r2.Key) @@ -95,12 +94,12 @@ func TestDBPagination(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) // create record - record := pkarr.RecordFromBEP44(putMsg) + record := dht.RecordFromBEP44(putMsg) err =
db.WriteRecord(ctx, record) assert.NoError(t, err) @@ -116,12 +114,12 @@ func TestDBPagination(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, packet) - putMsg, err := dht.CreatePkarrPublishRequest(sk, *packet) + putMsg, err := dht.CreateDNSPublishRequest(sk, *packet) require.NoError(t, err) require.NotEmpty(t, putMsg) // create eleventhRecord - eleventhRecord := pkarr.RecordFromBEP44(putMsg) + eleventhRecord := dht.RecordFromBEP44(putMsg) err = db.WriteRecord(ctx, eleventhRecord) assert.NoError(t, err) diff --git a/impl/pkg/storage/db/postgres/queries.sql.go b/impl/pkg/storage/db/postgres/queries.sql.go index 3687e39a..9fdfde12 100644 --- a/impl/pkg/storage/db/postgres/queries.sql.go +++ b/impl/pkg/storage/db/postgres/queries.sql.go @@ -9,8 +9,43 @@ import ( "context" ) +const failedRecordCount = `-- name: FailedRecordCount :one +SELECT count(*) AS exact_count FROM failed_records +` + +func (q *Queries) FailedRecordCount(ctx context.Context) (int64, error) { + row := q.db.QueryRow(ctx, failedRecordCount) + var exact_count int64 + err := row.Scan(&exact_count) + return exact_count, err +} + +const listFailedRecords = `-- name: ListFailedRecords :many +SELECT id, failure_count FROM failed_records +` + +func (q *Queries) ListFailedRecords(ctx context.Context) ([]FailedRecord, error) { + rows, err := q.db.Query(ctx, listFailedRecords) + if err != nil { + return nil, err + } + defer rows.Close() + var items []FailedRecord + for rows.Next() { + var i FailedRecord + if err := rows.Scan(&i.ID, &i.FailureCount); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const listRecords = `-- name: ListRecords :many -SELECT id, key, value, sig, seq FROM pkarr_records WHERE id > (SELECT id FROM pkarr_records WHERE pkarr_records.key = $1) ORDER BY id ASC LIMIT $2 +SELECT id, key, value, sig, seq FROM dht_records WHERE id > (SELECT id FROM dht_records WHERE dht_records.key = 
$1) ORDER BY id ASC LIMIT $2 ` type ListRecordsParams struct { @@ -18,15 +53,15 @@ type ListRecordsParams struct { Limit int32 } -func (q *Queries) ListRecords(ctx context.Context, arg ListRecordsParams) ([]PkarrRecord, error) { +func (q *Queries) ListRecords(ctx context.Context, arg ListRecordsParams) ([]DhtRecord, error) { rows, err := q.db.Query(ctx, listRecords, arg.Key, arg.Limit) if err != nil { return nil, err } defer rows.Close() - var items []PkarrRecord + var items []DhtRecord for rows.Next() { - var i PkarrRecord + var i DhtRecord if err := rows.Scan( &i.ID, &i.Key, @@ -45,18 +80,18 @@ func (q *Queries) ListRecords(ctx context.Context, arg ListRecordsParams) ([]Pka } const listRecordsFirstPage = `-- name: ListRecordsFirstPage :many -SELECT id, key, value, sig, seq FROM pkarr_records ORDER BY id ASC LIMIT $1 +SELECT id, key, value, sig, seq FROM dht_records ORDER BY id ASC LIMIT $1 ` -func (q *Queries) ListRecordsFirstPage(ctx context.Context, limit int32) ([]PkarrRecord, error) { +func (q *Queries) ListRecordsFirstPage(ctx context.Context, limit int32) ([]DhtRecord, error) { rows, err := q.db.Query(ctx, listRecordsFirstPage, limit) if err != nil { return nil, err } defer rows.Close() - var items []PkarrRecord + var items []DhtRecord for rows.Next() { - var i PkarrRecord + var i DhtRecord if err := rows.Scan( &i.ID, &i.Key, @@ -75,12 +110,12 @@ func (q *Queries) ListRecordsFirstPage(ctx context.Context, limit int32) ([]Pkar } const readRecord = `-- name: ReadRecord :one -SELECT id, key, value, sig, seq FROM pkarr_records WHERE key = $1 LIMIT 1 +SELECT id, key, value, sig, seq FROM dht_records WHERE key = $1 LIMIT 1 ` -func (q *Queries) ReadRecord(ctx context.Context, key []byte) (PkarrRecord, error) { +func (q *Queries) ReadRecord(ctx context.Context, key []byte) (DhtRecord, error) { row := q.db.QueryRow(ctx, readRecord, key) - var i PkarrRecord + var i DhtRecord err := row.Scan( &i.ID, &i.Key, @@ -92,7 +127,7 @@ func (q *Queries) ReadRecord(ctx 
context.Context, key []byte) (PkarrRecord, erro } const recordCount = `-- name: RecordCount :one -SELECT count(*) AS exact_count FROM pkarr_records +SELECT count(*) AS exact_count FROM dht_records ` func (q *Queries) RecordCount(ctx context.Context) (int64, error) { @@ -102,8 +137,24 @@ func (q *Queries) RecordCount(ctx context.Context) (int64, error) { return exact_count, err } +const writeFailedRecord = `-- name: WriteFailedRecord :exec +INSERT INTO failed_records(id, failure_count) +VALUES($1, $2) +ON CONFLICT (id) DO UPDATE SET failure_count = failed_records.failure_count + 1 +` + +type WriteFailedRecordParams struct { + ID []byte + FailureCount int32 +} + +func (q *Queries) WriteFailedRecord(ctx context.Context, arg WriteFailedRecordParams) error { + _, err := q.db.Exec(ctx, writeFailedRecord, arg.ID, arg.FailureCount) + return err +} + const writeRecord = `-- name: WriteRecord :exec -INSERT INTO pkarr_records(key, value, sig, seq) VALUES($1, $2, $3, $4) +INSERT INTO dht_records(key, value, sig, seq) VALUES($1, $2, $3, $4) ` type WriteRecordParams struct { diff --git a/impl/pkg/storage/db/postgres/queries/queries.sql b/impl/pkg/storage/db/postgres/queries/queries.sql index 8ae96ea3..74153618 100644 --- a/impl/pkg/storage/db/postgres/queries/queries.sql +++ b/impl/pkg/storage/db/postgres/queries/queries.sql @@ -1,14 +1,25 @@ -- name: WriteRecord :exec -INSERT INTO pkarr_records(key, value, sig, seq) VALUES($1, $2, $3, $4); +INSERT INTO dht_records(key, value, sig, seq) VALUES($1, $2, $3, $4); -- name: ReadRecord :one -SELECT * FROM pkarr_records WHERE key = $1 LIMIT 1; +SELECT * FROM dht_records WHERE key = $1 LIMIT 1; -- name: ListRecords :many -SELECT * FROM pkarr_records WHERE id > (SELECT id FROM pkarr_records WHERE pkarr_records.key = $1) ORDER BY id ASC LIMIT $2; +SELECT * FROM dht_records WHERE id > (SELECT id FROM dht_records WHERE dht_records.key = $1) ORDER BY id ASC LIMIT $2; -- name: ListRecordsFirstPage :many -SELECT * FROM pkarr_records ORDER BY 
id ASC LIMIT $1; +SELECT * FROM dht_records ORDER BY id ASC LIMIT $1; -- name: RecordCount :one -SELECT count(*) AS exact_count FROM pkarr_records; +SELECT count(*) AS exact_count FROM dht_records; + +-- name: WriteFailedRecord :exec +INSERT INTO failed_records(id, failure_count) +VALUES($1, $2) +ON CONFLICT (id) DO UPDATE SET failure_count = failed_records.failure_count + 1; + +-- name: ListFailedRecords :many +SELECT * FROM failed_records; + +-- name: FailedRecordCount :one +SELECT count(*) AS exact_count FROM failed_records; \ No newline at end of file diff --git a/impl/pkg/storage/storage.go b/impl/pkg/storage/storage.go index c8e460f0..daf4b2d2 100644 --- a/impl/pkg/storage/storage.go +++ b/impl/pkg/storage/storage.go @@ -8,16 +8,21 @@ import ( "github.com/sirupsen/logrus" - "github.com/TBD54566975/did-dht-method/pkg/pkarr" + "github.com/TBD54566975/did-dht-method/pkg/dht" "github.com/TBD54566975/did-dht-method/pkg/storage/db/bolt" "github.com/TBD54566975/did-dht-method/pkg/storage/db/postgres" ) type Storage interface { - WriteRecord(ctx context.Context, record pkarr.Record) error - ReadRecord(ctx context.Context, id []byte) (*pkarr.Record, error) - ListRecords(ctx context.Context, nextPageToken []byte, pageSize int) (records []pkarr.Record, nextPage []byte, err error) + WriteRecord(ctx context.Context, record dht.BEP44Record) error + ReadRecord(ctx context.Context, id string) (*dht.BEP44Record, error) + ListRecords(ctx context.Context, nextPageToken []byte, pageSize int) (records []dht.BEP44Record, nextPage []byte, err error) RecordCount(ctx context.Context) (int, error) + + WriteFailedRecord(ctx context.Context, id string) error + ListFailedRecords(ctx context.Context) ([]dht.FailedRecord, error) + FailedRecordCount(ctx context.Context) (int, error) + Close() error } diff --git a/impl/pkg/telemetry/telemetry.go b/impl/pkg/telemetry/telemetry.go index ec1d4247..a4417746 100644 --- a/impl/pkg/telemetry/telemetry.go +++ b/impl/pkg/telemetry/telemetry.go @@ 
-57,7 +57,7 @@ func SetupTelemetry(ctx context.Context) error { otel.SetMeterProvider(meterProvider) // setup memory metrics - err = runtime.Start(runtime.WithMeterProvider(meterProvider), runtime.WithMinimumReadMemStatsInterval(time.Second*15)) + err = runtime.Start(runtime.WithMeterProvider(meterProvider), runtime.WithMinimumReadMemStatsInterval(15*time.Second)) if err != nil { return err } diff --git a/impl/sqlc.yaml b/impl/sqlc.yaml new file mode 100644 index 00000000..b36ecbd9 --- /dev/null +++ b/impl/sqlc.yaml @@ -0,0 +1,10 @@ +version: "2" +sql: + - engine: "postgresql" + queries: "pkg/storage/db/postgres/queries" + schema: "pkg/storage/db/postgres/migrations" + gen: + go: + package: "postgres" + out: "pkg/storage/db/postgres" + sql_package: "pgx/v5" \ No newline at end of file diff --git a/spec/spec.md b/spec/spec.md index cd93b8e1..ff5cc3c8 100644 --- a/spec/spec.md +++ b/spec/spec.md @@ -9,7 +9,7 @@ The DID DHT Method Specification 1.0 **Draft Created:** October 20, 2023 -**Latest Update:** April 16, 2024 +**Latest Update:** April 22, 2024 **Editors:** ~ [Gabe Cohen](https://github.com/decentralgabe) @@ -58,7 +58,7 @@ DID DHT | [DNS RRs](https://datatracker.ietf.org/doc/html/rfc1035) | [Mainline DHT](https://en.wikipedia.org/wiki/Mainline_DHT) | -DID DHT makes use of [[ref:Mainline DHT]], specifically [[ref:BEP44]] to store signed mutable records. +DID DHT makes use of [[ref:Mainline DHT]], specifically [[ref:BEP44]] to store signed mutable records. This DID method uses [[ref:DNS Resource Records]] to efficiently represent _[[ref:DID Documents]]_. [[def:Mainline]] is in use for the following reasons: @@ -73,7 +73,7 @@ specification and shall be registered with the [[spec:DID-Spec-Registries]]. 
## Conformance -The keywords MAY, MUST, MUST NOT, RECOMMENDED, SHOULD, and SHOULD NOT in this document are to be interpreted as +The keywords MAY, MUST, MUST NOT, RECOMMENDED, SHOULD, and SHOULD NOT in this document are to be interpreted as described in [BCP 14](https://www.rfc-editor.org/info/bcp14) [[spec:RFC2119]] [[spec:RFC8174]] when, and only when, they appear in all capitals, as shown here. @@ -81,7 +81,7 @@ they appear in all capitals, as shown here. [[def:Decentralized Identifier, Decentralized Identifier, DID, DIDs, DID Document, DID Documents]] ~ A [W3C specification](https://www.w3.org/TR/did-core/) describing an _identifier that enables verifiable, -decentralized digital identity_. A DID identifier is associated with a JSON document containing cryptograhpic keys, +decentralized digital identity_. A DID identifier is associated with a JSON document containing cryptographic keys, services, and other properties outlined in the specification. [[def:DID Suffix, Suffix]] @@ -97,7 +97,7 @@ in each `did:dht` document. [[def:DNS Resource Records]] ~ An efficient format for representing [[ref:DID Documents]] and providing semantics pertinent to DID DHT, -such as TTLs, cachcing, and different record types (e.g. `NS`, `TXT`). Follows [[spec:RFC1035]]. +such as TTLs, caching, and different record types (e.g. `NS`, `TXT`). Follows [[spec:RFC1035]]. [[def:Mainline DHT, DHT, Mainline, Mainline Server]] ~ [Mainline DHT](https://en.wikipedia.org/wiki/Mainline_DHT) is the name given to the @@ -167,19 +167,19 @@ to the initial publisher. Consequently, DHT records, including DID DHT Documents implies that trust in a specific [[ref:Mainline]] or [[ref:Gateway]] server for providing unaltered messages is unnecessary. Instead, all clients can, and should, verify messages themselves. 
This approach significantly mitigates risks associated with other DID methods, where a compromised server or [DID resolver](https://www.w3.org/TR/did-core/#choosing-did-resolvers) might tamper with a [[ref:DID Document]] -which would be undecetable by a client. +which would be undetectable by a client. Currently, [[ref:Mainline]] exclusively supports the [[ref:Ed25519]] key type. In turn, [[ref:Ed25519]] is required by DID DHT and is used to uniquely identify DID DHT Documents. DID DHT identifiers are formed by concatenating the `did:dht:` prefix with a [[ref:z-base-32]] encoded Identity Key, which acts as its [[ref:suffix]]. Identity Keys ****MUST**** have the identifier `0` as both its Verification Method `id` and JWK `kid` [[spec:RFC7517]]. Identity Keys ****MUST**** have the [Verification Relationships](#verification-relationships) -_Authentication_, _Assertion_, _Capabilitiy Invocation_, and _Capability Delegation_. +_Authentication_, _Assertion_, _Capability Invocation_, and _Capability Delegation_. While the system requires at least one [[ref:Ed25519]], a DID DHT Document can include any number of additional keys. Additional key types ****MUST**** be registered in the [Key Type Index](registry/index.html##key-type-index). As a unique consequence of the requirement of the Identity Key, DID DHT Documents are able to be partially-resolved without contacting -[[ref:Maineline]] or [[ref:Gateway]] servers, though it is ****RECOMMENDED**** that deterministic resolution is only used as a fallback mechanism. +[[ref:Mainline]] or [[ref:Gateway]] servers, though it is ****RECOMMENDED**** that deterministic resolution is only used as a fallback mechanism. Similarly, the requirement of an Identity Key enables [interoperability with other DID methods](#interoperability-with-other-did-methods). ### DIDs as DNS Records @@ -214,7 +214,7 @@ each `key` or `service` as attributes. 
- The DNS packet ****MUST**** set the _Authoritative Answer_ flag since this is always an _Authoritative_ packet. - `TXT` records ****MAY**** exceed 255 characters as per [[spec:RFC1035]]. Records exceeding 255 characters are -represented as multiple strings, which upon DID Document reconstructin, can be concatenated to a single value. +represented as multiple strings, which upon DID Document reconstruction, can be concatenated to a single value. #### Root Record @@ -222,7 +222,7 @@ The root record is a special record which serves as instructions on how to recon by providing a [property mapping](#property-mapping) for a [[ref:DID Document]], along with containing pertinent metadata such as a version identifier. -- The root record's **name** ****MUST**** be of the form, `_did..`, where `ID` is the [[ref:Mainline]] +- The root record's **name** ****MUST**** be of the form, `_did..`, where `ID` is the [[ref:Mainline]] identifier associated with the DID (i.e. `did:dht:` becomes `_did..`). - The root record's **type** is `TXT`, indicating a Text record. @@ -279,7 +279,7 @@ A [DID controller](https://www.w3.org/TR/did-core/#did-controller) ****MAY**** b - The [Controller](https://www.w3.org/TR/did-core/#did-controller) record's **type** is `TXT`, indicating a Text record. -- The [Controller](https://www.w3.org/TR/did-core/#did-controller) record's **data** is represented as a comma-separatedlist of controller DID identifiers. +- The [Controller](https://www.w3.org/TR/did-core/#did-controller) record's **data** is represented as a comma-separated list of controller DID identifiers. To ensure that the DID controller is authorized to make changes to the DID Document, the controller for the [[ref:Identity Key]] Verification Method ****MUST**** be contained within the controller property. 
@@ -314,13 +314,13 @@ as a `_kN._did.` record where `N` is the zero-indexed positional index of a give - Each [Verification Method](https://www.w3.org/TR/did-core/#verification-methods) record's **rdata** is represented by the form `id=M;t=N;k=O;a=P` where `M` is the Verification Method's `id`, `N` is the index of the key's type from the -[key type index](registry/index.html#key-type-index), `N` is the unpadded base64URL [[spec:RFC4648]] representation of +[key type index](registry/index.html#key-type-index), `O` is the unpadded base64URL [[spec:RFC4648]] representation of the public key, and `P` is the `JWK` `alg` identifier of the key. - Verification Method `id`s ****MAY**** be omitted. If omitted, they can be computed according to the rules specified in the section on [representing keys](#representing-keys) when reconstructing the DID Document. - - `alg` identifiers ****MAY**** be ommitted. If omimtted, they are assigned to the default value specified in the + - `alg` identifiers ****MAY**** be omitted. If omitted, they are assigned to the default value specified in the [key type index](registry/index.html#key-type-index). - The [[ref:Identity Key]] ****MUST**** always be at index `_k0` with `id` `0`. @@ -550,7 +550,7 @@ Add guidance for rotating to a new DID after deactivation. ### Designating Authoritative Gateways -[Gateways](#gateways) provide additional benfits to `did:dht`, such as the ability to [resolve historical DID Documents](#historical-resolution), +[Gateways](#gateways) provide additional benefits to `did:dht`, such as the ability to [resolve historical DID Documents](#historical-resolution), or support [type indexing](#type-indexing). To enable the usage of these additional features, `did:dht` documents need to be published to Gateway(s) that with the necessary capabilities. 
Whether it's accessing historical states, engaging with type indexes, or utilizing other specialized features, the [resolution process](#resolving-a-did) must be directed towards a [[ref:Gateway]] that maintains this supplementary data. @@ -950,21 +950,21 @@ access-token-based approach. ### DID Resolution -The process for resoloving a DID DHT Document via a [[ref:Gateway]] is outlined in the [read section above](#read). +The process for resolving a DID DHT Document via a [[ref:Gateway]] is outlined in the [read section above](#read). However, we provide additional guidance for [DID Resolvers](https://www.w3.org/TR/did-core/#dfn-did-resolvers) supplying [DID Document Metadata](https://www.w3.org/TR/did-core/#did-document-metadata) and [DID Resolution Metadata](https://www.w3.org/TR/did-core/#did-resolution-metadata) as follows: #### DID Document Metadata -* The metadata's [`versionId` property](https://www.w3.org/TR/did-core/#dfn-versionid) ****MUST**** be set to the +* The metadata [`versionId` property](https://www.w3.org/TR/did-core/#dfn-versionid) ****MUST**** be set to the [[ref:DID Document]] packet's current [[ref:sequence number]]. -* The metadata's [`created` property](https://www.w3.org/TR/did-core/#dfn-created) ****MUST**** be set to +* The metadata [`created` property](https://www.w3.org/TR/did-core/#dfn-created) ****MUST**** be set to [XML Datetime](https://www.w3.org/TR/xmlschema11-2/#dateTime) representation of the earliest known sequence number for the DID. -* The metadata's [`updated` property](https://www.w3.org/TR/did-core/#dfn-updated) ****MUST**** be set to the +* The metadata [`updated` property](https://www.w3.org/TR/did-core/#dfn-updated) ****MUST**** be set to the [XML Datetime](https://www.w3.org/TR/xmlschema11-2/#dateTime) representation of the last known sequence number for the DID. @@ -973,10 +973,10 @@ for the DID. 
#### DID Resolution Metadata -* The metadata's `types` property ****MUST**** be set to an array of strings representing type values if +* The metadata `types` property ****MUST**** be set to an array of strings representing type values if [type data](#type-indexing) is present in the [[ref:DID Document]]'s packet. -* The metadata's `gateway` property ****MUST**** be set to a string representing the [[ref:Gateway]]'s URI +* The metadata `gateway` property ****MUST**** be set to a string representing the [[ref:Gateway]]'s URI from which the DID was resolved. This is useful in cases where a [DID Resolvers](https://www.w3.org/TR/did-core/#dfn-did-resolvers) performs resolution against an [Authoritative Gateway](#designating-authoritative-gateways). diff --git a/sqlc.yaml b/sqlc.yaml deleted file mode 100644 index 763be2bd..00000000 --- a/sqlc.yaml +++ /dev/null @@ -1,10 +0,0 @@ -version: "2" -sql: - - engine: "postgresql" - queries: "impl/pkg/storage/db/postgres/queries" - schema: "impl/pkg/storage/db/postgres/migrations" - gen: - go: - package: "postgres" - out: "impl/pkg/storage/db/postgres" - sql_package: "pgx/v5" \ No newline at end of file