diff --git a/go.mod b/go.mod index d55b529e..b3884477 100644 --- a/go.mod +++ b/go.mod @@ -5,13 +5,13 @@ go 1.22.5 toolchain go1.23.2 require ( - github.com/99designs/gqlgen v0.17.55 - github.com/Masterminds/semver/v3 v3.3.0 + github.com/99designs/gqlgen v0.17.57 + github.com/Masterminds/semver/v3 v3.3.1 github.com/atrox/haikunatorgo/v2 v2.0.1 github.com/caddyserver/certmagic v0.21.4 - github.com/datarhei/gosrt v0.7.0 + github.com/datarhei/gosrt v0.8.0 github.com/datarhei/joy4 v0.0.0-20240603190808-b1407345907e - github.com/go-playground/validator/v10 v10.22.1 + github.com/go-playground/validator/v10 v10.23.0 github.com/gobwas/glob v0.2.3 github.com/golang-jwt/jwt/v5 v5.2.1 github.com/google/uuid v1.6.0 @@ -19,20 +19,20 @@ require ( github.com/joho/godotenv v1.5.1 github.com/labstack/echo-jwt v0.0.0-20221127215225-c84d41a71003 github.com/labstack/echo/v4 v4.12.0 - github.com/lithammer/shortuuid/v4 v4.0.0 + github.com/lithammer/shortuuid/v4 v4.1.0 github.com/mattn/go-isatty v0.0.20 - github.com/minio/minio-go/v7 v7.0.80 + github.com/minio/minio-go/v7 v7.0.81 github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 github.com/prometheus/client_golang v1.20.5 github.com/puzpuzpuz/xsync/v3 v3.4.0 github.com/shirou/gopsutil/v3 v3.24.5 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/swaggo/echo-swagger v1.4.1 github.com/swaggo/swag v1.16.4 - github.com/vektah/gqlparser/v2 v2.5.18 + github.com/vektah/gqlparser/v2 v2.5.20 github.com/xeipuuv/gojsonschema v1.2.0 go.uber.org/zap v1.27.0 - golang.org/x/mod v0.21.0 + golang.org/x/mod v0.22.0 ) require ( @@ -42,10 +42,10 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/caddyserver/zerossl v0.1.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/gabriel-vasile/mimetype v1.4.6 // indirect + github.com/gabriel-vasile/mimetype v1.4.7 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect @@ -55,6 +55,7 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang-jwt/jwt/v4 v4.5.1 // indirect @@ -63,7 +64,7 @@ require ( github.com/iancoleman/orderedmap v0.2.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/klauspost/cpuid/v2 v2.2.8 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect github.com/labstack/gommon v0.4.2 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/libdns/libdns v0.2.2 // indirect @@ -73,7 +74,6 @@ require ( github.com/mholt/acmez/v2 v2.0.3 // indirect github.com/miekg/dns v1.1.62 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect @@ -87,7 +87,7 @@ require ( github.com/swaggo/files/v2 v2.0.1 // 
indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.9.0 // indirect - github.com/urfave/cli/v2 v2.27.4 // indirect + github.com/urfave/cli/v2 v2.27.5 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect @@ -96,14 +96,14 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/blake3 v0.2.4 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.28.0 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect - golang.org/x/tools v0.26.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect + golang.org/x/tools v0.27.0 // indirect + google.golang.org/protobuf v1.35.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 9d7c7313..1c5b66d7 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,9 @@ -github.com/99designs/gqlgen v0.17.55 h1:3vzrNWYyzSZjGDFo68e5j9sSauLxfKvLp+6ioRokVtM= -github.com/99designs/gqlgen v0.17.55/go.mod h1:3Bq768f8hgVPGZxL8aY9MaYmbxa6llPM/qu1IGH1EJo= +github.com/99designs/gqlgen v0.17.57 h1:Ak4p60BRq6QibxY0lEc0JnQhDurfhxA67sp02lMjmPc= +github.com/99designs/gqlgen v0.17.57/go.mod h1:Jx61hzOSTcR4VJy/HFIgXiQ5rJ0Ypw8DxWLjbYDAUw0= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= -github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= -github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/PuerkitoBio/goquery v1.9.3 h1:mpJr/ikUA9/GNJB/DBZcGeFDXUtosHRyRrwh7KGdTG0= github.com/PuerkitoBio/goquery v1.9.3/go.mod h1:1ndLHPdTz+DyQPICCWYlYQMPl0oXZj0G6D4LCYA6u4U= github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY= @@ -26,10 +26,10 @@ github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+Y github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/datarhei/gosrt v0.7.0 h1:1/IY66HVVgqGA9zkmL5l6jUFuI8t/76WkuamSkJqHqs= -github.com/datarhei/gosrt v0.7.0/go.mod h1:wTDoyog1z4au8Fd/QJBQAndzvccuxjqUL/qMm0EyJxE= +github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= +github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/datarhei/gosrt v0.8.0 h1:fna/FFRbVN7LvwAt2cR6pxwFz7rm979vdRzGfh9zbNM= 
+github.com/datarhei/gosrt v0.8.0/go.mod h1:ab1q3G0/DxsEU5iH/OCMaqYOWAqUI0SAbJ2sRKeQblA= github.com/datarhei/joy4 v0.0.0-20240603190808-b1407345907e h1:Qc/0D4xvXrazFkoi/4UGqO15yQ1JN5I8h7RwdzCLgTY= github.com/datarhei/joy4 v0.0.0-20240603190808-b1407345907e/go.mod h1:Jcw/6jZDQQmPx8A7INEkXmuEF7E9jjBbSTfVSLwmiQw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -39,8 +39,8 @@ github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7c github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc= -github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc= +github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA= +github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= @@ -62,8 +62,10 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA= -github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o= +github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= @@ -76,7 +78,6 @@ github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17w github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= @@ -95,8 +96,8 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= 
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -113,8 +114,8 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s= github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= -github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c= -github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y= +github.com/lithammer/shortuuid/v4 v4.1.0 h1:1L0IC3luO1fdb8YNhfTDfflTwyVER4GCuCa7ZTYgbbU= +github.com/lithammer/shortuuid/v4 v4.1.0/go.mod h1:dawOscqzZBSdcTpJllSPiyGfvTCeUvq6CY0Cxc9eqjU= github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0= github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -130,10 +131,8 @@ github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.80 h1:2mdUHXEykRdY/BigLt3Iuu1otL0JTogT0Nmltg0wujk= -github.com/minio/minio-go/v7 v7.0.80/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/minio/minio-go/v7 v7.0.81 h1:SzhMN0TQ6T/xSBu6Nvw3M5M8voM+Ht8RH3hE8S7zxaA= +github.com/minio/minio-go/v7 v7.0.81/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -172,8 +171,8 @@ github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERA github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify 
v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/swaggo/echo-swagger v1.4.1 h1:Yf0uPaJWp1uRtDloZALyLnvdBeoEL5Kc7DtnjzO/TUk= github.com/swaggo/echo-swagger v1.4.1/go.mod h1:C8bSi+9yH2FLZsnhqMZLIZddpUxZdBYuNHbtaS1Hljc= github.com/swaggo/files/v2 v2.0.1 h1:XCVJO/i/VosCDsJu1YLpdejGsGnBE9deRMpjN4pJLHk= @@ -184,14 +183,14 @@ github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZ github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo= github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI= -github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= -github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= +github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= +github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vektah/gqlparser/v2 v2.5.18 h1:zSND3GtutylAQ1JpWnTHcqtaRZjl+y3NROeW8vuNo6Y= -github.com/vektah/gqlparser/v2 v2.5.18/go.mod h1:6HLzf7JKv9Fi3APymudztFQNmLXR5qJeEo6BOFcXVfc= +github.com/vektah/gqlparser/v2 v2.5.20 h1:kPaWbhBntxoZPaNdBaIPT1Kh0i1b/onb5kXgEdP5JCo= +github.com/vektah/gqlparser/v2 v2.5.20/go.mod h1:xMl+ta8a5M1Yo1A1Iwt/k7gSpscwSnHZdw7tfhEGfTM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -215,30 +214,29 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod 
h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/vendor/github.com/99designs/gqlgen/TESTING.md b/vendor/github.com/99designs/gqlgen/TESTING.md index 2677b33e..bdb56f40 100644 --- a/vendor/github.com/99designs/gqlgen/TESTING.md +++ b/vendor/github.com/99designs/gqlgen/TESTING.md @@ -1,7 +1,7 @@ How to write tests for gqlgen === -Testing generated code is a little tricky, heres how its currently set up. +Testing generated code is a little tricky, here's how its currently set up. 
### Testing responses from a server diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/exec.go b/vendor/github.com/99designs/gqlgen/codegen/config/exec.go index 838e17b2..37ec2c30 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/config/exec.go +++ b/vendor/github.com/99designs/gqlgen/codegen/config/exec.go @@ -20,6 +20,12 @@ type ExecConfig struct { // Only for follow-schema layout: FilenameTemplate string `yaml:"filename_template,omitempty"` // String template with {name} as placeholder for base name. DirName string `yaml:"dir"` + + // Maximum number of goroutines in concurrency to use when running multiple child resolvers + // Suppressing the number of goroutines generated can reduce memory consumption per request, + // but processing time may increase due to the reduced number of concurrences + // Default: 0 (unlimited) + WorkerLimit uint `yaml:"worker_limit"` } type ExecLayout string diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/resolver.go b/vendor/github.com/99designs/gqlgen/codegen/config/resolver.go index 1901fd2d..fa0744e1 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/config/resolver.go +++ b/vendor/github.com/99designs/gqlgen/codegen/config/resolver.go @@ -19,6 +19,7 @@ type ResolverConfig struct { DirName string `yaml:"dir"` OmitTemplateComment bool `yaml:"omit_template_comment,omitempty"` ResolverTemplate string `yaml:"resolver_template,omitempty"` + PreserveResolver bool `yaml:"preserve_resolver,omitempty"` } type ResolverLayout string diff --git a/vendor/github.com/99designs/gqlgen/codegen/directive.go b/vendor/github.com/99designs/gqlgen/codegen/directive.go index 077bd9f7..aaf3be64 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/directive.go +++ b/vendor/github.com/99designs/gqlgen/codegen/directive.go @@ -2,7 +2,6 @@ package codegen import ( "fmt" - "strconv" "strings" "github.com/vektah/gqlparser/v2/ast" @@ -143,7 +142,7 @@ func (d *Directive) CallArgs() string { args := []string{"ctx", "obj", "n"} for _, arg := range d.Args { - args = append(args, "args["+strconv.Quote(arg.Name)+"].("+templates.CurrentImports.LookupType(arg.TypeReference.GO)+")") + args = append(args, fmt.Sprintf("args[%q].(%s)", arg.Name, templates.CurrentImports.LookupType(arg.TypeReference.GO))) } return strings.Join(args, ", ") diff --git a/vendor/github.com/99designs/gqlgen/codegen/generate.go b/vendor/github.com/99designs/gqlgen/codegen/generate.go index bbd4d947..3d88b045 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/generate.go +++ b/vendor/github.com/99designs/gqlgen/codegen/generate.go @@ -4,9 +4,7 @@ import ( "embed" "errors" "fmt" - "os" "path/filepath" - "runtime" "strings" "github.com/vektah/gqlparser/v2/ast" @@ -129,24 +127,18 @@ func addBuild(filename string, p *ast.Position, data *Data, builds *map[string]* } } +//go:embed root_.gotpl +var rootTemplate string + // Root file contains top-level definitions that should not be duplicated across the generated // files for each schema file. 
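The WorkerLimit option added to ExecConfig above is what feeds the semaphore-based cap that the updated type.gotpl template emits later in this diff: list elements are still marshalled in their own goroutines, but at most worker_limit of them run at once. A minimal standalone sketch of that pattern using golang.org/x/sync/semaphore directly; the item slice and the ToUpper call are illustrative stand-ins, not gqlgen code:

package main

import (
	"context"
	"fmt"
	"strings"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	items := []string{"a", "b", "c", "d", "e", "f"}
	out := make([]string, len(items))

	sem := semaphore.NewWeighted(2) // at most 2 workers in flight, like worker_limit: 2
	var wg sync.WaitGroup

	for i := range items {
		if err := sem.Acquire(ctx, 1); err != nil {
			break // context cancelled; stop launching workers
		}
		wg.Add(1)
		go func(i int) {
			defer func() {
				sem.Release(1)
				wg.Done()
			}()
			out[i] = strings.ToUpper(items[i]) // stand-in for the per-element marshal call
		}(i)
	}
	wg.Wait()
	fmt.Println(out)
}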
func generateRootFile(data *Data) error { dir := data.Config.Exec.DirName path := filepath.Join(dir, "root_.generated.go") - _, thisFile, _, _ := runtime.Caller(0) - rootDir := filepath.Dir(thisFile) - templatePath := filepath.Join(rootDir, "root_.gotpl") - templateBytes, err := os.ReadFile(templatePath) - if err != nil { - return err - } - template := string(templateBytes) - return templates.Render(templates.Options{ PackageName: data.Config.Exec.Package, - Template: template, + Template: rootTemplate, Filename: path, Data: data, RegionTags: false, diff --git a/vendor/github.com/99designs/gqlgen/codegen/generated!.gotpl b/vendor/github.com/99designs/gqlgen/codegen/generated!.gotpl index 1e5bfbfc..8638b1cf 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/generated!.gotpl +++ b/vendor/github.com/99designs/gqlgen/codegen/generated!.gotpl @@ -10,6 +10,7 @@ {{ reserveImport "bytes" }} {{ reserveImport "embed" }} +{{ reserveImport "golang.org/x/sync/semaphore"}} {{ reserveImport "github.com/vektah/gqlparser/v2" "gqlparser" }} {{ reserveImport "github.com/vektah/gqlparser/v2/ast" }} {{ reserveImport "github.com/99designs/gqlgen/graphql" }} @@ -150,8 +151,8 @@ } func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { - rc := graphql.GetOperationContext(ctx) - ec := executionContext{rc, e, 0, 0, make(chan graphql.DeferredResult)} + opCtx := graphql.GetOperationContext(ctx) + ec := executionContext{opCtx, e, 0, 0, make(chan graphql.DeferredResult)} inputUnmarshalMap := graphql.BuildUnmarshalerMap( {{- range $input := .Inputs -}} {{ if not $input.HasUnmarshal }} @@ -161,7 +162,7 @@ ) first := true - switch rc.Operation.Operation { + switch opCtx.Operation.Operation { {{- if .QueryRoot }} case ast.Query: return func(ctx context.Context) *graphql.Response { var response graphql.Response @@ -170,11 +171,11 @@ first = false ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) {{ if .Directives.LocationDirectives "QUERY" -}} - data = ec._queryMiddleware(ctx, rc.Operation, func(ctx context.Context) (interface{}, error){ - return ec._{{.QueryRoot.Name}}(ctx, rc.Operation.SelectionSet), nil + data = ec._queryMiddleware(ctx, opCtx.Operation, func(ctx context.Context) (interface{}, error){ + return ec._{{.QueryRoot.Name}}(ctx, opCtx.Operation.SelectionSet), nil }) {{- else -}} - data = ec._{{.QueryRoot.Name}}(ctx, rc.Operation.SelectionSet) + data = ec._{{.QueryRoot.Name}}(ctx, opCtx.Operation.SelectionSet) {{- end }} } else { if atomic.LoadInt32(&ec.pendingDeferred) > 0 { @@ -206,11 +207,11 @@ first = false ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) {{ if .Directives.LocationDirectives "MUTATION" -}} - data := ec._mutationMiddleware(ctx, rc.Operation, func(ctx context.Context) (interface{}, error){ - return ec._{{.MutationRoot.Name}}(ctx, rc.Operation.SelectionSet), nil + data := ec._mutationMiddleware(ctx, opCtx.Operation, func(ctx context.Context) (interface{}, error){ + return ec._{{.MutationRoot.Name}}(ctx, opCtx.Operation.SelectionSet), nil }) {{- else -}} - data := ec._{{.MutationRoot.Name}}(ctx, rc.Operation.SelectionSet) + data := ec._{{.MutationRoot.Name}}(ctx, opCtx.Operation.SelectionSet) {{- end }} var buf bytes.Buffer data.MarshalGQL(&buf) @@ -223,11 +224,11 @@ {{- if .SubscriptionRoot }} case ast.Subscription: {{ if .Directives.LocationDirectives "SUBSCRIPTION" -}} - next := ec._subscriptionMiddleware(ctx, rc.Operation, func(ctx context.Context) (interface{}, error){ - return ec._{{.SubscriptionRoot.Name}}(ctx, rc.Operation.SelectionSet),nil + 
next := ec._subscriptionMiddleware(ctx, opCtx.Operation, func(ctx context.Context) (interface{}, error){ + return ec._{{.SubscriptionRoot.Name}}(ctx, opCtx.Operation.SelectionSet),nil }) {{- else -}} - next := ec._{{.SubscriptionRoot.Name}}(ctx, rc.Operation.SelectionSet) + next := ec._{{.SubscriptionRoot.Name}}(ctx, opCtx.Operation.SelectionSet) {{- end }} var buf bytes.Buffer diff --git a/vendor/github.com/99designs/gqlgen/codegen/root_.gotpl b/vendor/github.com/99designs/gqlgen/codegen/root_.gotpl index d392ef53..91882647 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/root_.gotpl +++ b/vendor/github.com/99designs/gqlgen/codegen/root_.gotpl @@ -123,8 +123,8 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in } func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { - rc := graphql.GetOperationContext(ctx) - ec := executionContext{rc, e, 0, 0, make(chan graphql.DeferredResult)} + opCtx := graphql.GetOperationContext(ctx) + ec := executionContext{opCtx, e, 0, 0, make(chan graphql.DeferredResult)} inputUnmarshalMap := graphql.BuildUnmarshalerMap( {{- range $input := .Inputs -}} {{ if not $input.HasUnmarshal }} @@ -134,7 +134,7 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { ) first := true - switch rc.Operation.Operation { + switch opCtx.Operation.Operation { {{- if .QueryRoot }} case ast.Query: return func(ctx context.Context) *graphql.Response { var response graphql.Response @@ -143,11 +143,11 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { first = false ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) {{ if .Directives.LocationDirectives "QUERY" -}} - data = ec._queryMiddleware(ctx, rc.Operation, func(ctx context.Context) (interface{}, error){ - return ec._{{.QueryRoot.Name}}(ctx, rc.Operation.SelectionSet), nil + data = ec._queryMiddleware(ctx, opCtx.Operation, func(ctx context.Context) (interface{}, error){ + return ec._{{.QueryRoot.Name}}(ctx, opCtx.Operation.SelectionSet), nil }) {{- else -}} - data = ec._{{.QueryRoot.Name}}(ctx, rc.Operation.SelectionSet) + data = ec._{{.QueryRoot.Name}}(ctx, opCtx.Operation.SelectionSet) {{- end }} } else { if atomic.LoadInt32(&ec.pendingDeferred) > 0 { @@ -179,11 +179,11 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { first = false ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) {{ if .Directives.LocationDirectives "MUTATION" -}} - data := ec._mutationMiddleware(ctx, rc.Operation, func(ctx context.Context) (interface{}, error){ - return ec._{{.MutationRoot.Name}}(ctx, rc.Operation.SelectionSet), nil + data := ec._mutationMiddleware(ctx, opCtx.Operation, func(ctx context.Context) (interface{}, error){ + return ec._{{.MutationRoot.Name}}(ctx, opCtx.Operation.SelectionSet), nil }) {{- else -}} - data := ec._{{.MutationRoot.Name}}(ctx, rc.Operation.SelectionSet) + data := ec._{{.MutationRoot.Name}}(ctx, opCtx.Operation.SelectionSet) {{- end }} var buf bytes.Buffer data.MarshalGQL(&buf) @@ -196,11 +196,11 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { {{- if .SubscriptionRoot }} case ast.Subscription: {{ if .Directives.LocationDirectives "SUBSCRIPTION" -}} - next := ec._subscriptionMiddleware(ctx, rc.Operation, func(ctx context.Context) (interface{}, error){ - return ec._{{.SubscriptionRoot.Name}}(ctx, rc.Operation.SelectionSet),nil + next := ec._subscriptionMiddleware(ctx, opCtx.Operation, func(ctx context.Context) (interface{}, error){ 
+ return ec._{{.SubscriptionRoot.Name}}(ctx, opCtx.Operation.SelectionSet),nil }) {{- else -}} - next := ec._{{.SubscriptionRoot.Name}}(ctx, rc.Operation.SelectionSet) + next := ec._{{.SubscriptionRoot.Name}}(ctx, opCtx.Operation.SelectionSet) {{- end }} var buf bytes.Buffer diff --git a/vendor/github.com/99designs/gqlgen/codegen/type.gotpl b/vendor/github.com/99designs/gqlgen/codegen/type.gotpl index 1898d444..aa143b1f 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/type.gotpl +++ b/vendor/github.com/99designs/gqlgen/codegen/type.gotpl @@ -101,6 +101,9 @@ ret := make(graphql.Array, len(v)) {{- if not $type.IsScalar }} var wg sync.WaitGroup + {{- if gt $.Config.Exec.WorkerLimit 0 }} + sm := semaphore.NewWeighted({{ $.Config.Exec.WorkerLimit }}) + {{- end }} isLen1 := len(v) == 1 if !isLen1 { wg.Add(len(v)) @@ -124,14 +127,29 @@ }() {{- end }} if !isLen1 { - defer wg.Done() + {{- if gt $.Config.Exec.WorkerLimit 0 }} + defer func(){ + sm.Release(1) + wg.Done() + }() + {{- else }} + defer wg.Done() + {{- end }} } ret[i] = ec.{{ $type.Elem.MarshalFunc }}(ctx, sel, v[i]) } if isLen1 { f(i) } else { - go f(i) + {{- if gt $.Config.Exec.WorkerLimit 0 }} + if err := sm.Acquire(ctx, 1); err != nil { + ec.Error(ctx, ctx.Err()) + } else { + go f(i) + } + {{- else }} + go f(i) + {{- end }} } {{ else }} ret[i] = ec.{{ $type.Elem.MarshalFunc }}(ctx, sel, v[i]) diff --git a/vendor/github.com/99designs/gqlgen/graphql/cache.go b/vendor/github.com/99designs/gqlgen/graphql/cache.go index 8804cfe0..836d9868 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/cache.go +++ b/vendor/github.com/99designs/gqlgen/graphql/cache.go @@ -23,9 +23,13 @@ func (m MapCache[T]) Get(_ context.Context, key string) (value T, ok bool) { // Add adds a value to the cache. func (m MapCache[T]) Add(_ context.Context, key string, value T) { m[key] = value } -type NoCache[T any, T2 *T] struct{} +type NoCache[T any] struct{} -var _ Cache[*string] = (*NoCache[string, *string])(nil) +var _ Cache[string] = (*NoCache[string])(nil) -func (n NoCache[T, T2]) Get(_ context.Context, _ string) (value T2, ok bool) { return nil, false } -func (n NoCache[T, T2]) Add(_ context.Context, _ string, _ T2) {} +func (n NoCache[T]) Get(_ context.Context, _ string) (value T, ok bool) { + var val T + return val, false +} + +func (n NoCache[T]) Add(_ context.Context, _ string, _ T) {} diff --git a/vendor/github.com/99designs/gqlgen/graphql/context_field.go b/vendor/github.com/99designs/gqlgen/graphql/context_field.go index b3fab910..0fef0855 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/context_field.go +++ b/vendor/github.com/99designs/gqlgen/graphql/context_field.go @@ -36,14 +36,14 @@ type FieldContext struct { // // srv.AroundFields(func(ctx context.Context, next graphql.Resolver) (interface{}, error) { // fc := graphql.GetFieldContext(ctx) - // op := graphql.GetOperationContext(ctx) + // opCtx := graphql.GetOperationContext(ctx) // collected := graphql.CollectFields(opCtx, fc.Field.Selections, []string{"User"}) // // child, err := fc.Child(ctx, collected[0]) // if err != nil { // return nil, err // } - // fmt.Println("child context %q with args: %v", child.Field.Name, child.Args) + // fmt.Printf("child context %q with args: %v\n", child.Field.Name, child.Args) // // return next(ctx) // }) diff --git a/vendor/github.com/99designs/gqlgen/graphql/context_operation.go b/vendor/github.com/99designs/gqlgen/graphql/context_operation.go index d515acce..718809a4 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/context_operation.go +++ 
b/vendor/github.com/99designs/gqlgen/graphql/context_operation.go @@ -65,8 +65,8 @@ func GetOperationContext(ctx context.Context) *OperationContext { panic("missing operation context") } -func WithOperationContext(ctx context.Context, rc *OperationContext) context.Context { - return context.WithValue(ctx, operationCtx, rc) +func WithOperationContext(ctx context.Context, opCtx *OperationContext) context.Context { + return context.WithValue(ctx, operationCtx, opCtx) } // HasOperationContext checks if the given context is part of an ongoing operation @@ -77,7 +77,7 @@ func HasOperationContext(ctx context.Context) bool { return ok && val != nil } -// This is just a convenient wrapper method for CollectFields +// CollectFieldsCtx is just a convenient wrapper method for CollectFields. func CollectFieldsCtx(ctx context.Context, satisfies []string) []CollectedField { resctx := GetFieldContext(ctx) return CollectFields(GetOperationContext(ctx), resctx.Field.Selections, satisfies) diff --git a/vendor/github.com/99designs/gqlgen/graphql/executable_schema.go b/vendor/github.com/99designs/gqlgen/graphql/executable_schema.go index aa9d7c44..03d25aab 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/executable_schema.go +++ b/vendor/github.com/99designs/gqlgen/graphql/executable_schema.go @@ -17,8 +17,7 @@ type ExecutableSchema interface { } // CollectFields returns the set of fields from an ast.SelectionSet where all collected fields satisfy at least one of the GraphQL types -// passed through satisfies. Providing an empty or nil slice for satisfies will return collect all fields regardless of fragment -// type conditions. +// passed through satisfies. Providing an empty slice for satisfies will collect all fields regardless of fragment type conditions. func CollectFields(reqCtx *OperationContext, selSet ast.SelectionSet, satisfies []string) []CollectedField { return collectFields(reqCtx, selSet, satisfies, map[string]bool{}) } @@ -39,13 +38,22 @@ func collectFields(reqCtx *OperationContext, selSet ast.SelectionSet, satisfies f.Selections = append(f.Selections, sel.SelectionSet...) case *ast.InlineFragment: - if !shouldIncludeNode(sel.Directives, reqCtx.Variables) { + // To allow simplified "collect all" types behavior, pass an empty list + // of types that the type condition must satisfy: we will apply the + // fragment regardless of type condition. + // + // When the type condition is not set (... { field }) we will apply the + // fragment to any satisfying types. + // + // We will only NOT apply the fragment when we have at least one type in + // the list we must satisfy and a type condition to compare them to. 
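CollectFieldsCtx, whose doc comment is cleaned up above, is the usual entry point to this logic from application code: passing nil (or an empty slice) for satisfies collects every selected child field regardless of fragment type conditions, exactly as the updated CollectFields comment states. A hedged sketch of using it from a field middleware, assuming srv is the *handler.Server built from your generated schema and that context, log and the gqlgen graphql package are imported:

srv.AroundFields(func(ctx context.Context, next graphql.Resolver) (interface{}, error) {
	// nil satisfies: collect every selected child field of the current field.
	for _, f := range graphql.CollectFieldsCtx(ctx, nil) {
		log.Printf("selected child field: %s", f.Name)
	}
	return next(ctx)
})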
+ if len(satisfies) > 0 && sel.TypeCondition != "" && !instanceOf(sel.TypeCondition, satisfies) { continue } - if len(satisfies) > 0 && !instanceOf(sel.TypeCondition, satisfies) { + + if !shouldIncludeNode(sel.Directives, reqCtx.Variables) { continue } - shouldDefer, label := deferrable(sel.Directives, reqCtx.Variables) for _, childField := range collectFields(reqCtx, sel.SelectionSet, satisfies, visited) { @@ -61,9 +69,6 @@ func collectFields(reqCtx *OperationContext, selSet ast.SelectionSet, satisfies } case *ast.FragmentSpread: - if !shouldIncludeNode(sel.Directives, reqCtx.Variables) { - continue - } fragmentName := sel.Name if _, seen := visited[fragmentName]; seen { continue @@ -80,6 +85,9 @@ func collectFields(reqCtx *OperationContext, selSet ast.SelectionSet, satisfies continue } + if !shouldIncludeNode(sel.Directives, reqCtx.Variables) { + continue + } shouldDefer, label := deferrable(sel.Directives, reqCtx.Variables) for _, childField := range collectFields(reqCtx, fragment.SelectionSet, satisfies, visited) { diff --git a/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go b/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go index 566b0476..c0f0721c 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go +++ b/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go @@ -36,7 +36,7 @@ func New(es graphql.ExecutableSchema) *Executor { es: es, errorPresenter: graphql.DefaultErrorPresenter, recoverFunc: graphql.DefaultRecover, - queryCache: graphql.NoCache[ast.QueryDocument, *ast.QueryDocument]{}, + queryCache: graphql.NoCache[*ast.QueryDocument]{}, ext: processExtensions(nil), parserTokenLimit: parserTokenNoLimit, } @@ -47,7 +47,7 @@ func (e *Executor) CreateOperationContext( ctx context.Context, params *graphql.RawParams, ) (*graphql.OperationContext, gqlerror.List) { - rc := &graphql.OperationContext{ + opCtx := &graphql.OperationContext{ DisableIntrospection: true, RecoverFunc: e.recoverFunc, ResolverMiddleware: e.ext.fieldMiddleware, @@ -57,56 +57,56 @@ func (e *Executor) CreateOperationContext( OperationStart: graphql.GetStartTime(ctx), }, } - ctx = graphql.WithOperationContext(ctx, rc) + ctx = graphql.WithOperationContext(ctx, opCtx) for _, p := range e.ext.operationParameterMutators { if err := p.MutateOperationParameters(ctx, params); err != nil { - return rc, gqlerror.List{err} + return opCtx, gqlerror.List{err} } } - rc.RawQuery = params.Query - rc.OperationName = params.OperationName - rc.Headers = params.Headers + opCtx.RawQuery = params.Query + opCtx.OperationName = params.OperationName + opCtx.Headers = params.Headers var listErr gqlerror.List - rc.Doc, listErr = e.parseQuery(ctx, &rc.Stats, params.Query) + opCtx.Doc, listErr = e.parseQuery(ctx, &opCtx.Stats, params.Query) if len(listErr) != 0 { - return rc, listErr + return opCtx, listErr } - rc.Operation = rc.Doc.Operations.ForName(params.OperationName) - if rc.Operation == nil { + opCtx.Operation = opCtx.Doc.Operations.ForName(params.OperationName) + if opCtx.Operation == nil { err := gqlerror.Errorf("operation %s not found", params.OperationName) errcode.Set(err, errcode.ValidationFailed) - return rc, gqlerror.List{err} + return opCtx, gqlerror.List{err} } var err error - rc.Variables, err = validator.VariableValues(e.es.Schema(), rc.Operation, params.Variables) + opCtx.Variables, err = validator.VariableValues(e.es.Schema(), opCtx.Operation, params.Variables) if err != nil { gqlErr, ok := err.(*gqlerror.Error) if ok { errcode.Set(gqlErr, 
errcode.ValidationFailed) - return rc, gqlerror.List{gqlErr} + return opCtx, gqlerror.List{gqlErr} } } - rc.Stats.Validation.End = graphql.Now() + opCtx.Stats.Validation.End = graphql.Now() for _, p := range e.ext.operationContextMutators { - if err := p.MutateOperationContext(ctx, rc); err != nil { - return rc, gqlerror.List{err} + if err := p.MutateOperationContext(ctx, opCtx); err != nil { + return opCtx, gqlerror.List{err} } } - return rc, nil + return opCtx, nil } func (e *Executor) DispatchOperation( ctx context.Context, - rc *graphql.OperationContext, + opCtx *graphql.OperationContext, ) (graphql.ResponseHandler, context.Context) { - ctx = graphql.WithOperationContext(ctx, rc) + ctx = graphql.WithOperationContext(ctx, opCtx) var innerCtx context.Context res := e.ext.operationMiddleware(ctx, func(ctx context.Context) graphql.ResponseHandler { diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler.go b/vendor/github.com/99designs/gqlgen/graphql/handler.go index 4df36117..ab2ed4b6 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler.go @@ -34,7 +34,7 @@ type ( GraphExecutor interface { CreateOperationContext(ctx context.Context, params *RawParams) (*OperationContext, gqlerror.List) - DispatchOperation(ctx context.Context, rc *OperationContext) (ResponseHandler, context.Context) + DispatchOperation(ctx context.Context, opCtx *OperationContext) (ResponseHandler, context.Context) DispatchError(ctx context.Context, list gqlerror.List) *Response } @@ -65,7 +65,7 @@ type ( // OperationContextMutator is called after creating the request context, but before executing the root resolver. OperationContextMutator interface { - MutateOperationContext(ctx context.Context, rc *OperationContext) *gqlerror.Error + MutateOperationContext(ctx context.Context, opCtx *OperationContext) *gqlerror.Error } // OperationInterceptor is called for each incoming query, for basic requests the writer will be invoked once, diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go index a4cb32c9..9390c4fe 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go @@ -6,7 +6,7 @@ import ( "encoding/hex" "errors" - "github.com/mitchellh/mapstructure" + "github.com/go-viper/mapstructure/v2" "github.com/vektah/gqlparser/v2/gqlerror" "github.com/99designs/gqlgen/graphql" @@ -98,12 +98,12 @@ func (a AutomaticPersistedQuery) MutateOperationParameters(ctx context.Context, } func GetApqStats(ctx context.Context) *ApqStats { - rc := graphql.GetOperationContext(ctx) - if rc == nil { + opCtx := graphql.GetOperationContext(ctx) + if opCtx == nil { return nil } - s, _ := rc.Stats.GetExtension(apqExtension).(*ApqStats) + s, _ := opCtx.Stats.GetExtension(apqExtension).(*ApqStats) return s } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/complexity.go b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/complexity.go index af1e002f..3684f078 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/complexity.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/complexity.go @@ -17,7 +17,7 @@ const errComplexityLimit = "COMPLEXITY_LIMIT_EXCEEDED" // // If a query is submitted that exceeds the limit, a 422 status code will be returned. 
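The rc to opCtx rename above is mechanical, but ComplexityLimit is also the extension most servers wire in by hand; extension.FixedComplexityLimit(200) installs a flat ceiling. A sketch of the per-request variant using the exported Func field, assuming srv is a *handler.Server and isPrivileged is your own helper, not a gqlgen API:

srv.Use(&extension.ComplexityLimit{
	Func: func(ctx context.Context, opCtx *graphql.OperationContext) int {
		// Over-limit operations are rejected with a 422 and the
		// COMPLEXITY_LIMIT_EXCEEDED error code, per the comment above.
		if isPrivileged(ctx) {
			return 2000
		}
		return 200
	},
})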
type ComplexityLimit struct { - Func func(ctx context.Context, rc *graphql.OperationContext) int + Func func(ctx context.Context, opCtx *graphql.OperationContext) int es graphql.ExecutableSchema } @@ -40,7 +40,7 @@ type ComplexityStats struct { // FixedComplexityLimit sets a complexity limit that does not change func FixedComplexityLimit(limit int) *ComplexityLimit { return &ComplexityLimit{ - Func: func(ctx context.Context, rc *graphql.OperationContext) int { + Func: func(ctx context.Context, opCtx *graphql.OperationContext) int { return limit }, } @@ -58,13 +58,13 @@ func (c *ComplexityLimit) Validate(schema graphql.ExecutableSchema) error { return nil } -func (c ComplexityLimit) MutateOperationContext(ctx context.Context, rc *graphql.OperationContext) *gqlerror.Error { - op := rc.Doc.Operations.ForName(rc.OperationName) - complexityCalcs := complexity.Calculate(c.es, op, rc.Variables) +func (c ComplexityLimit) MutateOperationContext(ctx context.Context, opCtx *graphql.OperationContext) *gqlerror.Error { + op := opCtx.Doc.Operations.ForName(opCtx.OperationName) + complexityCalcs := complexity.Calculate(c.es, op, opCtx.Variables) - limit := c.Func(ctx, rc) + limit := c.Func(ctx, opCtx) - rc.Stats.SetExtension(complexityExtension, &ComplexityStats{ + opCtx.Stats.SetExtension(complexityExtension, &ComplexityStats{ Complexity: complexityCalcs, ComplexityLimit: limit, }) @@ -79,11 +79,11 @@ func (c ComplexityLimit) MutateOperationContext(ctx context.Context, rc *graphql } func GetComplexityStats(ctx context.Context) *ComplexityStats { - rc := graphql.GetOperationContext(ctx) - if rc == nil { + opCtx := graphql.GetOperationContext(ctx) + if opCtx == nil { return nil } - s, _ := rc.Stats.GetExtension(complexityExtension).(*ComplexityStats) + s, _ := opCtx.Stats.GetExtension(complexityExtension).(*ComplexityStats) return s } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/introspection.go b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/introspection.go index 8e391265..bdf0fdf8 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/introspection.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/introspection.go @@ -24,7 +24,7 @@ func (c Introspection) Validate(schema graphql.ExecutableSchema) error { return nil } -func (c Introspection) MutateOperationContext(ctx context.Context, rc *graphql.OperationContext) *gqlerror.Error { - rc.DisableIntrospection = false +func (c Introspection) MutateOperationContext(ctx context.Context, opCtx *graphql.OperationContext) *gqlerror.Error { + opCtx.DisableIntrospection = false return nil } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_get.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_get.go index 470a0fbe..09f1020d 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_get.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_get.go @@ -66,21 +66,21 @@ func (h GET) Do(w http.ResponseWriter, r *http.Request, exec graphql.GraphExecut raw.ReadTime.End = graphql.Now() - rc, gqlError := exec.CreateOperationContext(r.Context(), raw) + opCtx, gqlError := exec.CreateOperationContext(r.Context(), raw) if gqlError != nil { w.WriteHeader(statusFor(gqlError)) - resp := exec.DispatchError(graphql.WithOperationContext(r.Context(), rc), gqlError) + resp := exec.DispatchError(graphql.WithOperationContext(r.Context(), opCtx), gqlError) writeJson(w, resp) return } - op := 
rc.Doc.Operations.ForName(rc.OperationName) + op := opCtx.Doc.Operations.ForName(opCtx.OperationName) if op.Operation != ast.Query { w.WriteHeader(http.StatusNotAcceptable) writeJsonError(w, "GET requests only allow query operations") return } - responses, ctx := exec.DispatchOperation(r.Context(), rc) + responses, ctx := exec.DispatchOperation(r.Context(), opCtx) writeJson(w, responses(ctx)) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_multipart_mixed.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_multipart_mixed.go new file mode 100644 index 00000000..6447b286 --- /dev/null +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_multipart_mixed.go @@ -0,0 +1,286 @@ +package transport + +import ( + "encoding/json" + "fmt" + "io" + "log" + "mime" + "net/http" + "strings" + "sync" + "time" + + "github.com/vektah/gqlparser/v2/gqlerror" + + "github.com/99designs/gqlgen/graphql" +) + +// MultipartMixed is a transport that supports the multipart/mixed spec +type MultipartMixed struct { + Boundary string + DeliveryTimeout time.Duration +} + +var _ graphql.Transport = MultipartMixed{} + +// Supports checks if the request supports the multipart/mixed spec +// Might be worth check the spec required, but Apollo Client mislabel the spec in the headers. +func (t MultipartMixed) Supports(r *http.Request) bool { + if !strings.Contains(r.Header.Get("Accept"), "multipart/mixed") { + return false + } + mediaType, _, err := mime.ParseMediaType(r.Header.Get("Content-Type")) + if err != nil { + return false + } + return r.Method == http.MethodPost && mediaType == "application/json" +} + +// Do implements the multipart/mixed spec as a multipart/mixed response +func (t MultipartMixed) Do(w http.ResponseWriter, r *http.Request, exec graphql.GraphExecutor) { + // Implements the multipart/mixed spec as a multipart/mixed response: + // * https://github.com/graphql/graphql-wg/blob/e4ef5f9d5997815d9de6681655c152b6b7838b4c/rfcs/DeferStream.md + // 2022/08/23 as implemented by gqlgen. + // * https://github.com/graphql/graphql-wg/blob/f22ea7748c6ebdf88fdbf770a8d9e41984ebd429/rfcs/DeferStream.md June 2023 Spec for the + // `incremental` field + // * https://github.com/graphql/graphql-over-http/blob/main/rfcs/IncrementalDelivery.md + // multipart specification + // Follows the format that is used in the Apollo Client tests: + // https://github.com/apollographql/apollo-client/blob/v3.11.8/src/link/http/__tests__/responseIterator.ts#L68 + // Apollo Client, despite mentioning in its requests that they require the 2022 spec, it wants the + // `incremental` field to be an array of responses, not a single response. Theoretically we could + // batch responses in the `incremental` field, if we wanted to optimize this code. + ctx := r.Context() + flusher, ok := w.(http.Flusher) + if !ok { + SendErrorf(w, http.StatusInternalServerError, "streaming unsupported") + return + } + defer flusher.Flush() + + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Connection", "keep-alive") + // This header will be replaced below, but it's required in case we return errors. 
+ w.Header().Set("Content-Type", "application/json") + + boundary := t.Boundary + if boundary == "" { + boundary = "-" + } + timeout := t.DeliveryTimeout + if timeout.Milliseconds() < 1 { + // If the timeout is less than 1ms, we'll set it to 1ms to avoid a busy loop + timeout = 1 * time.Millisecond + } + + params := &graphql.RawParams{} + start := graphql.Now() + params.Headers = r.Header + params.ReadTime = graphql.TraceTiming{ + Start: start, + End: graphql.Now(), + } + + bodyString, err := getRequestBody(r) + if err != nil { + gqlErr := gqlerror.Errorf("could not get json request body: %+v", err) + resp := exec.DispatchError(ctx, gqlerror.List{gqlErr}) + log.Printf("could not get json request body: %+v", err.Error()) + writeJson(w, resp) + return + } + + bodyReader := io.NopCloser(strings.NewReader(bodyString)) + if err = jsonDecode(bodyReader, ¶ms); err != nil { + w.WriteHeader(http.StatusBadRequest) + gqlErr := gqlerror.Errorf( + "json request body could not be decoded: %+v body:%s", + err, + bodyString, + ) + resp := exec.DispatchError(ctx, gqlerror.List{gqlErr}) + log.Printf("decoding error: %+v body:%s", err.Error(), bodyString) + writeJson(w, resp) + return + } + + rc, opErr := exec.CreateOperationContext(ctx, params) + ctx = graphql.WithOperationContext(ctx, rc) + if opErr != nil { + w.WriteHeader(statusFor(opErr)) + + resp := exec.DispatchError(ctx, opErr) + writeJson(w, resp) + return + } + + // Example of the response format (note the new lines and boundaries are important!): + // https://github.com/graphql/graphql-over-http/blob/main/rfcs/IncrementalDelivery.md + // --graphql + // Content-Type: application/json + // + // {"data":{"apps":{"apps":[ .. ],"totalNumApps":161,"__typename":"AppsOutput"}},"hasNext":true} + // --graphql + // Content-Type: application/json + // + // {"incremental":[{"data":{"groupAccessCount":0},"label":"test","path":["apps","apps",7],"hasNext":true}],"hasNext":true} + // --graphql + // ... + // --graphql-- + // Last boundary is a closing boundary with two dashes at the end. + + w.Header().Set( + "Content-Type", + fmt.Sprintf(`multipart/mixed;boundary="%s";deferSpec=20220824`, boundary), + ) + + a := newMultipartResponseAggregator(w, boundary, timeout) + defer a.Done(w) + + responses, ctx := exec.DispatchOperation(ctx, rc) + initialResponse := true + for { + response := responses(ctx) + if response == nil { + break + } + + a.Add(response, initialResponse) + initialResponse = false + } +} + +func writeIncrementalJson(w io.Writer, responses []*graphql.Response, hasNext bool) { + // TODO: Remove this wrapper on response once gqlgen supports the 2023 spec + b, err := json.Marshal(struct { + Incremental []*graphql.Response `json:"incremental"` + HasNext bool `json:"hasNext"` + }{ + Incremental: responses, + HasNext: hasNext, + }) + if err != nil { + panic(err) + } + w.Write(b) +} + +func writeBoundary(w io.Writer, boundary string, finalResponse bool) { + if finalResponse { + fmt.Fprintf(w, "--%s--\r\n", boundary) + return + } + fmt.Fprintf(w, "--%s\r\n", boundary) +} + +func writeContentTypeHeader(w io.Writer) { + fmt.Fprintf(w, "Content-Type: application/json\r\n\r\n") +} + +// multipartResponseAggregator helps us reduce the number of responses sent to the frontend by batching all the +// incremental responses together. 
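The new MultipartMixed transport only engages for POST requests that advertise Accept: multipart/mixed with an application/json body, so plain JSON clients keep using the existing transports. A hedged sketch of wiring it up, assuming es is the ExecutableSchema from your generated package and that handler, transport and time are imported:

srv := handler.New(es)
srv.AddTransport(transport.POST{})
srv.AddTransport(transport.MultipartMixed{
	// Boundary falls back to "-" when empty and DeliveryTimeout is rounded up
	// to 1ms, matching the checks in Do above.
	Boundary:        "graphql",
	DeliveryTimeout: 5 * time.Millisecond,
})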
+type multipartResponseAggregator struct { + mu sync.Mutex + boundary string + initialResponse *graphql.Response + deferResponses []*graphql.Response + done chan bool +} + +// newMultipartResponseAggregator creates a new multipartResponseAggregator +// The aggregator will flush responses to the client every `tickerDuration` (default 1ms) so that +// multiple incremental responses are batched together. +func newMultipartResponseAggregator( + w http.ResponseWriter, + boundary string, + tickerDuration time.Duration, +) *multipartResponseAggregator { + a := &multipartResponseAggregator{ + boundary: boundary, + done: make(chan bool, 1), + } + go func() { + ticker := time.NewTicker(tickerDuration) + defer ticker.Stop() + for { + select { + case <-a.done: + return + case <-ticker.C: + a.flush(w) + } + } + }() + return a +} + +// Done flushes the remaining responses +func (a *multipartResponseAggregator) Done(w http.ResponseWriter) { + a.done <- true + a.flush(w) +} + +// Add accumulates the responses +func (a *multipartResponseAggregator) Add(resp *graphql.Response, initialResponse bool) { + a.mu.Lock() + defer a.mu.Unlock() + if initialResponse { + a.initialResponse = resp + return + } + a.deferResponses = append(a.deferResponses, resp) +} + +// flush sends the accumulated responses to the client +func (a *multipartResponseAggregator) flush(w http.ResponseWriter) { + a.mu.Lock() + defer a.mu.Unlock() + + // If we don't have any responses, we can return early + if a.initialResponse == nil && len(a.deferResponses) == 0 { + return + } + + flusher, ok := w.(http.Flusher) + if !ok { + // This should never happen, as we check for this much earlier on + panic("response writer does not support flushing") + } + + hasNext := false + if a.initialResponse != nil { + // Initial response will need to begin with the boundary + writeBoundary(w, a.boundary, false) + writeContentTypeHeader(w) + + writeJson(w, a.initialResponse) + hasNext = a.initialResponse.HasNext != nil && *a.initialResponse.HasNext + + // Handle when initial is aggregated with deferred responses. + if len(a.deferResponses) > 0 { + fmt.Fprintf(w, "\r\n") + writeBoundary(w, a.boundary, false) + } + + // Reset the initial response so we don't send it again + a.initialResponse = nil + } + + if len(a.deferResponses) > 0 { + writeContentTypeHeader(w) + hasNext = a.deferResponses[len(a.deferResponses)-1].HasNext != nil && + *a.deferResponses[len(a.deferResponses)-1].HasNext + writeIncrementalJson(w, a.deferResponses, hasNext) + // Reset the deferResponses so we don't send them again + a.deferResponses = nil + } + + // Make sure to put the delimiter after every request, so that Apollo Client knows that the + // current payload has been sent, and updates the UI. This is particular important for the first + // response and the last response, which may either hang or never get handled. + // Final response will have a closing boundary with two dashes at the end. 
+ fmt.Fprintf(w, "\r\n") + writeBoundary(w, a.boundary, !hasNext) + flusher.Flush() +} diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_post.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_post.go index 985f8db2..4b9a2399 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_post.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_post.go @@ -1,11 +1,12 @@ package transport import ( + "bytes" "fmt" "io" "mime" "net/http" - "strings" + "sync" "github.com/vektah/gqlparser/v2/gqlerror" @@ -46,32 +47,49 @@ func getRequestBody(r *http.Request) (string, error) { return string(body), nil } +var pool = sync.Pool{ + New: func() any { + return &graphql.RawParams{} + }, +} + func (h POST) Do(w http.ResponseWriter, r *http.Request, exec graphql.GraphExecutor) { ctx := r.Context() writeHeaders(w, h.ResponseHeaders) - params := &graphql.RawParams{} - start := graphql.Now() + params := pool.Get().(*graphql.RawParams) + defer func() { + params.Headers = nil + params.ReadTime = graphql.TraceTiming{} + params.Extensions = nil + params.OperationName = "" + params.Query = "" + params.Variables = nil + + pool.Put(params) + }() params.Headers = r.Header + + start := graphql.Now() params.ReadTime = graphql.TraceTiming{ Start: start, End: graphql.Now(), } - bodyString, err := getRequestBody(r) + bodyBytes, err := io.ReadAll(r.Body) if err != nil { - gqlErr := gqlerror.Errorf("could not get json request body: %+v", err) + gqlErr := gqlerror.Errorf("could not read request body: %+v", err) resp := exec.DispatchError(ctx, gqlerror.List{gqlErr}) writeJson(w, resp) return } - bodyReader := io.NopCloser(strings.NewReader(bodyString)) - if err = jsonDecode(bodyReader, ¶ms); err != nil { + bodyReader := bytes.NewReader(bodyBytes) + if err := jsonDecode(bodyReader, ¶ms); err != nil { w.WriteHeader(http.StatusBadRequest) gqlErr := gqlerror.Errorf( "json request body could not be decoded: %+v body:%s", err, - bodyString, + string(bodyBytes), ) resp := exec.DispatchError(ctx, gqlerror.List{gqlErr}) writeJson(w, resp) diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/util.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/util.go index aca7207e..f2a0d357 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/util.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/util.go @@ -13,7 +13,7 @@ import ( func writeJson(w io.Writer, response *graphql.Response) { b, err := json.Marshal(response) if err != nil { - panic(err) + panic(fmt.Errorf("unable to marshal %s: %w", string(response.Data), err)) } w.Write(b) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go index 32e31c7c..d6c174cd 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go @@ -54,6 +54,7 @@ type ( receivedPong bool exec graphql.GraphExecutor closed bool + headers http.Header initPayload InitPayload } @@ -119,6 +120,7 @@ func (t Websocket) Do(w http.ResponseWriter, r *http.Request, exec graphql.Graph ctx: r.Context(), exec: exec, me: me, + headers: r.Header, Websocket: t, } @@ -387,6 +389,8 @@ func (c *wsConnection) subscribe(start time.Time, msg *message) { End: graphql.Now(), } + params.Headers = c.headers + rc, err := c.exec.CreateOperationContext(ctx, params) if err != 
nil { resp := c.exec.DispatchError(graphql.WithOperationContext(ctx, rc), err) diff --git a/vendor/github.com/99designs/gqlgen/graphql/playground/playground.go b/vendor/github.com/99designs/gqlgen/graphql/playground/playground.go index 816fcca3..8b1c9787 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/playground/playground.go +++ b/vendor/github.com/99designs/gqlgen/graphql/playground/playground.go @@ -92,7 +92,10 @@ func Handler(title, endpoint string) http.HandlerFunc { // HandlerWithHeaders sets up the playground. // fetcherHeaders are used by the playground's fetcher instance and will not be visible in the UI. // uiHeaders are default headers that will show up in the UI headers editor. -func HandlerWithHeaders(title, endpoint string, fetcherHeaders, uiHeaders map[string]string) http.HandlerFunc { +func HandlerWithHeaders( + title, endpoint string, + fetcherHeaders, uiHeaders map[string]string, +) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "text/html; charset=UTF-8") err := page.Execute(w, map[string]any{ @@ -102,9 +105,9 @@ func HandlerWithHeaders(title, endpoint string, fetcherHeaders, uiHeaders map[st "uiHeaders": uiHeaders, "endpointIsAbsolute": endpointHasScheme(endpoint), "subscriptionEndpoint": getSubscriptionEndpoint(endpoint), - "version": "3.0.6", - "cssSRI": "sha256-wTzfn13a+pLMB5rMeysPPR1hO7x0SwSeQI+cnw7VdbE=", - "jsSRI": "sha256-eNxH+Ah7Z9up9aJYTQycgyNuy953zYZwE9Rqf5rH+r4=", + "version": "3.7.0", + "cssSRI": "sha256-Dbkv2LUWis+0H4Z+IzxLBxM2ka1J133lSjqqtSu49o8=", + "jsSRI": "sha256-qsScAZytFdTAEOM8REpljROHu8DvdvxXBK7xhoq5XD0=", "reactSRI": "sha256-S0lp+k7zWUMk2ixteM6HZvu8L9Eh//OVrt+ZfbCpmgY=", "reactDOMSRI": "sha256-IXWO0ITNDjfnNXIu5POVfqlgYoop36bDzhodR6LW5Pc=", }) diff --git a/vendor/github.com/99designs/gqlgen/graphql/version.go b/vendor/github.com/99designs/gqlgen/graphql/version.go index 2031242f..803902c2 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/version.go +++ b/vendor/github.com/99designs/gqlgen/graphql/version.go @@ -1,3 +1,3 @@ package graphql -const Version = "v0.17.55" +const Version = "v0.17.57" diff --git a/vendor/github.com/99designs/gqlgen/init-templates/gqlgen.yml.gotmpl b/vendor/github.com/99designs/gqlgen/init-templates/gqlgen.yml.gotmpl index 648ec2b4..6429c93a 100644 --- a/vendor/github.com/99designs/gqlgen/init-templates/gqlgen.yml.gotmpl +++ b/vendor/github.com/99designs/gqlgen/init-templates/gqlgen.yml.gotmpl @@ -6,6 +6,8 @@ schema: exec: filename: graph/generated.go package: graph + # Optional: Maximum number of goroutines in concurrency to use per child resolvers(default: unlimited) + # worker_limit: 1000 # Uncomment to enable federation # federation: diff --git a/vendor/github.com/99designs/gqlgen/internal/code/packages.go b/vendor/github.com/99designs/gqlgen/internal/code/packages.go index 17fd5bb6..3b08b01c 100644 --- a/vendor/github.com/99designs/gqlgen/internal/code/packages.go +++ b/vendor/github.com/99designs/gqlgen/internal/code/packages.go @@ -231,8 +231,7 @@ func (p *Packages) ModTidy() error { // Errors returns any errors that were returned by Load, either from the call itself or any of the loaded packages. func (p *Packages) Errors() PkgErrors { - var res []error //nolint:prealloc - res = append(res, p.loadErrors...) + res := append([]error{}, p.loadErrors...) 
for _, pkg := range p.packages { for _, err := range pkg.Errors { res = append(res, err) diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/constants.go b/vendor/github.com/99designs/gqlgen/plugin/federation/constants.go index 8571db1f..2427b22c 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/federation/constants.go +++ b/vendor/github.com/99designs/gqlgen/plugin/federation/constants.go @@ -1,8 +1,9 @@ package federation import ( - "github.com/99designs/gqlgen/codegen/config" "github.com/vektah/gqlparser/v2/ast" + + "github.com/99designs/gqlgen/codegen/config" ) // The name of the field argument that is injected into the resolver to support @requires. diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go index f7a4d6be..2af9205f 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go +++ b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go @@ -279,10 +279,13 @@ func (f *Federation) GenerateCode(data *codegen.Data) error { typeString := strings.Split(obj.Type.String(), ".") requiresImports[strings.Join(typeString[:len(typeString)-1], ".")] = true + if containsUnionField(reqField) { + continue + } + cgField := reqField.Field.TypeReference(obj, data.Objects) reqField.Type = cgField.TypeReference } - // add type info to entity e.Type = obj.Type } @@ -329,6 +332,15 @@ func (f *Federation) GenerateCode(data *codegen.Data) error { }) } +func containsUnionField(reqField *Requires) bool { + for _, requireFields := range reqField.Field { + if strings.HasPrefix(requireFields, "... on") { + return true + } + } + return false +} + // Fill in types for key fields func populateKeyFieldTypes( resolver *EntityResolver, diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/fieldset/fieldset.go b/vendor/github.com/99designs/gqlgen/plugin/federation/fieldset/fieldset.go index 8ff53cd2..65b32eae 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/federation/fieldset/fieldset.go +++ b/vendor/github.com/99designs/gqlgen/plugin/federation/fieldset/fieldset.go @@ -133,8 +133,22 @@ func (f Field) LastIndex() int { // parseUnnestedKeyFieldSet // handles simple case where none of the fields are nested. func parseUnnestedKeyFieldSet(raw string, prefix []string) Set { ret := Set{} + unionField := false for _, s := range strings.Fields(raw) { + if s == "..." { + continue + } + if s == "on" { + unionField = true + continue + } + + if unionField { + s = "... 
on " + s + unionField = false + } + next := append(prefix[0:len(prefix):len(prefix)], s) //nolint:gocritic // set cap=len in order to force slice reallocation ret = append(ret, next) } diff --git a/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go b/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go index c0e04089..c090c1b3 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go +++ b/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "go/ast" - "io/fs" "os" "path/filepath" "strings" @@ -45,6 +44,7 @@ func (m *Plugin) GenerateCode(data *codegen.Data) error { case config.LayoutSingleFile: return m.generateSingleFile(data) case config.LayoutFollowSchema: + return m.generatePerSchema(data) } @@ -53,6 +53,14 @@ func (m *Plugin) GenerateCode(data *codegen.Data) error { func (m *Plugin) generateSingleFile(data *codegen.Data) error { file := File{} + + if fileExists(data.Config.Resolver.Filename) && + data.Config.Resolver.PreserveResolver { + // file already exists and config says not to update resolver + // so just return + return nil + } + rewriter, err := rewrite.New(data.Config.Resolver.Dir()) if err != nil { return err @@ -85,7 +93,7 @@ func (m *Plugin) generateSingleFile(data *codegen.Data) error { } } - if _, err := os.Stat(data.Config.Resolver.Filename); err == nil { + if fileExists(data.Config.Resolver.Filename) { file.name = data.Config.Resolver.Filename file.imports = rewriter.ExistingImports(file.name) file.RemainingSource = rewriter.RemainingSource(file.name) @@ -104,9 +112,14 @@ func (m *Plugin) generateSingleFile(data *codegen.Data) error { newResolverTemplate = readResolverTemplate(data.Config.Resolver.ResolverTemplate) } + fileNotice := `// THIS CODE WILL BE UPDATED WITH SCHEMA CHANGES. PREVIOUS IMPLEMENTATION FOR SCHEMA CHANGES WILL BE KEPT IN THE COMMENT SECTION. IMPLEMENTATION FOR UNCHANGED SCHEMA WILL BE KEPT.` + if data.Config.Resolver.PreserveResolver { + fileNotice = `// THIS CODE IS A STARTING POINT ONLY. IT WILL NOT BE UPDATED WITH SCHEMA CHANGES.` + } + return templates.Render(templates.Options{ PackageName: data.Config.Resolver.Package, - FileNotice: `// THIS CODE WILL BE UPDATED WITH SCHEMA CHANGES. PREVIOUS IMPLEMENTATION FOR SCHEMA CHANGES WILL BE KEPT IN THE COMMENT SECTION. 
IMPLEMENTATION FOR UNCHANGED SCHEMA WILL BE KEPT.`, + FileNotice: fileNotice, Filename: data.Config.Resolver.Filename, Data: resolverBuild, Packages: data.Config.Packages, @@ -183,6 +196,11 @@ func (m *Plugin) generatePerSchema(data *codegen.Data) error { } for _, file := range files { + if fileExists(file.name) && + data.Config.Resolver.PreserveResolver { + // file already exists and config says not to update resolver + continue + } resolverBuild := &ResolverBuild{ File: file, PackageName: data.Config.Resolver.Package, @@ -216,7 +234,7 @@ func (m *Plugin) generatePerSchema(data *codegen.Data) error { } } - if _, err := os.Stat(data.Config.Resolver.Filename); errors.Is(err, fs.ErrNotExist) { + if !fileExists(data.Config.Resolver.Filename) { err := templates.Render(templates.Options{ PackageName: data.Config.Resolver.Package, FileNotice: ` @@ -304,3 +322,10 @@ func readResolverTemplate(customResolverTemplate string) string { } return string(contentBytes) } + +func fileExists(fileName string) bool { + if _, err := os.Stat(fileName); err == nil { + return true + } + return false +} diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go index ff499fb6..304edc34 100644 --- a/vendor/github.com/Masterminds/semver/v3/version.go +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -39,9 +39,11 @@ var ( ) // semVerRegex is the regular expression used to parse a semantic version. -const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + - `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +// This is not the official regex from the semver spec. It has been modified to allow for loose handling +// where versions like 2.1 are detected. +const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` + + `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` + + `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?` // Version represents a single semantic version. 
type Version struct { @@ -146,8 +148,8 @@ func NewVersion(v string) (*Version, error) { } sv := &Version{ - metadata: m[8], - pre: m[5], + metadata: m[5], + pre: m[4], original: v, } @@ -158,7 +160,7 @@ func NewVersion(v string) (*Version, error) { } if m[2] != "" { - sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + sv.minor, err = strconv.ParseUint(m[2], 10, 64) if err != nil { return nil, fmt.Errorf("Error parsing version segment: %s", err) } @@ -167,7 +169,7 @@ func NewVersion(v string) (*Version, error) { } if m[3] != "" { - sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + sv.patch, err = strconv.ParseUint(m[3], 10, 64) if err != nil { return nil, fmt.Errorf("Error parsing version segment: %s", err) } @@ -612,7 +614,9 @@ func containsOnly(s string, comp string) bool { func validatePrerelease(p string) error { eparts := strings.Split(p, ".") for _, p := range eparts { - if containsOnly(p, num) { + if p == "" { + return ErrInvalidMetadata + } else if containsOnly(p, num) { if len(p) > 1 && p[0] == '0' { return ErrSegmentStartsZero } @@ -631,7 +635,9 @@ func validatePrerelease(p string) error { func validateMetadata(m string) error { eparts := strings.Split(m, ".") for _, p := range eparts { - if !containsOnly(p, allowed) { + if p == "" { + return ErrInvalidMetadata + } else if !containsOnly(p, allowed) { return ErrInvalidMetadata } } diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go new file mode 100644 index 00000000..0ec4b12c --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go @@ -0,0 +1,62 @@ +package md2man + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +func fmtListFlags(flags blackfriday.ListType) string { + knownFlags := []struct { + name string + flag blackfriday.ListType + }{ + {"ListTypeOrdered", blackfriday.ListTypeOrdered}, + {"ListTypeDefinition", blackfriday.ListTypeDefinition}, + {"ListTypeTerm", blackfriday.ListTypeTerm}, + {"ListItemContainsBlock", blackfriday.ListItemContainsBlock}, + {"ListItemBeginningOfList", blackfriday.ListItemBeginningOfList}, + {"ListItemEndOfList", blackfriday.ListItemEndOfList}, + } + + var f []string + for _, kf := range knownFlags { + if flags&kf.flag != 0 { + f = append(f, kf.name) + flags &^= kf.flag + } + } + if flags != 0 { + f = append(f, fmt.Sprintf("Unknown(%#x)", flags)) + } + return strings.Join(f, "|") +} + +type debugDecorator struct { + blackfriday.Renderer +} + +func depth(node *blackfriday.Node) int { + d := 0 + for n := node.Parent; n != nil; n = n.Parent { + d++ + } + return d +} + +func (d *debugDecorator) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + fmt.Fprintf(os.Stderr, "%s%s %v %v\n", + strings.Repeat(" ", depth(node)), + map[bool]string{true: "+", false: "-"}[entering], + node, + fmtListFlags(node.ListFlags)) + var b strings.Builder + status := d.Renderer.RenderNode(io.MultiWriter(&b, w), node, entering) + if b.Len() > 0 { + fmt.Fprintf(os.Stderr, ">> %q\n", b.String()) + } + return status +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go index 42bf32aa..62d91b77 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -1,16 +1,23 @@ package md2man import ( + "os" + "strconv" + "github.com/russross/blackfriday/v2" ) // 
Render converts a markdown document into a roff formatted document. func Render(doc []byte) []byte { renderer := NewRoffRenderer() + var r blackfriday.Renderer = renderer + if v, _ := strconv.ParseBool(os.Getenv("MD2MAN_DEBUG")); v { + r = &debugDecorator{Renderer: r} + } return blackfriday.Run(doc, []blackfriday.Option{ - blackfriday.WithRenderer(renderer), + blackfriday.WithRenderer(r), blackfriday.WithExtensions(renderer.GetExtensions()), }...) } diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index 8a290f19..9d6c473f 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -14,10 +14,8 @@ import ( // roffRenderer implements the blackfriday.Renderer interface for creating // roff format (manpages) from markdown text type roffRenderer struct { - extensions blackfriday.Extensions listCounters []int firstHeader bool - firstDD bool listDepth int } @@ -43,7 +41,7 @@ const ( quoteTag = "\n.PP\n.RS\n" quoteCloseTag = "\n.RE\n" listTag = "\n.RS\n" - listCloseTag = "\n.RE\n" + listCloseTag = ".RE\n" dtTag = "\n.TP\n" dd2Tag = "\n" tableStart = "\n.TS\nallbox;\n" @@ -56,23 +54,18 @@ const ( // NewRoffRenderer creates a new blackfriday Renderer for generating roff documents // from markdown func NewRoffRenderer() *roffRenderer { // nolint: golint - var extensions blackfriday.Extensions - - extensions |= blackfriday.NoIntraEmphasis - extensions |= blackfriday.Tables - extensions |= blackfriday.FencedCode - extensions |= blackfriday.SpaceHeadings - extensions |= blackfriday.Footnotes - extensions |= blackfriday.Titleblock - extensions |= blackfriday.DefinitionLists - return &roffRenderer{ - extensions: extensions, - } + return &roffRenderer{} } // GetExtensions returns the list of extensions used by this renderer implementation -func (r *roffRenderer) GetExtensions() blackfriday.Extensions { - return r.extensions +func (*roffRenderer) GetExtensions() blackfriday.Extensions { + return blackfriday.NoIntraEmphasis | + blackfriday.Tables | + blackfriday.FencedCode | + blackfriday.SpaceHeadings | + blackfriday.Footnotes | + blackfriday.Titleblock | + blackfriday.DefinitionLists } // RenderHeader handles outputting the header at document start @@ -103,7 +96,23 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering switch node.Type { case blackfriday.Text: - escapeSpecialChars(w, node.Literal) + // Special case: format the NAME section as required for proper whatis parsing. + // Refer to the lexgrog(1) and groff_man(7) manual pages for details. 
+ if node.Parent != nil && + node.Parent.Type == blackfriday.Paragraph && + node.Parent.Prev != nil && + node.Parent.Prev.Type == blackfriday.Heading && + node.Parent.Prev.FirstChild != nil && + bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) { + before, after, found := bytes.Cut(node.Literal, []byte(" - ")) + escapeSpecialChars(w, before) + if found { + out(w, ` \- `) + escapeSpecialChars(w, after) + } + } else { + escapeSpecialChars(w, node.Literal) + } case blackfriday.Softbreak: out(w, crTag) case blackfriday.Hardbreak: @@ -141,14 +150,25 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering case blackfriday.Document: break case blackfriday.Paragraph: - // roff .PP markers break lists - if r.listDepth > 0 { - return blackfriday.GoToNext - } if entering { - out(w, paraTag) + if r.listDepth > 0 { + // roff .PP markers break lists + if node.Prev != nil { // continued paragraph + if node.Prev.Type == blackfriday.List && node.Prev.ListFlags&blackfriday.ListTypeDefinition == 0 { + out(w, ".IP\n") + } else { + out(w, crTag) + } + } + } else if node.Prev != nil && node.Prev.Type == blackfriday.Heading { + out(w, crTag) + } else { + out(w, paraTag) + } } else { - out(w, crTag) + if node.Next == nil || node.Next.Type != blackfriday.List { + out(w, crTag) + } } case blackfriday.BlockQuote: if entering { @@ -211,6 +231,10 @@ func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, enteri func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) { openTag := listTag closeTag := listCloseTag + if (entering && r.listDepth == 0) || (!entering && r.listDepth == 1) { + openTag = crTag + closeTag = "" + } if node.ListFlags&blackfriday.ListTypeDefinition != 0 { // tags for definition lists handled within Item node openTag = "" @@ -239,23 +263,25 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering } else if node.ListFlags&blackfriday.ListTypeTerm != 0 { // DT (definition term): line just before DD (see below). out(w, dtTag) - r.firstDD = true } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { // DD (definition description): line that starts with ": ". // // We have to distinguish between the first DD and the // subsequent ones, as there should be no vertical // whitespace between the DT and the first DD. 
- if r.firstDD { - r.firstDD = false - } else { - out(w, dd2Tag) + if node.Prev != nil && node.Prev.ListFlags&(blackfriday.ListTypeTerm|blackfriday.ListTypeDefinition) == blackfriday.ListTypeDefinition { + if node.Prev.Type == blackfriday.Item && + node.Prev.LastChild != nil && + node.Prev.LastChild.Type == blackfriday.List && + node.Prev.LastChild.ListFlags&blackfriday.ListTypeDefinition == 0 { + out(w, ".IP\n") + } else { + out(w, dd2Tag) + } } } else { out(w, ".IP \\(bu 2\n") } - } else { - out(w, "\n") } } diff --git a/vendor/github.com/datarhei/gosrt/config.go b/vendor/github.com/datarhei/gosrt/config.go index 53122d87..1f687775 100644 --- a/vendor/github.com/datarhei/gosrt/config.go +++ b/vendor/github.com/datarhei/gosrt/config.go @@ -15,7 +15,7 @@ const ( MIN_PAYLOAD_SIZE = MIN_MSS_SIZE - UDP_HEADER_SIZE - SRT_HEADER_SIZE MAX_PAYLOAD_SIZE = MAX_MSS_SIZE - UDP_HEADER_SIZE - SRT_HEADER_SIZE MIN_PASSPHRASE_SIZE = 10 - MAX_PASSPHRASE_SIZE = 79 + MAX_PASSPHRASE_SIZE = 80 MAX_STREAMID_SIZE = 512 SRT_VERSION = 0x010401 ) @@ -169,6 +169,9 @@ type Config struct { // An implementation of the Logger interface Logger Logger + + // if a new IP starts sending data on an existing socket id, allow it + AllowPeerIpChange bool } // DefaultConfig is the default configuration for a SRT connection @@ -209,6 +212,7 @@ var defaultConfig Config = Config{ TooLatePacketDrop: true, TransmissionType: "live", TSBPDMode: true, + AllowPeerIpChange: false, } // DefaultConfig returns the default configuration for Dial and Listen. diff --git a/vendor/github.com/datarhei/gosrt/congestion/live/receive.go b/vendor/github.com/datarhei/gosrt/congestion/live/receive.go index 4999b757..7bcf7f18 100644 --- a/vendor/github.com/datarhei/gosrt/congestion/live/receive.go +++ b/vendor/github.com/datarhei/gosrt/congestion/live/receive.go @@ -265,11 +265,7 @@ func (r *receiver) periodicACK(now uint64) (ok bool, sequenceNumber circular.Num } minPktTsbpdTime, maxPktTsbpdTime := uint64(0), uint64(0) - - ackSequenceNumber := r.lastDeliveredSequenceNumber - - // Find the sequence number up until we have all in a row. - // Where the first gap is (or at the end of the list) is where we can ACK to. + ackSequenceNumber := r.lastACKSequenceNumber e := r.packetList.Front() if e != nil { @@ -277,49 +273,42 @@ func (r *receiver) periodicACK(now uint64) (ok bool, sequenceNumber circular.Num minPktTsbpdTime = p.Header().PktTsbpdTime maxPktTsbpdTime = p.Header().PktTsbpdTime + } - // If there are packets that should be delivered by now, move foward. - if p.Header().PktTsbpdTime <= now { - for e = e.Next(); e != nil; e = e.Next() { - p = e.Value.(packet.Packet) + // Find the sequence number up until we have all in a row. + // Where the first gap is (or at the end of the list) is where we can ACK to. - if p.Header().PktTsbpdTime > now { - break - } - } + for e := r.packetList.Front(); e != nil; e = e.Next() { + p := e.Value.(packet.Packet) - ackSequenceNumber = p.Header().PacketSequenceNumber - maxPktTsbpdTime = p.Header().PktTsbpdTime + // Skip packets that we already ACK'd. + if p.Header().PacketSequenceNumber.Lte(ackSequenceNumber) { + continue + } - if e != nil { - if e = e.Next(); e != nil { - p = e.Value.(packet.Packet) - } - } + // If there are packets that should have been delivered by now, move forward. + if p.Header().PktTsbpdTime <= now { + ackSequenceNumber = p.Header().PacketSequenceNumber + continue } + // Check if the packet is the next in the row. 
if p.Header().PacketSequenceNumber.Equals(ackSequenceNumber.Inc()) { ackSequenceNumber = p.Header().PacketSequenceNumber - - for e = e.Next(); e != nil; e = e.Next() { - p = e.Value.(packet.Packet) - if !p.Header().PacketSequenceNumber.Equals(ackSequenceNumber.Inc()) { - break - } - - ackSequenceNumber = p.Header().PacketSequenceNumber - maxPktTsbpdTime = p.Header().PktTsbpdTime - } + maxPktTsbpdTime = p.Header().PktTsbpdTime + continue } - ok = true - sequenceNumber = ackSequenceNumber.Inc() - - // Keep track of the last ACK's sequence. with this we can faster ignore - // packets that come in that have a lower sequence number. - r.lastACKSequenceNumber = ackSequenceNumber + break } + ok = true + sequenceNumber = ackSequenceNumber.Inc() + + // Keep track of the last ACK's sequence number. With this we can faster ignore + // packets that come in late that have a lower sequence number. + r.lastACKSequenceNumber = ackSequenceNumber + r.lastPeriodicACK = now r.nPackets = 0 @@ -338,13 +327,19 @@ func (r *receiver) periodicNAK(now uint64) (ok bool, from, to circular.Number) { // Send a periodic NAK - ackSequenceNumber := r.lastDeliveredSequenceNumber + ackSequenceNumber := r.lastACKSequenceNumber // Send a NAK only for the first gap. // Alternatively send a NAK for max. X gaps because the size of the NAK packet is limited. for e := r.packetList.Front(); e != nil; e = e.Next() { p := e.Value.(packet.Packet) + // Skip packets that we already ACK'd. + if p.Header().PacketSequenceNumber.Lte(ackSequenceNumber) { + continue + } + + // If this packet is not in sequence, we stop here and report that gap. if !p.Header().PacketSequenceNumber.Equals(ackSequenceNumber.Inc()) { nackSequenceNumber := ackSequenceNumber.Inc() diff --git a/vendor/github.com/datarhei/gosrt/conn_request.go b/vendor/github.com/datarhei/gosrt/conn_request.go index 80576955..210c2d81 100644 --- a/vendor/github.com/datarhei/gosrt/conn_request.go +++ b/vendor/github.com/datarhei/gosrt/conn_request.go @@ -7,6 +7,7 @@ import ( "github.com/datarhei/gosrt/crypto" "github.com/datarhei/gosrt/packet" + "github.com/datarhei/gosrt/rand" ) // ConnRequest is an incoming connection request @@ -206,6 +207,18 @@ func newConnRequest(ln *listener, p packet.Packet) *connRequest { return nil } + + // We only support live congestion control + if cif.HasCongestionCtl && cif.CongestionCtl != "live" { + cif.HandshakeType = packet.HandshakeType(REJ_CONGESTION) + ln.log("handshake:recv:error", func() string { return "only live congestion control is supported" }) + p.MarshalCIF(cif) + ln.log("handshake:send:dump", func() string { return p.Dump() }) + ln.log("handshake:send:cif", func() string { return cif.String() }) + ln.send(p) + + return nil + } } else { cif.HandshakeType = packet.HandshakeType(REJ_ROGUE) ln.log("handshake:recv:error", func() string { return fmt.Sprintf("only HSv4 and HSv5 are supported (got HSv%d)", cif.Version) }) @@ -328,6 +341,23 @@ func (req *connRequest) Reject(reason RejectionReason) { delete(req.ln.connReqs, req.socketId) } +// generateSocketId generates an SRT SocketID that can be used for this connection +func (req *connRequest) generateSocketId() (uint32, error) { + for i := 0; i < 10; i++ { + socketId, err := rand.Uint32() + if err != nil { + return 0, fmt.Errorf("could not generate random socket id") + } + + // check that the socket id is not already in use + if _, found := req.ln.conns[socketId]; !found { + return socketId, nil + } + } + + return 0, fmt.Errorf("could not generate unused socketid") +} + func (req *connRequest) 
Accept() (Conn, error) { if req.crypto != nil && len(req.passphrase) == 0 { req.Reject(REJ_BADSECRET) @@ -342,7 +372,10 @@ func (req *connRequest) Accept() (Conn, error) { } // Create a new socket ID - socketId := uint32(time.Since(req.ln.start).Microseconds()) + socketId, err := req.generateSocketId() + if err != nil { + return nil, fmt.Errorf("could not generate socket id: %w", err) + } // Select the largest TSBPD delay advertised by the caller, but at least 120ms recvTsbpdDelay := uint16(req.config.ReceiverLatency.Milliseconds()) diff --git a/vendor/github.com/datarhei/gosrt/connection.go b/vendor/github.com/datarhei/gosrt/connection.go index ea83ca79..b9decebc 100644 --- a/vendor/github.com/datarhei/gosrt/connection.go +++ b/vendor/github.com/datarhei/gosrt/connection.go @@ -69,6 +69,51 @@ type Conn interface { Version() uint32 } +type rtt struct { + rtt float64 // microseconds + rttVar float64 // microseconds + + lock sync.RWMutex +} + +func (r *rtt) Recalculate(rtt time.Duration) { + // 4.10. Round-Trip Time Estimation + lastRTT := float64(rtt.Microseconds()) + + r.lock.Lock() + defer r.lock.Unlock() + + r.rtt = r.rtt*0.875 + lastRTT*0.125 + r.rttVar = r.rttVar*0.75 + math.Abs(r.rtt-lastRTT)*0.25 +} + +func (r *rtt) RTT() float64 { + r.lock.RLock() + defer r.lock.RUnlock() + + return r.rtt +} + +func (r *rtt) RTTVar() float64 { + r.lock.RLock() + defer r.lock.RUnlock() + + return r.rttVar +} + +func (r *rtt) NAKInterval() float64 { + r.lock.RLock() + defer r.lock.RUnlock() + + // 4.8.2. Packet Retransmission (NAKs) + nakInterval := (r.rtt + 4*r.rttVar) / 2 + if nakInterval < 20000 { + nakInterval = 20000 // 20ms + } + + return nakInterval +} + type connStats struct { headerSize uint64 pktSentACK uint64 @@ -108,19 +153,16 @@ type srtConn struct { config Config - cryptoLock sync.Mutex crypto crypto.Crypto keyBaseEncryption packet.PacketEncryption kmPreAnnounceCountdown uint64 kmRefreshCountdown uint64 kmConfirmed bool + cryptoLock sync.Mutex peerIdleTimeout *time.Timer - rtt float64 // microseconds - rttVar float64 // microseconds - - nakInterval float64 + rtt rtt // microseconds ackLock sync.RWMutex ackNumbers map[uint32]time.Time @@ -232,10 +274,10 @@ func newSRTConn(config srtConnConfig) *srtConn { c.kmRefreshCountdown = c.config.KMRefreshRate // 4.10. Round-Trip Time Estimation - c.rtt = float64((100 * time.Millisecond).Microseconds()) - c.rttVar = float64((50 * time.Millisecond).Microseconds()) - - c.nakInterval = float64((20 * time.Millisecond).Microseconds()) + c.rtt = rtt{ + rtt: float64((100 * time.Millisecond).Microseconds()), + rttVar: float64((50 * time.Millisecond).Microseconds()), + } c.networkQueue = make(chan packet.Packet, 1024) @@ -788,7 +830,7 @@ func (c *srtConn) handleNAK(p packet.Packet) { // handleACKACK updates the RTT and NAK interval for the congestion control. func (c *srtConn) handleACKACK(p packet.Packet) { - c.ackLock.RLock() + c.ackLock.Lock() c.statisticsLock.Lock() c.statistics.pktRecvACKACK++ @@ -814,31 +856,17 @@ func (c *srtConn) handleACKACK(p packet.Packet) { } } - nakInterval := uint64(c.nakInterval) - - c.ackLock.RUnlock() + c.ackLock.Unlock() - c.recv.SetNAKInterval(nakInterval) + c.recv.SetNAKInterval(uint64(c.rtt.NAKInterval())) } // recalculateRTT recalculates the RTT based on a full ACK exchange func (c *srtConn) recalculateRTT(rtt time.Duration) { - // 4.10. Round-Trip Time Estimation - lastRTT := float64(rtt.Microseconds()) - - c.rtt = c.rtt*0.875 + lastRTT*0.125 - c.rttVar = c.rttVar*0.75 + math.Abs(c.rtt-lastRTT)*0.25 - - // 4.8.2. 
Packet Retransmission (NAKs) - nakInterval := (c.rtt + 4*c.rttVar) / 2 - if nakInterval < 20000 { - c.nakInterval = 20000 // 20ms - } else { - c.nakInterval = nakInterval - } + c.rtt.Recalculate(rtt) c.log("connection:rtt", func() string { - return fmt.Sprintf("RTT=%.0fus RTTVar=%.0fus NAKInterval=%.0fms", c.rtt, c.rttVar, c.nakInterval/1000) + return fmt.Sprintf("RTT=%.0fus RTTVar=%.0fus NAKInterval=%.0fms", c.rtt.RTT(), c.rtt.RTTVar(), c.rtt.NAKInterval()/1000) }) } @@ -1219,8 +1247,8 @@ func (c *srtConn) sendACK(seq circular.Number, lite bool) { } else { pps, bps, capacity := c.recv.PacketRate() - cif.RTT = uint32(c.rtt) - cif.RTTVar = uint32(c.rttVar) + cif.RTT = uint32(c.rtt.RTT()) + cif.RTTVar = uint32(c.rtt.RTTVar()) cif.AvailableBufferSize = c.config.FC // TODO: available buffer size (packets) cif.PacketsReceivingRate = uint32(pps) // packets receiving rate (packets/s) cif.EstimatedLinkCapacity = uint32(capacity) // estimated link capacity (packets/s), not relevant for live mode @@ -1488,7 +1516,7 @@ func (c *srtConn) Stats(s *Statistics) { UsPktSendPeriod: send.UsPktSndPeriod, PktFlowWindow: uint64(c.config.FC), PktFlightSize: send.PktFlightSize, - MsRTT: c.rtt / 1000, + MsRTT: c.rtt.RTT() / 1000, MbpsSentRate: send.MbpsEstimatedSentBandwidth, MbpsRecvRate: recv.MbpsEstimatedRecvBandwidth, MbpsLinkCapacity: recv.MbpsEstimatedLinkCapacity, diff --git a/vendor/github.com/datarhei/gosrt/listen.go b/vendor/github.com/datarhei/gosrt/listen.go index 024d10fd..765ad51c 100644 --- a/vendor/github.com/datarhei/gosrt/listen.go +++ b/vendor/github.com/datarhei/gosrt/listen.go @@ -407,6 +407,14 @@ func (ln *listener) reader(ctx context.Context) { break } + if !ln.config.AllowPeerIpChange { + if p.Header().Addr.String() != conn.RemoteAddr().String() { + // ignore the packet, it's not from the expected peer + // https://haivision.github.io/srt-rfc/draft-sharabayko-srt.html#name-security-considerations + break + } + } + conn.push(p) } } diff --git a/vendor/github.com/datarhei/gosrt/packet/packet.go b/vendor/github.com/datarhei/gosrt/packet/packet.go index a03018bd..2b42ff84 100644 --- a/vendor/github.com/datarhei/gosrt/packet/packet.go +++ b/vendor/github.com/datarhei/gosrt/packet/packet.go @@ -33,7 +33,7 @@ const ( CTRLTYPE_SHUTDOWN CtrlType = 0x0005 CTRLTYPE_ACKACK CtrlType = 0x0006 CRTLTYPE_DROPREQ CtrlType = 0x0007 // unimplemented, sender->receiver - CRTLTYPE_PEERERROR CtrlType = 0x0008 // unimplemented, receiver->sender + CRTLTYPE_PEERERROR CtrlType = 0x0008 // unimplemented, receiver->sender (only for file transfers) CTRLTYPE_USER CtrlType = 0x7FFF ) @@ -141,8 +141,8 @@ const ( EXTTYPE_KMRSP CtrlSubType = 4 EXTTYPE_SID CtrlSubType = 5 EXTTYPE_CONGESTION CtrlSubType = 6 - EXTTYPE_FILTER CtrlSubType = 7 - EXTTYPE_GROUP CtrlSubType = 8 + EXTTYPE_FILTER CtrlSubType = 7 // unimplemented + EXTTYPE_GROUP CtrlSubType = 8 // unimplemented ) func (h CtrlSubType) String() string { @@ -484,7 +484,7 @@ type CIFHandshake struct { Version uint32 // A base protocol version number. Currently used values are 4 and 5. Values greater than 5 are reserved for future use. EncryptionField uint16 // Block cipher family and key size. The values of this field are described in Table 2. The default value is AES-128. - ExtensionField uint16 // This field is message specific extension related to Handshake Type field. The value MUST be set to 0 except for the following cases. (1) If the handshake control packet is the INDUCTION message, this field is sent back by the Listener. 
(2) In the case of a CONCLUSION message, this field value should contain a combination of Extension Type values. For more details, see Section 4.3.1. + ExtensionField uint16 // This field is a message specific extension related to Handshake Type field. The value MUST be set to 0 except for the following cases. (1) If the handshake control packet is the INDUCTION message, this field is sent back by the Listener. (2) In the case of a CONCLUSION message, this field value should contain a combination of Extension Type values. For more details, see Section 4.3.1. InitialPacketSequenceNumber circular.Number // The sequence number of the very first data packet to be sent. MaxTransmissionUnitSize uint32 // This value is typically set to 1500, which is the default Maximum Transmission Unit (MTU) size for Ethernet, but can be less. MaxFlowWindowSize uint32 // The value of this field is the maximum number of data packets allowed to be "in flight" (i.e. the number of sent packets for which an ACK control packet has not yet been received). @@ -493,9 +493,10 @@ type CIFHandshake struct { SynCookie uint32 // Randomized value for processing a handshake. The value of this field is specified by the handshake message type. See Section 4.3. PeerIP srtnet.IP // IPv4 or IPv6 address of the packet's sender. The value consists of four 32-bit fields. In the case of IPv4 addresses, fields 2, 3 and 4 are filled with zeroes. - HasHS bool - HasKM bool - HasSID bool + HasHS bool + HasKM bool + HasSID bool + HasCongestionCtl bool // 3.2.1.1. Handshake Extension Message SRTHS *CIFHandshakeExtension @@ -505,6 +506,9 @@ type CIFHandshake struct { // 3.2.1.3. Stream ID Extension Message StreamId string + + // ??? Congestion Control Extension message (handshake.md #### Congestion controller) + CongestionCtl string } func (c CIFHandshake) String() string { @@ -537,6 +541,12 @@ func (c CIFHandshake) String() string { fmt.Fprintf(&b, " streamId : %s\n", c.StreamId) fmt.Fprintf(&b, "--- /SIDExt ---\n") } + + if c.HasCongestionCtl { + fmt.Fprintf(&b, "--- CongestionExt ---\n") + fmt.Fprintf(&b, " congestion : %s\n", c.CongestionCtl) + fmt.Fprintf(&b, "--- /CongestionExt ---\n") + } } fmt.Fprintf(&b, "--- /handshake ---") @@ -599,7 +609,7 @@ func (c *CIFHandshake) Unmarshal(data []byte) error { if extensionType == EXTTYPE_HSREQ || extensionType == EXTTYPE_HSRSP { // 3.2.1.1. Handshake Extension Message if extensionLength != 12 || len(pivot) < extensionLength { - return fmt.Errorf("invalid extension length") + return fmt.Errorf("invalid extension length of %d bytes (%s)", extensionLength, extensionType.String()) } c.HasHS = true @@ -612,7 +622,7 @@ func (c *CIFHandshake) Unmarshal(data []byte) error { } else if extensionType == EXTTYPE_KMREQ || extensionType == EXTTYPE_KMRSP { // 3.2.1.2. Key Material Extension Message if len(pivot) < extensionLength { - return fmt.Errorf("invalid extension length") + return fmt.Errorf("invalid extension length of %d bytes (%s)", extensionLength, extensionType.String()) } c.HasKM = true @@ -638,7 +648,7 @@ func (c *CIFHandshake) Unmarshal(data []byte) error { } else if extensionType == EXTTYPE_SID { // 3.2.1.3. 
Stream ID Extension Message if extensionLength > 512 || len(pivot) < extensionLength { - return fmt.Errorf("invalid extension length") + return fmt.Errorf("invalid extension length of %d bytes (%s)", extensionLength, extensionType.String()) } c.HasSID = true @@ -653,8 +663,30 @@ func (c *CIFHandshake) Unmarshal(data []byte) error { } c.StreamId = strings.TrimRight(b.String(), "\x00") + } else if extensionType == EXTTYPE_CONGESTION { + // ??? Congestion Control Extension message (handshake.md #### Congestion controller) + if extensionLength > 4 || len(pivot) < extensionLength { + return fmt.Errorf("invalid extension length of %d bytes (%s)", extensionLength, extensionType.String()) + } + + c.HasCongestionCtl = true + + var b strings.Builder + + for i := 0; i < extensionLength; i += 4 { + b.WriteByte(pivot[i+3]) + b.WriteByte(pivot[i+2]) + b.WriteByte(pivot[i+1]) + b.WriteByte(pivot[i+0]) + } + + c.CongestionCtl = strings.TrimRight(b.String(), "\x00") + } else if extensionType == EXTTYPE_FILTER || extensionType == EXTTYPE_GROUP { + if len(pivot) < extensionLength { + return fmt.Errorf("invalid extension length of %d bytes (%s)", extensionLength, extensionType.String()) + } } else { - return fmt.Errorf("unimplemented extension (%d)", extensionType) + return fmt.Errorf("unknown extension (%d)", extensionType) } if len(pivot) > extensionLength { @@ -695,6 +727,10 @@ func (c *CIFHandshake) Marshal(w io.Writer) { if c.HasSID { c.ExtensionField = c.ExtensionField | 4 } + + if c.HasCongestionCtl { + c.ExtensionField = c.ExtensionField | 4 + } } else { c.EncryptionField = 0 c.ExtensionField = 2 @@ -773,6 +809,33 @@ func (c *CIFHandshake) Marshal(w io.Writer) { w.Write(buffer[:4]) } } + + if c.HasCongestionCtl && c.CongestionCtl != "live" { + congestion := bytes.NewBufferString(c.CongestionCtl) + + missing := (4 - congestion.Len()%4) + if missing < 4 { + for i := 0; i < missing; i++ { + congestion.WriteByte(0) + } + } + + binary.BigEndian.PutUint16(buffer[0:], EXTTYPE_CONGESTION.Value()) + binary.BigEndian.PutUint16(buffer[2:], uint16(congestion.Len()/4)) + + w.Write(buffer[:4]) + + b := congestion.Bytes() + + for i := 0; i < len(b); i += 4 { + buffer[0] = b[i+3] + buffer[1] = b[i+2] + buffer[2] = b[i+1] + buffer[3] = b[i+0] + + w.Write(buffer[:4]) + } + } } // 3.2.1.1.1. Handshake Extension Message Flags diff --git a/vendor/github.com/datarhei/gosrt/rand/rand.go b/vendor/github.com/datarhei/gosrt/rand/rand.go index 0f354943..ff9be570 100644 --- a/vendor/github.com/datarhei/gosrt/rand/rand.go +++ b/vendor/github.com/datarhei/gosrt/rand/rand.go @@ -12,7 +12,7 @@ func RandomString(length int, charset string) (string, error) { if err != nil { return "", err } - b[i] = charset[int(j)] + b[i] = charset[j] } return string(b), nil diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go index 96040e6c..76973201 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go @@ -23,6 +23,8 @@ var ( Torrent = prefix([]byte("d8:announce")) // PAR1 matches a parquet file. Par1 = prefix([]byte{0x50, 0x41, 0x52, 0x31}) + // CBOR matches a Concise Binary Object Representation https://cbor.io/ + CBOR = prefix([]byte{0xD9, 0xD9, 0xF7}) ) // Java bytecode and Mach-O binaries share the same magic number. 
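The CBOR matcher added above is a plain prefix check on 0xD9 0xD9 0xF7, the CBOR self-described data tag. A minimal sketch of how the detector could be exercised once this lands (illustrative only; the sample bytes and printed expectations are assumptions, not part of this patch):

```go
package main

import (
	"fmt"

	"github.com/gabriel-vasile/mimetype"
)

func main() {
	// 0xD9 0xD9 0xF7 is the self-describe tag the new matcher keys on;
	// the remaining bytes are an arbitrary CBOR payload ({"a": 1}).
	data := []byte{0xD9, 0xD9, 0xF7, 0xA1, 0x61, 0x61, 0x01}

	m := mimetype.Detect(data)
	fmt.Println(m.String(), m.Extension()) // expected: application/cbor .cbor
	fmt.Println(m.Is("application/cbor"))  // expected: true
}
```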
@@ -34,7 +36,7 @@ func classOrMachOFat(in []byte) bool { return false } - return bytes.HasPrefix(in, []byte{0xCA, 0xFE, 0xBA, 0xBE}) + return binary.BigEndian.Uint32(in) == macho.MagicFat } // Class matches a java class file. @@ -44,7 +46,7 @@ func Class(raw []byte, limit uint32) bool { // MachO matches Mach-O binaries format. func MachO(raw []byte, limit uint32) bool { - if classOrMachOFat(raw) && raw[7] < 20 { + if classOrMachOFat(raw) && raw[7] < 0x14 { return true } @@ -156,11 +158,11 @@ func Marc(raw []byte, limit uint32) bool { // the GL transmission Format (glTF). // GLB uses little endian and its header structure is as follows: // -// <-- 12-byte header --> -// | magic | version | length | -// | (uint32) | (uint32) | (uint32) | -// | \x67\x6C\x54\x46 | \x01\x00\x00\x00 | ... | -// | g l T F | 1 | ... | +// <-- 12-byte header --> +// | magic | version | length | +// | (uint32) | (uint32) | (uint32) | +// | \x67\x6C\x54\x46 | \x01\x00\x00\x00 | ... | +// | g l T F | 1 | ... | // // Visit [glTF specification] and [IANA glTF entry] for more details. // @@ -172,14 +174,15 @@ var Glb = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"), // TzIf matches a Time Zone Information Format (TZif) file. // See more: https://tools.ietf.org/id/draft-murchison-tzdist-tzif-00.html#rfc.section.3 // Its header structure is shown below: -// +---------------+---+ -// | magic (4) | <-+-- version (1) -// +---------------+---+---------------------------------------+ -// | [unused - reserved for future use] (15) | -// +---------------+---------------+---------------+-----------+ -// | isutccnt (4) | isstdcnt (4) | leapcnt (4) | -// +---------------+---------------+---------------+ -// | timecnt (4) | typecnt (4) | charcnt (4) | +// +// +---------------+---+ +// | magic (4) | <-+-- version (1) +// +---------------+---+---------------------------------------+ +// | [unused - reserved for future use] (15) | +// +---------------+---------------+---------------+-----------+ +// | isutccnt (4) | isstdcnt (4) | leapcnt (4) | +// +---------------+---------------+---------------+ +// | timecnt (4) | typecnt (4) | charcnt (4) | func TzIf(raw []byte, limit uint32) bool { // File is at least 44 bytes (header size). if len(raw) < 44 { diff --git a/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md b/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md index 042a5aa7..a45a3021 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md +++ b/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md @@ -1,4 +1,4 @@ -## 176 Supported MIME types +## 177 Supported MIME types This file is automatically generated when running tests. Do not edit manually. 
Extension | MIME type | Aliases @@ -79,7 +79,7 @@ Extension | MIME type | Aliases **.avif** | image/avif | - **.3gp** | video/3gpp | video/3gp, audio/3gpp **.3g2** | video/3gpp2 | video/3g2, audio/3gpp2 -**.mp4** | audio/mp4 | audio/x-m4a, audio/x-mp4a +**.mp4** | audio/mp4 | audio/x-mp4a **.mqv** | video/quicktime | - **.m4a** | audio/x-m4a | - **.m4v** | video/x-m4v | - @@ -118,6 +118,7 @@ Extension | MIME type | Aliases **.mobi** | application/x-mobipocket-ebook | - **.lit** | application/x-ms-reader | - **.bpg** | image/bpg | - +**.cbor** | application/cbor | - **.sqlite** | application/vnd.sqlite3 | application/x-sqlite3 **.dwg** | image/vnd.dwg | image/x-dwg, application/acad, application/x-acad, application/autocad_dwg, application/dwg, application/x-dwg, application/x-autocad, drawing/dwg **.nes** | application/vnd.nintendo.snes.rom | - @@ -162,7 +163,7 @@ Extension | MIME type | Aliases **.xfdf** | application/vnd.adobe.xfdf | - **.owl** | application/owl+xml | - **.php** | text/x-php | - -**.js** | application/javascript | application/x-javascript, text/javascript +**.js** | text/javascript | application/x-javascript, application/javascript **.lua** | text/x-lua | - **.pl** | text/x-perl | - **.py** | text/x-python | text/x-script.python, application/x-python diff --git a/vendor/github.com/gabriel-vasile/mimetype/tree.go b/vendor/github.com/gabriel-vasile/mimetype/tree.go index aa7c8cdb..771e302b 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/tree.go +++ b/vendor/github.com/gabriel-vasile/mimetype/tree.go @@ -21,7 +21,7 @@ var root = newMIME("application/octet-stream", "", jpm, jxs, gif, webp, exe, elf, ar, tar, xar, bz2, fits, tiff, bmp, ico, mp3, flac, midi, ape, musePack, amr, wav, aiff, au, mpeg, quickTime, mp4, webM, avi, flv, mkv, asf, aac, voc, m3u, rmvb, gzip, class, swf, crx, ttf, woff, - woff2, otf, ttc, eot, wasm, shx, dbf, dcm, rar, djvu, mobi, lit, bpg, + woff2, otf, ttc, eot, wasm, shx, dbf, dcm, rar, djvu, mobi, lit, bpg, cbor, sqlite3, dwg, nes, lnk, macho, qcp, icns, hdr, mrc, mdb, accdb, zstd, cab, rpm, xz, lzip, torrent, cpio, tzif, xcf, pat, gbr, glb, cabIS, jxr, parquet, // Keep text last because it is the slowest check. @@ -87,8 +87,8 @@ var ( html = newMIME("text/html", ".html", magic.HTML) php = newMIME("text/x-php", ".php", magic.Php) rtf = newMIME("text/rtf", ".rtf", magic.Rtf).alias("application/rtf") - js = newMIME("application/javascript", ".js", magic.Js). - alias("application/x-javascript", "text/javascript") + js = newMIME("text/javascript", ".js", magic.Js). + alias("application/x-javascript", "application/javascript") srt = newMIME("application/x-subrip", ".srt", magic.Srt). alias("application/x-srt", "text/x-srt") vtt = newMIME("text/vtt", ".vtt", magic.Vtt) @@ -156,7 +156,7 @@ var ( aac = newMIME("audio/aac", ".aac", magic.AAC) voc = newMIME("audio/x-unknown", ".voc", magic.Voc) aMp4 = newMIME("audio/mp4", ".mp4", magic.AMp4). - alias("audio/x-m4a", "audio/x-mp4a") + alias("audio/x-mp4a") m4a = newMIME("audio/x-m4a", ".m4a", magic.M4a) m3u = newMIME("application/vnd.apple.mpegurl", ".m3u", magic.M3u). alias("audio/mpegurl") @@ -261,4 +261,5 @@ var ( jxr = newMIME("image/jxr", ".jxr", magic.Jxr).alias("image/vnd.ms-photo") parquet = newMIME("application/vnd.apache.parquet", ".parquet", magic.Par1). 
alias("application/x-parquet") + cbor = newMIME("application/cbor", ".cbor", magic.CBOR) ) diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index ddd65b07..b0ac30d8 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -1,7 +1,7 @@ Package validator ================= [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-10.22.1-green.svg) +![Project status](https://img.shields.io/badge/version-10.23.0-green.svg) [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go index d1a3656a..2f66c183 100644 --- a/vendor/github.com/go-playground/validator/v10/baked_in.go +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -205,6 +205,7 @@ var ( "fqdn": isFQDN, "unique": isUnique, "oneof": isOneOf, + "oneofci": isOneOfCI, "html": isHTML, "html_encoded": isHTMLEncoded, "url_encoded": isURLEncoded, @@ -213,6 +214,7 @@ var ( "json": isJSON, "jwt": isJWT, "hostname_port": isHostnamePort, + "port": isPort, "lowercase": isLowercase, "uppercase": isUppercase, "datetime": isDatetime, @@ -299,6 +301,23 @@ func isOneOf(fl FieldLevel) bool { return false } +// isOneOfCI is the validation function for validating if the current field's value is one of the provided string values (case insensitive). +func isOneOfCI(fl FieldLevel) bool { + vals := parseOneOfParam2(fl.Param()) + field := fl.Field() + + if field.Kind() != reflect.String { + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + v := field.String() + for _, val := range vals { + if strings.EqualFold(val, v) { + return true + } + } + return false +} + // isUnique is the validation function for validating if each array|slice|map value is unique func isUnique(fl FieldLevel) bool { field := fl.Field() @@ -2711,6 +2730,13 @@ func isHostnamePort(fl FieldLevel) bool { return true } +// IsPort validates if the current field's value represents a valid port +func isPort(fl FieldLevel) bool { + val := fl.Field().Uint() + + return val >= 1 && val <= 65535 +} + // isLowercase is the validation function for validating if the current field's value is a lowercase string. func isLowercase(fl FieldLevel) bool { field := fl.Field() diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go index 90a8ade6..c9b1616e 100644 --- a/vendor/github.com/go-playground/validator/v10/doc.go +++ b/vendor/github.com/go-playground/validator/v10/doc.go @@ -489,12 +489,19 @@ For strings, ints, and uints, oneof will ensure that the value is one of the values in the parameter. The parameter should be a list of values separated by whitespace. Values may be strings or numbers. 
To match strings with spaces in them, include -the target string between single quotes. +the target string between single quotes. Kind of like an 'enum'. Usage: oneof=red green oneof='red green' 'blue yellow' oneof=5 7 9 +# One Of Case Insensitive + +Works the same as oneof but is case insensitive and therefore only accepts strings. + + Usage: oneofci=red green + oneofci='red green' 'blue yellow' + # Greater Than For numbers, this will ensure that the value is greater than the diff --git a/vendor/github.com/go-playground/validator/v10/regexes.go b/vendor/github.com/go-playground/validator/v10/regexes.go index 7e1dd5a0..871cf7df 100644 --- a/vendor/github.com/go-playground/validator/v10/regexes.go +++ b/vendor/github.com/go-playground/validator/v10/regexes.go @@ -73,7 +73,7 @@ const ( cveRegexString = `^CVE-(1999|2\d{3})-(0[^0]\d{2}|0\d[^0]\d{1}|0\d{2}[^0]|[1-9]{1}\d{3,})$` // CVE Format Id https://cve.mitre.org/cve/identifiers/syntaxchange.html mongodbIdRegexString = "^[a-f\\d]{24}$" mongodbConnStringRegexString = "^mongodb(\\+srv)?:\\/\\/(([a-zA-Z\\d]+):([a-zA-Z\\d$:\\/?#\\[\\]@]+)@)?(([a-z\\d.-]+)(:[\\d]+)?)((,(([a-z\\d.-]+)(:(\\d+))?))*)?(\\/[a-zA-Z-_]{1,64})?(\\?(([a-zA-Z]+)=([a-zA-Z\\d]+))(&(([a-zA-Z\\d]+)=([a-zA-Z\\d]+))?)*)?$" - cronRegexString = `(@(annually|yearly|monthly|weekly|daily|hourly|reboot))|(@every (\d+(ns|us|µs|ms|s|m|h))+)|((((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|\*) ?){5,7})` + cronRegexString = `(@(annually|yearly|monthly|weekly|daily|hourly|reboot))|(@every (\d+(ns|us|µs|ms|s|m|h))+)|((((\d+,)+\d+|((\*|\d+)(\/|-)\d+)|\d+|\*) ?){5,7})` spicedbIDRegexString = `^(([a-zA-Z0-9/_|\-=+]{1,})|\*)$` spicedbPermissionRegexString = "^([a-z][a-z0-9_]{1,62}[a-z0-9])?$" spicedbTypeRegexString = "^([a-z][a-z0-9_]{1,61}[a-z0-9]/)?[a-z][a-z0-9_]{1,62}[a-z0-9]$" diff --git a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig new file mode 100644 index 00000000..1f664d13 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig @@ -0,0 +1,18 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab + +[{Makefile,*.mk}] +indent_style = tab + +[*.nix] +indent_size = 2 diff --git a/vendor/github.com/go-viper/mapstructure/v2/.envrc b/vendor/github.com/go-viper/mapstructure/v2/.envrc new file mode 100644 index 00000000..2e0f9f5f --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.envrc @@ -0,0 +1,4 @@ +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" +fi +use flake . 
--impure diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore new file mode 100644 index 00000000..470e7ca2 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.gitignore @@ -0,0 +1,6 @@ +/.devenv/ +/.direnv/ +/.pre-commit-config.yaml +/bin/ +/build/ +/var/ diff --git a/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml new file mode 100644 index 00000000..763143aa --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml @@ -0,0 +1,23 @@ +run: + timeout: 5m + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/go-viper/mapstructure) + golint: + min-confidence: 0 + goimports: + local-prefixes: github.com/go-viper/maptstructure + +linters: + disable-all: true + enable: + - gci + - gofmt + - gofumpt + - goimports + - staticcheck + # - stylecheck diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md similarity index 92% rename from vendor/github.com/mitchellh/mapstructure/CHANGELOG.md rename to vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md index c7582349..afd44e5f 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md @@ -1,3 +1,11 @@ +> [!WARNING] +> As of v2 of this library, change log can be found in GitHub releases. + +## 1.5.1 + +* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282] +* Fix map of slices not decoding properly in certain cases. [GH-266] + ## 1.5.0 * New option `IgnoreUntaggedFields` to ignore decoding to any fields diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/go-viper/mapstructure/v2/LICENSE similarity index 100% rename from vendor/github.com/mitchellh/mapstructure/LICENSE rename to vendor/github.com/go-viper/mapstructure/v2/LICENSE diff --git a/vendor/github.com/go-viper/mapstructure/v2/README.md b/vendor/github.com/go-viper/mapstructure/v2/README.md new file mode 100644 index 00000000..dd5ec69d --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/README.md @@ -0,0 +1,80 @@ +# mapstructure + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?branch=main&style=flat-square)](https://github.com/go-viper/mapstructure/actions?query=workflow%3ACI) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.18-61CFDD.svg?style=flat-square) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. 
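A minimal sketch of the map-to-struct decode the README describes, using the package's `Decode` function with the new v2 import path (the `Person` struct and the sample values are illustrative, echoing the JSON example further down; they are not part of this patch):

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

// Person is a hypothetical target type; field mapping is driven by mapstructure tags.
type Person struct {
	Type string `mapstructure:"type"`
	Name string `mapstructure:"name"`
}

func main() {
	// A generic map such as you might get from decoding JSON whose shape
	// is not known until part of it has been read.
	input := map[string]interface{}{
		"type": "person",
		"name": "Mitchell",
	}

	var p Person
	if err := mapstructure.Decode(input, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Type:person Name:Mitchell}
}
```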
+ +## Installation + +```shell +go get github.com/go-viper/mapstructure/v2 +``` + +## Migrating from `github.com/mitchellh/mapstructure` + +[@mitchehllh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This is a repository achieved the "blessed fork" status. + +You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`. +The API is the same, so you don't need to change anything else. + +Here is a script that can help you with the migration: + +```shell +sed -i 's/github.com\/mitchellh\/mapstructure/github.com\/go-viper\/mapstructure\/v2/g' $(find . -type f -name '*.go') +``` + +If you need more time to migrate your code, that is absolutely fine. + +Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` feature until you are ready to migrate: + +```shell +replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0 +``` + +## Usage & Example + +For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. + +## Credits + +Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh). +This is a maintained fork of the original library. + +Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349). + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go new file mode 100644 index 00000000..1f3c69d4 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go @@ -0,0 +1,630 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "net/netip" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. 
+ potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// cachedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into a closure to be used directly +// if the type fails to convert we return a closure always erroring to keep the previous behaviour +func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from.Type(), to.Type(), from.Interface()) + } + case DecodeHookFuncKind: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from.Kind(), to.Kind(), from.Interface()) + } + case DecodeHookFuncValue: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from, to) + } + default: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return nil, errors.New("invalid decode hook signature") + } + } +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value, +) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(fs)) + for _, f := range fs { + cached = append(cached, cachedDecodeHook(f)) + } + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + data := f.Interface() + + newFrom := f + for _, c := range cached { + data, err = c(newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(ff)) + for _, f := range ff { + cached = append(cached, cachedDecodeHook(f)) + } + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, c := range cached { + out, err = c(a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. 
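ComposeDecodeHookFunc above chains hooks so each one receives the previous hook's output, while OrComposeDecodeHookFunc stops at the first hook that succeeds. A rough sketch of wiring a composed hook into a DecoderConfig; the upperHook helper and the Config type are hypothetical, only the mapstructure identifiers come from this package:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
	"time"

	"github.com/go-viper/mapstructure/v2"
)

type Config struct {
	Name    string
	Timeout time.Duration
	Tags    []string
}

func main() {
	// upperHook is a hypothetical DecodeHookFuncType: it only touches
	// string-to-string conversions and leaves everything else untouched.
	upperHook := func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
		if f.Kind() != reflect.String || t.Kind() != reflect.String {
			return data, nil
		}
		return strings.ToUpper(data.(string)), nil
	}

	var cfg Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &cfg,
		// Hooks run in order; each sees the output of the previous one.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			upperHook,
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
	})
	if err != nil {
		panic(err)
	}

	err = dec.Decode(map[string]interface{}{
		"name":    "core",
		"timeout": "5s",
		"tags":    "a,b,c",
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Name:CORE Timeout:5s Tags:[a b c]}
}
```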
+func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.SliceOf(f) { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToURLHookFunc returns a DecodeHookFunc that converts +// strings to *url.URL. +func StringToURLHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(&url.URL{}) { + return data, nil + } + + // Convert it by parsing + return url.Parse(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
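The StringTo*HookFunc helpers above all follow the same shape: check the source and target types, convert only when both match, otherwise pass the data through. A small sketch combining two of them; the Listener type and its fields are assumptions for illustration:

```go
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/go-viper/mapstructure/v2"
)

type Listener struct {
	Addr  net.IP
	Since time.Time
}

func main() {
	var l Listener
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &l,
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToIPHookFunc(),
			mapstructure.StringToTimeHookFunc(time.RFC3339),
		),
	})
	if err != nil {
		panic(err)
	}

	err = dec.Decode(map[string]interface{}{
		"addr":  "127.0.0.1",
		"since": "2024-01-01T00:00:00Z",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(l.Addr, l.Since.Year()) // 127.0.0.1 2024
}
```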
+func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}, +) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + str, ok := data.(string) + if !ok { + str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String() + } + if err := unmarshaller.UnmarshalText([]byte(str)); err != nil { + return nil, err + } + return result, nil + } +} + +// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts +// strings to netip.Addr. +func StringToNetIPAddrHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.Addr{}) { + return data, nil + } + + // Convert it by parsing + return netip.ParseAddr(data.(string)) + } +} + +// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts +// strings to netip.AddrPort. +func StringToNetIPAddrPortHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.AddrPort{}) { + return data, nil + } + + // Convert it by parsing + return netip.ParseAddrPort(data.(string)) + } +} + +// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts +// strings to basic types. 
+// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128 +func StringToBasicTypeHookFunc() DecodeHookFunc { + return ComposeDecodeHookFunc( + StringToInt8HookFunc(), + StringToUint8HookFunc(), + StringToInt16HookFunc(), + StringToUint16HookFunc(), + StringToInt32HookFunc(), + StringToUint32HookFunc(), + StringToInt64HookFunc(), + StringToUint64HookFunc(), + StringToIntHookFunc(), + StringToUintHookFunc(), + StringToFloat32HookFunc(), + StringToFloat64HookFunc(), + StringToBoolHookFunc(), + // byte and rune are aliases for uint8 and int32 respectively + // StringToByteHookFunc(), + // StringToRuneHookFunc(), + StringToComplex64HookFunc(), + StringToComplex128HookFunc(), + ) +} + +// StringToInt8HookFunc returns a DecodeHookFunc that converts +// strings to int8. +func StringToInt8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int8 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 8) + return int8(i64), err + } +} + +// StringToUint8HookFunc returns a DecodeHookFunc that converts +// strings to uint8. +func StringToUint8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 8) + return uint8(u64), err + } +} + +// StringToInt16HookFunc returns a DecodeHookFunc that converts +// strings to int16. +func StringToInt16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int16 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 16) + return int16(i64), err + } +} + +// StringToUint16HookFunc returns a DecodeHookFunc that converts +// strings to uint16. +func StringToUint16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 16) + return uint16(u64), err + } +} + +// StringToInt32HookFunc returns a DecodeHookFunc that converts +// strings to int32. +func StringToInt32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int32 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 32) + return int32(i64), err + } +} + +// StringToUint32HookFunc returns a DecodeHookFunc that converts +// strings to uint32. +func StringToUint32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 32) + return uint32(u64), err + } +} + +// StringToInt64HookFunc returns a DecodeHookFunc that converts +// strings to int64. 
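StringToBasicTypeHookFunc, new in this release, simply composes the per-type hooks above (minus the byte and rune aliases). A hedged sketch of decoding string-only input, such as environment variables, into typed fields; the Limits struct is illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Limits struct {
	MaxConn uint32
	Ratio   float64
	Debug   bool
}

func main() {
	// Everything arrives as a string; the hook parses each value into
	// whatever basic type the target field declares.
	input := map[string]interface{}{
		"maxconn": "1024",
		"ratio":   "0.75",
		"debug":   "true",
	}

	var l Limits
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:     &l,
		DecodeHook: mapstructure.StringToBasicTypeHookFunc(),
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", l) // {MaxConn:1024 Ratio:0.75 Debug:true}
}
```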
+func StringToInt64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseInt(data.(string), 0, 64) + } +} + +// StringToUint64HookFunc returns a DecodeHookFunc that converts +// strings to uint64. +func StringToUint64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseUint(data.(string), 0, 64) + } +} + +// StringToIntHookFunc returns a DecodeHookFunc that converts +// strings to int. +func StringToIntHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 0) + return int(i64), err + } +} + +// StringToUintHookFunc returns a DecodeHookFunc that converts +// strings to uint. +func StringToUintHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 0) + return uint(u64), err + } +} + +// StringToFloat32HookFunc returns a DecodeHookFunc that converts +// strings to float32. +func StringToFloat32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float32 { + return data, nil + } + + // Convert it by parsing + f64, err := strconv.ParseFloat(data.(string), 32) + return float32(f64), err + } +} + +// StringToFloat64HookFunc returns a DecodeHookFunc that converts +// strings to float64. +func StringToFloat64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseFloat(data.(string), 64) + } +} + +// StringToBoolHookFunc returns a DecodeHookFunc that converts +// strings to bool. +func StringToBoolHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Bool { + return data, nil + } + + // Convert it by parsing + return strconv.ParseBool(data.(string)) + } +} + +// StringToByteHookFunc returns a DecodeHookFunc that converts +// strings to byte. +func StringToByteHookFunc() DecodeHookFunc { + return StringToUint8HookFunc() +} + +// StringToRuneHookFunc returns a DecodeHookFunc that converts +// strings to rune. +func StringToRuneHookFunc() DecodeHookFunc { + return StringToInt32HookFunc() +} + +// StringToComplex64HookFunc returns a DecodeHookFunc that converts +// strings to complex64. 
+func StringToComplex64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 { + return data, nil + } + + // Convert it by parsing + c128, err := strconv.ParseComplex(data.(string), 64) + return complex64(c128), err + } +} + +// StringToComplex128HookFunc returns a DecodeHookFunc that converts +// strings to complex128. +func StringToComplex128HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseComplex(data.(string), 128) + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock new file mode 100644 index 00000000..4bea8154 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.lock @@ -0,0 +1,472 @@ +{ + "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, + "devenv": { + "inputs": { + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": "nixpkgs_2", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1717245169, + "narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=", + "owner": "cachix", + "repo": "devenv", + "rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], + "nix": "nix", + "nixpkgs": "nixpkgs", + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "owner": "cachix", + "repo": "devenv", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "python-rewrite", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1717285511, + "narHash": 
"sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, + "nix_2": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression_2" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "lastModified": 1717284937, + "narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=", + "type": "tarball", + "url": 
"https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-regression_2": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1713361204, + "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1717112898, + "narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils_2", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1713775815, + "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs_3" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": 
"da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix b/vendor/github.com/go-viper/mapstructure/v2/flake.nix new file mode 100644 index 00000000..4ed0f533 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.nix @@ -0,0 +1,39 @@ +{ + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + flake-parts.url = "github:hercules-ci/flake-parts"; + devenv.url = "github:cachix/devenv"; + }; + + outputs = inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ + inputs.devenv.flakeModule + ]; + + systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; + + perSystem = { config, self', inputs', pkgs, system, ... }: rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + }; + + pre-commit.hooks = { + nixpkgs-fmt.enable = true; + }; + + packages = with pkgs; [ + golangci-lint + ]; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; + + ci = devenv.shells.default; + }; + }; + }; +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go new file mode 100644 index 00000000..d1c15e47 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go @@ -0,0 +1,11 @@ +package errors + +import "errors" + +func New(text string) error { + return errors.New(text) +} + +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go new file mode 100644 index 00000000..d74e3a0b --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go @@ -0,0 +1,9 @@ +//go:build go1.20 + +package errors + +import "errors" + +func Join(errs ...error) error { + return errors.Join(errs...) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go new file mode 100644 index 00000000..700b4022 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go @@ -0,0 +1,61 @@ +//go:build !go1.20 + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if every value in errs is nil. +// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +// +// A non-nil error returned by Join implements the Unwrap() []error method. 
+func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + // Since Join returns nil if every value in errs is nil, + // e.errs cannot be empty. + if len(e.errs) == 1 { + return e.errs[0].Error() + } + + b := []byte(e.errs[0].Error()) + for _, err := range e.errs[1:] { + b = append(b, '\n') + b = append(b, err.Error()...) + } + // At this point, b has at least one byte '\n'. + // return unsafe.String(&b[0], len(b)) + return string(b) +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go similarity index 87% rename from vendor/github.com/mitchellh/mapstructure/mapstructure.go rename to vendor/github.com/go-viper/mapstructure/v2/mapstructure.go index 1efb22ac..e77e63ba 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go @@ -9,84 +9,84 @@ // // The simplest function to start with is Decode. // -// Field Tags +// # Field Tags // // When decoding to a struct, mapstructure will use the field name by // default to perform the mapping. For example, if a struct has a field // "Username" then mapstructure will look for a key in the source value // of "username" (case insensitive). // -// type User struct { -// Username string -// } +// type User struct { +// Username string +// } // // You can change the behavior of mapstructure by using struct tags. // The default struct tag that mapstructure looks for is "mapstructure" // but you can customize it using DecoderConfig. // -// Renaming Fields +// # Renaming Fields // // To rename the key that mapstructure looks for, use the "mapstructure" // tag and set a value directly. For example, to change the "username" example // above to "user": // -// type User struct { -// Username string `mapstructure:"user"` -// } +// type User struct { +// Username string `mapstructure:"user"` +// } // -// Embedded Structs and Squashing +// # Embedded Structs and Squashing // // Embedded structs are treated as if they're another field with that name. // By default, the two structs below are equivalent when decoding with // mapstructure: // -// type Person struct { -// Name string -// } +// type Person struct { +// Name string +// } // -// type Friend struct { -// Person -// } +// type Friend struct { +// Person +// } // -// type Friend struct { -// Person Person -// } +// type Friend struct { +// Person Person +// } // // This would require an input that looks like below: // -// map[string]interface{}{ -// "person": map[string]interface{}{"name": "alice"}, -// } +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } // // If your "person" value is NOT nested, then you can append ",squash" to // your tag value and mapstructure will treat it as if the embedded struct // were part of the struct directly. 
Example: // -// type Friend struct { -// Person `mapstructure:",squash"` -// } +// type Friend struct { +// Person `mapstructure:",squash"` +// } // // Now the following input would be accepted: // -// map[string]interface{}{ -// "name": "alice", -// } +// map[string]interface{}{ +// "name": "alice", +// } // // When decoding from a struct to a map, the squash tag squashes the struct // fields into a single map. Using the example structs from above: // -// Friend{Person: Person{Name: "alice"}} +// Friend{Person: Person{Name: "alice"}} // // Will be decoded into a map: // -// map[string]interface{}{ -// "name": "alice", -// } +// map[string]interface{}{ +// "name": "alice", +// } // // DecoderConfig has a field that changes the behavior of mapstructure // to always squash embedded structs. // -// Remainder Values +// # Remainder Values // // If there are any unmapped keys in the source value, mapstructure by // default will silently ignore them. You can error by setting ErrorUnused @@ -98,20 +98,20 @@ // probably be a "map[string]interface{}" or "map[interface{}]interface{}". // See example below: // -// type Friend struct { -// Name string -// Other map[string]interface{} `mapstructure:",remain"` -// } +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } // // Given the input below, Other would be populated with the other // values that weren't used (everything but "name"): // -// map[string]interface{}{ -// "name": "bob", -// "address": "123 Maple St.", -// } +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } // -// Omit Empty Values +// # Omit Empty Values // // When decoding from a struct to any other value, you may use the // ",omitempty" suffix on your tag to omit that value if it equates to @@ -122,37 +122,37 @@ // field value is zero and a numeric type, the field is empty, and it won't // be encoded into the destination type. // -// type Source struct { -// Age int `mapstructure:",omitempty"` -// } +// type Source struct { +// Age int `mapstructure:",omitempty"` +// } // -// Unexported fields +// # Unexported fields // // Since unexported (private) struct fields cannot be set outside the package // where they are defined, the decoder will simply skip them. // // For this output type definition: // -// type Exported struct { -// private string // this unexported field will be skipped -// Public string -// } +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } // // Using this map as input: // -// map[string]interface{}{ -// "private": "I will be ignored", -// "Public": "I made it through!", -// } +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } // // The following struct will be decoded: // -// type Exported struct { -// private: "" // field is left with an empty string (zero value) -// Public: "I made it through!" -// } +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } // -// Other Configuration +// # Other Configuration // // mapstructure is highly configurable. See the DecoderConfig struct // for other features and options that are supported. 
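The squash and remain tag options documented above can be combined on one struct; a short sketch, with illustrative types, of what that documented behaviour looks like in practice:

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Person struct {
	Name string
}

type Friend struct {
	// Promote Person's fields to the top level of the input map.
	Person `mapstructure:",squash"`
	// Collect every key that did not map to a field.
	Other map[string]interface{} `mapstructure:",remain"`
}

func main() {
	input := map[string]interface{}{
		"name":    "alice",
		"address": "123 Maple St.",
	}

	var f Friend
	if err := mapstructure.Decode(input, &f); err != nil {
		panic(err)
	}
	fmt.Println(f.Name)  // alice
	fmt.Println(f.Other) // map[address:123 Maple St.]
}
```

Note that the literal option name "squash" is now configurable via the new DecoderConfig.SquashTagOption field introduced in this update, and the new DecodeNil option lets a DecodeHook run even when the input is nil (for example to inject defaults).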
@@ -160,12 +160,13 @@ package mapstructure import ( "encoding/json" - "errors" "fmt" "reflect" "sort" "strconv" "strings" + + "github.com/go-viper/mapstructure/v2/internal/errors" ) // DecodeHookFunc is the callback function that can be used for @@ -265,6 +266,10 @@ type DecoderConfig struct { // defaults to "mapstructure" TagName string + // The option of the value in the tag that indicates a field should + // be squashed. This defaults to "squash". + SquashTagOption string + // IgnoreUntaggedFields ignores all struct fields without explicit // TagName, comparable to `mapstructure:"-"` as default behaviour. IgnoreUntaggedFields bool @@ -273,6 +278,10 @@ type DecoderConfig struct { // field name or tag. Defaults to `strings.EqualFold`. This can be used // to implement case-sensitive tag values, support snake casing, etc. MatchName func(mapKey, fieldName string) bool + + // DecodeNil, if set to true, will cause the DecodeHook (if present) to run + // even if the input is nil. This can be used to provide default values. + DecodeNil bool } // A Decoder takes a raw interface value and turns it into structured @@ -282,7 +291,8 @@ type DecoderConfig struct { // structure. The top-level Decode method is just a convenience that sets // up the most basic Decoder. type Decoder struct { - config *DecoderConfig + config *DecoderConfig + cachedDecodeHook func(from reflect.Value, to reflect.Value) (interface{}, error) } // Metadata contains information about decoding a structure that @@ -400,6 +410,10 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { config.TagName = "mapstructure" } + if config.SquashTagOption == "" { + config.SquashTagOption = "squash" + } + if config.MatchName == nil { config.MatchName = strings.EqualFold } @@ -407,6 +421,9 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { result := &Decoder{ config: config, } + if config.DecodeHook != nil { + result.cachedDecodeHook = cachedDecodeHook(config.DecodeHook) + } return result, nil } @@ -414,22 +431,37 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { // Decode decodes the given raw interface to the target pointer specified // by the configuration. func (d *Decoder) Decode(input interface{}) error { - return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) + err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) + + // Retain some of the original behavior when multiple errors ocurr + var joinedErr interface{ Unwrap() []error } + if errors.As(err, &joinedErr) { + return fmt.Errorf("decoding failed due to the following error(s):\n\n%w", err) + } + + return err +} + +// isNil returns true if the input is nil or a typed nil pointer. +func isNil(input interface{}) bool { + if input == nil { + return true + } + val := reflect.ValueOf(input) + return val.Kind() == reflect.Ptr && val.IsNil() } // Decodes an unknown data type into a specific reflection value. func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { - var inputVal reflect.Value - if input != nil { - inputVal = reflect.ValueOf(input) - - // We need to check here if input is a typed nil. Typed nils won't - // match the "input == nil" below so we check that here. - if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { - input = nil - } + var ( + inputVal = reflect.ValueOf(input) + outputKind = getKind(outVal) + decodeNil = d.config.DecodeNil && d.cachedDecodeHook != nil + ) + if isNil(input) { + // Typed nils won't match the "input == nil" below, so reset input. 
+ input = nil } - if input == nil { // If the data is nil, then we don't set anything, unless ZeroFields is set // to true. @@ -440,30 +472,46 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) } } - return nil + if !decodeNil { + return nil + } } - if !inputVal.IsValid() { - // If the input value is invalid, then we just set the value - // to be the zero value. - outVal.Set(reflect.Zero(outVal.Type())) - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + if !decodeNil { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + // Hooks need a valid inputVal, so reset it to zero value of outVal type. + switch outputKind { + case reflect.Struct, reflect.Map: + var mapVal map[string]interface{} + inputVal = reflect.ValueOf(mapVal) // create nil map pointer + case reflect.Slice, reflect.Array: + var sliceVal []interface{} + inputVal = reflect.ValueOf(sliceVal) // create nil slice pointer + default: + inputVal = reflect.Zero(outVal.Type()) } - return nil } - if d.config.DecodeHook != nil { + if d.cachedDecodeHook != nil { // We have a DecodeHook, so let's pre-process the input. var err error - input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) + input, err = d.cachedDecodeHook(inputVal, outVal) if err != nil { - return fmt.Errorf("error decoding '%s': %s", name, err) + return fmt.Errorf("error decoding '%s': %w", name, err) } } + if isNil(input) { + return nil + } var err error - outputKind := getKind(outVal) addMetaKey := true switch outputKind { case reflect.Bool: @@ -478,6 +526,8 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e err = d.decodeUint(name, input, outVal) case reflect.Float32: err = d.decodeFloat(name, input, outVal) + case reflect.Complex64: + err = d.decodeComplex(name, input, outVal) case reflect.Struct: err = d.decodeStruct(name, input, outVal) case reflect.Map: @@ -742,8 +792,8 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e } default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) + "'%s' expected type '%s', got unconvertible type '%#v', value: '%#v'", + name, val, dataVal, data) } return nil @@ -796,6 +846,22 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) return nil } +func (d *Decoder) decodeComplex(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Complex64: + val.SetComplex(dataVal.Complex()) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { valType := val.Type() valKeyType := valType.Key() @@ -811,8 +877,14 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er valMap = reflect.MakeMap(mapType) } + dataVal := reflect.ValueOf(data) + + // Resolve any levels of indirection + for dataVal.Kind() == reflect.Pointer { + dataVal = reflect.Indirect(dataVal) + 
} + // Check input type and based on the input type jump to the proper func - dataVal := reflect.Indirect(reflect.ValueOf(data)) switch dataVal.Kind() { case reflect.Map: return d.decodeMapFromMap(name, dataVal, val, valMap) @@ -857,7 +929,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle valElemType := valType.Elem() // Accumulate errors - errors := make([]string, 0) + var errs []error // If the input data is empty, then we just match what the input data is. if dataVal.Len() == 0 { @@ -879,7 +951,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle // First decode the key into the proper type currentKey := reflect.Indirect(reflect.New(valKeyType)) if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) continue } @@ -887,7 +959,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle v := dataVal.MapIndex(k).Interface() currentVal := reflect.Indirect(reflect.New(valElemType)) if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) continue } @@ -897,12 +969,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle // Set the built up map to the value val.Set(valMap) - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil + return errors.Join(errs...) } func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { @@ -945,7 +1012,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } // If "squash" is specified in the tag, we squash the field down. - squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 + squash = squash || strings.Contains(tagValue[index+1:], d.config.SquashTagOption) if squash { // When squashing, the embedded type can be a pointer to a struct. if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { @@ -956,6 +1023,18 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re if v.Kind() != reflect.Struct { return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) } + } else { + if strings.Index(tagValue[index+1:], "remain") != -1 { + if v.Kind() != reflect.Map { + return fmt.Errorf("error remain-tag field with invalid type: '%s'", v.Type()) + } + + ptr := v.MapRange() + for ptr.Next() { + valMap.SetMapIndex(ptr.Key(), ptr.Value()) + } + continue + } } if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { keyName = keyNameTagValue @@ -1123,10 +1202,12 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) if valSlice.IsNil() || d.config.ZeroFields { // Make a new slice to hold our result, same size as the original data. 
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } else if valSlice.Len() > dataVal.Len() { + valSlice = valSlice.Slice(0, dataVal.Len()) } // Accumulate any errors - errors := make([]string, 0) + var errs []error for i := 0; i < dataVal.Len(); i++ { currentData := dataVal.Index(i).Interface() @@ -1137,19 +1218,14 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } } // Finally, set the value to the slice we built up val.Set(valSlice) - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil + return errors.Join(errs...) } func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { @@ -1161,7 +1237,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) valArray := val - if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + if isComparable(valArray) && valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { // Check input type if dataValKind != reflect.Array && dataValKind != reflect.Slice { if d.config.WeaklyTypedInput { @@ -1188,7 +1264,6 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) if dataVal.Len() > arrayType.Len() { return fmt.Errorf( "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) - } // Make a new array to hold our result, same size as the original data. @@ -1196,7 +1271,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) } // Accumulate any errors - errors := make([]string, 0) + var errs []error for i := 0; i < dataVal.Len(); i++ { currentData := dataVal.Index(i).Interface() @@ -1204,19 +1279,14 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } } // Finally, set the value to the array we built up val.Set(valArray) - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil + return errors.Join(errs...) } func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { @@ -1278,7 +1348,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } targetValKeysUnused := make(map[interface{}]struct{}) - errors := make([]string, 0) + + var errs []error // This slice will keep track of all the structs we'll be decoding. 
// There can be more than one struct if there are embedded structs @@ -1319,7 +1390,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e // We always parse the tags cause we're looking for other tags too tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") for _, tag := range tagParts[1:] { - if tag == "squash" { + if tag == d.config.SquashTagOption { squash = true break } @@ -1331,11 +1402,15 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } if squash { - if fieldVal.Kind() != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) - } else { + switch fieldVal.Kind() { + case reflect.Struct: structs = append(structs, fieldVal) + case reflect.Interface: + if !fieldVal.IsNil() { + structs = append(structs, fieldVal.Elem().Elem()) + } + default: + errs = append(errs, fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) } continue } @@ -1356,6 +1431,9 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e fieldName := field.Name tagValue := field.Tag.Get(d.config.TagName) + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } tagValue = strings.SplitN(tagValue, ",", 2)[0] if tagValue != "" { fieldName = tagValue @@ -1409,7 +1487,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } } @@ -1424,7 +1502,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e // Decode it as-if we were just decoding this map onto our map. 
if err := d.decodeMap(name, remain, remainField.val); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } // Set the map to nil so we have none so that the next check will @@ -1440,7 +1518,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e sort.Strings(keys) err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) + errs = append(errs, err) } if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { @@ -1451,11 +1529,11 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e sort.Strings(keys) err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) + errs = append(errs, err) } - if len(errors) > 0 { - return &Error{errors} + if err := errors.Join(errs...); err != nil { + return err } // Add the unused keys to the list of unused keys if we're tracking metadata @@ -1509,6 +1587,8 @@ func getKind(val reflect.Value) reflect.Kind { return reflect.Uint case kind >= reflect.Float32 && kind <= reflect.Float64: return reflect.Float32 + case kind >= reflect.Complex64 && kind <= reflect.Complex128: + return reflect.Complex64 default: return kind } diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go new file mode 100644 index 00000000..d0913fff --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go @@ -0,0 +1,44 @@ +//go:build !go1.20 + +package mapstructure + +import "reflect" + +func isComparable(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Invalid: + return false + + case reflect.Array: + switch v.Type().Elem().Kind() { + case reflect.Interface, reflect.Array, reflect.Struct: + for i := 0; i < v.Type().Len(); i++ { + // if !v.Index(i).Comparable() { + if !isComparable(v.Index(i)) { + return false + } + } + return true + } + return v.Type().Comparable() + + case reflect.Interface: + // return v.Elem().Comparable() + return isComparable(v.Elem()) + + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + return false + + // if !v.Field(i).Comparable() { + if !isComparable(v.Field(i)) { + return false + } + } + return true + + default: + return v.Type().Comparable() + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go new file mode 100644 index 00000000..f8255a1b --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go @@ -0,0 +1,10 @@ +//go:build go1.20 + +package mapstructure + +import "reflect" + +// TODO: remove once we drop support for Go <1.20 +func isComparable(v reflect.Value) bool { + return v.Comparable() +} diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md index 21508edb..f06ba51c 100644 --- a/vendor/github.com/klauspost/cpuid/v2/README.md +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -281,6 +281,7 @@ Exit Code 1 | AMXBF16 | Tile computational operations on BFLOAT16 numbers | | AMXINT8 | Tile computational operations on 8-bit integers | | AMXFP16 | Tile computational operations on FP16 numbers | +| AMXFP8 | Tile computational operations on FP8 numbers | | AMXTILE | Tile architecture | | APX_F | Intel APX | | AVX | AVX functions | diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index 53bc18ca..db99eb62 100644 
--- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -55,6 +55,12 @@ const ( Qualcomm Marvell + QEMU + QNX + ACRN + SRE + Apple + lastVendor ) @@ -75,6 +81,7 @@ const ( AMXBF16 // Tile computational operations on BFLOAT16 numbers AMXFP16 // Tile computational operations on FP16 numbers AMXINT8 // Tile computational operations on 8-bit integers + AMXFP8 // Tile computational operations on FP8 numbers AMXTILE // Tile architecture APX_F // Intel APX AVX // AVX functions @@ -296,20 +303,22 @@ const ( // CPUInfo contains information about the detected system CPU. type CPUInfo struct { - BrandName string // Brand name reported by the CPU - VendorID Vendor // Comparable CPU vendor ID - VendorString string // Raw vendor string. - featureSet flagSet // Features of the CPU - PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. - ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. - LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. - Family int // CPU family number - Model int // CPU model number - Stepping int // CPU stepping info - CacheLine int // Cache line size in bytes. Will be 0 if undetectable. - Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. - BoostFreq int64 // Max clock speed, if known, 0 otherwise - Cache struct { + BrandName string // Brand name reported by the CPU + VendorID Vendor // Comparable CPU vendor ID + VendorString string // Raw vendor string. + HypervisorVendorID Vendor // Hypervisor vendor + HypervisorVendorString string // Raw hypervisor vendor string + featureSet flagSet // Features of the CPU + PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. + LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + Family int // CPU family number + Model int // CPU model number + Stepping int // CPU stepping info + CacheLine int // Cache line size in bytes. Will be 0 if undetectable. + Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. + BoostFreq int64 // Max clock speed, if known, 0 otherwise + Cache struct { L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected L2 int // L2 Cache (per core or shared). Will be -1 if undetected @@ -318,8 +327,9 @@ type CPUInfo struct { SGX SGXSupport AMDMemEncryption AMDMemEncryptionSupport AVX10Level uint8 - maxFunc uint32 - maxExFunc uint32 + + maxFunc uint32 + maxExFunc uint32 } var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) @@ -503,7 +513,7 @@ func (c CPUInfo) FeatureSet() []string { // Uses the RDTSCP instruction. The value 0 is returned // if the CPU does not support the instruction. func (c CPUInfo) RTCounter() uint64 { - if !c.Supports(RDTSCP) { + if !c.Has(RDTSCP) { return 0 } a, _, _, d := rdtscpAsm() @@ -515,13 +525,22 @@ func (c CPUInfo) RTCounter() uint64 { // about the current cpu/core the code is running on. // If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. 
func (c CPUInfo) Ia32TscAux() uint32 { - if !c.Supports(RDTSCP) { + if !c.Has(RDTSCP) { return 0 } _, _, ecx, _ := rdtscpAsm() return ecx } +// SveLengths returns arm SVE vector and predicate lengths. +// Will return 0, 0 if SVE is not enabled or otherwise unable to detect. +func (c CPUInfo) SveLengths() (vl, pl uint64) { + if !c.Has(SVE) { + return 0, 0 + } + return getVectorLength() +} + // LogicalCPU will return the Logical CPU the code is currently executing on. // This is likely to change when the OS re-schedules the running thread // to another CPU. @@ -781,11 +800,16 @@ func threadsPerCore() int { _, b, _, _ := cpuidex(0xb, 0) if b&0xffff == 0 { if vend == AMD { - // Workaround for AMD returning 0, assume 2 if >= Zen 2 - // It will be more correct than not. + // if >= Zen 2 0x8000001e EBX 15-8 bits means threads per core. + // The number of threads per core is ThreadsPerCore+1 + // See PPR for AMD Family 17h Models 00h-0Fh (page 82) fam, _, _ := familyModel() _, _, _, d := cpuid(1) if (d&(1<<28)) != 0 && fam >= 23 { + if maxExtendedFunction() >= 0x8000001e { + _, b, _, _ := cpuid(0x8000001e) + return int((b>>8)&0xff) + 1 + } return 2 } } @@ -877,7 +901,9 @@ var vendorMapping = map[string]Vendor{ "GenuineTMx86": Transmeta, "Geode by NSC": NSC, "VIA VIA VIA ": VIA, - "KVMKVMKVMKVM": KVM, + "KVMKVMKVM": KVM, + "Linux KVM Hv": KVM, + "TCGTCGTCGTCG": QEMU, "Microsoft Hv": MSVM, "VMwareVMware": VMware, "XenVMMXenVMM": XenHVM, @@ -887,6 +913,10 @@ var vendorMapping = map[string]Vendor{ "SiS SiS SiS ": SiS, "RiseRiseRise": SiS, "Genuine RDC": RDC, + "QNXQVMBSQG": QNX, + "ACRNACRNACRN": ACRN, + "SRESRESRESRE": SRE, + "Apple VZ": Apple, } func vendorID() (Vendor, string) { @@ -899,6 +929,17 @@ func vendorID() (Vendor, string) { return vend, v } +func hypervisorVendorID() (Vendor, string) { + // https://lwn.net/Articles/301888/ + _, b, c, d := cpuid(0x40000000) + v := string(valAsString(b, c, d)) + vend, ok := vendorMapping[v] + if !ok { + return VendorUnknown, v + } + return vend, v +} + func cacheLine() int { if maxFunctionID() < 0x1 { return 0 @@ -1271,6 +1312,7 @@ func support() flagSet { fs.setIf(ebx&(1<<31) != 0, AVX512VL) // ecx fs.setIf(ecx&(1<<1) != 0, AVX512VBMI) + fs.setIf(ecx&(1<<3) != 0, AMXFP8) fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2) fs.setIf(ecx&(1<<11) != 0, AVX512VNNI) fs.setIf(ecx&(1<<12) != 0, AVX512BITALG) diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s index b31d6aec..b196f78e 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s @@ -24,3 +24,13 @@ TEXT ·getInstAttributes(SB), 7, $0 MOVD R1, instAttrReg1+8(FP) RET +TEXT ·getVectorLength(SB), 7, $0 + WORD $0xd2800002 // mov x2, #0 + WORD $0x04225022 // addvl x2, x2, #1 + WORD $0xd37df042 // lsl x2, x2, #3 + WORD $0xd2800003 // mov x3, #0 + WORD $0x04635023 // addpl x3, x3, #1 + WORD $0xd37df063 // lsl x3, x3, #3 + MOVD R2, vl+0(FP) + MOVD R3, pl+8(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go index 9a53504a..566743d2 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go @@ -10,6 +10,7 @@ import "runtime" func getMidr() (midr uint64) func getProcFeatures() (procFeatures uint64) func getInstAttributes() (instAttrReg0, instAttrReg1 uint64) +func getVectorLength() (vl, pl uint64) func initCPU() { cpuid = func(uint32) (a, b, c, d uint32) { 
return 0, 0, 0, 0 } @@ -24,7 +25,7 @@ func addInfo(c *CPUInfo, safe bool) { detectOS(c) // ARM64 disabled since it may crash if interrupt is not intercepted by OS. - if safe && !c.Supports(ARMCPUID) && runtime.GOOS != "freebsd" { + if safe && !c.Has(ARMCPUID) && runtime.GOOS != "freebsd" { return } midr := getMidr() diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go index 9636c2bc..574f9389 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go @@ -10,6 +10,8 @@ func initCPU() { cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } xgetbv = func(uint32) (a, b uint32) { return 0, 0 } rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } + } func addInfo(info *CPUInfo, safe bool) {} +func getVectorLength() (vl, pl uint64) { return 0, 0 } diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go index 799b400c..f924c9d8 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -32,7 +32,10 @@ func addInfo(c *CPUInfo, safe bool) { c.LogicalCores = logicalCores() c.PhysicalCores = physicalCores() c.VendorID, c.VendorString = vendorID() + c.HypervisorVendorID, c.HypervisorVendorString = hypervisorVendorID() c.AVX10Level = c.supportAVX10() c.cacheSize() c.frequencies() } + +func getVectorLength() (vl, pl uint64) { return 0, 0 } diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index 3a256031..e7f874a7 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -15,224 +15,225 @@ func _() { _ = x[AMXBF16-5] _ = x[AMXFP16-6] _ = x[AMXINT8-7] - _ = x[AMXTILE-8] - _ = x[APX_F-9] - _ = x[AVX-10] - _ = x[AVX10-11] - _ = x[AVX10_128-12] - _ = x[AVX10_256-13] - _ = x[AVX10_512-14] - _ = x[AVX2-15] - _ = x[AVX512BF16-16] - _ = x[AVX512BITALG-17] - _ = x[AVX512BW-18] - _ = x[AVX512CD-19] - _ = x[AVX512DQ-20] - _ = x[AVX512ER-21] - _ = x[AVX512F-22] - _ = x[AVX512FP16-23] - _ = x[AVX512IFMA-24] - _ = x[AVX512PF-25] - _ = x[AVX512VBMI-26] - _ = x[AVX512VBMI2-27] - _ = x[AVX512VL-28] - _ = x[AVX512VNNI-29] - _ = x[AVX512VP2INTERSECT-30] - _ = x[AVX512VPOPCNTDQ-31] - _ = x[AVXIFMA-32] - _ = x[AVXNECONVERT-33] - _ = x[AVXSLOW-34] - _ = x[AVXVNNI-35] - _ = x[AVXVNNIINT8-36] - _ = x[AVXVNNIINT16-37] - _ = x[BHI_CTRL-38] - _ = x[BMI1-39] - _ = x[BMI2-40] - _ = x[CETIBT-41] - _ = x[CETSS-42] - _ = x[CLDEMOTE-43] - _ = x[CLMUL-44] - _ = x[CLZERO-45] - _ = x[CMOV-46] - _ = x[CMPCCXADD-47] - _ = x[CMPSB_SCADBS_SHORT-48] - _ = x[CMPXCHG8-49] - _ = x[CPBOOST-50] - _ = x[CPPC-51] - _ = x[CX16-52] - _ = x[EFER_LMSLE_UNS-53] - _ = x[ENQCMD-54] - _ = x[ERMS-55] - _ = x[F16C-56] - _ = x[FLUSH_L1D-57] - _ = x[FMA3-58] - _ = x[FMA4-59] - _ = x[FP128-60] - _ = x[FP256-61] - _ = x[FSRM-62] - _ = x[FXSR-63] - _ = x[FXSROPT-64] - _ = x[GFNI-65] - _ = x[HLE-66] - _ = x[HRESET-67] - _ = x[HTT-68] - _ = x[HWA-69] - _ = x[HYBRID_CPU-70] - _ = x[HYPERVISOR-71] - _ = x[IA32_ARCH_CAP-72] - _ = x[IA32_CORE_CAP-73] - _ = x[IBPB-74] - _ = x[IBPB_BRTYPE-75] - _ = x[IBRS-76] - _ = x[IBRS_PREFERRED-77] - _ = x[IBRS_PROVIDES_SMP-78] - _ = x[IBS-79] - _ = x[IBSBRNTRGT-80] - _ = x[IBSFETCHSAM-81] - _ = x[IBSFFV-82] - _ = x[IBSOPCNT-83] - _ = x[IBSOPCNTEXT-84] - _ = x[IBSOPSAM-85] - _ = x[IBSRDWROPCNT-86] - _ = 
x[IBSRIPINVALIDCHK-87] - _ = x[IBS_FETCH_CTLX-88] - _ = x[IBS_OPDATA4-89] - _ = x[IBS_OPFUSE-90] - _ = x[IBS_PREVENTHOST-91] - _ = x[IBS_ZEN4-92] - _ = x[IDPRED_CTRL-93] - _ = x[INT_WBINVD-94] - _ = x[INVLPGB-95] - _ = x[KEYLOCKER-96] - _ = x[KEYLOCKERW-97] - _ = x[LAHF-98] - _ = x[LAM-99] - _ = x[LBRVIRT-100] - _ = x[LZCNT-101] - _ = x[MCAOVERFLOW-102] - _ = x[MCDT_NO-103] - _ = x[MCOMMIT-104] - _ = x[MD_CLEAR-105] - _ = x[MMX-106] - _ = x[MMXEXT-107] - _ = x[MOVBE-108] - _ = x[MOVDIR64B-109] - _ = x[MOVDIRI-110] - _ = x[MOVSB_ZL-111] - _ = x[MOVU-112] - _ = x[MPX-113] - _ = x[MSRIRC-114] - _ = x[MSRLIST-115] - _ = x[MSR_PAGEFLUSH-116] - _ = x[NRIPS-117] - _ = x[NX-118] - _ = x[OSXSAVE-119] - _ = x[PCONFIG-120] - _ = x[POPCNT-121] - _ = x[PPIN-122] - _ = x[PREFETCHI-123] - _ = x[PSFD-124] - _ = x[RDPRU-125] - _ = x[RDRAND-126] - _ = x[RDSEED-127] - _ = x[RDTSCP-128] - _ = x[RRSBA_CTRL-129] - _ = x[RTM-130] - _ = x[RTM_ALWAYS_ABORT-131] - _ = x[SBPB-132] - _ = x[SERIALIZE-133] - _ = x[SEV-134] - _ = x[SEV_64BIT-135] - _ = x[SEV_ALTERNATIVE-136] - _ = x[SEV_DEBUGSWAP-137] - _ = x[SEV_ES-138] - _ = x[SEV_RESTRICTED-139] - _ = x[SEV_SNP-140] - _ = x[SGX-141] - _ = x[SGXLC-142] - _ = x[SHA-143] - _ = x[SME-144] - _ = x[SME_COHERENT-145] - _ = x[SPEC_CTRL_SSBD-146] - _ = x[SRBDS_CTRL-147] - _ = x[SRSO_MSR_FIX-148] - _ = x[SRSO_NO-149] - _ = x[SRSO_USER_KERNEL_NO-150] - _ = x[SSE-151] - _ = x[SSE2-152] - _ = x[SSE3-153] - _ = x[SSE4-154] - _ = x[SSE42-155] - _ = x[SSE4A-156] - _ = x[SSSE3-157] - _ = x[STIBP-158] - _ = x[STIBP_ALWAYSON-159] - _ = x[STOSB_SHORT-160] - _ = x[SUCCOR-161] - _ = x[SVM-162] - _ = x[SVMDA-163] - _ = x[SVMFBASID-164] - _ = x[SVML-165] - _ = x[SVMNP-166] - _ = x[SVMPF-167] - _ = x[SVMPFT-168] - _ = x[SYSCALL-169] - _ = x[SYSEE-170] - _ = x[TBM-171] - _ = x[TDX_GUEST-172] - _ = x[TLB_FLUSH_NESTED-173] - _ = x[TME-174] - _ = x[TOPEXT-175] - _ = x[TSCRATEMSR-176] - _ = x[TSXLDTRK-177] - _ = x[VAES-178] - _ = x[VMCBCLEAN-179] - _ = x[VMPL-180] - _ = x[VMSA_REGPROT-181] - _ = x[VMX-182] - _ = x[VPCLMULQDQ-183] - _ = x[VTE-184] - _ = x[WAITPKG-185] - _ = x[WBNOINVD-186] - _ = x[WRMSRNS-187] - _ = x[X87-188] - _ = x[XGETBV1-189] - _ = x[XOP-190] - _ = x[XSAVE-191] - _ = x[XSAVEC-192] - _ = x[XSAVEOPT-193] - _ = x[XSAVES-194] - _ = x[AESARM-195] - _ = x[ARMCPUID-196] - _ = x[ASIMD-197] - _ = x[ASIMDDP-198] - _ = x[ASIMDHP-199] - _ = x[ASIMDRDM-200] - _ = x[ATOMICS-201] - _ = x[CRC32-202] - _ = x[DCPOP-203] - _ = x[EVTSTRM-204] - _ = x[FCMA-205] - _ = x[FP-206] - _ = x[FPHP-207] - _ = x[GPA-208] - _ = x[JSCVT-209] - _ = x[LRCPC-210] - _ = x[PMULL-211] - _ = x[SHA1-212] - _ = x[SHA2-213] - _ = x[SHA3-214] - _ = x[SHA512-215] - _ = x[SM3-216] - _ = x[SM4-217] - _ = x[SVE-218] - _ = x[lastID-219] + _ = x[AMXFP8-8] + _ = x[AMXTILE-9] + _ = x[APX_F-10] + _ = x[AVX-11] + _ = x[AVX10-12] + _ = x[AVX10_128-13] + _ = x[AVX10_256-14] + _ = x[AVX10_512-15] + _ = x[AVX2-16] + _ = x[AVX512BF16-17] + _ = x[AVX512BITALG-18] + _ = x[AVX512BW-19] + _ = x[AVX512CD-20] + _ = x[AVX512DQ-21] + _ = x[AVX512ER-22] + _ = x[AVX512F-23] + _ = x[AVX512FP16-24] + _ = x[AVX512IFMA-25] + _ = x[AVX512PF-26] + _ = x[AVX512VBMI-27] + _ = x[AVX512VBMI2-28] + _ = x[AVX512VL-29] + _ = x[AVX512VNNI-30] + _ = x[AVX512VP2INTERSECT-31] + _ = x[AVX512VPOPCNTDQ-32] + _ = x[AVXIFMA-33] + _ = x[AVXNECONVERT-34] + _ = x[AVXSLOW-35] + _ = x[AVXVNNI-36] + _ = x[AVXVNNIINT8-37] + _ = x[AVXVNNIINT16-38] + _ = x[BHI_CTRL-39] + _ = x[BMI1-40] + _ = x[BMI2-41] + _ = x[CETIBT-42] + _ = x[CETSS-43] + _ = x[CLDEMOTE-44] + _ = 
x[CLMUL-45] + _ = x[CLZERO-46] + _ = x[CMOV-47] + _ = x[CMPCCXADD-48] + _ = x[CMPSB_SCADBS_SHORT-49] + _ = x[CMPXCHG8-50] + _ = x[CPBOOST-51] + _ = x[CPPC-52] + _ = x[CX16-53] + _ = x[EFER_LMSLE_UNS-54] + _ = x[ENQCMD-55] + _ = x[ERMS-56] + _ = x[F16C-57] + _ = x[FLUSH_L1D-58] + _ = x[FMA3-59] + _ = x[FMA4-60] + _ = x[FP128-61] + _ = x[FP256-62] + _ = x[FSRM-63] + _ = x[FXSR-64] + _ = x[FXSROPT-65] + _ = x[GFNI-66] + _ = x[HLE-67] + _ = x[HRESET-68] + _ = x[HTT-69] + _ = x[HWA-70] + _ = x[HYBRID_CPU-71] + _ = x[HYPERVISOR-72] + _ = x[IA32_ARCH_CAP-73] + _ = x[IA32_CORE_CAP-74] + _ = x[IBPB-75] + _ = x[IBPB_BRTYPE-76] + _ = x[IBRS-77] + _ = x[IBRS_PREFERRED-78] + _ = x[IBRS_PROVIDES_SMP-79] + _ = x[IBS-80] + _ = x[IBSBRNTRGT-81] + _ = x[IBSFETCHSAM-82] + _ = x[IBSFFV-83] + _ = x[IBSOPCNT-84] + _ = x[IBSOPCNTEXT-85] + _ = x[IBSOPSAM-86] + _ = x[IBSRDWROPCNT-87] + _ = x[IBSRIPINVALIDCHK-88] + _ = x[IBS_FETCH_CTLX-89] + _ = x[IBS_OPDATA4-90] + _ = x[IBS_OPFUSE-91] + _ = x[IBS_PREVENTHOST-92] + _ = x[IBS_ZEN4-93] + _ = x[IDPRED_CTRL-94] + _ = x[INT_WBINVD-95] + _ = x[INVLPGB-96] + _ = x[KEYLOCKER-97] + _ = x[KEYLOCKERW-98] + _ = x[LAHF-99] + _ = x[LAM-100] + _ = x[LBRVIRT-101] + _ = x[LZCNT-102] + _ = x[MCAOVERFLOW-103] + _ = x[MCDT_NO-104] + _ = x[MCOMMIT-105] + _ = x[MD_CLEAR-106] + _ = x[MMX-107] + _ = x[MMXEXT-108] + _ = x[MOVBE-109] + _ = x[MOVDIR64B-110] + _ = x[MOVDIRI-111] + _ = x[MOVSB_ZL-112] + _ = x[MOVU-113] + _ = x[MPX-114] + _ = x[MSRIRC-115] + _ = x[MSRLIST-116] + _ = x[MSR_PAGEFLUSH-117] + _ = x[NRIPS-118] + _ = x[NX-119] + _ = x[OSXSAVE-120] + _ = x[PCONFIG-121] + _ = x[POPCNT-122] + _ = x[PPIN-123] + _ = x[PREFETCHI-124] + _ = x[PSFD-125] + _ = x[RDPRU-126] + _ = x[RDRAND-127] + _ = x[RDSEED-128] + _ = x[RDTSCP-129] + _ = x[RRSBA_CTRL-130] + _ = x[RTM-131] + _ = x[RTM_ALWAYS_ABORT-132] + _ = x[SBPB-133] + _ = x[SERIALIZE-134] + _ = x[SEV-135] + _ = x[SEV_64BIT-136] + _ = x[SEV_ALTERNATIVE-137] + _ = x[SEV_DEBUGSWAP-138] + _ = x[SEV_ES-139] + _ = x[SEV_RESTRICTED-140] + _ = x[SEV_SNP-141] + _ = x[SGX-142] + _ = x[SGXLC-143] + _ = x[SHA-144] + _ = x[SME-145] + _ = x[SME_COHERENT-146] + _ = x[SPEC_CTRL_SSBD-147] + _ = x[SRBDS_CTRL-148] + _ = x[SRSO_MSR_FIX-149] + _ = x[SRSO_NO-150] + _ = x[SRSO_USER_KERNEL_NO-151] + _ = x[SSE-152] + _ = x[SSE2-153] + _ = x[SSE3-154] + _ = x[SSE4-155] + _ = x[SSE42-156] + _ = x[SSE4A-157] + _ = x[SSSE3-158] + _ = x[STIBP-159] + _ = x[STIBP_ALWAYSON-160] + _ = x[STOSB_SHORT-161] + _ = x[SUCCOR-162] + _ = x[SVM-163] + _ = x[SVMDA-164] + _ = x[SVMFBASID-165] + _ = x[SVML-166] + _ = x[SVMNP-167] + _ = x[SVMPF-168] + _ = x[SVMPFT-169] + _ = x[SYSCALL-170] + _ = x[SYSEE-171] + _ = x[TBM-172] + _ = x[TDX_GUEST-173] + _ = x[TLB_FLUSH_NESTED-174] + _ = x[TME-175] + _ = x[TOPEXT-176] + _ = x[TSCRATEMSR-177] + _ = x[TSXLDTRK-178] + _ = x[VAES-179] + _ = x[VMCBCLEAN-180] + _ = x[VMPL-181] + _ = x[VMSA_REGPROT-182] + _ = x[VMX-183] + _ = x[VPCLMULQDQ-184] + _ = x[VTE-185] + _ = x[WAITPKG-186] + _ = x[WBNOINVD-187] + _ = x[WRMSRNS-188] + _ = x[X87-189] + _ = x[XGETBV1-190] + _ = x[XOP-191] + _ = x[XSAVE-192] + _ = x[XSAVEC-193] + _ = x[XSAVEOPT-194] + _ = x[XSAVES-195] + _ = x[AESARM-196] + _ = x[ARMCPUID-197] + _ = x[ASIMD-198] + _ = x[ASIMDDP-199] + _ = x[ASIMDHP-200] + _ = x[ASIMDRDM-201] + _ = x[ATOMICS-202] + _ = x[CRC32-203] + _ = x[DCPOP-204] + _ = x[EVTSTRM-205] + _ = x[FCMA-206] + _ = x[FP-207] + _ = x[FPHP-208] + _ = x[GPA-209] + _ = x[JSCVT-210] + _ = x[LRCPC-211] + _ = x[PMULL-212] + _ = x[SHA1-213] + _ = x[SHA2-214] + _ = x[SHA3-215] + _ = 
x[SHA512-216] + _ = x[SM3-217] + _ = x[SM4-218] + _ = x[SVE-219] + _ = x[lastID-220] _ = x[firstID-0] } -const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 323, 
331, 335, 339, 345, 350, 358, 363, 369, 373, 382, 400, 408, 415, 419, 423, 437, 443, 447, 451, 460, 464, 468, 473, 478, 482, 486, 493, 497, 500, 506, 509, 512, 522, 532, 545, 558, 562, 573, 577, 591, 608, 611, 621, 632, 638, 646, 657, 665, 677, 693, 707, 718, 728, 743, 751, 762, 772, 779, 788, 798, 802, 805, 812, 817, 828, 835, 842, 850, 853, 859, 864, 873, 880, 888, 892, 895, 901, 908, 921, 926, 928, 935, 942, 948, 952, 961, 965, 970, 976, 982, 988, 998, 1001, 1017, 1021, 1030, 1033, 1042, 1057, 1070, 1076, 1090, 1097, 1100, 1105, 1108, 1111, 1123, 1137, 1147, 1159, 1166, 1185, 1188, 1192, 1196, 1200, 1205, 1210, 1215, 1220, 1234, 1245, 1251, 1254, 1259, 1268, 1272, 1277, 1282, 1288, 1295, 1300, 1303, 1312, 1328, 1331, 1337, 1347, 1355, 1359, 1368, 1372, 1384, 1387, 1397, 1400, 1407, 1415, 1422, 1425, 1432, 1435, 1440, 1446, 1454, 1460, 1466, 1474, 1479, 1486, 1493, 1501, 1508, 1513, 1518, 1525, 1529, 1531, 1535, 1538, 1543, 1548, 1553, 1557, 1561, 1565, 1571, 1574, 1577, 1580, 1586} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 73, 76, 81, 90, 99, 108, 112, 122, 134, 142, 150, 158, 166, 173, 183, 193, 201, 211, 222, 230, 240, 258, 273, 280, 292, 299, 306, 317, 329, 337, 341, 345, 351, 356, 364, 369, 375, 379, 388, 406, 414, 421, 425, 429, 443, 449, 453, 457, 466, 470, 474, 479, 484, 488, 492, 499, 503, 506, 512, 515, 518, 528, 538, 551, 564, 568, 579, 583, 597, 614, 617, 627, 638, 644, 652, 663, 671, 683, 699, 713, 724, 734, 749, 757, 768, 778, 785, 794, 804, 808, 811, 818, 823, 834, 841, 848, 856, 859, 865, 870, 879, 886, 894, 898, 901, 907, 914, 927, 932, 934, 941, 948, 954, 958, 967, 971, 976, 982, 988, 994, 1004, 1007, 1023, 1027, 1036, 1039, 1048, 1063, 1076, 1082, 1096, 1103, 1106, 1111, 1114, 1117, 1129, 1143, 1153, 1165, 1172, 1191, 1194, 1198, 1202, 1206, 1211, 1216, 1221, 1226, 1240, 1251, 1257, 1260, 1265, 1274, 1278, 1283, 1288, 1294, 1301, 1306, 1309, 1318, 1334, 1337, 1343, 1353, 1361, 1365, 1374, 1378, 1390, 1393, 1403, 1406, 1413, 1421, 1428, 1431, 1438, 1441, 1446, 1452, 1460, 1466, 1472, 1480, 1485, 1492, 1499, 1507, 1514, 1519, 1524, 1531, 1535, 1537, 1541, 1544, 1549, 1554, 1559, 1563, 1567, 1571, 1577, 1580, 1583, 1586, 1592} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { @@ -270,12 +271,17 @@ func _() { _ = x[AMCC-23] _ = x[Qualcomm-24] _ = x[Marvell-25] - _ = x[lastVendor-26] + _ = x[QEMU-26] + _ = x[QNX-27] + _ = x[ACRN-28] + _ = x[SRE-29] + _ = x[Apple-30] + _ = x[lastVendor-31] } -const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvelllastVendor" +const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvellQEMUQNXACRNSREApplelastVendor" -var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 155} +var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 149, 152, 156, 159, 164, 174} func (i Vendor) String() string { if i < 0 || i >= Vendor(len(_Vendor_index)-1) { diff --git a/vendor/github.com/lithammer/shortuuid/v4/README.md b/vendor/github.com/lithammer/shortuuid/v4/README.md index c13f947a..e80857d1 100644 --- a/vendor/github.com/lithammer/shortuuid/v4/README.md +++ 
b/vendor/github.com/lithammer/shortuuid/v4/README.md @@ -1,11 +1,11 @@ # shortuuid [![Build Status](https://github.com/lithammer/shortuuid/workflows/CI/badge.svg)](https://github.com/lithammer/shortuuid/actions) -[![Godoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/lithammer/shortuuid) +[![Godoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://pkg.go.dev/github.com/lithammer/shortuuid/v4) A Go library that generates concise, unambiguous, URL-safe UUIDs. Based on and compatible with the Python library -[`shortuuid`](https://github.com/stochastic-technologies/shortuuid). +[`shortuuid`](https://github.com/skorokithakis/shortuuid). Often, one needs to use non-sequential IDs in places where users will see them, but the IDs must be as concise and easy to use as possible. shortuuid solves @@ -26,7 +26,8 @@ import ( ) func main() { - u := shortuuid.New() // KwSysDpxcBU9FNhGkn2dCf + u := shortuuid.New() + fmt.Println(u) // KwSysDpxcBU9FNhGkn2dCf } ``` diff --git a/vendor/github.com/lithammer/shortuuid/v4/base57.go b/vendor/github.com/lithammer/shortuuid/v4/base57.go index 24996006..6fe7482c 100644 --- a/vendor/github.com/lithammer/shortuuid/v4/base57.go +++ b/vendor/github.com/lithammer/shortuuid/v4/base57.go @@ -1,12 +1,11 @@ package shortuuid import ( + "encoding/binary" "fmt" - "math" - "math/big" - "strings" - "github.com/google/uuid" + "math/bits" + "strings" ) type base57 struct { @@ -14,78 +13,70 @@ type base57 struct { alphabet alphabet } +const ( + strLen = 22 + alphabetLen = 57 +) + // Encode encodes uuid.UUID into a string using the most significant bits (MSB) // first according to the alphabet. func (b base57) Encode(u uuid.UUID) string { - var num big.Int - num.SetString(strings.Replace(u.String(), "-", "", 4), 16) - - // Calculate encoded length. - length := math.Ceil(math.Log(math.Pow(2, 128)) / math.Log(float64(b.alphabet.Length()))) - - return b.numToString(&num, int(length)) -} - -// Decode decodes a string according to the alphabet into a uuid.UUID. If s is -// too short, its most significant bits (MSB) will be padded with 0 (zero). -func (b base57) Decode(u string) (uuid.UUID, error) { - str, err := b.stringToNum(u) - if err != nil { - return uuid.Nil, err + num := uint128{ + binary.BigEndian.Uint64(u[8:]), + binary.BigEndian.Uint64(u[:8]), } - return uuid.Parse(str) -} - -// numToString converts a number a string using the given alphabet. -func (b *base57) numToString(number *big.Int, padToLen int) string { - var ( - out []rune - digit *big.Int - ) + var outIndexes [strLen]uint64 - alphaLen := big.NewInt(b.alphabet.Length()) - - zero := new(big.Int) - for number.Cmp(zero) > 0 { - number, digit = new(big.Int).DivMod(number, alphaLen, new(big.Int)) - out = append(out, b.alphabet.chars[digit.Int64()]) + for i := strLen - 1; num.Hi > 0 || num.Lo > 0; i-- { + num, outIndexes[i] = num.quoRem64(alphabetLen) } - if padToLen > 0 { - remainder := math.Max(float64(padToLen-len(out)), 0) - out = append(out, []rune(strings.Repeat(string(b.alphabet.chars[0]), int(remainder)))...) + var sb strings.Builder + sb.Grow(strLen) + for i := 0; i < strLen; i++ { + sb.WriteRune(b.alphabet.chars[outIndexes[i]]) } - - reverse(out) - - return string(out) + return sb.String() } -// stringToNum converts a string a number using the given alphabet. -func (b *base57) stringToNum(s string) (string, error) { - n := big.NewInt(0) +// Decode decodes a string according to the alphabet into a uuid.UUID. 
If s is +// too short, its most significant bits (MSB) will be padded with 0 (zero). +func (b base57) Decode(s string) (u uuid.UUID, err error) { + var n uint128 + var index int64 for _, char := range s { - n.Mul(n, big.NewInt(b.alphabet.Length())) - - index, err := b.alphabet.Index(char) + index, err = b.alphabet.Index(char) if err != nil { - return "", err + return + } + n, err = n.mulAdd64(alphabetLen, uint64(index)) + if err != nil { + return } - - n.Add(n, big.NewInt(index)) } + binary.BigEndian.PutUint64(u[:8], n.Hi) + binary.BigEndian.PutUint64(u[8:], n.Lo) + return +} - if n.BitLen() > 128 { - return "", fmt.Errorf("number is out of range (need a 128-bit value)") - } +type uint128 struct { + Lo, Hi uint64 +} - return fmt.Sprintf("%032x", n), nil +func (u uint128) quoRem64(v uint64) (q uint128, r uint64) { + q.Hi, r = bits.Div64(0, u.Hi, v) + q.Lo, r = bits.Div64(r, u.Lo, v) + return } -// reverse reverses a inline. -func reverse(a []rune) { - for i, j := 0, len(a)-1; i < j; i, j = i+1, j-1 { - a[i], a[j] = a[j], a[i] +func (u uint128) mulAdd64(m uint64, a uint64) (uint128, error) { + hi, lo := bits.Mul64(u.Lo, m) + p0, p1 := bits.Mul64(u.Hi, m) + lo, c0 := bits.Add64(lo, a, 0) + hi, c1 := bits.Add64(hi, p1, c0) + if p0 != 0 || c1 != 0 { + return uint128{}, fmt.Errorf("number is out of range (need a 128-bit value)") } + return uint128{lo, hi}, nil } diff --git a/vendor/github.com/minio/minio-go/v7/api-prompt-object.go b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go new file mode 100644 index 00000000..dac062a7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go @@ -0,0 +1,78 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2024 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "io" + "net/http" + + "github.com/goccy/go-json" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// PromptObject performs language model inference with the prompt and referenced object as context. +// Inference is performed using a Lambda handler that can process the prompt and object. +// Currently, this functionality is limited to certain MinIO servers. +func (c *Client) PromptObject(ctx context.Context, bucketName, objectName, prompt string, opts PromptObjectOptions) (io.ReadCloser, error) { + // Input validation. 
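The rewritten base57.go above drops math/big in favour of a small uint128 helper built on math/bits. A self-contained sketch of the core division step, mirroring the quoRem64 shown in the diff (the sample value is arbitrary):

```go
package main

import (
	"fmt"
	"math/bits"
)

// uint128 mirrors the helper added in base57.go: Lo holds the low 64 bits,
// Hi the high 64 bits of a 128-bit UUID value.
type uint128 struct{ Lo, Hi uint64 }

// quoRem64 divides the 128-bit value by v with two chained 64-bit divisions;
// the remainder of the high half carries into the division of the low half.
func (u uint128) quoRem64(v uint64) (q uint128, r uint64) {
	q.Hi, r = bits.Div64(0, u.Hi, v)
	q.Lo, r = bits.Div64(r, u.Lo, v)
	return
}

func main() {
	const alphabetLen = 57
	n := uint128{Lo: 0x0123456789abcdef, Hi: 0x0fedcba987654321} // arbitrary sample

	// Peel off base-57 digit indexes least-significant first, exactly as
	// Encode does before mapping each index onto the alphabet.
	var digits []uint64
	for n.Hi > 0 || n.Lo > 0 {
		var d uint64
		n, d = n.quoRem64(alphabetLen)
		digits = append(digits, d)
	}
	fmt.Println(digits) // at most 22 digits for a 128-bit value
}
```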
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: err.Error(), + } + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "XMinioInvalidObjectName", + Message: err.Error(), + } + } + + opts.AddLambdaArnToReqParams(opts.LambdaArn) + opts.SetHeader("Content-Type", "application/json") + opts.AddPromptArg("prompt", prompt) + promptReqBytes, err := json.Marshal(opts.PromptArgs) + if err != nil { + return nil, err + } + + // Execute POST on bucket/object. + resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: opts.toQueryValues(), + customHeader: opts.Header(), + contentSHA256Hex: sum256Hex(promptReqBytes), + contentBody: bytes.NewReader(promptReqBytes), + contentLength: int64(len(promptReqBytes)), + }) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + defer closeResponse(resp) + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + + return resp.Body, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-prompt-options.go b/vendor/github.com/minio/minio-go/v7/api-prompt-options.go new file mode 100644 index 00000000..4493a75d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-prompt-options.go @@ -0,0 +1,84 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2024 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net/http" + "net/url" +) + +// PromptObjectOptions provides options to PromptObject call. +// LambdaArn is the ARN of the Prompt Lambda to be invoked. +// PromptArgs is a map of key-value pairs to be passed to the inference action on the Prompt Lambda. +// "prompt" is a reserved key and should not be used as a key in PromptArgs. +type PromptObjectOptions struct { + LambdaArn string + PromptArgs map[string]any + headers map[string]string + reqParams url.Values +} + +// Header returns the http.Header representation of the POST options. +func (o PromptObjectOptions) Header() http.Header { + headers := make(http.Header, len(o.headers)) + for k, v := range o.headers { + headers.Set(k, v) + } + return headers +} + +// AddPromptArg Add a key value pair to the prompt arguments where the key is a string and +// the value is a JSON serializable. +func (o *PromptObjectOptions) AddPromptArg(key string, value any) { + if o.PromptArgs == nil { + o.PromptArgs = make(map[string]any) + } + o.PromptArgs[key] = value +} + +// AddLambdaArnToReqParams adds the lambdaArn to the request query string parameters. +func (o *PromptObjectOptions) AddLambdaArnToReqParams(lambdaArn string) { + if o.reqParams == nil { + o.reqParams = make(url.Values) + } + o.reqParams.Add("lambdaArn", lambdaArn) +} + +// SetHeader adds a key value pair to the options. 
The +// key-value pair will be part of the HTTP POST request +// headers. +func (o *PromptObjectOptions) SetHeader(key, value string) { + if o.headers == nil { + o.headers = make(map[string]string) + } + o.headers[http.CanonicalHeaderKey(key)] = value +} + +// toQueryValues - Convert the reqParams in Options to query string parameters. +func (o *PromptObjectOptions) toQueryValues() url.Values { + urlValues := make(url.Values) + if o.reqParams != nil { + for key, values := range o.reqParams { + for _, value := range values { + urlValues.Add(key, value) + } + } + } + + return urlValues +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go index 0ae9142e..3023b949 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go @@ -85,7 +85,10 @@ func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData policy.SetEncryption(fanOutReq.SSE) // Set checksum headers if any. - policy.SetChecksum(fanOutReq.Checksum) + err := policy.SetChecksum(fanOutReq.Checksum) + if err != nil { + return nil, err + } url, formData, err := c.PresignedPostPolicy(ctx, policy) if err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 380ec4fd..88e8d434 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -133,7 +133,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.80" + libraryVersion = "v7.0.81" ) // User Agent should always following the below style. diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index c0180b36..43383d13 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -160,7 +160,7 @@ func logError(testName, function string, args map[string]interface{}, startTime } else { logFailure(testName, function, args, startTime, alert, message, err) if !isRunOnFail() { - panic(err) + panic(fmt.Sprintf("Test failed with message: %s, err: %v", message, err)) } } } @@ -393,6 +393,42 @@ func getFuncNameLoc(caller int) string { return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") } +type ClientConfig struct { + // MinIO client configuration + TraceOn bool // Turn on tracing of HTTP requests and responses to stderr + CredsV2 bool // Use V2 credentials if true, otherwise use v4 + TrailingHeaders bool // Send trailing headers in requests +} + +func NewClient(config ClientConfig) (*minio.Client, error) { + // Instantiate new MinIO client + var creds *credentials.Credentials + if config.CredsV2 { + creds = credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), "") + } else { + creds = credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), "") + } + opts := &minio.Options{ + Creds: creds, + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + TrailingHeaders: config.TrailingHeaders, + } + client, err := minio.New(os.Getenv(serverEndpoint), opts) + if err != nil { + return nil, err + } + + if config.TraceOn { + client.TraceOn(os.Stderr) + } + + // Set user agent. + client.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + return client, nil +} + // Tests bucket re-create errors. 
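PromptObject and PromptObjectOptions above are new in this minio-go release; per the doc comment the call is only supported by certain MinIO servers. A hedged usage sketch — the endpoint, credentials, bucket, object, prompt argument and lambda ARN below are all placeholders, not values from this repository:

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// LambdaArn identifies the Prompt Lambda; the ARN here is hypothetical.
	opts := minio.PromptObjectOptions{LambdaArn: "arn:minio:s3-object-lambda::mymodel:webhook"}
	// Extra key/value pairs are forwarded to the lambda; "prompt" is reserved.
	opts.AddPromptArg("temperature", 0.2)

	body, err := client.PromptObject(context.Background(),
		"my-bucket", "report.pdf", "Summarize this document", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer body.Close()
	io.Copy(os.Stdout, body) // stream the inference output
}
```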
func testMakeBucketError() { region := "eu-central-1" @@ -407,27 +443,12 @@ func testMakeBucketError() { "region": region, } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - Transport: createHTTPTransport(), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -462,20 +483,12 @@ func testMetadataSizeLimit() { "objectName": "", "opts.UserMetadata": "", } - rand.Seed(startTime.Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - Transport: createHTTPTransport(), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client creation failed", err) return } - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -531,27 +544,12 @@ func testMakeBucketRegions() { "region": region, } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -598,27 +596,12 @@ func testPutObjectReadAt() { "opts": "objectContentType", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -697,27 +680,12 @@ func testListObjectVersions() { "recursive": "", } - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -817,27 +785,12 @@ func testStatObjectWithVersioning() { function := "StatObject" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -935,27 +888,12 @@ func testGetObjectWithVersioning() { function := "GetObject()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1075,27 +1013,12 @@ func testPutObjectWithVersioning() { function := "GetObject()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1223,28 +1146,12 @@ func testListMultipartUpload() { function := "GetObject()" args := map[string]interface{}{} - // Instantiate new minio client object. - opts := &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - } - c, err := minio.New(os.Getenv(serverEndpoint), opts) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - core, err := minio.NewCore(os.Getenv(serverEndpoint), opts) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO core client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + core := minio.Core{Client: c} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -1347,27 +1254,12 @@ func testCopyObjectWithVersioning() { function := "CopyObject()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1485,27 +1377,12 @@ func testConcurrentCopyObjectWithVersioning() { function := "CopyObject()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1646,27 +1523,12 @@ func testComposeObjectWithVersioning() { function := "ComposeObject()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1787,27 +1649,12 @@ func testRemoveObjectWithVersioning() { function := "DeleteObject()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1900,27 +1747,12 @@ func testRemoveObjectsWithVersioning() { function := "DeleteObjects()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1996,27 +1828,12 @@ func testObjectTaggingWithVersioning() { function := "{Get,Set,Remove}ObjectTagging()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -2164,27 +1981,12 @@ func testPutObjectWithChecksums() { return } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -2230,7 +2032,7 @@ func testPutObjectWithChecksums() { h := test.cs.Hasher() h.Reset() - // Test with Wrong CRC. + // Test with a bad CRC - we haven't called h.Write(b), so this is a checksum of empty data meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) args["metadata"] = meta args["range"] = "false" @@ -2350,28 +2152,12 @@ func testPutObjectWithTrailingChecksums() { return } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - TrailingHeaders: true, - }) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -2541,28 +2327,12 @@ func testPutMultipartObjectWithChecksums(trailing bool) { return } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - TrailingHeaders: trailing, - }) + c, err := NewClient(ClientConfig{TrailingHeaders: trailing}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -2620,7 +2390,7 @@ func testPutMultipartObjectWithChecksums(trailing bool) { cmpChecksum := func(got, want string) { if want != got { logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) - //fmt.Printf("want %s, got %s\n", want, got) + // fmt.Printf("want %s, got %s\n", want, got) return } } @@ -2741,25 +2511,12 @@ func testTrailingChecksums() { return } - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - TrailingHeaders: true, - }) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -2881,7 +2638,6 @@ func testTrailingChecksums() { test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher) // Set correct CRC. - // c.TraceOn(os.Stderr) resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) @@ -2933,6 +2689,8 @@ func testTrailingChecksums() { delete(args, "metadata") } + + logSuccess(testName, function, args, startTime) } // Test PutObject with custom checksums. @@ -2952,25 +2710,12 @@ func testPutObjectWithAutomaticChecksums() { return } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - TrailingHeaders: true, - }) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -2997,8 +2742,6 @@ func testPutObjectWithAutomaticChecksums() { {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, } - // Enable tracing, write to stderr. 
- // c.TraceOn(os.Stderr) // defer c.TraceOff() for i, test := range tests { @@ -3108,20 +2851,12 @@ func testGetObjectAttributes() { return } - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - TrailingHeaders: true, - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName err = c.MakeBucket( @@ -3315,19 +3050,12 @@ func testGetObjectAttributesSSECEncryption() { return } - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - TrailingHeaders: true, - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - Transport: createHTTPTransport(), - }) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName err = c.MakeBucket( @@ -3401,19 +3129,12 @@ func testGetObjectAttributesErrorCases() { return } - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - TrailingHeaders: true, - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) unknownBucket := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-bucket-") unknownObject := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-object-") @@ -3657,27 +3378,12 @@ func testPutObjectWithMetadata() { return } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -3764,27 +3470,12 @@ func testPutObjectWithContentLanguage() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -3834,27 +3525,12 @@ func testPutObjectStreaming() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -3906,27 +3582,12 @@ func testGetObjectSeekEnd() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4029,27 +3690,12 @@ func testGetObjectClosedTwice() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4120,26 +3766,13 @@ func testRemoveObjectsContext() { "bucketName": "", } - // Seed random based on current tie. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4217,27 +3850,12 @@ func testRemoveMultipleObjects() { "bucketName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4301,27 +3919,12 @@ func testRemoveMultipleObjectsWithResult() { "bucketName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4437,27 +4040,12 @@ func testFPutObjectMultipart() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4543,27 +4131,12 @@ func testFPutObject() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") location := "us-east-1" @@ -4713,27 +4286,13 @@ func testFPutObjectContext() { "fileName": "", "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4814,27 +4373,13 @@ func testFPutObjectContextV2() { "objectName": "", "opts": "minio.PutObjectOptions{ContentType:objectContentType}", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4919,24 +4464,12 @@ func testPutObjectContext() { "opts": "", } - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Make a new bucket. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4989,27 +4522,12 @@ func testGetObjectS3Zip() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{"x-minio-extract": true} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -5173,27 +4691,12 @@ func testGetObjectReadSeekFunctional() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -5343,27 +4846,12 @@ func testGetObjectReadAtFunctional() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -5521,27 +5009,12 @@ func testGetObjectReadAtWhenEOFWasReached() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -5641,27 +5114,12 @@ func testPresignedPostPolicy() { "policy": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -5689,50 +5147,22 @@ func testPresignedPostPolicy() { return } - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - policy := minio.NewPostPolicy() - - if err := policy.SetBucket(""); err == nil { - logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err) - return - } - if err := policy.SetKey(""); err == nil { - logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) - return - } - if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { - logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) - return - } - if err := policy.SetContentType(""); err == nil { - logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) - return - } - if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { - logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) - return - } - if err := policy.SetUserMetadata("", ""); err == nil { - logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err) - return - } - policy.SetBucket(bucketName) policy.SetKey(objectName) policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days policy.SetContentType("binary/octet-stream") policy.SetContentLengthRange(10, 1024*1024) policy.SetUserMetadata(metadataKey, metadataValue) + policy.SetContentEncoding("gzip") // Add CRC32C checksum := minio.ChecksumCRC32C.ChecksumBytes(buf) - policy.SetChecksum(checksum) + err = policy.SetChecksum(checksum) + if err != nil { + logError(testName, function, args, startTime, 
"", "SetChecksum failed", err) + return + } args["policy"] = policy.String() @@ -5828,7 +5258,7 @@ func testPresignedPostPolicy() { expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName - if !strings.Contains(expectedLocation, "s3.amazonaws.com/") { + if !strings.Contains(expectedLocation, ".amazonaws.com/") { // Test when not against AWS S3. if val, ok := res.Header["Location"]; ok { if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { @@ -5840,9 +5270,194 @@ func testPresignedPostPolicy() { return } } - want := checksum.Encoded() - if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != want { - logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", want, got), nil) + wantChecksumCrc32c := checksum.Encoded() + if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != wantChecksumCrc32c { + logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", wantChecksumCrc32c, got), nil) + return + } + + // Ensure that when we subsequently GetObject, the checksum is returned + gopts := minio.GetObjectOptions{Checksum: true} + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if st.ChecksumCRC32C != wantChecksumCrc32c { + logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %s, got %s", wantChecksumCrc32c, st.ChecksumCRC32C), nil) + return + } + + logSuccess(testName, function, args, startTime) +} + +// testPresignedPostPolicyWrongFile tests that when we have a policy with a checksum, we cannot POST the wrong file +func testPresignedPostPolicyWrongFile() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PresignedPostPolicy(policy)" + args := map[string]interface{}{ + "policy": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. 
+ reader := getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + // Azure requires the key to not start with a number + metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") + metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + policy := minio.NewPostPolicy() + policy.SetBucket(bucketName) + policy.SetKey(objectName) + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days + policy.SetContentType("binary/octet-stream") + policy.SetContentLengthRange(10, 1024*1024) + policy.SetUserMetadata(metadataKey, metadataValue) + + // Add CRC32C of the 33kB file that the policy will explicitly allow. + checksum := minio.ChecksumCRC32C.ChecksumBytes(buf) + err = policy.SetChecksum(checksum) + if err != nil { + logError(testName, function, args, startTime, "", "SetChecksum failed", err) + return + } + + args["policy"] = policy.String() + + presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) + return + } + + // At this stage, we have a policy that allows us to upload datafile-33-kB. + // Test that uploading datafile-10-kB, with a different checksum, fails as expected + filePath := getMintDataDirFilePath("datafile-10-kB") + if filePath == "" { + // Make a temp file with 10 KB data. + file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + if _, err = io.Copy(file, getDataReader("datafile-10-kB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + filePath = file.Name() + } + fileReader := getDataReader("datafile-10-kB") + defer fileReader.Close() + buf10k, err := io.ReadAll(fileReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + otherChecksum := minio.ChecksumCRC32C.ChecksumBytes(buf10k) + + var formBuf bytes.Buffer + writer := multipart.NewWriter(&formBuf) + for k, v := range formData { + if k == "x-amz-checksum-crc32c" { + v = otherChecksum.Encoded() + } + writer.WriteField(k, v) + } + + // Add file to post request + f, err := os.Open(filePath) + defer f.Close() + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + w, err := writer.CreateFormFile("file", filePath) + if err != nil { + logError(testName, function, args, startTime, "", "CreateFormFile failed", err) + return + } + _, err = io.Copy(w, f) + if err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + writer.Close() + + httpClient := &http.Client{ + Timeout: 30 * time.Second, + Transport: createHTTPTransport(), + } + args["url"] = presignedPostPolicyURL.String() + + req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request failed", err) + return + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + 
// Make the POST request with the form data. + res, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request failed", err) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusForbidden { + logError(testName, function, args, startTime, "", "HTTP request unexpected status", errors.New(res.Status)) + return + } + + // Read the response body and ensure it contains the checksum failure message. + resBody, err := io.ReadAll(res.Body) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Normalize the response body, because S3 uses quotes around the policy condition components + // in the error message, while MinIO does not. + resBodyStr := strings.ReplaceAll(string(resBody), `"`, "") + if !strings.Contains(resBodyStr, "Policy Condition failed: [eq, $x-amz-checksum-crc32c, aHnJMw==]") { + logError(testName, function, args, startTime, "", "Unexpected response body", errors.New(resBodyStr)) return } @@ -5857,27 +5472,12 @@ func testCopyObject() { function := "CopyObject(dst, src)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -6052,27 +5652,12 @@ func testSSECEncryptedGetObjectReadSeekFunctional() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -6235,27 +5820,12 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object.
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -6416,27 +5986,12 @@ func testSSECEncryptedGetObjectReadAtFunctional() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -6600,27 +6155,12 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -6785,27 +6325,13 @@ func testSSECEncryptionPutGet() { "objectName": "", "sse": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -6895,27 +6421,13 @@ func testSSECEncryptionFPut() { "contentType": "", "sse": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -7018,27 +6530,13 @@ func testSSES3EncryptionPutGet() { "objectName": "", "sse": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -7126,27 +6624,13 @@ func testSSES3EncryptionFPut() { "contentType": "", "sse": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -7255,26 +6739,12 @@ func testBucketNotification() { return } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - bucketName := os.Getenv("NOTIFY_BUCKET") args["bucketName"] = bucketName @@ -7350,26 +6820,12 @@ func testFunctional() { functionAll := "" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) return } - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -8029,24 +7485,12 @@ func testGetObjectModified() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8125,24 +7569,12 @@ func testPutObjectUploadSeekedObject() { "contentType": "binary/octet-stream", } - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8245,27 +7677,12 @@ func testMakeBucketErrorV2() { "region": "eu-west-1", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") region := "eu-west-1" @@ -8305,27 +7722,12 @@ func testGetObjectClosedTwiceV2() { "region": "eu-west-1", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8396,27 +7798,12 @@ func testFPutObjectV2() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8557,27 +7944,12 @@ func testMakeBucketRegionsV2() { "region": "eu-west-1", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8620,27 +7992,12 @@ func testGetObjectReadSeekFunctionalV2() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. 
- // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8775,27 +8132,12 @@ func testGetObjectReadAtFunctionalV2() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8937,27 +8279,12 @@ func testCopyObjectV2() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -9156,13 +8483,7 @@ func testComposeObjectErrorCasesV2() { function := "ComposeObject(destination, sourceList)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9254,13 +8575,7 @@ func testCompose10KSourcesV2() { function := "ComposeObject(destination, sourceList)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9276,13 +8591,7 @@ func testEncryptedEmptyObject() { function := "PutObject(bucketName, objectName, reader, objectSize, opts)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -9430,7 +8739,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, dstEncryption = sseDst } // 3. 
get copied object and check if content is equal - coreClient := minio.Core{c} + coreClient := minio.Core{Client: c} reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) @@ -9537,13 +8846,7 @@ func testUnencryptedToSSECCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9552,7 +8855,6 @@ func testUnencryptedToSSECCopyObject() { bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst) } @@ -9564,13 +8866,7 @@ func testUnencryptedToSSES3CopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9580,7 +8876,6 @@ func testUnencryptedToSSES3CopyObject() { var sseSrc encrypt.ServerSide sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9592,13 +8887,7 @@ func testUnencryptedToUnencryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9607,7 +8896,6 @@ func testUnencryptedToUnencryptedCopyObject() { bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") var sseSrc, sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9619,13 +8907,7 @@ func testEncryptedSSECToSSECCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9635,7 +8917,6 @@ func 
testEncryptedSSECToSSECCopyObject() { sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9647,13 +8928,7 @@ func testEncryptedSSECToSSES3CopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9663,7 +8938,6 @@ func testEncryptedSSECToSSES3CopyObject() { sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9675,13 +8949,7 @@ func testEncryptedSSECToUnencryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9691,7 +8959,6 @@ func testEncryptedSSECToUnencryptedCopyObject() { sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) var sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9703,13 +8970,7 @@ func testEncryptedSSES3ToSSECCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9719,7 +8980,6 @@ func testEncryptedSSES3ToSSECCopyObject() { sseSrc := encrypt.NewSSE() sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9731,13 +8991,7 @@ func testEncryptedSSES3ToSSES3CopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9747,7 
+9001,6 @@ func testEncryptedSSES3ToSSES3CopyObject() { sseSrc := encrypt.NewSSE() sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9759,13 +9012,7 @@ func testEncryptedSSES3ToUnencryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9775,7 +9022,6 @@ func testEncryptedSSES3ToUnencryptedCopyObject() { sseSrc := encrypt.NewSSE() var sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9787,13 +9033,7 @@ func testEncryptedCopyObjectV2() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9803,7 +9043,6 @@ func testEncryptedCopyObjectV2() { sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9814,13 +9053,7 @@ func testDecryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9874,26 +9107,14 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10072,26 +9293,14 @@ func testSSECEncryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10250,26 +9459,14 @@ func testSSECEncryptedToUnencryptedCopyPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10427,26 +9624,14 @@ func testSSECEncryptedToSSES3CopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10607,26 +9792,14 @@ func testUnencryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10782,26 +9955,14 @@ func testUnencryptedToUnencryptedCopyPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10953,26 +10114,14 @@ func testUnencryptedToSSES3CopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -11126,26 +10275,14 @@ func testSSES3EncryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -11302,26 +10439,14 @@ func testSSES3EncryptedToUnencryptedCopyPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -11474,26 +10599,14 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -11648,19 +10761,12 @@ func testUserMetadataCopying() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // c.TraceOn(os.Stderr) testUserMetadataCopyingWrapper(c) } @@ -11825,19 +10931,12 @@ func testUserMetadataCopyingV2() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } - // c.TraceOn(os.Stderr) testUserMetadataCopyingWrapper(c) } @@ -11848,13 +10947,7 @@ func testStorageClassMetadataPutObject() { args := map[string]interface{}{} testName := getFuncName() - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -11936,13 +11029,7 @@ func testStorageClassInvalidMetadataPutObject() { args := map[string]interface{}{} testName := getFuncName() - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -11979,13 +11066,7 @@ func testStorageClassMetadataCopyObject() { args := map[string]interface{}{} testName := getFuncName() - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - Transport: createHTTPTransport(), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -12106,27 +11187,12 @@ func testPutObjectNoLengthV2() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12182,27 +11248,12 @@ func testPutObjectsUnknownV2() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12273,27 +11324,12 @@ func testPutObject0ByteV2() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
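The deleted `rand.Seed(time.Now().Unix())` calls were redundant: `math/rand.Seed` has been deprecated since Go 1.20, and each test already derives its randomness from a per-call source when it builds bucket names, for example:

```go
// Seed a private source per test instead of the global, deprecated rand.Seed.
rng := rand.NewSource(time.Now().UnixNano())
bucketName := randString(60, rng, "minio-go-test-")
```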
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12338,13 +11374,7 @@ func testComposeObjectErrorCases() { function := "ComposeObject(destination, sourceList)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return @@ -12361,13 +11391,7 @@ func testCompose10KSources() { function := "ComposeObject(destination, sourceList)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return @@ -12385,26 +11409,12 @@ func testFunctionalV2() { functionAll := "" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - Transport: createHTTPTransport(), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") location := "us-east-1" @@ -12838,27 +11848,13 @@ func testGetObjectContext() { "bucketName": "", "objectName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12941,27 +11937,13 @@ func testFGetObjectContext() { "objectName": "", "fileName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13033,24 +12015,12 @@ func testGetObjectRanges() { defer cancel() rng := rand.NewSource(time.Now().UnixNano()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rng, "minio-go-test-") args["bucketName"] = bucketName @@ -13140,27 +12110,13 @@ func testGetObjectACLContext() { "bucketName": "", "objectName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13318,24 +12274,12 @@ func testPutObjectContextV2() { "size": "", "opts": "", } - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13390,27 +12334,13 @@ func testGetObjectContextV2() { "bucketName": "", "objectName": "", } - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13491,27 +12421,13 @@ func testFGetObjectContextV2() { "objectName": "", "fileName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13580,27 +12496,13 @@ func testListObjects() { "objectPrefix": "", "recursive": "true", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13684,24 +12586,12 @@ func testCors() { "cors": "", } - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Create or reuse a bucket that will get cors settings applied to it and deleted when done bucketName := os.Getenv("MINIO_GO_TEST_BUCKET_CORS") if bucketName == "" { @@ -14420,24 +13310,12 @@ func testCorsSetGetDelete() { "cors": "", } - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -14519,27 +13397,13 @@ func testRemoveObjects() { "objectPrefix": "", "recursive": "true", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -14653,27 +13517,13 @@ func testGetBucketTagging() { args := map[string]interface{}{ "bucketName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -14709,27 +13559,13 @@ func testSetBucketTagging() { "bucketName": "", "tags": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -14795,27 +13631,13 @@ func testRemoveBucketTagging() { args := map[string]interface{}{ "bucketName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -14961,6 +13783,7 @@ func main() { testGetObjectReadAtFunctional() testGetObjectReadAtWhenEOFWasReached() testPresignedPostPolicy() + testPresignedPostPolicyWrongFile() testCopyObject() testComposeObjectErrorCases() testCompose10KSources() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go index f1c76c78..787f0a38 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -58,9 +58,10 @@ type WebIdentityResult struct { // WebIdentityToken - web identity token with expiry. type WebIdentityToken struct { - Token string - AccessToken string - Expiry int + Token string + AccessToken string + RefreshToken string + Expiry int } // A STSWebIdentity retrieves credentials from MinIO service, and keeps track if diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 19687e02..26bf441b 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -85,7 +85,7 @@ func (p *PostPolicy) SetExpires(t time.Time) error { // SetKey - Sets an object name for the policy based upload. func (p *PostPolicy) SetKey(key string) error { - if strings.TrimSpace(key) == "" || key == "" { + if strings.TrimSpace(key) == "" { return errInvalidArgument("Object name is empty.") } policyCond := policyCondition{ @@ -118,7 +118,7 @@ func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { // SetBucket - Sets bucket at which objects will be uploaded to. 
func (p *PostPolicy) SetBucket(bucketName string) error { - if strings.TrimSpace(bucketName) == "" || bucketName == "" { + if strings.TrimSpace(bucketName) == "" { return errInvalidArgument("Bucket name is empty.") } policyCond := policyCondition{ @@ -135,7 +135,7 @@ func (p *PostPolicy) SetBucket(bucketName string) error { // SetCondition - Sets condition for credentials, date and algorithm func (p *PostPolicy) SetCondition(matchType, condition, value string) error { - if strings.TrimSpace(value) == "" || value == "" { + if strings.TrimSpace(value) == "" { return errInvalidArgument("No value specified for condition") } @@ -156,7 +156,7 @@ func (p *PostPolicy) SetCondition(matchType, condition, value string) error { // SetTagging - Sets tagging for the object for this policy based upload. func (p *PostPolicy) SetTagging(tagging string) error { - if strings.TrimSpace(tagging) == "" || tagging == "" { + if strings.TrimSpace(tagging) == "" { return errInvalidArgument("No tagging specified.") } _, err := tags.ParseObjectXML(strings.NewReader(tagging)) @@ -178,7 +178,7 @@ func (p *PostPolicy) SetTagging(tagging string) error { // SetContentType - Sets content-type of the object for this policy // based upload. func (p *PostPolicy) SetContentType(contentType string) error { - if strings.TrimSpace(contentType) == "" || contentType == "" { + if strings.TrimSpace(contentType) == "" { return errInvalidArgument("No content type specified.") } policyCond := policyCondition{ @@ -211,7 +211,7 @@ func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) erro // SetContentDisposition - Sets content-disposition of the object for this policy func (p *PostPolicy) SetContentDisposition(contentDisposition string) error { - if strings.TrimSpace(contentDisposition) == "" || contentDisposition == "" { + if strings.TrimSpace(contentDisposition) == "" { return errInvalidArgument("No content disposition specified.") } policyCond := policyCondition{ @@ -226,27 +226,44 @@ func (p *PostPolicy) SetContentDisposition(contentDisposition string) error { return nil } +// SetContentEncoding - Sets content-encoding of the object for this policy +func (p *PostPolicy) SetContentEncoding(contentEncoding string) error { + if strings.TrimSpace(contentEncoding) == "" { + return errInvalidArgument("No content encoding specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Encoding", + value: contentEncoding, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Encoding"] = contentEncoding + return nil +} + // SetContentLengthRange - Set new min and max content length // condition for all incoming uploads. -func (p *PostPolicy) SetContentLengthRange(min, max int64) error { - if min > max { +func (p *PostPolicy) SetContentLengthRange(minLen, maxLen int64) error { + if minLen > maxLen { return errInvalidArgument("Minimum limit is larger than maximum limit.") } - if min < 0 { + if minLen < 0 { return errInvalidArgument("Minimum limit cannot be negative.") } - if max <= 0 { + if maxLen <= 0 { return errInvalidArgument("Maximum limit cannot be non-positive.") } - p.contentLengthRange.min = min - p.contentLengthRange.max = max + p.contentLengthRange.min = minLen + p.contentLengthRange.max = maxLen return nil } // SetSuccessActionRedirect - Sets the redirect success url of the object for this policy // based upload. 
func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { - if strings.TrimSpace(redirect) == "" || redirect == "" { + if strings.TrimSpace(redirect) == "" { return errInvalidArgument("Redirect is empty") } policyCond := policyCondition{ @@ -264,7 +281,7 @@ func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { // SetSuccessStatusAction - Sets the status success code of the object for this policy // based upload. func (p *PostPolicy) SetSuccessStatusAction(status string) error { - if strings.TrimSpace(status) == "" || status == "" { + if strings.TrimSpace(status) == "" { return errInvalidArgument("Status is empty") } policyCond := policyCondition{ @@ -282,10 +299,10 @@ func (p *PostPolicy) SetSuccessStatusAction(status string) error { // SetUserMetadata - Set user metadata as a key/value couple. // Can be retrieved through a HEAD request or an event. func (p *PostPolicy) SetUserMetadata(key, value string) error { - if strings.TrimSpace(key) == "" || key == "" { + if strings.TrimSpace(key) == "" { return errInvalidArgument("Key is empty") } - if strings.TrimSpace(value) == "" || value == "" { + if strings.TrimSpace(value) == "" { return errInvalidArgument("Value is empty") } headerName := fmt.Sprintf("x-amz-meta-%s", key) @@ -304,7 +321,7 @@ func (p *PostPolicy) SetUserMetadata(key, value string) error { // SetUserMetadataStartsWith - Set how an user metadata should starts with. // Can be retrieved through a HEAD request or an event. func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error { - if strings.TrimSpace(key) == "" || key == "" { + if strings.TrimSpace(key) == "" { return errInvalidArgument("Key is empty") } headerName := fmt.Sprintf("x-amz-meta-%s", key) @@ -321,11 +338,29 @@ func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error { } // SetChecksum sets the checksum of the request. -func (p *PostPolicy) SetChecksum(c Checksum) { +func (p *PostPolicy) SetChecksum(c Checksum) error { if c.IsSet() { p.formData[amzChecksumAlgo] = c.Type.String() p.formData[c.Type.Key()] = c.Encoded() + + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", amzChecksumAlgo), + value: c.Type.String(), + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + policyCond = policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", c.Type.Key()), + value: c.Encoded(), + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } } + return nil } // SetEncryption - sets encryption headers for POST API diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go index bfeea95f..81fcf16f 100644 --- a/vendor/github.com/minio/minio-go/v7/retry-continous.go +++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go @@ -20,7 +20,7 @@ package minio import "time" // newRetryTimerContinous creates a timer with exponentially increasing delays forever. 
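The `post-policy.go` hunks above drop the redundant `|| value == ""` checks (already covered by `strings.TrimSpace`), add `SetContentEncoding`, rename the `SetContentLengthRange` parameters from `min`/`max` to `minLen`/`maxLen` (presumably to avoid the Go 1.21 predeclared names), and make `SetChecksum` return an error while also pinning the checksum into the policy conditions. A hedged usage sketch of the updated API, given an existing `*minio.Client` named `c`; the bucket, key, payload, and the `ChecksumCRC32C.ChecksumBytes` helper are illustrative assumptions:

```go
payload := []byte("example contents")

policy := minio.NewPostPolicy()
if err := policy.SetBucket("my-bucket"); err != nil {
	log.Fatal(err)
}
if err := policy.SetKey("uploads/report.gz"); err != nil {
	log.Fatal(err)
}
if err := policy.SetExpires(time.Now().UTC().Add(15 * time.Minute)); err != nil {
	log.Fatal(err)
}
if err := policy.SetContentEncoding("gzip"); err != nil { // new setter
	log.Fatal(err)
}
if err := policy.SetContentLengthRange(1, 10*1024*1024); err != nil {
	log.Fatal(err)
}
// SetChecksum now returns an error and records eq-conditions for the checksum.
if err := policy.SetChecksum(minio.ChecksumCRC32C.ChecksumBytes(payload)); err != nil {
	log.Fatal(err)
}
presignedURL, formData, err := c.PresignedPostPolicy(context.Background(), policy)
if err != nil {
	log.Fatal(err)
}
_, _ = presignedURL, formData
```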
-func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { +func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitter float64, doneCh chan struct{}) <-chan int { attemptCh := make(chan int) // normalize jitter to the range [0, 1.0] @@ -39,10 +39,10 @@ func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, if attempt > maxAttempt { attempt = maxAttempt } - // sleep = random_between(0, min(cap, base * 2 ** attempt)) - sleep := unit * time.Duration(1<<uint(attempt)) - if sleep > cap { - sleep = cap + // sleep = random_between(0, min(maxSleep, base * 2 ** attempt)) + sleep := baseSleep * time.Duration(1<<uint(attempt)) + if sleep > maxSleep { + sleep = maxSleep if jitter != NoJitter { sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go index d15eb590..4cc45920 100644 --- a/vendor/github.com/minio/minio-go/v7/retry.go +++ b/vendor/github.com/minio/minio-go/v7/retry.go @@ -45,7 +45,7 @@ var DefaultRetryCap = time.Second // newRetryTimer creates a timer with exponentially increasing // delays until the maximum retry attempts are reached. -func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time.Duration, jitter float64) <-chan int { +func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, maxSleep time.Duration, jitter float64) <-chan int { attemptCh := make(chan int) // computes the exponential backoff duration according to @@ -59,10 +59,10 @@ func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time jitter = MaxJitter } - // sleep = random_between(0, min(cap, base * 2 ** attempt)) - sleep := unit * time.Duration(1<<uint(attempt)) - if sleep > cap { - sleep = cap + // sleep = random_between(0, min(maxSleep, base * 2 ** attempt)) + sleep := baseSleep * time.Duration(1<<uint(attempt)) + if sleep > maxSleep { + sleep = maxSleep if jitter != NoJitter { sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 0018dc7d..00000000 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields.
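The two retry hunks above only rename `unit`/`cap` to `baseSleep`/`maxSleep`, presumably to stop shadowing the predeclared `cap`; the capped, jittered exponential backoff itself is unchanged. A small self-contained illustration of that formula:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff computes sleep = min(maxSleep, baseSleep * 2^attempt), then trims
// it by up to jitter*sleep, mirroring minio-go's retry timers.
func backoff(attempt int, baseSleep, maxSleep time.Duration, jitter float64, rng *rand.Rand) time.Duration {
	sleep := baseSleep * time.Duration(1<<uint(attempt))
	if sleep > maxSleep {
		sleep = maxSleep
	}
	if jitter > 0 {
		sleep -= time.Duration(rng.Float64() * float64(sleep) * jitter)
	}
	return sleep
}

func main() {
	rng := rand.New(rand.NewSource(1))
	for attempt := 0; attempt < 5; attempt++ {
		fmt.Println(attempt, backoff(attempt, 200*time.Millisecond, 2*time.Second, 0.5, rng))
	}
}
```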
For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index 3a754ca7..00000000 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,279 +0,0 @@ -package mapstructure - -import ( - "encoding" - "errors" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - var f3 DecodeHookFuncValue - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2, f3} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Value, to reflect.Value) (interface{}, error) { - - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from.Type(), to.Type(), from.Interface()) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), from.Interface()) - case DecodeHookFuncValue: - return f(from, to) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. -func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - var err error - data := f.Interface() - - newFrom := f - for _, f1 := range fs { - data, err = DecodeHookExec(f1, newFrom, t) - if err != nil { - return nil, err - } - newFrom = reflect.ValueOf(data) - } - - return data, nil - } -} - -// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. -// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. 
-func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { - return func(a, b reflect.Value) (interface{}, error) { - var allErrs string - var out interface{} - var err error - - for _, f := range ff { - out, err = DecodeHookExec(f, a, b) - if err != nil { - allErrs += err.Error() + "\n" - continue - } - - return out, nil - } - - return nil, errors.New(allErrs) - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. -func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -// StringToIPHookFunc returns a DecodeHookFunc that converts -// strings to net.IP -func StringToIPHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IP{}) { - return data, nil - } - - // Convert it by parsing - ip := net.ParseIP(data.(string)) - if ip == nil { - return net.IP{}, fmt.Errorf("failed parsing ip %v", data) - } - - return ip, nil - } -} - -// StringToIPNetHookFunc returns a DecodeHookFunc that converts -// strings to net.IPNet -func StringToIPNetHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IPNet{}) { - return data, nil - } - - // Convert it by parsing - _, net, err := net.ParseCIDR(data.(string)) - return net, err - } -} - -// StringToTimeHookFunc returns a DecodeHookFunc that converts -// strings to time.Time. -func StringToTimeHookFunc(layout string) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Time{}) { - return data, nil - } - - // Convert it by parsing - return time.Parse(layout, data.(string)) - } -} - -// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to -// the decoder. -// -// Note that this is significantly different from the WeaklyTypedInput option -// of the DecoderConfig. 
-func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } - return "0", nil - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} - -func RecursiveStructToMapHookFunc() DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - if f.Kind() != reflect.Struct { - return f.Interface(), nil - } - - var i interface{} = struct{}{} - if t.Type() != reflect.TypeOf(&i).Elem() { - return f.Interface(), nil - } - - m := make(map[string]interface{}) - t.Set(reflect.ValueOf(m)) - - return f.Interface(), nil - } -} - -// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies -// strings to the UnmarshalText function, when the target type -// implements the encoding.TextUnmarshaler interface -func TextUnmarshallerHookFunc() DecodeHookFuncType { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - result := reflect.New(t).Interface() - unmarshaller, ok := result.(encoding.TextUnmarshaler) - if !ok { - return data, nil - } - if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { - return nil, err - } - return result, nil - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5a..00000000 --- a/vendor/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represents multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) 
- default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 4d4b4aad..7e19eba0 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -7,10 +7,13 @@ import ( "time" ) -type CompareType int +// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it. +type CompareType = compareResult + +type compareResult int const ( - compareLess CompareType = iota - 1 + compareLess compareResult = iota - 1 compareEqual compareGreater ) @@ -39,7 +42,7 @@ var ( bytesType = reflect.TypeOf([]byte{}) ) -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { +func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) { obj1Value := reflect.ValueOf(obj1) obj2Value := reflect.ValueOf(obj2) @@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) } - return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + if timeObj1.Before(timeObj2) { + return compareLess, true + } + if timeObj1.Equal(timeObj2) { + return compareEqual, true + } + return compareGreater, true } case reflect.Slice: { @@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) } - return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true } case reflect.Uintptr: { @@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
+ return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) } -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } @@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare return true } -func containsValue(values []CompareType, value CompareType) bool { +func containsValue(values []compareResult, value compareResult) bool { for _, v := range values { if v == value { return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 3ddab109..19063416 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) 
} +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index a84e09bd..21629087 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { @@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
// // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin return NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. 
// @@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface return NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 00df62a0..1d2f7182 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -6,7 +6,7 @@ import ( ) // isOrdered checks that collection contains orderable elements. -func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { objKind := reflect.TypeOf(object).Kind() if objKind != reflect.Slice && objKind != reflect.Array { return false @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
} // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 0b7570f2..4e91332b 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,9 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - "gopkg.in/yaml.v3" + + // Wrapper around gopkg.in/yaml.v3 + "github.com/stretchr/testify/assert/yaml" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool +// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful +// for table driven tests. +type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool + // Comparison is a custom function that returns true on success and false on failure type Comparison func() (success bool) @@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b h.Helper() } - if !samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + return Fail(t, "Both arguments must be pointers", msgAndArgs...) + } + + if !same { + // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ "expected: %p %#v\n"+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) @@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} h.Helper() } - if samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + //fails when the arguments are not pointers + return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) + } + + if same { return Fail(t, fmt.Sprintf( "Expected and actual point to the same object: %p %#v", expected, expected), msgAndArgs...) 
@@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} return true } -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { +// samePointers checks if two generic interface objects are pointers of the same +// type pointing to the same object. It returns two values: same indicating if +// they are the same type and point to the same object, and ok indicating that +// both inputs are pointers. +func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false + return false, false //not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) if firstType != secondType { - return false + return false, true // both are pointers, but of different types } // compare pointer addresses - return first == second + return first == second, true } // formatUnequalValues takes two values of arbitrary types and returns string @@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { @@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } - if aType.Kind() == reflect.Ptr { - aType = aType.Elem() - } - if bType.Kind() == reflect.Ptr { - bType = bType.Elem() - } - - if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) - } - - if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) - } - expected = copyExportedFields(expected) actual = copyExportedFields(actual) @@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri return msg.String() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + if !isList(t, listA, msgAndArgs...) { + return Fail(t, "listA is not a list type", msgAndArgs...) + } + if !isList(t, listB, msgAndArgs...) { + return Fail(t, "listB is not a list type", msgAndArgs...) 
+ } + + extraA, extraB := diffLists(listA, listB) + if len(extraA) == 0 && len(extraB) == 0 { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + return true +} + // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd if err != nil { return Fail(t, err.Error(), msgAndArgs...) } + if math.IsNaN(actualEpsilon) { + return Fail(t, "relative error is NaN", msgAndArgs...) + } if actualEpsilon > epsilon { return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) @@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { - var r *regexp.Regexp if rr, ok := rx.(*regexp.Regexp); ok { r = rr @@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool { r = regexp.MustCompile(fmt.Sprint(rx)) } - return (r.FindStringIndex(fmt.Sprint(str)) != nil) + switch v := str.(type) { + case []byte: + return r.Match(v) + case string: + return r.MatchString(v) + default: + return r.MatchString(fmt.Sprint(v)) + } } @@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{ MaxDepth: 10, } -type tHelper interface { +type tHelper = interface { Helper() } @@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // CollectT implements the TestingT interface and collects all errors. type CollectT struct { + // A slice of errors. Non-nil slice denotes a failure. + // If it's non-nil but len(c.errors) == 0, this is also a failure + // obtained by direct c.FailNow() call. errors []error } @@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) } -// FailNow panics. -func (*CollectT) FailNow() { - panic("Assertion failed") +// FailNow stops execution by calling runtime.Goexit. +func (c *CollectT) FailNow() { + c.fail() + runtime.Goexit() } // Deprecated: That was a method for internal usage that should not have been published. Now just panics. @@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) { panic("Copy() is deprecated") } +func (c *CollectT) fail() { + if !c.failed() { + c.errors = []error{} // Make it non-nil to mark a failure. + } +} + +func (c *CollectT) failed() bool { + return c.errors != nil +} + // EventuallyWithT asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
In contrast to Eventually, // it supplies a CollectT to the condition function, so that the condition @@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) { // assert.EventuallyWithT(t, func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } var lastFinishedTickErrs []error - ch := make(chan []error, 1) + ch := make(chan *CollectT, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time go func() { collect := new(CollectT) defer func() { - ch <- collect.errors + ch <- collect }() condition(collect) }() - case errs := <-ch: - if len(errs) == 0 { + case collect := <-ch: + if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. - lastFinishedTickErrs = errs + lastFinishedTickErrs = collect.errors tick = ticker.C } } @@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { ), msgAndArgs...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ ), msgAndArgs...) } +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) +} + func buildErrorChainString(err error) string { if err == nil { return "" diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go new file mode 100644 index 00000000..baa0cc7d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -0,0 +1,25 @@ +//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default +// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that calls a pluggable implementation. +// +// This implementation is selected with the testify_yaml_custom build tag. +// +// go test -tags testify_yaml_custom +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]. +// +// In your test package: +// +// import assertYaml "github.com/stretchr/testify/assert/yaml" +// +// func init() { +// assertYaml.Unmarshal = func (in []byte, out interface{}) error { +// // ... 
+// return nil +// } +// } +package yaml + +var Unmarshal func(in []byte, out interface{}) error diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go new file mode 100644 index 00000000..b83c6cf6 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -0,0 +1,37 @@ +//go:build !testify_yaml_fail && !testify_yaml_custom +// +build !testify_yaml_fail,!testify_yaml_custom + +// Package yaml is just an indirection to handle YAML deserialization. +// +// This package is just an indirection that allows the builder to override the +// indirection with an alternative implementation of this package that uses +// another implementation of YAML deserialization. This allows to not either not +// use YAML deserialization at all, or to use another implementation than +// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]). +// +// Alternative implementations are selected using build tags: +// +// - testify_yaml_fail: [Unmarshal] always fails with an error +// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it +// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or +// [github.com/stretchr/testify/assert.YAMLEqf]. +// +// Usage: +// +// go test -tags testify_yaml_fail +// +// You can check with "go list" which implementation is linked: +// +// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// +// [PR #1120]: https://github.com/stretchr/testify/pull/1120 +package yaml + +import goyaml "gopkg.in/yaml.v3" + +// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal]. +func Unmarshal(in []byte, out interface{}) error { + return goyaml.Unmarshal(in, out) +} diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go new file mode 100644 index 00000000..e78f7dfe --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -0,0 +1,18 @@ +//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default +// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that always fail. +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]: +// +// go test -tags testify_yaml_fail +package yaml + +import "errors" + +var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)") + +func Unmarshal([]byte, interface{}) error { + return errNotImplemented +} diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 506a82f8..d8921950 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -34,9 +34,9 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. 
// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") +// require.Contains(t, "Hello World", "World") +// require.Contains(t, ["Hello", "World"], "World") +// require.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -50,9 +50,9 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +// require.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// require.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// require.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -91,7 +91,7 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +// require.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -106,7 +106,7 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +// require.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -120,7 +120,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Empty(t, obj) +// require.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -134,7 +134,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Emptyf(t, obj, "error message %s", "formatted") +// require.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -147,7 +147,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // Equal asserts that two objects are equal. 
// -// assert.Equal(t, 123, 123) +// require.Equal(t, 123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -166,7 +166,7 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) +// require.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -181,7 +181,7 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +// require.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -200,8 +200,8 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // Exported int // notExported int // } -// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true -// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false +// require.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true +// require.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -220,8 +220,8 @@ func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, m // Exported int // notExported int // } -// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false +// require.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true +// require.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -232,10 +232,10 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // -// assert.EqualValues(t, uint32(123), int32(123)) +// require.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -246,10 +246,10 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
// -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// require.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -262,7 +262,7 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Equalf asserts that two objects are equal. // -// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// require.Equalf(t, 123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -280,8 +280,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) +// if require.Error(t, err) { +// require.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -321,7 +321,7 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContains(t, err, expectedErrorSubString) +// require.ErrorContains(t, err, expectedErrorSubString) func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -336,7 +336,7 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +// require.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -374,8 +374,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) +// if require.Errorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedErrorf, err) // } func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -390,7 +390,7 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
// -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +// require.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -415,10 +415,10 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithT(t, func(c *assert.CollectT) { +// require.EventuallyWithT(t, func(c *require.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -443,10 +443,10 @@ func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitF // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { +// require.EventuallyWithTf(t, func(c *require.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -460,7 +460,7 @@ func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), wait // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -473,7 +473,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123)) +// require.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -486,7 +486,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // Exactlyf asserts that two objects are equal in value and type. 
// -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// require.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -543,7 +543,7 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { // False asserts that the specified value is false. // -// assert.False(t, myBool) +// require.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -556,7 +556,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { // Falsef asserts that the specified value is false. // -// assert.Falsef(t, myBool, "error message %s", "formatted") +// require.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -593,9 +593,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { // Greater asserts that the first element is greater than the second // -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") +// require.Greater(t, 2, 1) +// require.Greater(t, float64(2), float64(1)) +// require.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -608,10 +608,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") +// require.GreaterOrEqual(t, 2, 1) +// require.GreaterOrEqual(t, 2, 2) +// require.GreaterOrEqual(t, "b", "a") +// require.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -624,10 +624,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -640,9 +640,9 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // Greaterf asserts that the first element is greater than the second // -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +// require.Greaterf(t, 2, 1, "error message %s", "formatted") +// require.Greaterf(t, float64(2), float64(1), "error 
message %s", "formatted") +// require.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -656,7 +656,7 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -672,7 +672,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -688,7 +688,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -704,7 +704,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -719,7 +719,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // HTTPError asserts that a specified handler returns an error status code. // -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). 
func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -734,7 +734,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPErrorf asserts that a specified handler returns an error status code. // -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -749,7 +749,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPRedirect asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -764,7 +764,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -779,7 +779,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// require.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { @@ -794,7 +794,7 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url str // HTTPStatusCodef asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// require.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { @@ -809,7 +809,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st // HTTPSuccess asserts that a specified handler returns a success status code. // -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// require.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). 
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -824,7 +824,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string // HTTPSuccessf asserts that a specified handler returns a success status code. // -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// require.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -839,7 +839,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // Implements asserts that an object is implemented by the specified interface. // -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +// require.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -852,7 +852,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -865,7 +865,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +// require.InDelta(t, math.Pi, 22/7.0, 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -922,7 +922,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // InDeltaf asserts that the two numerals are within delta of each other. 
// -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// require.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -979,9 +979,9 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl // IsDecreasing asserts that the collection is decreasing // -// assert.IsDecreasing(t, []int{2, 1, 0}) -// assert.IsDecreasing(t, []float{2, 1}) -// assert.IsDecreasing(t, []string{"b", "a"}) +// require.IsDecreasing(t, []int{2, 1, 0}) +// require.IsDecreasing(t, []float{2, 1}) +// require.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -994,9 +994,9 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsDecreasingf asserts that the collection is decreasing // -// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// require.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1009,9 +1009,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface // IsIncreasing asserts that the collection is increasing // -// assert.IsIncreasing(t, []int{1, 2, 3}) -// assert.IsIncreasing(t, []float{1, 2}) -// assert.IsIncreasing(t, []string{"a", "b"}) +// require.IsIncreasing(t, []int{1, 2, 3}) +// require.IsIncreasing(t, []float{1, 2}) +// require.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1024,9 +1024,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsIncreasingf asserts that the collection is increasing // -// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// require.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1039,9 +1039,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface // IsNonDecreasing asserts that the collection is not decreasing // -// assert.IsNonDecreasing(t, []int{1, 1, 2}) -// assert.IsNonDecreasing(t, []float{1, 2}) -// assert.IsNonDecreasing(t, []string{"a", "b"}) +// require.IsNonDecreasing(t, []int{1, 1, 2}) +// require.IsNonDecreasing(t, []float{1, 2}) +// require.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1054,9 +1054,9 @@ func 
IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonDecreasingf asserts that the collection is not decreasing // -// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1069,9 +1069,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf // IsNonIncreasing asserts that the collection is not increasing // -// assert.IsNonIncreasing(t, []int{2, 1, 1}) -// assert.IsNonIncreasing(t, []float{2, 1}) -// assert.IsNonIncreasing(t, []string{"b", "a"}) +// require.IsNonIncreasing(t, []int{2, 1, 1}) +// require.IsNonIncreasing(t, []float{2, 1}) +// require.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1084,9 +1084,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonIncreasingf asserts that the collection is not increasing // -// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1121,7 +1121,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin // JSONEq asserts that two JSON strings are equivalent. // -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// require.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1134,7 +1134,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ // JSONEqf asserts that two JSON strings are equivalent. // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// require.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1148,7 +1148,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. 
// -// assert.Len(t, mySlice, 3) +// require.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1162,7 +1162,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +// require.Lenf(t, mySlice, 3, "error message %s", "formatted") func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1175,9 +1175,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // Less asserts that the first element is less than the second // -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") +// require.Less(t, 1, 2) +// require.Less(t, float64(1), float64(2)) +// require.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1190,10 +1190,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // LessOrEqual asserts that the first element is less than or equal to the second // -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") +// require.LessOrEqual(t, 1, 2) +// require.LessOrEqual(t, 2, 2) +// require.LessOrEqual(t, "a", "b") +// require.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1206,10 +1206,10 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// require.LessOrEqualf(t, "b", "b", "error message %s", "formatted") func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1222,9 +1222,9 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . 
// Lessf asserts that the first element is less than the second // -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") -// assert.Lessf(t, "a", "b", "error message %s", "formatted") +// require.Lessf(t, 1, 2, "error message %s", "formatted") +// require.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// require.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1237,8 +1237,8 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter // Negative asserts that the specified element is negative // -// assert.Negative(t, -1) -// assert.Negative(t, -1.23) +// require.Negative(t, -1) +// require.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1251,8 +1251,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Negativef asserts that the specified element is negative // -// assert.Negativef(t, -1, "error message %s", "formatted") -// assert.Negativef(t, -1.23, "error message %s", "formatted") +// require.Negativef(t, -1, "error message %s", "formatted") +// require.Negativef(t, -1.23, "error message %s", "formatted") func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1266,7 +1266,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +// require.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1280,7 +1280,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1293,7 +1293,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. // Nil asserts that the specified object is nil. // -// assert.Nil(t, err) +// require.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1306,7 +1306,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Nilf asserts that the specified object is nil. 
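As a usage-level sketch of the fail-fast pattern these require wrappers implement (each forwards to its assert counterpart and calls t.FailNow() on failure), here is a minimal test; loadConfig and its return shape are hypothetical, invented only for illustration:

package server_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// loadConfig is a hypothetical helper used only to illustrate the
// fail-fast behaviour of the require wrappers.
func loadConfig(path string) (map[string]string, error) {
	return map[string]string{"addr": ":8080"}, nil
}

func TestLoadConfig(t *testing.T) {
	cfg, err := loadConfig("testdata/app.conf")

	// Each require call forwards to the assert implementation and, on
	// failure, calls t.FailNow(), so the lines below can safely assume
	// the earlier checks passed.
	require.NoError(t, err)
	require.NotNil(t, cfg)
	require.Len(t, cfg, 1)
}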
// -// assert.Nilf(t, err, "error message %s", "formatted") +// require.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1344,8 +1344,8 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoError(t, err) { +// require.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1360,8 +1360,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // NoErrorf asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoErrorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1400,9 +1400,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") +// require.NotContains(t, "Hello World", "Earth") +// require.NotContains(t, ["Hello", "World"], "Earth") +// require.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1416,9 +1416,9 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +// require.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// require.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// require.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1429,11 +1429,51 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a t.FailNow() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// require.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatch(t, listA, listB, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// require.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatchf(t, listA, listB, msg, args...) { + return + } + t.FailNow() +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmpty(t, obj) { +// require.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1448,8 +1488,8 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmptyf(t, obj, "error message %s", "formatted") { +// require.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1463,7 +1503,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2) +// require.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1479,7 +1519,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . 
// NotEqualValues asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValues(t, obj1, obj2) +// require.NotEqualValues(t, obj1, obj2) func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1492,7 +1532,7 @@ func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAnd // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1505,7 +1545,7 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s // NotEqualf asserts that the specified values are NOT equal. // -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualf(t, obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1519,7 +1559,31 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAs(t, err, target, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1531,7 +1595,7 @@ func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) t.FailNow() } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1545,7 +1609,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf // NotImplements asserts that an object does not implement the specified interface. // -// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +// require.NotImplements(t, (*MyInterface)(nil), new(MyObject)) func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1558,7 +1622,7 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, // NotImplementsf asserts that an object does not implement the specified interface. 
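The NotErrorAs/NotErrorAsf helpers above are new in this testify bump; a minimal sketch of how they might be used, where timeoutError is a hypothetical error type introduced only for the example:

package server_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// timeoutError is a hypothetical error type used only for illustration.
type timeoutError struct{ msg string }

func (e *timeoutError) Error() string { return e.msg }

func TestChainHasNoTimeout(t *testing.T) {
	err := fmt.Errorf("request failed: %w", errors.New("connection reset"))

	var target *timeoutError
	// Passes because nothing in err's chain is a *timeoutError.
	// If one were present, the assertion would fail and, being the
	// require variant, stop the test immediately via FailNow.
	require.NotErrorAs(t, err, &target)
}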
// -// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1571,7 +1635,7 @@ func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err) +// require.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1584,7 +1648,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotNilf asserts that the specified object is not nil. // -// assert.NotNilf(t, err, "error message %s", "formatted") +// require.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1597,7 +1661,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ RemainCalm() }) +// require.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1610,7 +1674,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +// require.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1623,8 +1687,8 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // NotRegexp asserts that a specified regexp does not match a string. // -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") +// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// require.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1637,8 +1701,8 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +// require.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// require.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1651,7 +1715,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // NotSame asserts that two pointers do not reference the same object. // -// assert.NotSame(t, ptr1, ptr2) +// require.NotSame(t, ptr1, ptr2) // // Both arguments must be pointer variables. 
Pointer variable sameness is // determined based on the equality of both type and value. @@ -1667,7 +1731,7 @@ func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // NotSamef asserts that two pointers do not reference the same object. // -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// require.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1685,8 +1749,8 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2]) -// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], [1, 2]) +// require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1701,8 +1765,8 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") -// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1737,7 +1801,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ GoCrazy() }) +// require.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1752,7 +1816,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1767,7 +1831,7 @@ func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAn // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1781,7 +1845,7 @@ func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. 
// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1795,7 +1859,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1808,7 +1872,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, // Panicsf asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +// require.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1821,8 +1885,8 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // Positive asserts that the specified element is positive // -// assert.Positive(t, 1) -// assert.Positive(t, 1.23) +// require.Positive(t, 1) +// require.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1835,8 +1899,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Positivef asserts that the specified element is positive // -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") +// require.Positivef(t, 1, "error message %s", "formatted") +// require.Positivef(t, 1.23, "error message %s", "formatted") func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1849,8 +1913,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { // Regexp asserts that a specified regexp matches a string. // -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") +// require.Regexp(t, regexp.MustCompile("start"), "it's starting") +// require.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1863,8 +1927,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // Regexpf asserts that a specified regexp matches a string. 
// -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +// require.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// require.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1877,7 +1941,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // Same asserts that two pointers reference the same object. // -// assert.Same(t, ptr1, ptr2) +// require.Same(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1893,7 +1957,7 @@ func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...in // Samef asserts that two pointers reference the same object. // -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// require.Samef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1910,8 +1974,8 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg // Subset asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2]) -// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], [1, 2]) +// require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1925,8 +1989,8 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte // Subsetf asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") -// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1939,7 +2003,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // True asserts that the specified value is true. // -// assert.True(t, myBool) +// require.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1952,7 +2016,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { // Truef asserts that the specified value is true. // -// assert.Truef(t, myBool, "error message %s", "formatted") +// require.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1965,7 +2029,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { // WithinDuration asserts that the two times are within duration delta of each other. 
// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +// require.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1978,7 +2042,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time // WithinDurationf asserts that the two times are within duration delta of each other. // -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// require.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1991,7 +2055,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim // WithinRange asserts that a time is within a time range (inclusive). // -// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// require.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -2004,7 +2068,7 @@ func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, m // WithinRangef asserts that a time is within a time range (inclusive). // -// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// require.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl index 55e42dde..8b328368 100644 --- a/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -1,4 +1,4 @@ -{{.Comment}} +{{ replace .Comment "assert." "require."}} func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { if h, ok := t.(tHelper); ok { h.Helper() } if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index eee8310a..1bd87304 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -187,8 +187,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { @@ -198,8 +198,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) 
} -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { @@ -337,7 +337,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -362,7 +362,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), w // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1129,6 +1129,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. 
not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -1201,7 +1235,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -1210,7 +1262,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index 91772dfe..6b7ce929 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -6,7 +6,7 @@ type TestingT interface { FailNow() } -type tHelper interface { +type tHelper = interface { Helper() } diff --git a/vendor/github.com/urfave/cli/v2/help.go b/vendor/github.com/urfave/cli/v2/help.go index 874be941..f1b9e7f1 100644 --- a/vendor/github.com/urfave/cli/v2/help.go +++ b/vendor/github.com/urfave/cli/v2/help.go @@ -150,7 +150,7 @@ func printCommandSuggestions(commands []*Command, writer io.Writer) { if command.Hidden { continue } - if strings.HasSuffix(os.Getenv("SHELL"), "zsh") { + if strings.HasSuffix(os.Getenv("0"), "zsh") { for _, name := range command.Names() { _, _ = fmt.Fprintf(writer, "%s:%s\n", name, command.Usage) } diff --git a/vendor/github.com/vektah/gqlparser/v2/.go-version b/vendor/github.com/vektah/gqlparser/v2/.go-version index 66e2ae6c..d28b1eb8 100644 --- a/vendor/github.com/vektah/gqlparser/v2/.go-version +++ b/vendor/github.com/vektah/gqlparser/v2/.go-version @@ -1 +1 @@ -1.19.1 +1.22.9 diff --git a/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go b/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go index 483a086d..45a0400a 100644 --- a/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go +++ b/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go @@ -1,10 +1,10 @@ package gqlerror import ( - "bytes" "errors" "fmt" "strconv" + "strings" "github.com/vektah/gqlparser/v2/ast" ) @@ -38,7 +38,7 @@ type Location struct { type List []*Error func (err *Error) Error() string { - var res bytes.Buffer + var res strings.Builder if err == nil { return "" } @@ -80,7 +80,7 @@ func (err *Error) AsError() error { } func (errs 
List) Error() string { - var buf bytes.Buffer + var buf strings.Builder for _, err := range errs { buf.WriteString(err.Error()) buf.WriteByte('\n') diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go index 7e023090..bbf391fe 100644 --- a/vendor/golang.org/x/crypto/sha3/doc.go +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -5,6 +5,10 @@ // Package sha3 implements the SHA-3 fixed-output-length hash functions and // the SHAKE variable-output-length hash functions defined by FIPS-202. // +// All types in this package also implement [encoding.BinaryMarshaler], +// [encoding.BinaryAppender] and [encoding.BinaryUnmarshaler] to marshal and +// unmarshal the internal state of the hash. +// // Both types of hash function use the "sponge" construction and the Keccak // permutation. For a detailed specification see http://keccak.noekeon.org/ // diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go index c544b29e..31fffbe0 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes.go +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -48,33 +48,52 @@ func init() { crypto.RegisterHash(crypto.SHA3_512, New512) } +const ( + dsbyteSHA3 = 0b00000110 + dsbyteKeccak = 0b00000001 + dsbyteShake = 0b00011111 + dsbyteCShake = 0b00000100 + + // rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in + // bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits. + rateK256 = (1600 - 256) / 8 + rateK448 = (1600 - 448) / 8 + rateK512 = (1600 - 512) / 8 + rateK768 = (1600 - 768) / 8 + rateK1024 = (1600 - 1024) / 8 +) + func new224Generic() *state { - return &state{rate: 144, outputLen: 28, dsbyte: 0x06} + return &state{rate: rateK448, outputLen: 28, dsbyte: dsbyteSHA3} } func new256Generic() *state { - return &state{rate: 136, outputLen: 32, dsbyte: 0x06} + return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteSHA3} } func new384Generic() *state { - return &state{rate: 104, outputLen: 48, dsbyte: 0x06} + return &state{rate: rateK768, outputLen: 48, dsbyte: dsbyteSHA3} } func new512Generic() *state { - return &state{rate: 72, outputLen: 64, dsbyte: 0x06} + return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteSHA3} } // NewLegacyKeccak256 creates a new Keccak-256 hash. // // Only use this function if you require compatibility with an existing cryptosystem // that uses non-standard padding. All other users should use New256 instead. -func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} } +func NewLegacyKeccak256() hash.Hash { + return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak} +} // NewLegacyKeccak512 creates a new Keccak-512 hash. // // Only use this function if you require compatibility with an existing cryptosystem // that uses non-standard padding. All other users should use New512 instead. -func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} } +func NewLegacyKeccak512() hash.Hash { + return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak} +} // Sum224 returns the SHA3-224 digest of the data. 
func Sum224(data []byte) (digest [28]byte) { diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go index afedde5a..6658c444 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3.go +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -4,6 +4,15 @@ package sha3 +import ( + "crypto/subtle" + "encoding/binary" + "errors" + "unsafe" + + "golang.org/x/sys/cpu" +) + // spongeDirection indicates the direction bytes are flowing through the sponge. type spongeDirection int @@ -14,16 +23,13 @@ const ( spongeSqueezing ) -const ( - // maxRate is the maximum size of the internal buffer. SHAKE-256 - // currently needs the largest buffer. - maxRate = 168 -) - type state struct { - // Generic sponge components. - a [25]uint64 // main state of the hash - rate int // the number of bytes of state to use + a [1600 / 8]byte // main state of the hash + + // a[n:rate] is the buffer. If absorbing, it's the remaining space to XOR + // into before running the permutation. If squeezing, it's the remaining + // output to produce before running the permutation. + n, rate int // dsbyte contains the "domain separation" bits and the first bit of // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the @@ -39,10 +45,6 @@ type state struct { // Extendable-Output Functions (May 2014)" dsbyte byte - i, n int // storage[i:n] is the buffer, i is only used while squeezing - storage [maxRate]byte - - // Specific to SHA-3 and SHAKE. outputLen int // the default output size in bytes state spongeDirection // whether the sponge is absorbing or squeezing } @@ -61,7 +63,7 @@ func (d *state) Reset() { d.a[i] = 0 } d.state = spongeAbsorbing - d.i, d.n = 0, 0 + d.n = 0 } func (d *state) clone() *state { @@ -69,22 +71,25 @@ func (d *state) clone() *state { return &ret } -// permute applies the KeccakF-1600 permutation. It handles -// any input-output buffering. +// permute applies the KeccakF-1600 permutation. func (d *state) permute() { - switch d.state { - case spongeAbsorbing: - // If we're absorbing, we need to xor the input into the state - // before applying the permutation. - xorIn(d, d.storage[:d.rate]) - d.n = 0 - keccakF1600(&d.a) - case spongeSqueezing: - // If we're squeezing, we need to apply the permutation before - // copying more output. - keccakF1600(&d.a) - d.i = 0 - copyOut(d, d.storage[:d.rate]) + var a *[25]uint64 + if cpu.IsBigEndian { + a = new([25]uint64) + for i := range a { + a[i] = binary.LittleEndian.Uint64(d.a[i*8:]) + } + } else { + a = (*[25]uint64)(unsafe.Pointer(&d.a)) + } + + keccakF1600(a) + d.n = 0 + + if cpu.IsBigEndian { + for i := range a { + binary.LittleEndian.PutUint64(d.a[i*8:], a[i]) + } } } @@ -92,53 +97,36 @@ func (d *state) permute() { // the multi-bitrate 10..1 padding rule, and permutes the state. func (d *state) padAndPermute() { // Pad with this instance's domain-separator bits. We know that there's - // at least one byte of space in d.buf because, if it were full, + // at least one byte of space in the sponge because, if it were full, // permute would have been called to empty it. dsbyte also contains the // first one bit for the padding. See the comment in the state struct. - d.storage[d.n] = d.dsbyte - d.n++ - for d.n < d.rate { - d.storage[d.n] = 0 - d.n++ - } + d.a[d.n] ^= d.dsbyte // This adds the final one bit for the padding. Because of the way that // bits are numbered from the LSB upwards, the final bit is the MSB of // the last byte. 
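The byte-backed sponge state introduced above is also what the MarshalBinary/UnmarshalBinary methods added further down in this file serialize. A rough usage sketch, assuming (per the updated doc.go) that the hash returned by sha3.New256 implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler:

package main

import (
	"bytes"
	"encoding"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	h := sha3.New256()
	h.Write([]byte("first half "))

	// Snapshot the sponge state mid-stream (assumes the concrete type
	// behind hash.Hash implements encoding.BinaryMarshaler, as the
	// updated doc.go states).
	saved, err := h.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Restore the snapshot into a fresh hash and finish writing there.
	h2 := sha3.New256()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(saved); err != nil {
		panic(err)
	}
	h2.Write([]byte("second half"))

	// The resumed hash matches hashing the whole input in one shot.
	want := sha3.Sum256([]byte("first half second half"))
	fmt.Println(bytes.Equal(h2.Sum(nil), want[:])) // true
}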
- d.storage[d.rate-1] ^= 0x80 + d.a[d.rate-1] ^= 0x80 // Apply the permutation d.permute() d.state = spongeSqueezing - d.n = d.rate - copyOut(d, d.storage[:d.rate]) } // Write absorbs more data into the hash's state. It panics if any // output has already been read. -func (d *state) Write(p []byte) (written int, err error) { +func (d *state) Write(p []byte) (n int, err error) { if d.state != spongeAbsorbing { panic("sha3: Write after Read") } - written = len(p) + + n = len(p) for len(p) > 0 { - if d.n == 0 && len(p) >= d.rate { - // The fast path; absorb a full "rate" bytes of input and apply the permutation. - xorIn(d, p[:d.rate]) - p = p[d.rate:] - keccakF1600(&d.a) - } else { - // The slow path; buffer the input until we can fill the sponge, and then xor it in. - todo := d.rate - d.n - if todo > len(p) { - todo = len(p) - } - d.n += copy(d.storage[d.n:], p[:todo]) - p = p[todo:] - - // If the sponge is full, apply the permutation. - if d.n == d.rate { - d.permute() - } + x := subtle.XORBytes(d.a[d.n:d.rate], d.a[d.n:d.rate], p) + d.n += x + p = p[x:] + + // If the sponge is full, apply the permutation. + if d.n == d.rate { + d.permute() } } @@ -156,14 +144,14 @@ func (d *state) Read(out []byte) (n int, err error) { // Now, do the squeezing. for len(out) > 0 { - n := copy(out, d.storage[d.i:d.n]) - d.i += n - out = out[n:] - // Apply the permutation if we've squeezed the sponge dry. - if d.i == d.rate { + if d.n == d.rate { d.permute() } + + x := copy(out, d.a[d.n:d.rate]) + d.n += x + out = out[x:] } return @@ -183,3 +171,74 @@ func (d *state) Sum(in []byte) []byte { dup.Read(hash) return append(in, hash...) } + +const ( + magicSHA3 = "sha\x08" + magicShake = "sha\x09" + magicCShake = "sha\x0a" + magicKeccak = "sha\x0b" + // magic || rate || main state || n || sponge direction + marshaledSize = len(magicSHA3) + 1 + 200 + 1 + 1 +) + +func (d *state) MarshalBinary() ([]byte, error) { + return d.AppendBinary(make([]byte, 0, marshaledSize)) +} + +func (d *state) AppendBinary(b []byte) ([]byte, error) { + switch d.dsbyte { + case dsbyteSHA3: + b = append(b, magicSHA3...) + case dsbyteShake: + b = append(b, magicShake...) + case dsbyteCShake: + b = append(b, magicCShake...) + case dsbyteKeccak: + b = append(b, magicKeccak...) + default: + panic("unknown dsbyte") + } + // rate is at most 168, and n is at most rate. + b = append(b, byte(d.rate)) + b = append(b, d.a[:]...) 
+ b = append(b, byte(d.n), byte(d.state)) + return b, nil +} + +func (d *state) UnmarshalBinary(b []byte) error { + if len(b) != marshaledSize { + return errors.New("sha3: invalid hash state") + } + + magic := string(b[:len(magicSHA3)]) + b = b[len(magicSHA3):] + switch { + case magic == magicSHA3 && d.dsbyte == dsbyteSHA3: + case magic == magicShake && d.dsbyte == dsbyteShake: + case magic == magicCShake && d.dsbyte == dsbyteCShake: + case magic == magicKeccak && d.dsbyte == dsbyteKeccak: + default: + return errors.New("sha3: invalid hash state identifier") + } + + rate := int(b[0]) + b = b[1:] + if rate != d.rate { + return errors.New("sha3: invalid hash state function") + } + + copy(d.a[:], b) + b = b[len(d.a):] + + n, state := int(b[0]), spongeDirection(b[1]) + if n > d.rate { + return errors.New("sha3: invalid hash state") + } + d.n = n + if state != spongeAbsorbing && state != spongeSqueezing { + return errors.New("sha3: invalid hash state") + } + d.state = state + + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index a01ef435..a6b3a428 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -16,9 +16,12 @@ package sha3 // [2] https://doi.org/10.6028/NIST.SP.800-185 import ( + "bytes" "encoding/binary" + "errors" "hash" "io" + "math/bits" ) // ShakeHash defines the interface to hash functions that support @@ -50,41 +53,33 @@ type cshakeState struct { initBlock []byte } -// Consts for configuring initial SHA-3 state -const ( - dsbyteShake = 0x1f - dsbyteCShake = 0x04 - rate128 = 168 - rate256 = 136 -) +func bytepad(data []byte, rate int) []byte { + out := make([]byte, 0, 9+len(data)+rate-1) + out = append(out, leftEncode(uint64(rate))...) + out = append(out, data...) + if padlen := rate - len(out)%rate; padlen < rate { + out = append(out, make([]byte, padlen)...) + } + return out +} -func bytepad(input []byte, w int) []byte { - // leftEncode always returns max 9 bytes - buf := make([]byte, 0, 9+len(input)+w) - buf = append(buf, leftEncode(uint64(w))...) - buf = append(buf, input...) - padlen := w - (len(buf) % w) - return append(buf, make([]byte, padlen)...) -} - -func leftEncode(value uint64) []byte { - var b [9]byte - binary.BigEndian.PutUint64(b[1:], value) - // Trim all but last leading zero bytes - i := byte(1) - for i < 8 && b[i] == 0 { - i++ +func leftEncode(x uint64) []byte { + // Let n be the smallest positive integer for which 2^(8n) > x. + n := (bits.Len64(x) + 7) / 8 + if n == 0 { + n = 1 } - // Prepend number of encoded bytes - b[i-1] = 9 - i - return b[i-1:] + // Return n || x with n as a byte and x an n bytes in big-endian order. + b := make([]byte, 9) + binary.BigEndian.PutUint64(b[1:], x) + b = b[9-n-1:] + b[0] = byte(n) + return b } func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}} - - // leftEncode returns max 9 bytes - c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) + c.initBlock = make([]byte, 0, 9+len(N)+9+len(S)) // leftEncode returns max 9 bytes c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...) c.initBlock = append(c.initBlock, N...) c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...) 
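For reference, a standalone sketch of the left_encode rule (NIST SP 800-185) that the rewritten helper above implements: a length byte n followed by the value x in n big-endian bytes. The sample values can be checked by hand:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// leftEncode mirrors the rewritten helper in the diff above.
func leftEncode(x uint64) []byte {
	// n is the smallest number of bytes needed to represent x (at least 1).
	n := (bits.Len64(x) + 7) / 8
	if n == 0 {
		n = 1
	}
	// Emit n, then x in n big-endian bytes.
	b := make([]byte, 9)
	binary.BigEndian.PutUint64(b[1:], x)
	b = b[9-n-1:]
	b[0] = byte(n)
	return b
}

func main() {
	fmt.Println(leftEncode(0))   // [1 0]
	fmt.Println(leftEncode(168)) // [1 168] — rateK256, the SHAKE128 rate in bytes
	fmt.Println(leftEncode(256)) // [2 1 0] — 256 = 0x0100
}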
@@ -111,6 +106,30 @@ func (c *state) Clone() ShakeHash { return c.clone() } +func (c *cshakeState) MarshalBinary() ([]byte, error) { + return c.AppendBinary(make([]byte, 0, marshaledSize+len(c.initBlock))) +} + +func (c *cshakeState) AppendBinary(b []byte) ([]byte, error) { + b, err := c.state.AppendBinary(b) + if err != nil { + return nil, err + } + b = append(b, c.initBlock...) + return b, nil +} + +func (c *cshakeState) UnmarshalBinary(b []byte) error { + if len(b) <= marshaledSize { + return errors.New("sha3: invalid hash state") + } + if err := c.state.UnmarshalBinary(b[:marshaledSize]); err != nil { + return err + } + c.initBlock = bytes.Clone(b[marshaledSize:]) + return nil +} + // NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. // Its generic security strength is 128 bits against all attacks if at // least 32 bytes of its output are used. @@ -126,11 +145,11 @@ func NewShake256() ShakeHash { } func newShake128Generic() *state { - return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} + return &state{rate: rateK256, outputLen: 32, dsbyte: dsbyteShake} } func newShake256Generic() *state { - return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake} + return &state{rate: rateK512, outputLen: 64, dsbyte: dsbyteShake} } // NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, @@ -143,7 +162,7 @@ func NewCShake128(N, S []byte) ShakeHash { if len(N) == 0 && len(S) == 0 { return NewShake128() } - return newCShake(N, S, rate128, 32, dsbyteCShake) + return newCShake(N, S, rateK256, 32, dsbyteCShake) } // NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, @@ -156,7 +175,7 @@ func NewCShake256(N, S []byte) ShakeHash { if len(N) == 0 && len(S) == 0 { return NewShake256() } - return newCShake(N, S, rate256, 64, dsbyteCShake) + return newCShake(N, S, rateK512, 64, dsbyteCShake) } // ShakeSum128 writes an arbitrary-length digest of data into hash. diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go deleted file mode 100644 index 6ada5c95..00000000 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -import ( - "crypto/subtle" - "encoding/binary" - "unsafe" - - "golang.org/x/sys/cpu" -) - -// xorIn xors the bytes in buf into the state. -func xorIn(d *state, buf []byte) { - if cpu.IsBigEndian { - for i := 0; len(buf) >= 8; i++ { - a := binary.LittleEndian.Uint64(buf) - d.a[i] ^= a - buf = buf[8:] - } - } else { - ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) - subtle.XORBytes(ab[:], ab[:], buf) - } -} - -// copyOut copies uint64s to a byte buffer. -func copyOut(d *state, b []byte) { - if cpu.IsBigEndian { - for i := 0; len(b) >= 8; i++ { - binary.LittleEndian.PutUint64(b, d.a[i]) - b = b[8:] - } - } else { - ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) - copy(b, ab[:]) - } -} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index 3a7e5ab1..885c4c59 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -78,16 +78,11 @@ example, to process each anchor node in depth-first order: if err != nil { // ... } - var f func(*html.Node) - f = func(n *html.Node) { + for n := range doc.Descendants() { if n.Type == html.ElementNode && n.Data == "a" { // Do something with n... 
} - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } } - f(doc) The relevant specifications include: https://html.spec.whatwg.org/multipage/syntax.html and diff --git a/vendor/golang.org/x/net/html/iter.go b/vendor/golang.org/x/net/html/iter.go new file mode 100644 index 00000000..54be8fd3 --- /dev/null +++ b/vendor/golang.org/x/net/html/iter.go @@ -0,0 +1,56 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package html + +import "iter" + +// Ancestors returns an iterator over the ancestors of n, starting with n.Parent. +// +// Mutating a Node or its parents while iterating may have unexpected results. +func (n *Node) Ancestors() iter.Seq[*Node] { + _ = n.Parent // eager nil check + + return func(yield func(*Node) bool) { + for p := n.Parent; p != nil && yield(p); p = p.Parent { + } + } +} + +// ChildNodes returns an iterator over the immediate children of n, +// starting with n.FirstChild. +// +// Mutating a Node or its children while iterating may have unexpected results. +func (n *Node) ChildNodes() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + for c := n.FirstChild; c != nil && yield(c); c = c.NextSibling { + } + } + +} + +// Descendants returns an iterator over all nodes recursively beneath +// n, excluding n itself. Nodes are visited in depth-first preorder. +// +// Mutating a Node or its descendants while iterating may have unexpected results. +func (n *Node) Descendants() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + n.descendants(yield) + } +} + +func (n *Node) descendants(yield func(*Node) bool) bool { + for c := range n.ChildNodes() { + if !yield(c) || !c.descendants(yield) { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go index 1350eef2..77741a19 100644 --- a/vendor/golang.org/x/net/html/node.go +++ b/vendor/golang.org/x/net/html/node.go @@ -38,6 +38,10 @@ var scopeMarker = Node{Type: scopeMarkerNode} // that it looks like "a cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1578,6 +1668,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1602,16 +1693,40 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // we let the request continue to consume a concurrency slot + // until we can confirm the server is still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). 
+ // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. + // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. + ping := false + if !closeOnIdle { + cc.mu.Lock() + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1633,12 +1748,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { - cc.lastActive = time.Now() + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. + return errClientConnNotEstablished + } + cc.lastActive = cc.t.now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ @@ -2180,10 +2300,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = time.Now() + cc.lastActive = cc.t.now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() + cc.lastIdle = cc.t.now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2243,7 +2363,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2267,6 +2386,24 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. + // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. 
+ const unusedWaitTime = 5 * time.Second + idleTime := cc.t.now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2494,15 +2631,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. + limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -3046,6 +3202,11 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. + cc.pendingResets = 0 + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3068,13 +3229,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). 
cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } @@ -3228,7 +3396,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Since(cc.lastActive) + ci.IdleTime = cc.t.timeSince(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/unencrypted.go b/vendor/golang.org/x/net/http2/unencrypted.go new file mode 100644 index 00000000..b2de2116 --- /dev/null +++ b/vendor/golang.org/x/net/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. +// +// TLSNextProto functions accept a *tls.Conn. +// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. +func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go index cebde763..3c9576e2 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go @@ -4,27 +4,27 @@ package socket type iovec struct { - Base *byte - Len uint64 + Base *byte + Len uint64 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x10 - sizeofMsghdr = 0x30 + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 ) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go index cebde763..3c9576e2 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go @@ -4,27 +4,27 @@ package socket type iovec struct { - Base *byte - Len uint64 + Base *byte + Len uint64 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x10 - sizeofMsghdr = 0x30 + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 ) diff --git 
a/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s new file mode 100644 index 00000000..ec2acfe5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctlbyname(SB) +GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go new file mode 100644 index 00000000..b838cb9e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +package cpu + +// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl +// call (see issue 43089). It also restricts AVX512 support for Darwin to +// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233). +// +// Background: +// Darwin implements a special mechanism to economize on thread state when +// AVX512 specific registers are not in use. This scheme minimizes state when +// preempting threads that haven't yet used any AVX512 instructions, but adds +// special requirements to check for AVX512 hardware support at runtime (e.g. +// via sysctl call or commpage inspection). See issue 43089 and link below for +// full background: +// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240 +// +// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0 +// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption +// of the AVX512 mask registers (K0-K7) upon signal return. For this reason +// AVX512 is considered unsafe to use on Darwin for kernel versions prior to +// 21.3.0, where a fix has been confirmed. See issue 49233 for full background. 
+func darwinSupportsAVX512() bool { + return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0) +} + +// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies +func darwinKernelVersionCheck(major, minor, patch int) bool { + var release [256]byte + err := darwinOSRelease(&release) + if err != nil { + return false + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return false + } + case b == 0: + break Loop + default: + return false + } + } + if c != 2 { + return false + } + return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index 910728fb..32a44514 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -6,10 +6,10 @@ package cpu -// cpuid is implemented in cpu_x86.s for gc compiler +// cpuid is implemented in cpu_gc_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) -// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s similarity index 94% rename from vendor/golang.org/x/sys/cpu/cpu_x86.s rename to vendor/golang.org/x/sys/cpu/cpu_gc_x86.s index 7d7ba33e..ce208ce6 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.s +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s @@ -18,7 +18,7 @@ TEXT ·cpuid(SB), NOSPLIT, $0-24 RET // func xgetbv() (eax, edx uint32) -TEXT ·xgetbv(SB),NOSPLIT,$0-8 +TEXT ·xgetbv(SB), NOSPLIT, $0-8 MOVL $0, CX XGETBV MOVL AX, eax+0(FP) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go index 99c60fe9..170d21dd 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -23,9 +23,3 @@ func xgetbv() (eax, edx uint32) { gccgoXgetbv(&a, &d) return a, d } - -// gccgo doesn't build on Darwin, per: -// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 -func darwinSupportsAVX512() bool { - return false -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 08f35ea1..f1caf0f7 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -110,7 +110,6 @@ func doinit() { ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) ARM64.HasDIT = isSet(hwCap, hwcap_DIT) - // HWCAP2 feature bits ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_x86.go b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go new file mode 100644 index 00000000..a0fd7e2f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc)) + +package cpu + +func darwinSupportsAVX512() bool { + panic("only implemented for gc && amd64 && darwin") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index c29f5e4c..600a6807 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -92,10 +92,8 @@ func archInit() { osSupportsAVX = isSet(1, eax) && isSet(2, eax) if runtime.GOOS == "darwin" { - // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. - // Since users can't rely on mask register contents, let's not advertise AVX-512 support. - // See issue 49233. - osSupportsAVX512 = false + // Darwin requires special AVX512 checks, see cpu_darwin_x86.go + osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() } else { // Check if OPMASK and ZMM registers have OS support. osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go new file mode 100644 index 00000000..4d0888b0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go @@ -0,0 +1,98 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on Darwin without depending on x/sys/unix. + +//go:build darwin && amd64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +type _C_int int32 + +// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419 +func darwinOSRelease(release *[256]byte) error { + // from x/sys/unix/zerrors_openbsd_amd64.go + const ( + CTL_KERN = 0x1 + KERN_OSRELEASE = 0x2 + ) + + mib := []_C_int{CTL_KERN, KERN_OSRELEASE} + n := unsafe.Sizeof(*release) + + return sysctl(mib, &release[0], &n, nil, 0) +} + +type Errno = syscall.Errno + +var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes. 
+ +// from x/sys/unix/zsyscall_darwin_amd64.go L791-807 +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + if _, _, err := syscall_syscall6( + libc_sysctl_trampoline_addr, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + ); err != 0 { + return err + } + + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +// adapted from internal/cpu/cpu_arm64_darwin.go +func darwinSysctlEnabled(name []byte) bool { + out := int32(0) + nout := unsafe.Sizeof(out) + if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { + return false + } + return out > 0 +} + +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +var libc_sysctlbyname_trampoline_addr uintptr + +// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix +func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + if _, _, err := syscall_syscall6( + libc_sysctlbyname_trampoline_addr, + uintptr(unsafe.Pointer(name)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + 0, + ); err != 0 { + return err + } + + return nil +} + +//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680ea..7ca4fa12 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. +func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. +func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is +// suitable for system calls like ClockGettime. 
+func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. +func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. +func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. +func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) { + value := PtpSysOffsetExtended{Samples: uint32(samples)} + err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinGetfunc returns the configuration of the specified +// I/O pin on given PTP device. +func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) { + value := PtpPinDesc{Index: uint32(index)} + err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinSetfunc updates configuration of the specified PTP +// I/O pin. +func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error { + return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd)) +} + +// IoctlPtpPeroutRequest configures the periodic output mode of the +// PTP I/O pins. +func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error { + return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r)) +} + +// IoctlPtpExttsRequest configures the external timestamping mode +// of the PTP I/O pins. +func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error { + return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r)) +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index ac54ecab..6ab02b6c 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -158,6 +158,16 @@ includes_Linux=' #endif #define _GNU_SOURCE +// See the description in unix/linux/types.go +#if defined(__ARM_EABI__) || \ + (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \ + (defined(__powerpc__) && (!defined(__powerpc64__))) +# ifdef _TIME_BITS +# undef _TIME_BITS +# endif +# define _TIME_BITS 32 +#endif + // is broken on powerpc64, as it fails to include definitions of // these structures. We just include them copied from . 
#if defined(__powerpc__) @@ -256,6 +266,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -527,6 +538,7 @@ ccflags="$@" $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^PTP_/ || $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index f08abd43..230a9454 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1860,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys ClockSettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) //sys CloseRange(first uint, last uint, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 312ae6ac..7bf5c04b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) @@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) { // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ func isSpecialPath(path []byte) (v bool) { var special = [4][8]byte{ - [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, - [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + {'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + {'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} var i, j int for i = 0; i < len(special); i++ { @@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { //sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT //sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT //sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT + +func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg) + runtime.ExitSyscall() + val = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) { + switch op.(type) { + case *Flock_t: + err = FcntlFlock(fd, cmd, op.(*Flock_t)) + if err != nil { + ret = -1 + } + return + case int: + return 
FcntlInt(fd, cmd, op.(int)) + case *F_cnvrt: + return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt)))) + case unsafe.Pointer: + return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer))) + default: + return -1, EINVAL + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO: use LE call instead if the call is implemented + originalOffset, err := Seek(infd, 0, SEEK_CUR) + if err != nil { + return -1, err + } + //start reading data from in_fd + if offset != nil { + _, err := Seek(infd, *offset, SEEK_SET) + if err != nil { + return -1, err + } + } + + buf := make([]byte, count) + readBuf := make([]byte, 0) + var n int = 0 + for i := 0; i < count; i += n { + n, err := Read(infd, buf) + if n == 0 { + if err != nil { + return -1, err + } else { // EOF + break + } + } + readBuf = append(readBuf, buf...) + buf = buf[0:0] + } + + n2, err := Write(outfd, readBuf) + if err != nil { + return -1, err + } + + //When sendfile() returns, this variable will be set to the + // offset of the byte following the last byte that was read. + if offset != nil { + *offset = *offset + int64(n) + // If offset is not NULL, then sendfile() does not modify the file + // offset of in_fd + _, err := Seek(infd, originalOffset, SEEK_SET) + if err != nil { + return -1, err + } + } + return n2, nil +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index de3b4624..ccba391c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2625,6 +2625,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS = 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 8aa6d77c..0c00cb3f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -237,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index da428f42..dfb36455 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -237,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index bf45bfec..d46dcf78 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 0x1f PTRACE_GETFDPIC_EXEC = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 71c67162..3af3248a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -240,6 +240,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9476628f..292bcf02 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -238,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e 
PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9e85f3c..782b7110 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index a48b68a7..84973fd9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ea00e852..6d9cbc3b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 91c64687..5f9fedbc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + 
PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 8cbf38d6..bb0026ee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -237,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index a2df7341..46120db5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -237,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 24791379..5c951634 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -237,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index d265f146..11a84d5a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + 
PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 3f2d6443..f78c4617 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 5d8b727a..aeb777c3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -239,6 +239,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETFPREGS64 = 0x19 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index af30da55..5cc1e8eb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 3a69e454..8daaf3fa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1752,12 +1752,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - 
NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1796,6 +1790,7 @@ const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1825,6 +1820,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1857,6 +1854,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1925,6 +1923,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -1977,6 +1976,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) +const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -4110,6 +4118,106 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } +type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Rsv [3]uint32 + Ts [25][3]PtpClockTime + } + PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af4..2e5d5a44 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 + Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag 
uint32 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 5cee9a31..4510bfc3 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -725,20 +725,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + EndOfFile int64 } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil + var info _FILE_END_OF_FILE_INFO + info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -894,6 +886,11 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -1685,13 +1682,16 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 7b97a154..51311e20 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -2203,6 +2203,132 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. +const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. 
+const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. +type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. +type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. +type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to SetConsoleMode. See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. 
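Editor's note, not part of the vendored diff: the MibIfRow2 type and MIB constants added above back the GetIfEntry2Ex binding declared earlier in syscall_windows.go and wired up in zsyscall_windows.go below. A minimal, hedged usage sketch follows; the interface index 1 is an arbitrary placeholder (the LUID field could be set instead), and it assumes only the API exactly as vendored here.

//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// Look up interface 1 (placeholder index) at the normal information level.
	row := windows.MibIfRow2{InterfaceIndex: 1}
	if err := windows.GetIfEntry2Ex(windows.MibIfEntryNormal, &row); err != nil {
		log.Fatalf("GetIfEntry2Ex: %v", err)
	}
	fmt.Printf("alias=%q mtu=%d operStatus=%d\n",
		windows.UTF16ToString(row.Alias[:]), row.Mtu, row.OperStatus)
}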
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 4c2e1bdc..6f525288 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -181,10 +181,15 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -1606,6 +1611,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1638,6 +1651,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func 
AddDllDirectory(path *uint16) (cookie uintptr, err error) { r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) cookie = uintptr(r0) diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 18d1adb0..a6b5ed0a 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -344,7 +344,12 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r } // UsesImport reports whether a given import is used. +// The provided File must have been parsed with syntactic object resolution +// (not using go/parser.SkipObjectResolution). func UsesImport(f *ast.File, path string) (used bool) { + if f.Scope == nil { + panic("file f was not parsed with syntactic object resolution") + } spec := importSpec(f, path) if spec == nil { return diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 137cc8df..f3ab0a2e 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -2,22 +2,64 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package gcexportdata provides functions for locating, reading, and -// writing export data files containing type information produced by the -// gc compiler. This package supports go1.7 export data format and all -// later versions. -// -// Although it might seem convenient for this package to live alongside -// go/types in the standard library, this would cause version skew -// problems for developer tools that use it, since they must be able to -// consume the outputs of the gc compiler both before and after a Go -// update such as from Go 1.7 to Go 1.8. Because this package lives in -// golang.org/x/tools, sites can update their version of this repo some -// time before the Go 1.8 release and rebuild and redeploy their -// developer tools, which will then be able to consume both Go 1.7 and -// Go 1.8 export data files, so they will work before and after the -// Go update. (See discussion at https://golang.org/issue/15651.) -package gcexportdata // import "golang.org/x/tools/go/gcexportdata" +// Package gcexportdata provides functions for reading and writing +// export data, which is a serialized description of the API of a Go +// package including the names, kinds, types, and locations of all +// exported declarations. +// +// The standard Go compiler (cmd/compile) writes an export data file +// for each package it compiles, which it later reads when compiling +// packages that import the earlier one. The compiler must thus +// contain logic to both write and read export data. +// (See the "Export" section in the cmd/compile/README file.) +// +// The [Read] function in this package can read files produced by the +// compiler, producing [go/types] data structures. As a matter of +// policy, Read supports export data files produced by only the last +// two Go releases plus tip; see https://go.dev/issue/68898. The +// export data files produced by the compiler contain additional +// details related to generics, inlining, and other optimizations that +// cannot be decoded by the [Read] function. +// +// In files written by the compiler, the export data is not at the +// start of the file. Before calling Read, use [NewReader] to locate +// the desired portion of the file. 
+// +// The [Write] function in this package encodes the exported API of a +// Go package ([types.Package]) as a file. Such files can be later +// decoded by Read, but cannot be consumed by the compiler. +// +// # Future changes +// +// Although Read supports the formats written by both Write and the +// compiler, the two are quite different, and there is an open +// proposal (https://go.dev/issue/69491) to separate these APIs. +// +// Under that proposal, this package would ultimately provide only the +// Read operation for compiler export data, which must be defined in +// this module (golang.org/x/tools), not in the standard library, to +// avoid version skew for developer tools that need to read compiler +// export data both before and after a Go release, such as from Go +// 1.23 to Go 1.24. Because this package lives in the tools module, +// clients can update their version of the module some time before the +// Go 1.24 release and rebuild and redeploy their tools, which will +// then be able to consume both Go 1.23 and Go 1.24 export data files, +// so they will work before and after the Go update. (See discussion +// at https://go.dev/issue/15651.) +// +// The operations to import and export [go/types] data structures +// would be defined in the go/types package as Import and Export. +// [Write] would (eventually) delegate to Export, +// and [Read], when it detects a file produced by Export, +// would delegate to Import. +// +// # Deprecations +// +// The [NewImporter] and [Find] functions are deprecated and should +// not be used in new code. The [WriteBundle] and [ReadBundle] +// functions are experimental, and there is an open proposal to +// deprecate them (https://go.dev/issue/69573). +package gcexportdata import ( "bufio" @@ -100,6 +142,11 @@ func readAll(r io.Reader) ([]byte, error) { // Read reads export data from in, decodes it, and returns type // information for the package. // +// Read is capable of reading export data produced by [Write] at the +// same source code version, or by the last two Go releases (plus tip) +// of the standard Go compiler. Reading files from older compilers may +// produce an error. +// // The package path (effectively its linker symbol prefix) is // specified by path, since unlike the package name, this information // may not be recorded in the export data. @@ -128,14 +175,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 + case 'v', 'c', 'd': + // binary, produced by cmd/compile till go1.10 return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': // indexed, till go1.19 + case 'i': + // indexed, produced by cmd/compile till go1.19, + // and also by [Write]. + // + // If proposal #69491 is accepted, go/types + // serialization will be implemented by + // types.Export, to which Write would eventually + // delegate (explicitly dropping any pretence at + // inter-version Write-Read compatibility). + // This [Read] function would delegate to types.Import + // when it detects that the file was produced by Export. 
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': // unified, from go1.20 + case 'u': + // unified, produced by cmd/compile since go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/golang.org/x/tools/go/loader/loader.go index 013c0f50..2d4865f6 100644 --- a/vendor/golang.org/x/tools/go/loader/loader.go +++ b/vendor/golang.org/x/tools/go/loader/loader.go @@ -23,7 +23,6 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/internal/cgo" - "golang.org/x/tools/internal/versions" ) var ignoreVendor build.ImportMode @@ -341,13 +340,12 @@ func (conf *Config) addImport(path string, tests bool) { func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) { for _, info := range prog.AllPackages { for _, f := range info.Files { - if f.Pos() == token.NoPos { - // This can happen if the parser saw - // too many errors and bailed out. - // (Use parser.AllErrors to prevent that.) + if f.FileStart == token.NoPos { + // Workaround for #70162 (undefined FileStart). + // TODO(adonovan): delete once go1.24 is assured. continue } - if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) { + if !tokenFileContainsPos(prog.Fset.File(f.FileStart), start) { continue } if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil { @@ -1029,18 +1027,18 @@ func (imp *importer) newPackageInfo(path, dir string) *PackageInfo { info := &PackageInfo{ Pkg: pkg, Info: types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), }, errorFunc: imp.conf.TypeChecker.Error, dir: dir, } - versions.InitFileVersions(&info.Info) // Copy the types.Config so we can vary it across PackageInfos. tc := imp.conf.TypeChecker diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 8f7afcb5..96db9daf 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -79,7 +79,7 @@ type DriverResponse struct { // driver is the type for functions that query the build system for the // packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) +type driver func(cfg *Config, patterns []string) (*DriverResponse, error) // findExternalDriver returns the file path of a tool that supplies // the build system package structure, or "" if not found. 
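Aside, not part of the diff, referring back to the gcexportdata hunk above: the rewritten package comment describes the NewReader + Read flow for compiler-produced files. A minimal sketch of that flow; the export-data path and package path are placeholders (locating the file, e.g. via 'go list -export', is assumed).

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	const exportFile = "/tmp/example.a" // placeholder: a compiler-produced export data file

	f, err := os.Open(exportFile)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader locates the export data within the compiler output;
	// Read then decodes it into go/types data structures.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, "example.com/somepkg")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), pkg.Scope().Names())
}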
@@ -103,7 +103,7 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*DriverResponse, error) { + return func(cfg *Config, patterns []string) (*DriverResponse, error) { req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, @@ -117,7 +117,7 @@ func findExternalDriver(cfg *Config) driver { buf := new(bytes.Buffer) stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd := exec.CommandContext(cfg.Context, tool, patterns...) cmd.Dir = cfg.Dir // The cwd gets resolved to the real path. On Darwin, where // /tmp is a symlink, this breaks anything that expects the diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 1a3a5b44..76f910ec 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -80,6 +80,12 @@ type golistState struct { cfg *Config ctx context.Context + runner *gocommand.Runner + + // overlay is the JSON file that encodes the Config.Overlay + // mapping, used by 'go list -overlay=...'. + overlay string + envOnce sync.Once goEnvError error goEnv map[string]string @@ -127,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { +// +// overlay is the JSON file that encodes the cfg.Overlay +// mapping, used by 'go list -overlay=...' +func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -142,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error cfg: cfg, ctx: ctx, vendorDirs: map[string]bool{}, + overlay: overlay, + runner: runner, } // Fill in response.Sizes asynchronously if necessary. - if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { errCh := make(chan error) go func() { - compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), cfg.gocmdRunner) + compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner) response.dr.Compiler = compiler response.dr.Arch = arch errCh <- err @@ -681,7 +692,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { // getGoVersion returns the effective minor version of the go command. 
func (state *golistState) getGoVersion() (int, error) { state.goVersionOnce.Do(func() { - state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner) }) return state.goVersion, state.goVersionError } @@ -751,7 +762,7 @@ func jsonFlag(cfg *Config, goVersion int) string { } } addFields("Name", "ImportPath", "Error") // These fields are always needed - if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", "SwigFiles", "SwigCXXFiles", "SysoFiles") @@ -759,7 +770,7 @@ func jsonFlag(cfg *Config, goVersion int) string { addFields("TestGoFiles", "XTestGoFiles") } } - if cfg.Mode&NeedTypes != 0 { + if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, // even when -compiled isn't passed in. // TODO(#52435): Should we make the test ask for -compiled, or automatically @@ -840,7 +851,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation { Env: cfg.Env, Logf: cfg.Logf, WorkingDir: cfg.Dir, - Overlay: cfg.goListOverlayFile, + Overlay: state.overlay, } } @@ -851,11 +862,8 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, inv := state.cfgInvocation() inv.Verb = verb inv.Args = args - gocmdRunner := cfg.gocmdRunner - if gocmdRunner == nil { - gocmdRunner = &gocommand.Runner{} - } - stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + + stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv) if err != nil { // Check for 'go' executable not being found. if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { @@ -879,6 +887,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return nil, friendlyErr } + // Return an error if 'go list' failed due to missing tools in + // $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606). + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) { + return nil, friendlyErr + } + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field // and should be suppressed by go list -e. // diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index f227f1ba..2ecc6423 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,13 +16,13 @@ import ( "go/scanner" "go/token" "go/types" - "io" "log" "os" "path/filepath" "runtime" "strings" "sync" + "sync/atomic" "time" "golang.org/x/sync/errgroup" @@ -31,7 +31,6 @@ import ( "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" "golang.org/x/tools/internal/typesinternal" - "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. @@ -56,7 +55,7 @@ const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota - // NeedFiles adds GoFiles and OtherFiles. + // NeedFiles adds GoFiles, OtherFiles, and IgnoredFiles NeedFiles // NeedCompiledGoFiles adds CompiledGoFiles. @@ -78,7 +77,7 @@ const ( // NeedSyntax adds Syntax and Fset. NeedSyntax - // NeedTypesInfo adds TypesInfo. 
+ // NeedTypesInfo adds TypesInfo and Fset. NeedTypesInfo // NeedTypesSizes adds TypesSizes. @@ -145,13 +144,7 @@ const ( // A Config specifies details about how packages should be loaded. // The zero value is a valid configuration. // -// Calls to Load do not modify this struct. -// -// TODO(adonovan): #67702: this is currently false: in fact, -// calls to [Load] do not modify the public fields of this struct, but -// may modify hidden fields, so concurrent calls to [Load] must not -// use the same Config. But perhaps we should reestablish the -// documented invariant. +// Calls to [Load] do not modify this struct. type Config struct { // Mode controls the level of information returned for each package. Mode LoadMode @@ -182,19 +175,10 @@ type Config struct { // Env []string - // gocmdRunner guards go command calls from concurrency errors. - gocmdRunner *gocommand.Runner - // BuildFlags is a list of command-line flags to be passed through to // the build system's query tool. BuildFlags []string - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string - // Fset provides source position information for syntax trees and types. // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet @@ -241,9 +225,13 @@ type Config struct { // drivers may vary in their level of support for overlays. Overlay map[string][]byte - // goListOverlayFile is the JSON file that encodes the Overlay - // mapping, used by 'go list -overlay=...' - goListOverlayFile string + // -- Hidden configuration fields only for use in x/tools -- + + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string } // Load loads and returns the Go packages named by the given patterns. @@ -334,21 +322,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro } else if !response.NotHandled { return response, true, nil } - // (fall through) + // not handled: fall through } // go list fallback - // + // Write overlays once, as there are many calls // to 'go list' (one per chunk plus others too). - overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) + overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) if err != nil { return nil, false, err } defer cleanupOverlay() - cfg.goListOverlayFile = overlay - response, err := callDriverOnChunks(goListDriver, cfg, chunks) + var runner gocommand.Runner // (shared across many 'go list' calls) + driver := func(cfg *Config, patterns []string) (*DriverResponse, error) { + return goListDriver(cfg, &runner, overlayFile, patterns) + } + response, err := callDriverOnChunks(driver, cfg, chunks) if err != nil { return nil, false, err } @@ -386,16 +377,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { if len(chunks) == 0 { - return driver(cfg) + return driver(cfg, nil) } responses := make([]*DriverResponse, len(chunks)) errNotHandled := errors.New("driver returned NotHandled") var g errgroup.Group for i, chunk := range chunks { - i := i - chunk := chunk g.Go(func() (err error) { - responses[i], err = driver(cfg, chunk...) 
+ responses[i], err = driver(cfg, chunk) if responses[i] != nil && responses[i].NotHandled { err = errNotHandled } @@ -692,18 +681,19 @@ func (p *Package) String() string { return p.ID } // loaderPackage augments Package with state used during the loading phase type loaderPackage struct { *Package - importErrors map[string]error // maps each bad import to its error - loadOnce sync.Once - color uint8 // for cycle detection - needsrc bool // load from source (Mode >= LoadTypes) - needtypes bool // type information is either requested or depended on - initial bool // package was matched by a pattern - goVersion int // minor version number of go command on PATH + importErrors map[string]error // maps each bad import to its error + preds []*loaderPackage // packages that import this one + unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH } // loader holds the working state of a single call to load. type loader struct { - pkgs map[string]*loaderPackage + pkgs map[string]*loaderPackage // keyed by Package.ID Config sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue @@ -749,9 +739,6 @@ func newLoader(cfg *Config) *loader { if ld.Config.Env == nil { ld.Config.Env = os.Environ() } - if ld.Config.gocmdRunner == nil { - ld.Config.gocmdRunner = &gocommand.Runner{} - } if ld.Context == nil { ld.Context = context.Background() } @@ -765,7 +752,7 @@ func newLoader(cfg *Config) *loader { ld.requestedMode = ld.Mode ld.Mode = impliedLoadMode(ld.Mode) - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { if ld.Fset == nil { ld.Fset = token.NewFileSet() } @@ -806,7 +793,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" // This package needs type information if the caller requested types and the package is // either a root, or it's a non-root and the user requested dependencies ... - needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) // This package needs source if the call requested source (or types info, which implies source) // and the package is either a root, or itas a non- root and the user requested dependencies... needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || @@ -831,9 +818,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } } - if ld.Mode&NeedImports != 0 { - // Materialize the import graph. - + // Materialize the import graph if it is needed (NeedImports), + // or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}). + var leaves []*loaderPackage // packages with no unfinished successors + if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { const ( white = 0 // new grey = 1 // in progress @@ -852,63 +840,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // dependency on a package that does. These are the only packages // for which we load source code. 
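Aside, not part of the diff: the preds and unfinishedSuccs fields added to loaderPackage above drive the new non-recursive scheduling further below, where a package is enqueued for type checking once its last unfinished import completes. A minimal sketch of that decrement-and-enqueue pattern, with hypothetical node and queue names.

package main

import (
	"fmt"
	"sync/atomic"
)

type node struct {
	name            string
	preds           []*node      // nodes that import this one
	unfinishedSuccs atomic.Int32 // direct imports not yet processed
}

// finish records that n is done and enqueues any predecessor whose
// last outstanding import was n.
func finish(n *node, enqueue func(*node)) {
	for _, p := range n.preds {
		if p.unfinishedSuccs.Add(-1) == 0 {
			enqueue(p)
		}
	}
}

func main() {
	a := &node{name: "a"} // imports b
	b := &node{name: "b"} // leaf: no imports
	b.preds = []*node{a}
	a.unfinishedSuccs.Store(1)

	queue := []*node{b} // processing starts at the leaves
	for len(queue) > 0 {
		n := queue[0]
		queue = queue[1:]
		fmt.Println("processing", n.name)
		finish(n, func(p *node) { queue = append(queue, p) })
	}
}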
var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: + var visit func(from, lpkg *loaderPackage) bool + visit = func(from, lpkg *loaderPackage) bool { + if lpkg.color == grey { panic("internal error: grey node") } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - lpkg.Imports = make(map[string]*Package, len(stubs)) - for importPath, ipkg := range stubs { - var importErr error - imp := ld.pkgs[ipkg.ID] - if imp == nil { - // (includes package "C" when DisableCgo) - importErr = fmt.Errorf("missing package: %q", ipkg.ID) - } else if imp.color == grey { - importErr = fmt.Errorf("import cycle: %s", stack) + if lpkg.color == white { + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(lpkg, imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package } - if importErr != nil { - if lpkg.importErrors == nil { - lpkg.importErrors = make(map[string]error) + + // -- postorder -- + + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true } - lpkg.importErrors[importPath] = importErr - continue } - if visit(imp) { - lpkg.needsrc = true + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes } - lpkg.Imports[importPath] = imp.Package - } - // Complete type information is required for the - // immediate dependencies of each source package. - if lpkg.needsrc && ld.Mode&NeedTypes != 0 { - for _, ipkg := range lpkg.Imports { - ld.pkgs[ipkg.ID].needtypes = true + // Add packages with no imports directly to the queue of leaves. + if len(lpkg.Imports) == 0 { + leaves = append(leaves, lpkg) } + + stack = stack[:len(stack)-1] // pop + lpkg.color = black } - // NeedTypeSizes causes TypeSizes to be set even - // on packages for which types aren't needed. - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes + // Add edge from predecessor. + if from != nil { + from.unfinishedSuccs.Add(+1) // incref + lpkg.preds = append(lpkg.preds, from) } - stack = stack[:len(stack)-1] // pop - lpkg.color = black return lpkg.needsrc } // For each initial package, create its import DAG. for _, lpkg := range initial { - visit(lpkg) + visit(nil, lpkg) } } else { @@ -921,16 +922,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). 
- if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { - var wg sync.WaitGroup - for _, lpkg := range initial { - wg.Add(1) - go func(lpkg *loaderPackage) { - ld.loadRecursive(lpkg) - wg.Done() - }(lpkg) + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { + + // We avoid using g.SetLimit to limit concurrency as + // it makes g.Go stop accepting work, which prevents + // workers from enqeuing, and thus finishing, and thus + // allowing the group to make progress: deadlock. + // + // Instead we use the ioLimit and cpuLimit semaphores. + g, _ := errgroup.WithContext(ld.Context) + + // enqueues adds a package to the type-checking queue. + // It must have no unfinished successors. + var enqueue func(*loaderPackage) + enqueue = func(lpkg *loaderPackage) { + g.Go(func() error { + // Parse and type-check. + ld.loadPackage(lpkg) + + // Notify each waiting predecessor, + // and enqueue it when it becomes a leaf. + for _, pred := range lpkg.preds { + if pred.unfinishedSuccs.Add(-1) == 0 { // decref + enqueue(pred) + } + } + + return nil + }) + } + + // Load leaves first, adding new packages + // to the queue as they become leaves. + for _, leaf := range leaves { + enqueue(leaf) + } + + if err := g.Wait(); err != nil { + return nil, err // cancelled } - wg.Wait() } // If the context is done, return its error and @@ -977,7 +1007,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { if ld.requestedMode&NeedSyntax == 0 { ld.pkgs[i].Syntax = nil } - if ld.requestedMode&NeedTypes == 0 && ld.requestedMode&NeedSyntax == 0 { + if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 { ld.pkgs[i].Fset = nil } if ld.requestedMode&NeedTypesInfo == 0 { @@ -994,31 +1024,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { return result, nil } -// loadRecursive loads the specified package and its dependencies, -// recursively, in parallel, in topological order. -// It is atomic and idempotent. -// Precondition: ld.Mode&NeedTypes. -func (ld *loader) loadRecursive(lpkg *loaderPackage) { - lpkg.loadOnce.Do(func() { - // Load the direct dependencies, in parallel. - var wg sync.WaitGroup - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - wg.Add(1) - go func(imp *loaderPackage) { - ld.loadRecursive(imp) - wg.Done() - }(imp) - } - wg.Wait() - ld.loadPackage(lpkg) - }) -} - -// loadPackage loads the specified package. +// loadPackage loads/parses/typechecks the specified package. // It must be called only once per Package, // after immediate dependencies are loaded. -// Precondition: ld.Mode & NeedTypes. +// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { // Fill in the blanks to avoid surprises. @@ -1054,6 +1063,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { if !lpkg.needtypes && !lpkg.needsrc { return } + + // TODO(adonovan): this condition looks wrong: + // I think it should be lpkg.needtypes && !lpg.needsrc, + // so that NeedSyntax without NeedTypes can be satisfied by export data. 
if !lpkg.needsrc { if err := ld.loadFromExportData(lpkg); err != nil { lpkg.Errors = append(lpkg.Errors, Error{ @@ -1159,7 +1172,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } lpkg.Syntax = files - if ld.Config.Mode&NeedTypes == 0 { + if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 { return } @@ -1170,16 +1183,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + // Populate TypesInfo only if needed, as it + // causes the type checker to work much harder. + if ld.Config.Mode&NeedTypesInfo != 0 { + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), + } } - versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1232,6 +1249,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } } + // Type-checking is CPU intensive. + cpuLimit <- unit{} // acquire a token + defer func() { <-cpuLimit }() // release a token + typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed @@ -1296,8 +1317,11 @@ type importerFunc func(path string) (*types.Package, error) func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } // We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 20) +// the number of parallel I/O calls or CPU threads per process. +var ( + ioLimit = make(chan unit, 20) + cpuLimit = make(chan unit, runtime.GOMAXPROCS(0)) +) func (ld *loader) parseFile(filename string) (*ast.File, error) { ld.parseCacheMu.Lock() @@ -1314,20 +1338,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var src []byte for f, contents := range ld.Config.Overlay { + // TODO(adonovan): Inefficient for large overlays. + // Do an exact name-based map lookup + // (for nonexistent files) followed by a + // FileID-based map lookup (for existing ones). if sameFile(f, filename) { src = contents + break } } var err error if src == nil { - ioLimit <- true // wait + ioLimit <- unit{} // acquire a token src, err = os.ReadFile(filename) - <-ioLimit // signal + <-ioLimit // release a token } if err != nil { v.err = err } else { + // Parsing is CPU intensive. + cpuLimit <- unit{} // acquire a token v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + <-cpuLimit // release a token } close(v.ready) @@ -1342,18 +1374,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { // Because files are scanned in parallel, the token.Pos // positions of the resulting ast.Files are not ordered. 
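Aside, not part of the diff: ioLimit and cpuLimit above are counting semaphores; a buffered channel of empty structs bounds parallel I/O and CPU-bound work without the errgroup SetLimit deadlock mentioned earlier. A minimal sketch of the token pattern with a hypothetical worker.

package main

import (
	"fmt"
	"runtime"
	"sync"
)

type unit struct{}

var cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))

func limited(work func()) {
	cpuLimit <- unit{}            // acquire a token (blocks when all are taken)
	defer func() { <-cpuLimit }() // release the token
	work()
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			limited(func() { fmt.Println("working on", i) })
		}(i)
	}
	wg.Wait()
}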
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { - var wg sync.WaitGroup - n := len(filenames) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range filenames { - wg.Add(1) - go func(i int, filename string) { + var ( + n = len(filenames) + parsed = make([]*ast.File, n) + errors = make([]error, n) + ) + var g errgroup.Group + for i, filename := range filenames { + // This creates goroutines unnecessarily in the + // cache-hit case, but that case is uncommon. + g.Go(func() error { parsed[i], errors[i] = ld.parseFile(filename) - wg.Done() - }(i, file) + return nil + }) } - wg.Wait() + g.Wait() // Eliminate nils, preserving order. var o int @@ -1524,4 +1559,4 @@ func usesExportData(cfg *Config) bool { return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } -var _ interface{} = io.Discard // assert build toolchain is go1.16 or later +type unit struct{} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index a70b727f..16ed3c17 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -281,25 +281,25 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { T := o.Type() if alias, ok := T.(*types.Alias); ok { - if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil { return Path(r), nil } - if r := find(obj, aliases.Rhs(alias), append(path, opRhs), nil); r != nil { + if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil { return Path(r), nil } } else if tname.IsAlias() { // legacy alias - if r := find(obj, T, path, nil); r != nil { + if r := find(obj, T, path); r != nil { return Path(r), nil } } else if named, ok := T.(*types.Named); ok { // defined (named) type - if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil { return Path(r), nil } - if r := find(obj, named.Underlying(), append(path, opUnderlying), nil); r != nil { + if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil { return Path(r), nil } } @@ -312,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + if r := find(obj, o.Type(), append(path, opType)); r != nil { return Path(r), nil } } @@ -332,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if m == obj { return Path(path2), nil // found declared method } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + if r := find(obj, m.Type(), append(path2, opType)); r != nil { return Path(r), nil } } @@ -447,46 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // // The seen map is used to short circuit cycles through type parameters. If // nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { +// +// The seenMethods map is used internally to short circuit cycles through +// interface methods, such as occur in the following example: +// +// type I interface { f() interface{I} } +// +// See golang/go#68046 for details. 
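Aside, not part of the diff: the Encoder.For entry point being reworked above encodes a stable, package-relative path to an object, and objectpath.Object resolves it again. A minimal round-trip sketch over a tiny type-checked package; the source text and package path are placeholders.

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/types/objectpath"
)

const src = `package p

type T struct{ F int }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("example.com/p", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}

	var enc objectpath.Encoder
	obj := pkg.Scope().Lookup("T")
	path, err := enc.For(obj) // encode a path to the exported type T
	if err != nil {
		log.Fatal(err)
	}
	back, err := objectpath.Object(pkg, path) // resolve it again
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(path, back == obj)
}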
+func find(obj types.Object, T types.Type, path []byte) []byte { + return (&finder{obj: obj}).find(T, path) +} + +// finder closes over search state for a call to find. +type finder struct { + obj types.Object // the sought object + seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters + seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces +} + +func (f *finder) find(T types.Type, path []byte) []byte { switch T := T.(type) { case *types.Alias: - return find(obj, types.Unalias(T), path, seen) + return f.find(types.Unalias(T), path) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. return nil case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + if r := f.find(T.Key(), append(path, opKey)); r != nil { return r } - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Signature: - if r := findTypeParam(obj, T.RecvTypeParams(), path, opRecvTypeParam, nil); r != nil { + if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil { return r } - if r := findTypeParam(obj, T.TypeParams(), path, opTypeParam, seen); r != nil { + if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil { return r } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + if r := f.find(T.Params(), append(path, opParams)); r != nil { return r } - return find(obj, T.Results(), append(path, opResults), seen) + return f.find(T.Results(), append(path, opResults)) case *types.Struct: for i := 0; i < T.NumFields(); i++ { fld := T.Field(i) path2 := appendOpArg(path, opField, i) - if fld == obj { + if fld == f.obj { return path2 // found field var } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + if r := f.find(fld.Type(), append(path2, opType)); r != nil { return r } } @@ -495,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] for i := 0; i < T.Len(); i++ { v := T.At(i) path2 := appendOpArg(path, opAt, i) - if v == obj { + if v == f.obj { return path2 // found param/result var } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + if r := f.find(v.Type(), append(path2, opType)); r != nil { return r } } @@ -506,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] case *types.Interface: for i := 0; i < T.NumMethods(); i++ { m := T.Method(i) + if f.seenMethods[m] { + return nil + } path2 := appendOpArg(path, opMethod, i) - if m == obj { + if m == f.obj { return path2 // found interface method } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + if f.seenMethods == nil { + f.seenMethods = make(map[*types.Func]bool) + } + f.seenMethods[m] = true + if r := f.find(m.Type(), append(path2, opType)); r != nil { return r } } return nil case *types.TypeParam: name := T.Obj() - if name == obj { - return 
append(path, opObj) - } - if seen[name] { + if f.seenTParamNames[name] { return nil } - if seen == nil { - seen = make(map[*types.TypeName]bool) + if name == f.obj { + return append(path, opObj) } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + if f.seenTParamNames == nil { + f.seenTParamNames = make(map[*types.TypeName]bool) + } + f.seenTParamNames[name] = true + if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil { return r } return nil @@ -535,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte { + return (&finder{obj: obj}).findTypeParam(list, path, op) +} + +func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) path2 := appendOpArg(path, op, i) - if r := find(obj, tparam, path2, seen); r != nil { + if r := f.find(tparam, path2); r != nil { return r } } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 1e19fbed..7dfc31a3 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -246,6 +246,26 @@ import ( // IExportShallow encodes "shallow" export data for the specified package. // +// For types, we use "shallow" export data. Historically, the Go +// compiler always produced a summary of the types for a given package +// that included types from other packages that it indirectly +// referenced: "deep" export data. This had the advantage that the +// compiler (and analogous tools such as gopls) need only load one +// file per direct import. However, it meant that the files tended to +// get larger based on the level of the package in the import +// graph. For example, higher-level packages in the kubernetes module +// have over 1MB of "deep" export data, even when they have almost no +// content of their own, merely because they mention a major type that +// references many others. In pathological cases the export data was +// 300x larger than the source for a package due to this quadratic +// growth. +// +// "Shallow" export data means that the serialized types describe only +// a single package. If those types mention types from other packages, +// the type checker may need to request additional packages beyond +// just the direct imports. Type information for the entire transitive +// closure of imports is provided (lazily) by the DAG. +// // No promises are made about the encoding other than that it can be decoded by // the same version of IIExportShallow. If you plan to save export data in the // file system, be sure to include a cryptographic digest of the executable in @@ -268,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) } // IImportShallow decodes "shallow" types.Package data encoded by -// IExportShallow in the same executable. This function cannot import data from -// cmd/compile or gcexportdata.Write. +// [IExportShallow] in the same executable. This function cannot import data +// from cmd/compile or gcexportdata.Write. 
// // The importer calls getPackages to obtain package symbols for all // packages mentioned in the export data, including the one being diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 21908a15..e260c0e8 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -558,6 +558,14 @@ type importReader struct { prevColumn int64 } +// markBlack is redefined in iimport_go123.go, to work around golang/go#69912. +// +// If TypeNames are not marked black (in the sense of go/types cycle +// detection), they may be mutated when dot-imported. Fix this by punching a +// hole through the type, when compiling with Go 1.23. (The bug has been fixed +// for 1.24, but the fix was not worth back-porting). +var markBlack = func(name *types.TypeName) {} + func (r *importReader) obj(name string) { tag := r.byte() pos := r.pos() @@ -570,6 +578,7 @@ func (r *importReader) obj(name string) { } typ := r.typ() obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + markBlack(obj) // workaround for golang/go#69912 r.declare(obj) case constTag: @@ -590,6 +599,9 @@ func (r *importReader) obj(name string) { // declaration before recursing. obj := types.NewTypeName(pos, r.currPkg, name, nil) named := types.NewNamed(obj, nil, nil) + + markBlack(obj) // workaround for golang/go#69912 + // Declare obj before calling r.tparamList, so the new type name is recognized // if used in the constraint of one of its own typeparams (see #48280). r.declare(obj) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go new file mode 100644 index 00000000..7586bfac --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go @@ -0,0 +1,53 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 && !go1.24 + +package gcimporter + +import ( + "go/token" + "go/types" + "unsafe" +) + +// TODO(rfindley): delete this workaround once go1.24 is assured. + +func init() { + // Update markBlack so that it correctly sets the color + // of imported TypeNames. + // + // See the doc comment for markBlack for details. + + type color uint32 + const ( + white color = iota + black + grey + ) + type object struct { + _ *types.Scope + _ token.Pos + _ *types.Package + _ string + _ types.Type + _ uint32 + color_ color + _ token.Pos + } + type typeName struct { + object + } + + // If the size of types.TypeName changes, this will fail to compile. 
+ const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) + var _ [-delta * delta]int + + markBlack = func(obj *types.TypeName) { + type uP = unsafe.Pointer + var ptr *typeName + *(*uP)(uP(&ptr)) = uP(obj) + ptr.color_ = black + } +} diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index c1510817..5ae57697 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -27,7 +27,6 @@ import ( "unicode" "unicode/utf8" - "golang.org/x/sync/errgroup" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" @@ -91,18 +90,6 @@ type ImportFix struct { Relevance float64 // see pkg } -// An ImportInfo represents a single import statement. -type ImportInfo struct { - ImportPath string // import path, e.g. "crypto/rand". - Name string // import name, e.g. "crand", or "" if none. -} - -// A packageInfo represents what's known about a package. -type packageInfo struct { - name string // real package name, if known. - exports map[string]bool // known exports. -} - // parseOtherFiles parses all the Go files in srcDir except filename, including // test files if filename looks like a test. // @@ -162,8 +149,8 @@ func addGlobals(f *ast.File, globals map[string]bool) { // collectReferences builds a map of selector expressions, from // left hand side (X) to a set of right hand sides (Sel). -func collectReferences(f *ast.File) references { - refs := references{} +func collectReferences(f *ast.File) References { + refs := References{} var visitor visitFn visitor = func(node ast.Node) ast.Visitor { @@ -233,7 +220,7 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { allFound := true for right := range syms { - if !pkgInfo.exports[right] { + if !pkgInfo.Exports[right] { allFound = false break } @@ -246,11 +233,6 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { return nil } -// references is set of references found in a Go file. The first map key is the -// left hand side of a selector expression, the second key is the right hand -// side, and the value should always be true. -type references map[string]map[string]bool - // A pass contains all the inputs and state necessary to fix a file's imports. // It can be modified in some ways during use; see comments below. type pass struct { @@ -258,27 +240,29 @@ type pass struct { fset *token.FileSet // fset used to parse f and its siblings. f *ast.File // the file being fixed. srcDir string // the directory containing f. - env *ProcessEnv // the environment to use for go commands, etc. - loadRealPackageNames bool // if true, load package names from disk rather than guessing them. - otherFiles []*ast.File // sibling files. + logf func(string, ...any) + source Source // the environment to use for go commands, etc. + loadRealPackageNames bool // if true, load package names from disk rather than guessing them. + otherFiles []*ast.File // sibling files. + goroot string // Intermediate state, generated by load. existingImports map[string][]*ImportInfo - allRefs references - missingRefs references + allRefs References + missingRefs References // Inputs to fix. These can be augmented between successive fix calls. lastTry bool // indicates that this is the last call and fix should clean up as best it can. candidates []*ImportInfo // candidate imports in priority order. 
- knownPackages map[string]*packageInfo // information about all known packages. + knownPackages map[string]*PackageInfo // information about all known packages. } // loadPackageNames saves the package names for everything referenced by imports. -func (p *pass) loadPackageNames(imports []*ImportInfo) error { - if p.env.Logf != nil { - p.env.Logf("loading package names for %v packages", len(imports)) +func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error { + if p.logf != nil { + p.logf("loading package names for %v packages", len(imports)) defer func() { - p.env.Logf("done loading package names for %v packages", len(imports)) + p.logf("done loading package names for %v packages", len(imports)) }() } var unknown []string @@ -289,20 +273,17 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error { unknown = append(unknown, imp.ImportPath) } - resolver, err := p.env.GetResolver() - if err != nil { - return err - } - - names, err := resolver.loadPackageNames(unknown, p.srcDir) + names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown) if err != nil { return err } + // TODO(rfindley): revisit this. Why do we need to store known packages with + // no exports? The inconsistent data is confusing. for path, name := range names { - p.knownPackages[path] = &packageInfo{ - name: name, - exports: map[string]bool{}, + p.knownPackages[path] = &PackageInfo{ + Name: name, + Exports: map[string]bool{}, } } return nil @@ -330,8 +311,8 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { return imp.Name } known := p.knownPackages[imp.ImportPath] - if known != nil && known.name != "" { - return withoutVersion(known.name) + if known != nil && known.Name != "" { + return withoutVersion(known.Name) } return ImportPathToAssumedName(imp.ImportPath) } @@ -339,9 +320,9 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { // load reads in everything necessary to run a pass, and reports whether the // file already has all the imports it needs. It fills in p.missingRefs with the // file's missing symbols, if any, or removes unused imports if not. -func (p *pass) load() ([]*ImportFix, bool) { - p.knownPackages = map[string]*packageInfo{} - p.missingRefs = references{} +func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) { + p.knownPackages = map[string]*PackageInfo{} + p.missingRefs = References{} p.existingImports = map[string][]*ImportInfo{} // Load basic information about the file in question. @@ -364,9 +345,11 @@ func (p *pass) load() ([]*ImportFix, bool) { // f's imports by the identifier they introduce. imports := collectImports(p.f) if p.loadRealPackageNames { - err := p.loadPackageNames(append(imports, p.candidates...)) + err := p.loadPackageNames(ctx, append(imports, p.candidates...)) if err != nil { - p.env.logf("loading package names: %v", err) + if p.logf != nil { + p.logf("loading package names: %v", err) + } return nil, false } } @@ -535,9 +518,10 @@ func (p *pass) assumeSiblingImportsValid() { // We have the stdlib in memory; no need to guess. rights = symbolNameSet(m) } - p.addCandidate(imp, &packageInfo{ + // TODO(rfindley): we should set package name here, for consistency. + p.addCandidate(imp, &PackageInfo{ // no name; we already know it. - exports: rights, + Exports: rights, }) } } @@ -546,14 +530,14 @@ func (p *pass) assumeSiblingImportsValid() { // addCandidate adds a candidate import to p, and merges in the information // in pkg. 
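Aside, not part of the diff: References (replacing the unexported references) maps the left-hand side of a selector expression to the set of right-hand sides used in the file, and PackageInfo (replacing packageInfo) records a package's real name and known exports. internal/imports cannot be imported from outside x/tools, so the sketch below uses local stand-in types with the same apparent shape, purely for illustration.

package main

import "fmt"

// Stand-ins mirroring internal/imports; the real definitions live in
// x/tools/internal/imports and these shapes are assumptions, not quotations.
type (
	PackageName = string
	Symbol      = string

	References = map[PackageName]map[Symbol]bool

	PackageInfo struct {
		Name    string
		Exports map[string]bool
	}
)

func main() {
	// Roughly what collectReferences would produce for a file that
	// mentions fmt.Println and bytes.Buffer.
	refs := References{
		"fmt":   {"Println": true},
		"bytes": {"Buffer": true},
	}
	known := map[string]*PackageInfo{
		"fmt":   {Name: "fmt", Exports: map[string]bool{"Println": true, "Errorf": true}},
		"bytes": {Name: "bytes", Exports: map[string]bool{"Buffer": true}},
	}
	// A candidate import satisfies a reference only if every used symbol
	// is among its known exports (compare findMissingImport above).
	for lhs, syms := range refs {
		info := known[lhs]
		ok := info != nil
		for s := range syms {
			ok = ok && info.Exports[s]
		}
		fmt.Printf("%s satisfiable: %v\n", lhs, ok)
	}
}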
-func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { +func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) { p.candidates = append(p.candidates, imp) if existing, ok := p.knownPackages[imp.ImportPath]; ok { - if existing.name == "" { - existing.name = pkg.name + if existing.Name == "" { + existing.Name = pkg.Name } - for export := range pkg.exports { - existing.exports[export] = true + for export := range pkg.Exports { + existing.Exports[export] = true } } else { p.knownPackages[imp.ImportPath] = pkg @@ -581,19 +565,42 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P // getFixes gets the import fixes that need to be made to f in order to fix the imports. // It does not modify the ast. func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { + source, err := NewProcessEnvSource(env, filename, f.Name.Name) + if err != nil { + return nil, err + } + goEnv, err := env.goEnv() + if err != nil { + return nil, err + } + return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source) +} + +func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) { + // This logic is defensively duplicated from getFixes. abs, err := filepath.Abs(filename) if err != nil { return nil, err } srcDir := filepath.Dir(abs) - env.logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) + + if logf != nil { + logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir) + } // First pass: looking only at f, and using the naive algorithm to // derive package names from import paths, see if the file is already // complete. We can't add any imports yet, because we don't know // if missing references are actually package vars. - p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} - if fixes, done := p.load(); done { + p := &pass{ + fset: fset, + f: f, + srcDir: srcDir, + logf: logf, + goroot: goroot, + source: source, + } + if fixes, done := p.load(ctx); done { return fixes, nil } @@ -605,7 +612,7 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st // Second pass: add information from other files in the same package, // like their package vars and imports. p.otherFiles = otherFiles - if fixes, done := p.load(); done { + if fixes, done := p.load(ctx); done { return fixes, nil } @@ -618,10 +625,17 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st // Third pass: get real package names where we had previously used // the naive algorithm. 
- p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} + p = &pass{ + fset: fset, + f: f, + srcDir: srcDir, + logf: logf, + goroot: goroot, + source: p.source, // safe to reuse, as it's just a wrapper around env + } p.loadRealPackageNames = true p.otherFiles = otherFiles - if fixes, done := p.load(); done { + if fixes, done := p.load(ctx); done { return fixes, nil } @@ -835,7 +849,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return true }, dirFound: func(pkg *pkg) bool { - return pkgIsCandidate(filename, references{searchPkg: nil}, pkg) + return pkgIsCandidate(filename, References{searchPkg: nil}, pkg) }, packageNameLoaded: func(pkg *pkg) bool { return pkg.packageName == searchPkg @@ -1086,11 +1100,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) return e.GocmdRunner.Run(ctx, inv) } -func addStdlibCandidates(pass *pass, refs references) error { - goenv, err := pass.env.goEnv() - if err != nil { - return err - } +func addStdlibCandidates(pass *pass, refs References) error { localbase := func(nm string) string { ans := path.Base(nm) if ans[0] == 'v' { @@ -1105,13 +1115,13 @@ func addStdlibCandidates(pass *pass, refs references) error { } add := func(pkg string) { // Prevent self-imports. - if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir { + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir { return } exports := symbolNameSet(stdlib.PackageSymbols[pkg]) pass.addCandidate( &ImportInfo{ImportPath: pkg}, - &packageInfo{name: localbase(pkg), exports: exports}) + &PackageInfo{Name: localbase(pkg), Exports: exports}) } for left := range refs { if left == "rand" { @@ -1175,91 +1185,14 @@ type scanCallback struct { exportsLoaded func(pkg *pkg, exports []stdlib.Symbol) } -func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { +func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error { ctx, done := event.Start(ctx, "imports.addExternalCandidates") defer done() - var mu sync.Mutex - found := make(map[string][]pkgDistance) - callback := &scanCallback{ - rootFound: func(gopathwalk.Root) bool { - return true // We want everything. - }, - dirFound: func(pkg *pkg) bool { - return pkgIsCandidate(filename, refs, pkg) - }, - packageNameLoaded: func(pkg *pkg) bool { - if _, want := refs[pkg.packageName]; !want { - return false - } - if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { - // The candidate is in the same directory and has the - // same package name. Don't try to import ourselves. - return false - } - if !canUse(filename, pkg.dir) { - return false - } - mu.Lock() - defer mu.Unlock() - found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) - return false // We'll do our own loading after we sort. - }, - } - resolver, err := pass.env.GetResolver() + results, err := pass.source.ResolveReferences(ctx, filename, refs) if err != nil { return err } - if err = resolver.scan(ctx, callback); err != nil { - return err - } - - // Search for imports matching potential package references. 
- type result struct { - imp *ImportInfo - pkg *packageInfo - } - results := make([]*result, len(refs)) - - g, ctx := errgroup.WithContext(ctx) - - searcher := symbolSearcher{ - logf: pass.env.logf, - srcDir: pass.srcDir, - xtest: strings.HasSuffix(pass.f.Name.Name, "_test"), - loadExports: resolver.loadExports, - } - - i := 0 - for pkgName, symbols := range refs { - index := i // claim an index in results - i++ - pkgName := pkgName - symbols := symbols - - g.Go(func() error { - found, err := searcher.search(ctx, found[pkgName], pkgName, symbols) - if err != nil { - return err - } - if found == nil { - return nil // No matching package. - } - - imp := &ImportInfo{ - ImportPath: found.importPathShort, - } - pkg := &packageInfo{ - name: pkgName, - exports: symbols, - } - results[index] = &result{imp, pkg} - return nil - }) - } - if err := g.Wait(); err != nil { - return err - } for _, result := range results { if result == nil { @@ -1267,7 +1200,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil } // Don't offer completions that would shadow predeclared // names, such as github.com/coreos/etcd/error. - if types.Universe.Lookup(result.pkg.name) != nil { // predeclared + if types.Universe.Lookup(result.Package.Name) != nil { // predeclared // Ideally we would skip this candidate only // if the predeclared name is actually // referenced by the file, but that's a lot @@ -1276,7 +1209,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil // user before long. continue } - pass.addCandidate(result.imp, result.pkg) + pass.addCandidate(result.Import, result.Package) } return nil } @@ -1801,7 +1734,7 @@ func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols m // filename is the file being formatted. // pkgIdent is the package being searched for, like "client" (if // searching for "client.New") -func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { +func pkgIsCandidate(filename string, refs References, pkg *pkg) bool { // Check "internal" and "vendor" visibility: if !canUse(filename, pkg.dir) { return false diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index ff6b59a5..2215a128 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -47,7 +47,14 @@ type Options struct { // Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { fileSet := token.NewFileSet() - file, adjust, err := parse(fileSet, filename, src, opt) + var parserMode parser.Mode + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment) if err != nil { return nil, err } @@ -66,17 +73,19 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. 
-func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { +func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) { ctx, done := event.Start(ctx, "imports.FixImports") defer done() fileSet := token.NewFileSet() - file, _, err := parse(fileSet, filename, src, opt) + // TODO(rfindley): these default values for ParseComments and AllErrors were + // extracted from gopls, but are they even needed? + file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true) if err != nil { return nil, err } - return getFixes(ctx, fileSet, file, filename, opt.Env) + return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source) } // ApplyFixes applies all of the fixes to the file and formats it. extraMode @@ -114,7 +123,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e // formatted file, and returns the postpocessed result. func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { mergeImports(file) - sortImports(opt.LocalPrefix, fset.File(file.Pos()), file) + sortImports(opt.LocalPrefix, fset.File(file.FileStart), file) var spacesBefore []string // import paths we need spaces before for _, impSection := range astutil.Imports(fset, file) { // Within each block of contiguous imports, see if any @@ -164,13 +173,9 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori // parse parses src, which was read from filename, // as a Go source file or statement list. -func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { - var parserMode parser.Mode // legacy ast.Object resolution is required here - if opt.Comments { - parserMode |= parser.ParseComments - } - if opt.AllErrors { - parserMode |= parser.AllErrors +func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) { + if parserMode&parser.SkipObjectResolution != 0 { + panic("legacy ast.Object resolution is required") } // Try as whole source file. @@ -181,7 +186,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast // If the error is that the source file didn't begin with a // package line and we accept fragmented input, fall through to // try as a source fragment. Stop and return on any other error. - if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") { + if !fragment || !strings.Contains(err.Error(), "expected 'package'") { return nil, nil, err } diff --git a/vendor/golang.org/x/tools/internal/imports/source.go b/vendor/golang.org/x/tools/internal/imports/source.go new file mode 100644 index 00000000..5d2aeeeb --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source.go @@ -0,0 +1,63 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import "context" + +// These types document the APIs below. +// +// TODO(rfindley): consider making these defined types rather than aliases. +type ( + ImportPath = string + PackageName = string + Symbol = string + + // References is set of References found in a Go file. 
The first map key is the + // left hand side of a selector expression, the second key is the right hand + // side, and the value should always be true. + References = map[PackageName]map[Symbol]bool +) + +// A Result satisfies a missing import. +// +// The Import field describes the missing import spec, and the Package field +// summarizes the package exports. +type Result struct { + Import *ImportInfo + Package *PackageInfo +} + +// An ImportInfo represents a single import statement. +type ImportInfo struct { + ImportPath string // import path, e.g. "crypto/rand". + Name string // import name, e.g. "crand", or "" if none. +} + +// A PackageInfo represents what's known about a package. +type PackageInfo struct { + Name string // package name in the package declaration, if known + Exports map[string]bool // set of names of known package level declarations +} + +// A Source provides imports to satisfy unresolved references in the file being +// fixed. +type Source interface { + // LoadPackageNames queries PackageName information for the requested import + // paths, when operating from the provided srcDir. + // + // TODO(rfindley): try to refactor to remove this operation. + LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) + + // ResolveReferences asks the Source for the best package name to satisfy + // each of the missing references, in the context of fixing the given + // filename. + // + // Returns a map from package name to a [Result] for that package name that + // provides the required symbols. Keys may be omitted in the map if no + // candidates satisfy all missing references for that package name. It is up + // to each data source to select the best result for each entry in the + // missing map. + ResolveReferences(ctx context.Context, filename string, missing References) (map[PackageName]*Result, error) +} diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go new file mode 100644 index 00000000..ff9555d2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source_env.go @@ -0,0 +1,125 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "path/filepath" + "strings" + "sync" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/internal/gopathwalk" +) + +// ProcessEnvSource implements the [Source] interface using the legacy +// [ProcessEnv] abstraction. +type ProcessEnvSource struct { + env *ProcessEnv + srcDir string + filename string + pkgName string +} + +// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given +// env, to be used for fixing imports in the file with name filename in package +// named pkgName.
+func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) { + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + return &ProcessEnvSource{ + env: env, + srcDir: srcDir, + filename: filename, + pkgName: pkgName, + }, nil +} + +func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) { + r, err := s.env.GetResolver() + if err != nil { + return nil, err + } + return r.loadPackageNames(unknown, srcDir) +} + +func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) (map[string]*Result, error) { + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false + } + if pkg.dir == s.srcDir && s.pkgName == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !canUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. + }, + } + resolver, err := s.env.GetResolver() + if err != nil { + return nil, err + } + if err := resolver.scan(ctx, callback); err != nil { + return nil, err + } + + g, ctx := errgroup.WithContext(ctx) + + searcher := symbolSearcher{ + logf: s.env.logf, + srcDir: s.srcDir, + xtest: strings.HasSuffix(s.pkgName, "_test"), + loadExports: resolver.loadExports, + } + + var resultMu sync.Mutex + results := make(map[string]*Result, len(refs)) + for pkgName, symbols := range refs { + g.Go(func() error { + found, err := searcher.search(ctx, found[pkgName], pkgName, symbols) + if err != nil { + return err + } + if found == nil { + return nil // No matching package. + } + + imp := &ImportInfo{ + ImportPath: found.importPathShort, + } + pkg := &PackageInfo{ + Name: pkgName, + Exports: symbols, + } + resultMu.Lock() + results[pkgName] = &Result{Import: imp, Package: pkg} + resultMu.Unlock() + return nil + }) + } + if err := g.Wait(); err != nil { + return nil, err + } + return results, nil +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go index 35810826..0ade5c29 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/free.go +++ b/vendor/golang.org/x/tools/internal/typeparams/free.go @@ -6,6 +6,8 @@ package typeparams import ( "go/types" + + "golang.org/x/tools/internal/aliases" ) // Free is a memoization of the set of free type parameters within a @@ -36,6 +38,18 @@ func (w *Free) Has(typ types.Type) (res bool) { break case *types.Alias: + if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() { + return true // This is an uninstantiated Alias. + } + // The expansion of an alias can have free type parameters, + // whether or not the alias itself has type parameters: + // + // func _[K comparable]() { + // type Set = map[K]bool // free(Set) = {K} + // type MapTo[V] = map[K]V // free(Map[foo]) = {V} + // } + // + // So, we must Unalias. 
return w.Has(types.Unalias(t)) case *types.Array: @@ -96,9 +110,8 @@ func (w *Free) Has(typ types.Type) (res bool) { case *types.Named: args := t.TypeArgs() - // TODO(taking): this does not match go/types/infer.go. Check with rfindley. if params := t.TypeParams(); params.Len() > args.Len() { - return true + return true // this is an uninstantiated named type. } for i, n := 0, args.Len(); i < n; i++ { if w.Has(args.At(i)) { diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 83923286..df3ea521 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -11,6 +11,8 @@ import ( "go/types" "reflect" "unsafe" + + "golang.org/x/tools/internal/aliases" ) func SetUsesCgo(conf *types.Config) bool { @@ -63,3 +65,57 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier { return other.Name() } } + +// A NamedOrAlias is a [types.Type] that is named (as +// defined by the spec) and capable of bearing type parameters: it +// abstracts aliases ([types.Alias]) and defined types +// ([types.Named]). +// +// Every type declared by an explicit "type" declaration is a +// NamedOrAlias. (Built-in type symbols may additionally +// have type [types.Basic], which is not a NamedOrAlias, +// though the spec regards them as "named".) +// +// NamedOrAlias cannot expose the Origin method, because +// [types.Alias.Origin] and [types.Named.Origin] have different +// (covariant) result types; use [Origin] instead. +type NamedOrAlias interface { + types.Type + Obj() *types.TypeName +} + +// TypeParams is a light shim around t.TypeParams(). +// (go/types.Alias).TypeParams requires >= 1.23. +func TypeParams(t NamedOrAlias) *types.TypeParamList { + switch t := t.(type) { + case *types.Alias: + return aliases.TypeParams(t) + case *types.Named: + return t.TypeParams() + } + return nil +} + +// TypeArgs is a light shim around t.TypeArgs(). +// (go/types.Alias).TypeArgs requires >= 1.23. +func TypeArgs(t NamedOrAlias) *types.TypeList { + switch t := t.(type) { + case *types.Alias: + return aliases.TypeArgs(t) + case *types.Named: + return t.TypeArgs() + } + return nil +} + +// Origin returns the generic type of the Named or Alias type t if it +// is instantiated, otherwise it returns t. +func Origin(t NamedOrAlias) NamedOrAlias { + switch t := t.(type) { + case *types.Alias: + return aliases.Origin(t) + case *types.Named: + return t.Origin() + } + return t +} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go index f0bb0d15..0fc10ce4 100644 --- a/vendor/golang.org/x/tools/internal/versions/types.go +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -31,8 +31,3 @@ func FileVersion(info *types.Info, file *ast.File) string { // This would act as a max version on what a tool can support. return Future } - -// InitFileVersions initializes info to record Go versions for Go files. 
-func InitFileVersions(info *types.Info) { - info.FileVersions = make(map[*ast.File]string) -} diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index ff6a3836..2c0693d7 100644 Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index fa790e0f..f3252985 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -32,6 +32,7 @@ const ( EditionProto2 Edition = 998 EditionProto3 Edition = 999 Edition2023 Edition = 1000 + Edition2024 Edition = 1001 EditionUnsupported Edition = 100000 ) @@ -77,28 +78,42 @@ type ( Locations SourceLocations } + // EditionFeatures is a frequently-instantiated struct, so please take care + // to minimize padding when adding new fields to this struct (add them in + // the right place/order). EditionFeatures struct { + // StripEnumPrefix determines if the plugin generates enum value + // constants as-is, with their prefix stripped, or both variants. + StripEnumPrefix int + // IsFieldPresence is true if field_presence is EXPLICIT // https://protobuf.dev/editions/features/#field_presence IsFieldPresence bool + // IsFieldPresence is true if field_presence is LEGACY_REQUIRED // https://protobuf.dev/editions/features/#field_presence IsLegacyRequired bool + // IsOpenEnum is true if enum_type is OPEN // https://protobuf.dev/editions/features/#enum_type IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED // https://protobuf.dev/editions/features/#repeated_field_encoding IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY // https://protobuf.dev/editions/features/#utf8_validation IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED // https://protobuf.dev/editions/features/#message_encoding IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW // https://protobuf.dev/editions/features/#json_format IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the // UnmarshalJSON([]byte) error method for enums. 
GenerateLegacyUnmarshalJSON bool diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index fd4d0c83..7611796e 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -32,6 +32,10 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeVarint(b) b = b[m:] parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + case genid.GoFeatures_StripEnumPrefix_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.StripEnumPrefix = int(v) default: panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) } diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 7f67cbb6..09792d96 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -21,13 +21,30 @@ const ( // Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix" GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix" ) // Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 + GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix" + GoFeatures_StripEnumPrefix_enum_name = "StripEnumPrefix" +) + +// Enum values for pb.GoFeatures.StripEnumPrefix. 
+const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value = 3 ) // Extension numbers diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index fb8e15e8..62a52a40 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 35 - Patch = 1 + Patch = 2 PreRelease = "" ) diff --git a/vendor/modules.txt b/vendor/modules.txt index e4eb4c81..70386a63 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# github.com/99designs/gqlgen v0.17.55 +# github.com/99designs/gqlgen v0.17.57 ## explicit; go 1.22.5 github.com/99designs/gqlgen github.com/99designs/gqlgen/api @@ -27,7 +27,7 @@ github.com/99designs/gqlgen/plugin/servergen # github.com/KyleBanks/depth v1.2.1 ## explicit github.com/KyleBanks/depth -# github.com/Masterminds/semver/v3 v3.3.0 +# github.com/Masterminds/semver/v3 v3.3.1 ## explicit; go 1.21 github.com/Masterminds/semver/v3 # github.com/agnivade/levenshtein v1.2.0 @@ -52,10 +52,10 @@ github.com/caddyserver/zerossl # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/cpuguy83/go-md2man/v2 v2.0.4 +# github.com/cpuguy83/go-md2man/v2 v2.0.5 ## explicit; go 1.11 github.com/cpuguy83/go-md2man/v2/md2man -# github.com/datarhei/gosrt v0.7.0 +# github.com/datarhei/gosrt v0.8.0 ## explicit; go 1.20 github.com/datarhei/gosrt github.com/datarhei/gosrt/circular @@ -97,7 +97,7 @@ github.com/davecgh/go-spew/spew # github.com/dustin/go-humanize v1.0.1 ## explicit; go 1.16 github.com/dustin/go-humanize -# github.com/gabriel-vasile/mimetype v1.4.6 +# github.com/gabriel-vasile/mimetype v1.4.7 ## explicit; go 1.20 github.com/gabriel-vasile/mimetype github.com/gabriel-vasile/mimetype/internal/charset @@ -133,9 +133,13 @@ github.com/go-playground/locales/currency # github.com/go-playground/universal-translator v0.18.1 ## explicit; go 1.18 github.com/go-playground/universal-translator -# github.com/go-playground/validator/v10 v10.22.1 +# github.com/go-playground/validator/v10 v10.23.0 ## explicit; go 1.18 github.com/go-playground/validator/v10 +# github.com/go-viper/mapstructure/v2 v2.2.1 +## explicit; go 1.18 +github.com/go-viper/mapstructure/v2 +github.com/go-viper/mapstructure/v2/internal/errors # github.com/gobwas/glob v0.2.3 ## explicit github.com/gobwas/glob @@ -201,8 +205,8 @@ github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/s2 github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/klauspost/cpuid/v2 v2.2.8 -## explicit; go 1.15 +# github.com/klauspost/cpuid/v2 v2.2.9 +## explicit; go 1.20 github.com/klauspost/cpuid/v2 # github.com/labstack/echo-jwt v0.0.0-20221127215225-c84d41a71003 ## explicit; go 1.17 @@ -223,7 +227,7 @@ github.com/leodido/go-urn/scim/schema # github.com/libdns/libdns v0.2.2 ## explicit; go 1.18 github.com/libdns/libdns -# github.com/lithammer/shortuuid/v4 v4.0.0 +# github.com/lithammer/shortuuid/v4 v4.1.0 ## explicit; go 1.13 github.com/lithammer/shortuuid/v4 # github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 @@ -250,7 +254,7 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# 
github.com/minio/minio-go/v7 v7.0.80 +# github.com/minio/minio-go/v7 v7.0.81 ## explicit; go 1.22 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/cors @@ -264,9 +268,6 @@ github.com/minio/minio-go/v7/pkg/set github.com/minio/minio-go/v7/pkg/signer github.com/minio/minio-go/v7/pkg/sse github.com/minio/minio-go/v7/pkg/tags -# github.com/mitchellh/mapstructure v1.5.0 -## explicit; go 1.14 -github.com/mitchellh/mapstructure # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg @@ -322,9 +323,10 @@ github.com/shoenig/go-m1cpu # github.com/sosodev/duration v1.3.1 ## explicit; go 1.17 github.com/sosodev/duration -# github.com/stretchr/testify v1.9.0 +# github.com/stretchr/testify v1.10.0 ## explicit; go 1.17 github.com/stretchr/testify/assert +github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/require # github.com/swaggo/echo-swagger v1.4.1 ## explicit; go 1.17 @@ -341,7 +343,7 @@ github.com/tklauser/go-sysconf # github.com/tklauser/numcpus v0.9.0 ## explicit; go 1.18 github.com/tklauser/numcpus -# github.com/urfave/cli/v2 v2.27.4 +# github.com/urfave/cli/v2 v2.27.5 ## explicit; go 1.18 github.com/urfave/cli/v2 # github.com/valyala/bytebufferpool v1.0.0 @@ -350,8 +352,8 @@ github.com/valyala/bytebufferpool # github.com/valyala/fasttemplate v1.2.2 ## explicit; go 1.12 github.com/valyala/fasttemplate -# github.com/vektah/gqlparser/v2 v2.5.18 -## explicit; go 1.21 +# github.com/vektah/gqlparser/v2 v2.5.20 +## explicit; go 1.22 github.com/vektah/gqlparser/v2 github.com/vektah/gqlparser/v2/ast github.com/vektah/gqlparser/v2/gqlerror @@ -400,7 +402,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/crypto v0.28.0 +# golang.org/x/crypto v0.29.0 ## explicit; go 1.20 golang.org/x/crypto/acme golang.org/x/crypto/acme/autocert @@ -411,12 +413,12 @@ golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/ocsp golang.org/x/crypto/pbkdf2 golang.org/x/crypto/sha3 -# golang.org/x/mod v0.21.0 +# golang.org/x/mod v0.22.0 ## explicit; go 1.22.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.30.0 +# golang.org/x/net v0.31.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/html @@ -431,16 +433,16 @@ golang.org/x/net/internal/socket golang.org/x/net/ipv4 golang.org/x/net/ipv6 golang.org/x/net/publicsuffix -# golang.org/x/sync v0.8.0 +# golang.org/x/sync v0.9.0 ## explicit; go 1.18 golang.org/x/sync/errgroup -# golang.org/x/sys v0.26.0 +# golang.org/x/sys v0.27.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.19.0 +# golang.org/x/text v0.20.0 ## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/internal @@ -452,10 +454,10 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.7.0 +# golang.org/x/time v0.8.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.26.0 +# golang.org/x/tools v0.27.0 ## explicit; go 1.22.0 golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/buildutil @@ -481,7 +483,7 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/protobuf v1.35.1 +# google.golang.org/protobuf v1.35.2 ## explicit; go 1.21 
google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/prototext