diff --git a/.github/workflows/ci-test.yml b/.github/workflows/ci-test.yml index 84655799..6de94881 100644 --- a/.github/workflows/ci-test.yml +++ b/.github/workflows/ci-test.yml @@ -9,24 +9,27 @@ jobs: security-events: write steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: path: src/github.com/qiniu/go-sdk ref: ${{ github.ref }} submodules: recursive - name: Install Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: - go-version: '1.10.x' + go-version: '1.10' - name: Run unit cases run: | - set -e + set -ex rm -rf $GITHUB_WORKSPACE/src/github.com/qiniu/x && git clone -b v1.12.21 --depth 1 https://github.com/qiniu/x.git $GITHUB_WORKSPACE/src/github.com/qiniu/x - GOPATH=$GITHUB_WORKSPACE go get golang.org/x/sync/singleflight + GOPATH=$GITHUB_WORKSPACE go get github.com/qiniu/dyn - GOPATH=$GITHUB_WORKSPACE go get github.com/gofrs/flock + + rm -rf $GITHUB_WORKSPACE/src/github.com/gofrs/flock && git clone -b v0.8.1 --depth 1 https://github.com/gofrs/flock $GITHUB_WORKSPACE/src/github.com/gofrs/flock + GOPATH=$GITHUB_WORKSPACE go get github.com/alex-ant/gomath/rational - GOPATH=$GITHUB_WORKSPACE go get github.com/matishsiao/goInfo + GOPATH=$GITHUB_WORKSPACE go get github.com/gammazero/toposort + GOPATH=$GITHUB_WORKSPACE go get github.com/joeshaw/multierror # FIXME special package # github.com/go-playground/validator/v10 @@ -35,18 +38,21 @@ jobs: # new package name don't work in non-module mode rm -rf $GITHUB_WORKSPACE/src/github.com/go-playground/validator/v10 && git clone -b v10.9.0 --depth 1 https://github.com/go-playground/validator.git $GITHUB_WORKSPACE/src/github.com/go-playground/validator/v10 rm -rf $GITHUB_WORKSPACE/src/github.com/universal-translator && git clone -b v0.18.0 --depth 1 https://github.com/go-playground/universal-translator.git $GITHUB_WORKSPACE/src/github.com/go-playground/universal-translator - + rm -rf $GITHUB_WORKSPACE/src/github.com/elastic/go-sysinfo && git clone -b v1.0.2 --depth 1 https://github.com/elastic/go-sysinfo.git $GITHUB_WORKSPACE/src/github.com/elastic/go-sysinfo + rm -rf $GITHUB_WORKSPACE/src/github.com/pkg/errors && git clone -b v0.9.1 --depth 1 https://github.com/pkg/errors.git $GITHUB_WORKSPACE/src/github.com/pkg/errors + rm -rf $GITHUB_WORKSPACE/src/github.com/prometheus/procfs && git clone -b v0.0.6 --depth 1 https://github.com/prometheus/procfs.git $GITHUB_WORKSPACE/src/github.com/prometheus/procfs + rm -rf $GITHUB_WORKSPACE/src/howett.net/plist && git clone -b v1.0.0 --depth 1 https://github.com/DHowett/go-plist.git $GITHUB_WORKSPACE/src/howett.net/plist + rm -rf $GITHUB_WORKSPACE/src/golang.org/x/sys && git clone -b v0.13.0 --depth 1 https://github.com/golang/sys $GITHUB_WORKSPACE/src/golang.org/x/sys rm -rf $GITHUB_WORKSPACE/src/golang.org/x/crypto && git clone -b v0.10.0 --depth 1 https://go.googlesource.com/crypto $GITHUB_WORKSPACE/src/golang.org/x/crypto - # GOPATH=$GITHUB_WORKSPACE go get golang.org/x/crypto/sha3 - rm -rf $GITHUB_WORKSPACE/src/golang.org/x/text && git clone -b v0.10.0 --depth 1 https://github.com/golang/text $GITHUB_WORKSPACE/src/golang.org/x/text - # GOPATH=$GITHUB_WORKSPACE go get golang.org/x/text/language + rm -rf $GITHUB_WORKSPACE/src/golang.org/x/sync && git clone -b v0.3.0 --depth 1 https://github.com/golang/sync $GITHUB_WORKSPACE/src/golang.org/x/sync GOPATH=$GITHUB_WORKSPACE go get github.com/leodido/go-urn GOPATH=$GITHUB_WORKSPACE go get github.com/go-playground/locales rm -rf $GITHUB_WORKSPACE/src/github.com/dave/jennifer && git clone -b v1.6.1 --depth 1 
https://github.com/dave/jennifer $GITHUB_WORKSPACE/src/github.com/dave/jennifer - # GOPATH=$GITHUB_WORKSPACE go get github.com/dave/jennifer + rm -rf $GITHUB_WORKSPACE/src/modernc.org/fileutil && git clone -b v1.0.0 --depth 1 https://gitlab.com/cznic/fileutil.git $GITHUB_WORKSPACE/src/modernc.org/fileutil + rm -rf $GITHUB_WORKSPACE/src/github.com/gorilla/mux && git clone -b v1.7.4 --depth 1 https://github.com/gorilla/mux $GITHUB_WORKSPACE/src/github.com/gorilla/mux GOPATH=$GITHUB_WORKSPACE go get github.com/iancoleman/strcase @@ -66,7 +72,7 @@ jobs: fail-fast: false max-parallel: 1 matrix: - go_version: ['1.11.x', '1.12.x', '1.13.x', '1.14.x', '1.15.x', '1.16.x', '1.17.x', '1.18.x', '1.19.x', '1.20.x', '1.21.x', '1.22.x'] + go_version: ['1.11', '1.12', '1.13', '1.14', '1.15', '1.16', '1.17', '1.18', '1.19', '1.20', '1.21', '1.22'] runs-on: ubuntu-latest permissions: actions: read @@ -74,24 +80,24 @@ jobs: security-events: write steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: ref: ${{ github.ref }} submodules: recursive - name: Install Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: go-version: ${{ matrix.go_version }} - name: Format run: | - if [ "${{ matrix.go_version }}" = "1.22.x" ]; then + if [ "${{ matrix.go_version }}" = "1.22" ]; then if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then exit 1 fi fi - name: Golint run: | - if [ "${{ matrix.go_version }}" = "1.22.x" ]; then + if [ "${{ matrix.go_version }}" = "1.22" ]; then set -e go install honnef.co/go/tools/cmd/staticcheck@latest make staticcheck @@ -99,6 +105,55 @@ jobs: - name: Run unit cases run: | set -e + if [ "${{ matrix.go_version }}" = "1.11" ] || [ "${{ matrix.go_version }}" = "1.12" ] || [ "${{ matrix.go_version }}" = "1.13" ] || [ "${{ matrix.go_version }}" = "1.14" ] || [ "${{ matrix.go_version }}" = "1.15" ]; then + go get modernc.org/fileutil@v1.0.0 + fi make unittest env: GO111MODULE: 'on' + go-mod-test-windows: + needs: 'go-mod-test' + runs-on: windows-latest + permissions: + actions: read + contents: read + security-events: write + steps: + - name: Checkout repo + uses: actions/checkout@v4 + with: + ref: ${{ github.ref }} + submodules: recursive + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: stable + - name: Run unit cases + run: | + set -e + make unittest + shell: bash + go-mod-test-macos: + needs: 'go-mod-test-windows' + runs-on: macos-latest + permissions: + actions: read + contents: read + security-events: write + steps: + - name: Checkout repo + uses: actions/checkout@v4 + with: + ref: ${{ github.ref }} + submodules: recursive + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: stable + - name: Install dependencies + run: | + brew install make + - name: Run unit cases + run: | + set -e + make unittest diff --git a/Makefile b/Makefile index 9f8e8ae3..3aadd313 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ integrationtest: go test -tags=integration -failfast -parallel 1 -v -coverprofile=coverage.txt `go list ./... | egrep -v 'examples|sms'` staticcheck: - staticcheck -go 1.10 `go list ./... | egrep -v 'examples|sms'` + staticcheck `go list ./... 
| egrep -v 'examples|sms'` generate: go generate ./storagev2/ diff --git a/api-specs b/api-specs index f316d165..fc527c37 160000 --- a/api-specs +++ b/api-specs @@ -1 +1 @@ -Subproject commit f316d165b6091dcaa987153e92fd6a7267d7563d +Subproject commit fc527c37e43c96662b1241b1cc228726ade93b55 diff --git a/client/dialer.go b/client/dialer.go index 95685445..328ff026 100644 --- a/client/dialer.go +++ b/client/dialer.go @@ -31,7 +31,11 @@ func defaultDialFunc(ctx context.Context, network string, address string) (net.C keepAliveInterval = 15 * time.Second } if resolved, ok := ctx.Value(resolverContextKey{}).(resolverContextValue); ok && len(resolved.ips) > 0 && resolved.domain == host { - dialer := net.Dialer{Timeout: dialTimeout / time.Duration(len(resolved.ips)), KeepAlive: keepAliveInterval} + dialTimeout = dialTimeout / time.Duration(len(resolved.ips)) + if dialTimeout < 3*time.Second { + dialTimeout = 3 * time.Second + } + dialer := net.Dialer{Timeout: dialTimeout, KeepAlive: keepAliveInterval} for _, ip := range resolved.ips { newAddr := ip.String() if port != "" { diff --git a/go.mod b/go.mod index 88257a9b..97c0184b 100644 --- a/go.mod +++ b/go.mod @@ -6,19 +6,25 @@ require ( github.com/BurntSushi/toml v1.3.2 github.com/alex-ant/gomath v0.0.0-20160516115720-89013a210a82 github.com/dave/jennifer v1.6.1 - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/elastic/go-sysinfo v1.0.2 // indirect + github.com/gammazero/toposort v0.1.1 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect - github.com/go-playground/validator/v10 v10.8.0 + github.com/go-playground/validator/v10 v10.7.0 github.com/gofrs/flock v0.8.1 + github.com/gorilla/mux v1.8.1 // indirect github.com/iancoleman/strcase v0.3.0 github.com/kr/pretty v0.3.0 // indirect + github.com/leodido/go-urn v1.2.1 // indirect github.com/matishsiao/goInfo v0.0.0-20210923090445-da2e3fa8d45f github.com/qiniu/dyn v1.3.0 github.com/rogpeppe/go-internal v1.8.0 // indirect github.com/stretchr/testify v1.6.1 - golang.org/x/crypto v0.1.0 // indirect + github.com/yuin/goldmark v1.4.13 // indirect + golang.org/x/mod v0.6.0-dev // indirect golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 - golang.org/x/sys v0.12.0 // indirect + golang.org/x/sys v0.0.0-20190425145619-16072639606e // indirect + golang.org/x/text v0.3.7 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b + modernc.org/fileutil v1.3.0 ) diff --git a/go.sum b/go.sum index bcd696fb..d53b5b6b 100644 --- a/go.sum +++ b/go.sum @@ -5,9 +5,16 @@ github.com/alex-ant/gomath v0.0.0-20160516115720-89013a210a82/go.mod h1:nLnM0KdK github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/dave/jennifer v1.6.1 h1:T4T/67t6RAA5AIV6+NP8Uk/BIsXgDoqEowgycdQQLuk= github.com/dave/jennifer v1.6.1/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/elastic/go-sysinfo v1.0.2 h1:Wq1bOgnSz7Obl7DbMjbn0tzx1bE5G8Cfy3MVFa6C1Cc= +github.com/elastic/go-sysinfo v1.0.2/go.mod h1:O/D5m1VpYLwGjCYzEt63g3Z1uO3jXfwyzzjiW90t8cY= +github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= 
+github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/gammazero/toposort v0.1.1 h1:OivGxsWxF3U3+U80VoLJ+f50HcPU1MIqE1JlKzoJ2Eg= +github.com/gammazero/toposort v0.1.1/go.mod h1:H2cozTnNpMw0hg2VHAYsAxmkHXBYroNangj2NTBQDvw= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= @@ -16,12 +23,19 @@ github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.7.0 h1:gLi5ajTBBheLNt0ctewgq7eolXoDALQd5/y90Hh9ZgM= +github.com/go-playground/validator/v10 v10.7.0/go.mod h1:xm76BBt941f7yWdGnI2DVPFFg1UK3YY04qifoXU3lOk= github.com/go-playground/validator/v10 v10.8.0 h1:1kAa0fCrnpv+QYdkdcRzrRM7AyYs5o8+jZdJCz9xj6k= github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -30,38 +44,55 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/matishsiao/goInfo v0.0.0-20210923090445-da2e3fa8d45f h1:B0OD7nYl2FPQEVrw8g2uyc1lGEzNbvrKh7fspGZcbvY= github.com/matishsiao/goInfo v0.0.0-20210923090445-da2e3fa8d45f/go.mod h1:aEt7p9Rvh67BYApmZwNDPpgircTO2kgdmDUoF/1QmwA= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0 h1:c8R11WC8m7KNMkTv/0+Be8vvwo4I3/Ut9AC2FW8fX3U= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/qiniu/dyn v1.3.0 h1:s+xPTeV0H8yikgM4ZMBc7Rrefam8UNI3asBlkaOQg5o= github.com/qiniu/dyn v1.3.0/go.mod h1:E8oERcm8TtwJiZvkQPbcAh0RL8jO1G0VXJMW3FAWdkk= github.com/qiniu/x v1.10.5 h1:7V/CYWEmo9axJULvrJN6sMYh2FdY+esN5h8jwDkA4b0= github.com/qiniu/x v1.10.5/go.mod h1:03Ni9tj+N2h2aKnAz+6N0Xfl8FwMEDRC2PAlxekASDs= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/mod v0.6.0-dev/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190425145619-16072639606e h1:4ktJgTV34+N3qOZUc5fAaG3Pb11qzMm3PkAoTAgUZ2I= +golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -70,6 +101,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -77,6 +110,7 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -84,11 +118,19 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 
v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= diff --git a/internal/cache/cache.go b/internal/cache/cache.go index 8161216e..80a7f2bd 100644 --- a/internal/cache/cache.go +++ b/internal/cache/cache.go @@ -376,9 +376,12 @@ func saveCacheMapTo(w io.Writer, m map[string]cacheValue) error { return nil } -func lockCachePersistentFile(lockFilePath string, ex bool, handleError func(error)) (context.CancelFunc, error) { - lockFile := flock.New(lockFilePath) - var err error +func lockCachePersistentFile(cacheFilePath string, ex bool, handleError func(error)) (context.CancelFunc, error) { + var ( + lockFilePath = cacheFilePath + ".lock" + lockFile = flock.New(lockFilePath) + err error + ) if ex { err = lockFile.Lock() } else { diff --git a/internal/clientv2/client.go b/internal/clientv2/client.go index 15961c13..1133a4bb 100644 --- a/internal/clientv2/client.go +++ b/internal/clientv2/client.go @@ -2,7 +2,9 @@ package clientv2 import ( "net/http" + "reflect" "sort" + "sync" clientV1 "github.com/qiniu/go-sdk/v7/client" internal_io "github.com/qiniu/go-sdk/v7/internal/io" @@ -15,8 +17,9 @@ type Client interface { type Handler func(req *http.Request) (*http.Response, error) type client struct { - coreClient Client - interceptors interceptorList + coreClient Client + interceptors interceptorList + interceptorsMutex sync.RWMutex } func NewClient(cli Client, interceptors ...Interceptor) Client { @@ -27,28 +30,61 @@ func NewClient(cli Client, interceptors ...Interceptor) Client { var is interceptorList = interceptors is = append(is, newDefaultHeaderInterceptor()) is = append(is, newDebugInterceptor()) - sort.Sort(is) + if c, ok := cli.(*client); ok { + c.addInterceptors(is) + return c + } + + sort.Sort(is) return &client{ coreClient: cli, interceptors: is, } } -func (c *client) Do(req *http.Request) (*http.Response, error) { - handler := func(req *http.Request) (*http.Response, error) { - return c.coreClient.Do(req) +func (c *client) addInterceptors(is interceptorList) { + c.interceptorsMutex.Lock() + defer c.interceptorsMutex.Unlock() + + for _, i := range is { + c.addInterceptor(i) + } + sort.Sort(c.interceptors) +} + +func (c *client) addInterceptor(interceptor Interceptor) { + for _, i := range c.interceptors { + if reflect.TypeOf(i) == reflect.TypeOf(interceptor) { + return + } } - var newInterceptorList interceptorList - if intercetorsFromRequest := getIntercetorsFromRequest(req); len(intercetorsFromRequest) == 0 { - newInterceptorList = c.interceptors + c.interceptors = append(c.interceptors, interceptor) +} + +func (c *client) mergeInterceptors(req *http.Request) interceptorList { + c.interceptorsMutex.RLock() + defer c.interceptorsMutex.RUnlock() + + intercetorsFromRequest := getIntercetorsFromRequest(req) + newInterceptorList := make(interceptorList, 0, len(c.interceptors)+len(intercetorsFromRequest)) + if len(intercetorsFromRequest) == 0 { + newInterceptorList = append(newInterceptorList, c.interceptors...) 
} else if len(c.interceptors) == 0 { newInterceptorList = intercetorsFromRequest } else { - newInterceptorList = append(c.interceptors, intercetorsFromRequest...) + newInterceptorList = append(newInterceptorList, c.interceptors...) + newInterceptorList = append(newInterceptorList, intercetorsFromRequest...) sort.Sort(newInterceptorList) } + return newInterceptorList +} +func (c *client) Do(req *http.Request) (*http.Response, error) { + handler := func(req *http.Request) (*http.Response, error) { + return c.coreClient.Do(req) + } + newInterceptorList := c.mergeInterceptors(req) for _, interceptor := range newInterceptorList { h := handler i := interceptor diff --git a/internal/clientv2/interceptor.go b/internal/clientv2/interceptor.go index 27ef40c2..60110138 100644 --- a/internal/clientv2/interceptor.go +++ b/internal/clientv2/interceptor.go @@ -13,7 +13,8 @@ const ( InterceptorPrioritySetHeader InterceptorPriority = 400 InterceptorPriorityNormal InterceptorPriority = 500 InterceptorPriorityAuth InterceptorPriority = 600 - InterceptorPriorityDebug InterceptorPriority = 700 + InterceptorPriorityAntiHijacking InterceptorPriority = 700 + InterceptorPriorityDebug InterceptorPriority = 800 ) type InterceptorPriority int diff --git a/internal/clientv2/interceptor_anti_hijacking.go b/internal/clientv2/interceptor_anti_hijacking.go new file mode 100644 index 00000000..a493912e --- /dev/null +++ b/internal/clientv2/interceptor_anti_hijacking.go @@ -0,0 +1,31 @@ +package clientv2 + +import ( + "net/http" + + "github.com/qiniu/go-sdk/v7/storagev2/retrier" +) + +type antiHijackingInterceptor struct { +} + +func NewAntiHijackingInterceptor() Interceptor { + return &antiHijackingInterceptor{} +} + +func (interceptor *antiHijackingInterceptor) Priority() InterceptorPriority { + return InterceptorPriorityAntiHijacking +} + +func (interceptor *antiHijackingInterceptor) Intercept(req *http.Request, handler Handler) (response *http.Response, err error) { + response, err = handler(req) + if err != nil { + return + } + reqId := response.Header.Get("x-reqid") + log := response.Header.Get("x-log") + if reqId == "" && log == "" { + return nil, retrier.ErrMaliciousResponse + } + return +} diff --git a/internal/clientv2/interceptor_buffer_response.go b/internal/clientv2/interceptor_buffer_response.go index f53f0fd8..525ae949 100644 --- a/internal/clientv2/interceptor_buffer_response.go +++ b/internal/clientv2/interceptor_buffer_response.go @@ -14,7 +14,7 @@ func (interceptor bufferResponseInterceptor) Priority() InterceptorPriority { } func (interceptor bufferResponseInterceptor) Intercept(req *http.Request, handler Handler) (resp *http.Response, err error) { - toBufferResponse := req.Context().Value(contextKeyBufferResponse{}) != nil + toBufferResponse := req.Context().Value(bufferResponseContextKey{}) != nil resp, err = handler(req) if err == nil && toBufferResponse { err = bufferResponse(resp) diff --git a/internal/clientv2/interceptor_retry_hosts.go b/internal/clientv2/interceptor_retry_hosts.go index 486f67eb..8388a9d9 100644 --- a/internal/clientv2/interceptor_retry_hosts.go +++ b/internal/clientv2/interceptor_retry_hosts.go @@ -1,6 +1,7 @@ package clientv2 import ( + "io" "net/http" "net/url" "strings" @@ -82,7 +83,8 @@ func (interceptor *hostsRetryInterceptor) Intercept(req *http.Request, handler H resp, err = handler(req) - if interceptor.options.getRetryDecision(reqBefore, resp, err, i) == retrier.DontRetry { + retryDecision := interceptor.options.getRetryDecision(reqBefore, resp, err, i) + if 
retryDecision == retrier.DontRetry { return resp, err } @@ -127,6 +129,15 @@ func (interceptor *hostsRetryInterceptor) Intercept(req *http.Request, handler H req = reqBefore + if req.Body != nil && req.GetBody != nil { + if closer, ok := req.Body.(io.Closer); ok { + closer.Close() + } + if req.Body, err = req.GetBody(); err != nil { + return + } + } + if resp != nil && resp.Body != nil { _ = internal_io.SinkAll(resp.Body) resp.Body.Close() diff --git a/internal/clientv2/interceptor_retry_hosts_test.go b/internal/clientv2/interceptor_retry_hosts_test.go index e4129397..3b6ef7d2 100644 --- a/internal/clientv2/interceptor_retry_hosts_test.go +++ b/internal/clientv2/interceptor_retry_hosts_test.go @@ -5,6 +5,8 @@ package clientv2 import ( "net/http" + "net/http/httptest" + "strings" "testing" "time" @@ -104,9 +106,6 @@ func TestHostsNotRetryInterceptor(t *testing.T) { RetryInterval: func() time.Duration { return time.Second }, - //ShouldRetry: func(req *http.Request, resp *http.Response, err error) bool { - // return true - //}, }) doCount := 0 @@ -136,9 +135,9 @@ func TestHostsNotRetryInterceptor(t *testing.T) { Header: nil, GetBody: nil, }) - duration := float32(time.Now().UnixNano()-start.UnixNano()) / 1e9 + duration := time.Since(start) - if duration > float32(doCount-1)+0.1 || duration < float32(doCount-1)-0.1 { + if d := duration - time.Duration(doCount-1)*time.Second; d >= 900*time.Millisecond || d <= -900*time.Millisecond { t.Fatalf("retry interval may be error") } @@ -157,19 +156,28 @@ func TestHostsNotRetryInterceptor(t *testing.T) { } func TestHostsRetryInterceptorByRequest(t *testing.T) { - clientV1.DebugMode = true - defer func() { - clientV1.DebugMode = false - }() + serveMux_1 := http.NewServeMux() + serveMux_1.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(599) + w.Write([]byte(`{"error":"test error"}`)) + }) + server_1 := httptest.NewServer(serveMux_1) + defer server_1.Close() + + serveMux_2 := http.NewServeMux() + serveMux_2.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + server_2 := httptest.NewServer(serveMux_2) + defer server_2.Close() - hostA := "aaa.aa.com" - hostB := "www.qiniu.com" hRetryMax := 30 hRetryInterceptor := NewHostsRetryInterceptor(HostsRetryConfig{ - RetryMax: hRetryMax, - ShouldFreezeHost: nil, - HostFreezeDuration: 0, - HostProvider: hostprovider.NewWithHosts([]string{hostA, hostB}), + RetryMax: hRetryMax, + HostProvider: hostprovider.NewWithHosts([]string{ + strings.TrimPrefix(server_1.URL, "http://"), + strings.TrimPrefix(server_2.URL, "http://"), + }), }) retryMax := 1 @@ -203,7 +211,7 @@ func TestHostsRetryInterceptorByRequest(t *testing.T) { resp, err := Do(c, RequestParams{ Context: nil, Method: RequestMethodGet, - Url: "https://" + hostA, + Url: server_1.URL, Header: nil, GetBody: nil, }) @@ -221,7 +229,7 @@ func TestHostsRetryInterceptorByRequest(t *testing.T) { t.Fatalf("retry flow error") } - if resp.Request.Host != hostB { + if resp.Request.Host != strings.TrimPrefix(server_2.URL, "http://") { t.Fatalf("retry host set error") } } diff --git a/internal/clientv2/interceptor_retry_simple.go b/internal/clientv2/interceptor_retry_simple.go index 714bcc88..91f0e3c1 100644 --- a/internal/clientv2/interceptor_retry_simple.go +++ b/internal/clientv2/interceptor_retry_simple.go @@ -2,9 +2,11 @@ package clientv2 import ( "context" + "io" "math/rand" "net" "net/http" + "sync" "time" clientv1 "github.com/qiniu/go-sdk/v7/client" @@ -16,7 +18,7 @@ import ( ) type ( - 
contextKeyBufferResponse struct{} + bufferResponseContextKey struct{} SimpleRetryConfig struct { RetryMax int // 最大重试次数 @@ -101,9 +103,9 @@ func (interceptor *simpleRetryInterceptor) Intercept(req *http.Request, handler // 可能会被重试多次 for i := 0; ; i++ { - req, chosenIPs = interceptor.choose(req, resolvedIPs, hostname) // Clone 防止后面 Handler 处理对 req 有污染 reqBefore := cloneReq(req) + req, chosenIPs = interceptor.choose(req, resolvedIPs, hostname) resp, err = interceptor.callHandler(req, &retrier.RetrierOptions{Attempts: i}, handler) retryDecision := interceptor.config.getRetryDecision(reqBefore, resp, err, i) @@ -119,12 +121,21 @@ func (interceptor *simpleRetryInterceptor) Intercept(req *http.Request, handler break } + if req.Body != nil && req.GetBody != nil { + if closer, ok := req.Body.(io.Closer); ok { + closer.Close() + } + if req.Body, err = req.GetBody(); err != nil { + return + } + } + if resp != nil && resp.Body != nil { _ = internal_io.SinkAll(resp.Body) resp.Body.Close() } - interceptor.backoff(req, i) + interceptor.wait(req, i) } return resp, err } @@ -140,31 +151,61 @@ func (interceptor *simpleRetryInterceptor) callHandler(req *http.Request, option return } +var ( + defaultResolver resolver.Resolver + defaultResolverOnce sync.Once +) + +func (interceptor *simpleRetryInterceptor) resolver() resolver.Resolver { + var r resolver.Resolver + if r = interceptor.config.Resolver; r == nil { + defaultResolverOnce.Do(func() { + defaultResolver, _ = resolver.NewCacheResolver(nil, nil) + }) + r = defaultResolver + } + return r +} + func (interceptor *simpleRetryInterceptor) resolve(req *http.Request, hostname string) []net.IP { var ( ips []net.IP err error ) - if resolver := interceptor.config.Resolver; resolver != nil { + if r := interceptor.resolver(); r != nil { if interceptor.config.BeforeResolve != nil { interceptor.config.BeforeResolve(req) } - if ips, err = resolver.Resolve(req.Context(), hostname); err == nil { + if ips, err = r.Resolve(req.Context(), hostname); err == nil { if interceptor.config.AfterResolve != nil { interceptor.config.AfterResolve(req, ips) } - } else if err != nil && interceptor.config.ResolveError != nil { + } else if interceptor.config.ResolveError != nil { interceptor.config.ResolveError(req, err) } } return ips } +var ( + defaultChooser chooser.Chooser + defaultChooserOnce sync.Once +) + +func (interceptor *simpleRetryInterceptor) chooser() chooser.Chooser { + var cs chooser.Chooser + if cs = interceptor.config.Chooser; cs == nil { + defaultChooserOnce.Do(func() { + defaultChooser = chooser.NewShuffleChooser(chooser.NewSmartIPChooser(nil)) + }) + cs = defaultChooser + } + return cs +} + func (interceptor *simpleRetryInterceptor) choose(req *http.Request, ips []net.IP, hostname string) (*http.Request, []net.IP) { if len(ips) > 0 { - if cs := interceptor.config.Chooser; cs != nil { - ips = cs.Choose(req.Context(), ips, &chooser.ChooseOptions{Domain: hostname}) - } + ips = interceptor.chooser().Choose(req.Context(), ips, &chooser.ChooseOptions{Domain: hostname}) req = req.WithContext(clientv1.WithResolvedIPs(req.Context(), hostname, ips)) } return req, ips @@ -172,21 +213,17 @@ func (interceptor *simpleRetryInterceptor) choose(req *http.Request, ips []net.I func (interceptor *simpleRetryInterceptor) feedbackGood(req *http.Request, hostname string, ips []net.IP) { if len(ips) > 0 { - if cs := interceptor.config.Chooser; cs != nil { - cs.FeedbackGood(req.Context(), ips, &chooser.FeedbackOptions{Domain: hostname}) - } + interceptor.chooser().FeedbackGood(req.Context(), 
ips, &chooser.FeedbackOptions{Domain: hostname}) } } func (interceptor *simpleRetryInterceptor) feedbackBad(req *http.Request, hostname string, ips []net.IP) { if len(ips) > 0 { - if cs := interceptor.config.Chooser; cs != nil { - cs.FeedbackBad(req.Context(), ips, &chooser.FeedbackOptions{Domain: hostname}) - } + interceptor.chooser().FeedbackBad(req.Context(), ips, &chooser.FeedbackOptions{Domain: hostname}) } } -func (interceptor *simpleRetryInterceptor) backoff(req *http.Request, attempts int) { +func (interceptor *simpleRetryInterceptor) wait(req *http.Request, attempts int) { retryInterval := interceptor.config.getRetryInterval(req.Context(), attempts) if interceptor.config.BeforeBackoff != nil { interceptor.config.BeforeBackoff(req, &retrier.RetrierOptions{Attempts: attempts}, retryInterval) diff --git a/internal/clientv2/interceptor_retry_simple_test.go b/internal/clientv2/interceptor_retry_simple_test.go index 4346e971..731e6d16 100644 --- a/internal/clientv2/interceptor_retry_simple_test.go +++ b/internal/clientv2/interceptor_retry_simple_test.go @@ -210,10 +210,10 @@ func TestSimpleNotRetryInterceptor(t *testing.T) { Header: nil, GetBody: nil, }) - duration := float32(time.Now().UnixNano()-start.UnixNano()) / 1e9 + duration := time.Since(start) // 不重试,只执行一次,不等待 - if duration > 0.3 { + if duration > 500*time.Millisecond { t.Fatalf("retry interval may be error") } @@ -236,7 +236,7 @@ func TestRetryInterceptorWithBackoff(t *testing.T) { callbackedCount := 0 rInterceptor := NewSimpleRetryInterceptor(SimpleRetryConfig{ RetryMax: retryMax, - Backoff: backoff.NewExponentialBackoff(100*time.Millisecond, 2), + Backoff: backoff.NewExponentialBackoff(1*time.Second, 2), ShouldRetry: func(req *http.Request, resp *http.Response, err error) bool { return true }, @@ -259,7 +259,7 @@ if options.Attempts != (doCount - 1) { t.Fatalf("unexpected attempts:%d", options.Attempts) } - if duration != 100*time.Millisecond*time.Duration(math.Pow(2, float64(options.Attempts))) { + if duration != 1*time.Second*time.Duration(math.Pow(2, float64(options.Attempts))) { t.Fatalf("unexpected duration:%v", duration) } }, @@ -268,7 +268,7 @@ if options.Attempts != (doCount - 1) { t.Fatalf("unexpected attempts:%d", options.Attempts) } - if duration != 100*time.Millisecond*time.Duration(math.Pow(2, float64(options.Attempts))) { + if duration != 1*time.Second*time.Duration(math.Pow(2, float64(options.Attempts))) { t.Fatalf("unexpected duration:%v", duration) } }, @@ -314,10 +314,10 @@ Header: nil, GetBody: nil, }) - duration := float32(time.Now().UnixNano()-start.UnixNano()) / float32(time.Millisecond) + duration := time.Since(start) - if duration > 3100+50 || duration < 3100-50 { - t.Fatalf("retry interval may be error:%f", duration) + if d := duration - 31*time.Second; d >= 900*time.Millisecond || d <= -900*time.Millisecond { + t.Fatalf("retry interval may be error:%v", duration) } if (retryMax + 1) != doCount { diff --git a/internal/clientv2/interceptor_uptoken.go b/internal/clientv2/interceptor_uptoken.go new file mode 100644 index 00000000..f7468939 --- /dev/null +++ b/internal/clientv2/interceptor_uptoken.go @@ -0,0 +1,42 @@ +package clientv2 + +import ( + "net/http" + + "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +type UpTokenConfig struct { + // upload token + UpToken uptoken.UpTokenProvider +} + +type uptokenInterceptor struct { + config UpTokenConfig 
+} + +func NewUpTokenInterceptor(config UpTokenConfig) Interceptor { + return &uptokenInterceptor{ + config: config, + } +} + +func (interceptor *uptokenInterceptor) Priority() InterceptorPriority { + return InterceptorPriorityAuth +} + +func (interceptor *uptokenInterceptor) Intercept(req *http.Request, handler Handler) (*http.Response, error) { + if interceptor == nil || req == nil { + return handler(req) + } + + if upToken := interceptor.config.UpToken; upToken != nil { + if upToken, err := upToken.GetUpToken(req.Context()); err != nil { + return nil, err + } else { + req.Header.Set("Authorization", "UpToken "+upToken) + } + } + + return handler(req) +} diff --git a/internal/clientv2/request.go b/internal/clientv2/request.go index c433dc70..8771d1fa 100644 --- a/internal/clientv2/request.go +++ b/internal/clientv2/request.go @@ -7,6 +7,7 @@ import ( "io" "net/http" "net/url" + "strconv" "strings" "github.com/qiniu/go-sdk/v7/conf" @@ -30,6 +31,7 @@ func GetJsonRequestBody(object interface{}) (GetRequestBody, error) { } return func(o *RequestParams) (io.ReadCloser, error) { o.Header.Set("Content-Type", conf.CONTENT_TYPE_JSON) + o.Header.Set("Content-Length", strconv.Itoa(len(reqBody))) return internal_io.NewReadSeekableNopCloser(bytes.NewReader(reqBody)), nil }, nil } @@ -38,6 +40,7 @@ func GetFormRequestBody(info map[string][]string) GetRequestBody { body := formStringInfo(info) return func(o *RequestParams) (io.ReadCloser, error) { o.Header.Set("Content-Type", conf.CONTENT_TYPE_FORM) + o.Header.Set("Content-Length", strconv.Itoa(len(body))) return internal_io.NewReadSeekableNopCloser(strings.NewReader(body)), nil } } @@ -50,12 +53,13 @@ func formStringInfo(info map[string][]string) string { } type RequestParams struct { - Context context.Context - Method string - Url string - Header http.Header - GetBody GetRequestBody - BufferResponse bool + Context context.Context + Method string + Url string + Header http.Header + GetBody GetRequestBody + BufferResponse bool + OnRequestProgress RequestBodyProgress } func (o *RequestParams) init() { @@ -79,13 +83,28 @@ func (o *RequestParams) init() { } func NewRequest(options RequestParams) (req *http.Request, err error) { + var ( + bodyWrapper *requestBodyWrapperWithProgress = nil + contentLength uint64 + ) + options.init() body, err := options.GetBody(&options) if err != nil { return nil, err } - req, err = http.NewRequest(options.Method, options.Url, body) + if options.OnRequestProgress != nil && body != nil { + if contentLengthHeaderValue := options.Header.Get("Content-Length"); contentLengthHeaderValue != "" { + contentLength, _ = strconv.ParseUint(contentLengthHeaderValue, 10, 64) + } + bodyWrapper = &requestBodyWrapperWithProgress{body: body, expectedSize: contentLength, callback: options.OnRequestProgress} + } + if bodyWrapper != nil { + req, err = http.NewRequest(options.Method, options.Url, bodyWrapper) + } else { + req, err = http.NewRequest(options.Method, options.Url, body) + } if err != nil { return } @@ -93,13 +112,47 @@ func NewRequest(options RequestParams) (req *http.Request, err error) { req = req.WithContext(options.Context) } if options.BufferResponse { - req = req.WithContext(context.WithValue(options.Context, contextKeyBufferResponse{}, struct{}{})) + req = req.WithContext(context.WithValue(options.Context, bufferResponseContextKey{}, struct{}{})) } req.Header = options.Header if options.GetBody != nil && body != nil && body != http.NoBody { req.GetBody = func() (io.ReadCloser, error) { - return options.GetBody(&options) + 
reqBody, err := options.GetBody(&options) + if err != nil { + return nil, err + } + if bodyWrapper != nil { + return &requestBodyWrapperWithProgress{ + body: reqBody, + expectedSize: contentLength, + callback: options.OnRequestProgress, + }, nil + } else { + return reqBody, nil + } } } return } + +type ( + RequestBodyProgress func(uint64, uint64) + requestBodyWrapperWithProgress struct { + body io.ReadCloser + haveReadSize, expectedSize uint64 + callback RequestBodyProgress + } +) + +func (wrapper *requestBodyWrapperWithProgress) Read(p []byte) (n int, err error) { + n, err = wrapper.body.Read(p) + if callback := wrapper.callback; callback != nil && n > 0 { + wrapper.haveReadSize += uint64(n) + callback(wrapper.haveReadSize, wrapper.expectedSize) + } + return +} + +func (wrapper *requestBodyWrapperWithProgress) Close() error { + return wrapper.body.Close() +} diff --git a/internal/clientv2/request_test.go b/internal/clientv2/request_test.go index b1e4d1cd..729c33b0 100644 --- a/internal/clientv2/request_test.go +++ b/internal/clientv2/request_test.go @@ -26,6 +26,8 @@ func TestGetJsonRequestBody(t *testing.T) { t.Fatal("invalid body") } else if params.Header.Get("Content-Type") != "application/json" { t.Fatal("invalid header") + } else if params.Header.Get("Content-Length") != "21" { + t.Fatal("invalid header") } else if err = readCloser.Close(); err != nil { t.Fatal(err) } @@ -61,6 +63,8 @@ func TestGetFormRequestBody(t *testing.T) { t.Fatal("invalid body") } else if params.Header.Get("Content-Type") != "application/x-www-form-urlencoded" { t.Fatal("invalid header") + } else if params.Header.Get("Content-Length") != "22" { + t.Fatal("invalid header") } else if err = readCloser.Close(); err != nil { t.Fatal(err) } diff --git a/internal/io/compatible.go b/internal/io/compatible.go index d5a870f0..cd207a89 100644 --- a/internal/io/compatible.go +++ b/internal/io/compatible.go @@ -3,6 +3,7 @@ package io import ( "errors" "io" + "syscall" ) func MakeReadSeekCloserFromReader(r io.Reader) ReadSeekCloser { @@ -21,7 +22,7 @@ func (r *readSeekCloserFromReader) Seek(offset int64, whence int) (int64, error) if seeker, ok := r.r.(io.Seeker); ok { return seeker.Seek(offset, whence) } - return 0, errors.New("not support seek") + return 0, syscall.ESPIPE } func (r *readSeekCloserFromReader) Close() error { diff --git a/internal/io/go1.19.go b/internal/io/go1.19.go new file mode 100644 index 00000000..9f7e9d57 --- /dev/null +++ b/internal/io/go1.19.go @@ -0,0 +1,58 @@ +//go:build !go1.20 +// +build !go1.20 + +package io + +import ( + "errors" + "io" +) + +var ( + errWhence = errors.New("Seek: invalid whence") + errOffset = errors.New("Seek: invalid offset") +) + +// An OffsetWriter maps writes at offset base to offset base+off in the underlying writer. +type OffsetWriter struct { + w io.WriterAt + base int64 // the original offset + off int64 // the current offset +} + +// NewOffsetWriter returns an [OffsetWriter] that writes to w +// starting at offset off. 
+func NewOffsetWriter(w io.WriterAt, off int64) *OffsetWriter { + return &OffsetWriter{w, off, off} +} + +func (o *OffsetWriter) Write(p []byte) (n int, err error) { + n, err = o.w.WriteAt(p, o.off) + o.off += int64(n) + return +} + +func (o *OffsetWriter) WriteAt(p []byte, off int64) (n int, err error) { + if off < 0 { + return 0, errOffset + } + + off += o.base + return o.w.WriteAt(p, off) +} + +func (o *OffsetWriter) Seek(offset int64, whence int) (int64, error) { + switch whence { + default: + return 0, errWhence + case io.SeekStart: + offset += o.base + case io.SeekCurrent: + offset += o.off + } + if offset < o.base { + return 0, errOffset + } + o.off = offset + return offset - o.base, nil +} diff --git a/internal/io/go1.20.go b/internal/io/go1.20.go new file mode 100644 index 00000000..09a4b92c --- /dev/null +++ b/internal/io/go1.20.go @@ -0,0 +1,10 @@ +//go:build go1.20 +// +build go1.20 + +package io + +import "io" + +type OffsetWriter = io.OffsetWriter + +var NewOffsetWriter = io.NewOffsetWriter diff --git a/storage/bucket.go b/storage/bucket.go index bdcfdb52..0f1c4d2b 100644 --- a/storage/bucket.go +++ b/storage/bucket.go @@ -14,7 +14,6 @@ import ( "net/http" "net/url" "strings" - "sync" "time" "github.com/qiniu/go-sdk/v7/internal/clientv2" @@ -23,6 +22,7 @@ import ( "github.com/qiniu/go-sdk/v7/storagev2/apis/get_bucket_domains_v3" "github.com/qiniu/go-sdk/v7/storagev2/backoff" "github.com/qiniu/go-sdk/v7/storagev2/chooser" + "github.com/qiniu/go-sdk/v7/storagev2/downloader" "github.com/qiniu/go-sdk/v7/storagev2/http_client" "github.com/qiniu/go-sdk/v7/storagev2/resolver" "github.com/qiniu/go-sdk/v7/storagev2/retrier" @@ -343,11 +343,12 @@ type BucketManagerOptions struct { // BucketManager 提供了对资源进行管理的操作 type BucketManager struct { - Client *clientv1.Client - Mac *auth.Credentials - Cfg *Config - options BucketManagerOptions - apiClient *apis.Storage + Client *clientv1.Client + Mac *auth.Credentials + Cfg *Config + options BucketManagerOptions + apiClient *apis.Storage + downloadManager *downloader.DownloadManager } // NewBucketManager 用来构建一个新的资源管理对象 @@ -401,11 +402,12 @@ func NewBucketManagerExWithOptions(mac *auth.Credentials, cfg *Config, clt *clie } return &BucketManager{ - Client: clt, - Mac: mac, - Cfg: cfg, - options: options, - apiClient: apis.NewStorage(&opts), + Client: clt, + Mac: mac, + Cfg: cfg, + options: options, + apiClient: apis.NewStorage(&opts), + downloadManager: downloader.NewDownloadManager(&downloader.DownloadManagerOptions{Options: opts}), } } @@ -1111,58 +1113,6 @@ func (m *BucketManager) makeRequestOptions() *apis.Options { return &apis.Options{OverwrittenBucketHosts: getUcEndpoint(m.Cfg.UseHTTPS, nil)} } -var ( - defaultResolver resolver.Resolver - defaultResolverMutex sync.Mutex -) - -func (m *BucketManager) resolver() (resolver.Resolver, error) { - var err error - - if m.options.Resolver != nil { - return m.options.Resolver, nil - } - defaultResolverMutex.Lock() - defer defaultResolverMutex.Unlock() - - if defaultResolver != nil { - return defaultResolver, nil - } - - if defaultResolver, err = resolver.NewCacheResolver(nil, nil); err != nil { - return nil, err - } else { - return defaultResolver, nil - } -} - -var ( - defaultChooser chooser.Chooser - defaultChooserMutex sync.Mutex -) - -func (m *BucketManager) chooser() chooser.Chooser { - if m.options.Chooser != nil { - return m.options.Chooser - } - defaultChooserMutex.Lock() - defer defaultChooserMutex.Unlock() - - if defaultChooser != nil { - return defaultChooser - } - defaultChooser = 
chooser.NewShuffleChooser(chooser.NewSmartIPChooser(nil)) - return defaultChooser -} - -func (m *BucketManager) backoff() backoff.Backoff { - return m.options.Backoff -} - -func (m *BucketManager) retrier() retrier.Retrier { - return m.options.Retrier -} - // 构建op的方法,导出的方法支持在Batch操作中使用 // URIStat 构建 stat 接口的请求命令 diff --git a/storage/bucket_get.go b/storage/bucket_get.go index d1b4e187..9d1356c4 100644 --- a/storage/bucket_get.go +++ b/storage/bucket_get.go @@ -3,14 +3,16 @@ package storage import ( "context" "errors" + "fmt" "io" "net/http" "net/url" - "strconv" "strings" + "sync/atomic" "time" - clientV1 "github.com/qiniu/go-sdk/v7/client" + "github.com/qiniu/go-sdk/v7/storagev2/downloader" + "github.com/qiniu/go-sdk/v7/storagev2/region" ) type GetObjectInput struct { @@ -46,6 +48,51 @@ func (g *GetObjectOutput) Close() error { return g.Body.Close() } +type ( + trafficLimitDownloadURLsProvider struct { + base downloader.DownloadURLsProvider + trafficLimit uint64 + } + trafficLimitURLsIter struct { + iter downloader.URLsIter + trafficLimit uint64 + } +) + +func (p trafficLimitURLsIter) Peek(u *url.URL) (bool, error) { + if ok, err := p.iter.Peek(u); err != nil { + return ok, err + } else if !ok { + return false, nil + } else { + if u.RawQuery != "" { + u.RawQuery += "&" + } + u.RawQuery += fmt.Sprintf("X-Qiniu-Traffic-Limit=%d", p.trafficLimit) + return true, nil + } +} + +func (p trafficLimitURLsIter) Next() { + p.iter.Next() +} + +func (p trafficLimitURLsIter) Reset() { + p.iter.Reset() +} + +func (p trafficLimitURLsIter) Clone() downloader.URLsIter { + return trafficLimitURLsIter{p.iter.Clone(), p.trafficLimit} +} + +func (p trafficLimitDownloadURLsProvider) GetURLsIter(ctx context.Context, objectName string, options *downloader.GenerateOptions) (downloader.URLsIter, error) { + if urlsIter, err := p.base.GetURLsIter(ctx, objectName, options); err != nil { + return nil, err + } else { + return trafficLimitURLsIter{urlsIter, p.trafficLimit}, nil + } +} + // Get // // @Description: 下载文件 @@ -57,119 +104,110 @@ func (g *GetObjectOutput) Close() error { // @return error 请求错误信息 func (m *BucketManager) Get(bucket, key string, options *GetObjectInput) (*GetObjectOutput, error) { if options == nil { - options = &GetObjectInput{ - DownloadDomains: nil, - PresignUrl: false, - Range: "", - } - } - - domain := "" - if len(options.DownloadDomains) > 0 { - // 使用用户配置域名 - domain = options.DownloadDomains[0] - } else { - resolver, e := m.resolver() - if e != nil { - return nil, e - } - // 查源站域名 - if rg, e := getRegionByV4(m.Mac.AccessKey, bucket, UCApiOptions{ - UseHttps: m.Cfg.UseHTTPS, - RetryMax: m.options.RetryMax, - HostFreezeDuration: m.options.HostFreezeDuration, - Resolver: resolver, - Chooser: m.chooser(), - Backoff: m.backoff(), - Retrier: m.retrier(), - }); e != nil { - return nil, e - } else if len(rg.regions) == 0 { - return nil, errors.New("can't get region with bucket") - } else { - domain = rg.regions[0].IoSrcHost - } - options.PresignUrl = true + options = &GetObjectInput{} } - - if len(domain) == 0 { - return nil, errors.New("download domain is empty") - } - - query := url.Values{} - if options.TrafficLimit > 0 { - query.Add("X-Qiniu-Traffic-Limit", strconv.FormatUint(options.TrafficLimit, 10)) + ctx := options.Context + if ctx == nil { + ctx = context.Background() } - downloadUrl := endpoint(m.Cfg.UseHTTPS, domain) - if options.PresignUrl { - deadline := time.Now().Unix() + 3*60 - downloadUrl = MakePrivateURLv2WithQuery(m.Mac, downloadUrl, key, query, deadline) - } else { - downloadUrl 
= MakePublicURLv2WithQuery(key, downloadUrl, query) + bucketHosts, err := getUcEndpoint(m.Cfg.UseHTTPS, nil).GetEndpoints(ctx) + if err != nil { + return nil, err } - - resp, err := m.getWithDownloadUrl(options.Context, downloadUrl, options.Range, options.TrafficLimit) - if err == nil && resp == nil { - return nil, errors.New("response is empty") + var accessKey string + if m.Mac != nil { + accessKey = m.Mac.AccessKey } - - // err 和 resp 必须有一个有值 - var output *GetObjectOutput = nil - if resp != nil { - output = &GetObjectOutput{ - ContentType: "", - ContentLength: resp.ContentLength, - ETag: "", - Metadata: nil, - LastModified: time.Time{}, - Body: resp.Body, + urlsProvider := downloader.NewDefaultSrcURLsProvider(accessKey, &downloader.DefaultSrcURLsProviderOptions{ + BucketRegionsQueryOptions: region.BucketRegionsQueryOptions{}, + BucketHosts: bucketHosts, + }) + urlsProvider = m.applyPresignOnUrlsProvider(m.applyTrafficLimitOnUrlsProvider(urlsProvider, options.TrafficLimit)) + if len(options.DownloadDomains) > 0 { + staticDomainBasedURLsProvider := downloader.NewStaticDomainBasedURLsProvider(options.DownloadDomains) + staticDomainBasedURLsProvider = m.applyTrafficLimitOnUrlsProvider(staticDomainBasedURLsProvider, options.TrafficLimit) + if options.PresignUrl { + staticDomainBasedURLsProvider = m.applyPresignOnUrlsProvider(staticDomainBasedURLsProvider) } + urlsProvider = downloader.CombineDownloadURLsProviders(staticDomainBasedURLsProvider, urlsProvider) } - - if err != nil { - return output, err + reqHeaders := make(http.Header) + if options.Range != "" { + reqHeaders.Set("Range", options.Range) } - - // err 为空, resp 必有值 - if resp.StatusCode/100 != 2 { - return output, ResponseError(resp) + var ( + getObjectOutput GetObjectOutput + headerChan = make(chan struct{}) + errChan = make(chan error) + areChansClosed int32 + ) + defer func() { + atomic.StoreInt32(&areChansClosed, 1) + close(headerChan) + close(errChan) + }() + + objectOptions := downloader.ObjectOptions{ + DestinationDownloadOptions: downloader.DestinationDownloadOptions{ + Header: reqHeaders, + OnResponseHeader: func(h http.Header) { + defer func() { + if atomic.LoadInt32(&areChansClosed) == 0 { + headerChan <- struct{}{} + } + }() + getObjectOutput.ContentType = h.Get("Content-Type") + getObjectOutput.ETag = parseEtag(h.Get("ETag")) + + lm := h.Get("Last-Modified") + if len(lm) > 0 { + if t, e := time.Parse(time.RFC1123, lm); e == nil { + getObjectOutput.LastModified = t + } + } + + metaData := make(map[string]string) + for k, v := range h { + if len(v) > 0 && strings.HasPrefix(strings.ToLower(k), "x-qn-meta-") { + metaData[k] = v[0] + } + } + getObjectOutput.Metadata = metaData + }}, + GenerateOptions: downloader.GenerateOptions{ + BucketName: bucket, + UseInsecureProtocol: !m.Cfg.UseHTTPS, + }, + DownloadURLsProvider: urlsProvider, } - if resp.Header != nil { - output.ContentType = resp.Header.Get("Content-Type") - output.ETag = parseEtag(resp.Header.Get("ETag")) - - lm := resp.Header.Get("Last-Modified") - if len(lm) > 0 { - if t, e := time.Parse(time.RFC1123, lm); e == nil { - output.LastModified = t - } - } - - metaData := make(map[string]string) - for k, v := range resp.Header { - if len(v) > 0 && strings.HasPrefix(strings.ToLower(k), "x-qn-meta-") { - metaData[k] = v[0] - } + pipeR, pipeW := io.Pipe() + getObjectOutput.Body = pipeR + go func() { + n, err := m.downloadManager.DownloadToWriter(ctx, key, pipeW, &objectOptions) + getObjectOutput.ContentLength = int64(n) + if atomic.LoadInt32(&areChansClosed) == 0 { + 
errChan <- err } - output.Metadata = metaData + pipeW.CloseWithError(err) + }() + + select { + case <-headerChan: + return &getObjectOutput, nil + case err := <-errChan: + return &getObjectOutput, err } - - return output, nil } -func (m *BucketManager) getWithDownloadUrl(ctx context.Context, downloadUrl string, contentRange string, trafficLimit uint64) (*http.Response, error) { - if ctx == nil { - ctx = context.Background() +func (m *BucketManager) applyTrafficLimitOnUrlsProvider(urlsProvider downloader.DownloadURLsProvider, trafficLimit uint64) downloader.DownloadURLsProvider { + if trafficLimit > 0 { + urlsProvider = trafficLimitDownloadURLsProvider{urlsProvider, trafficLimit} } + return urlsProvider +} - req, err := http.NewRequest(http.MethodGet, downloadUrl, nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - - clientV1.AddHttpHeaderRange(req.Header, contentRange) - - return m.Client.Do(ctx, req) +func (m *BucketManager) applyPresignOnUrlsProvider(urlsProvider downloader.DownloadURLsProvider) downloader.DownloadURLsProvider { + signOptions := downloader.SignOptions{TTL: 3 * time.Minute} + return downloader.SignURLsProvider(urlsProvider, downloader.NewCredentialsSigner(m.Mac), &signOptions) } diff --git a/storage/form_upload.go b/storage/form_upload.go index a7ddb4b1..9174b79c 100644 --- a/storage/form_upload.go +++ b/storage/form_upload.go @@ -1,21 +1,18 @@ package storage import ( - "bytes" "context" - "errors" - "hash/crc32" "io" - "os" "path" "path/filepath" "strings" "time" "github.com/qiniu/go-sdk/v7/client" - internal_io "github.com/qiniu/go-sdk/v7/internal/io" - "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/internal/clientv2" "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" + "github.com/qiniu/go-sdk/v7/storagev2/uploader" "github.com/qiniu/go-sdk/v7/storagev2/uptoken" ) @@ -62,8 +59,8 @@ type FormUploader struct { // Deprecated Client *client.Client // Deprecated - Cfg *Config - storage *apis.Storage + Cfg *Config + uploader uploader.Uploader } // NewFormUploader 用来构建一个表单上传的对象 @@ -83,15 +80,16 @@ func NewFormUploaderEx(cfg *Config, clt *client.Client) *FormUploader { opts := http_client.Options{ BasicHTTPClient: clt.Client, UseInsecureProtocol: !cfg.UseHTTPS, + HostRetryConfig: &clientv2.RetryConfig{}, } if region := cfg.GetRegion(); region != nil { opts.Regions = region } return &FormUploader{ - Client: clt, - Cfg: cfg, - storage: apis.NewStorage(&opts), + Client: clt, + Cfg: cfg, + uploader: uploader.NewFormUploader(&uploader.FormUploaderOptions{Options: opts}), } } @@ -124,21 +122,15 @@ func (p *FormUploader) PutFileWithoutKey( func (p *FormUploader) putFile( ctx context.Context, ret interface{}, upToken string, - key string, hasKey bool, localFile string, extra *PutExtra) (err error) { - - f, err := os.Open(localFile) - if err != nil { - return - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - return + key string, hasKey bool, localFile string, extra *PutExtra) error { + if extra == nil { + extra = &PutExtra{} } - fsize := fi.Size() + extra.init() - return p.put(ctx, ret, upToken, key, hasKey, f, fsize, extra, filepath.Base(localFile)) + objectParams := p.newObjectParams(upToken, key, hasKey, extra) + objectParams.FileName = filepath.Base(localFile) + return p.uploader.UploadFile(ctx, localFile, objectParams, ret) } // Put 用来以表单方式上传一个文件。 @@ -174,55 +166,43 @@ func (p *FormUploader) PutWithoutKey( func (p *FormUploader) put( ctx context.Context, ret 
interface{}, upToken string, key string, hasKey bool, data io.Reader, size int64, extra *PutExtra, fileName string) error { - if extra == nil { extra = &PutExtra{} } extra.init() - seekableData, ok := data.(io.ReadSeeker) - if !ok { - dataBytes, rErr := internal_io.ReadAll(data) - if rErr != nil { - return rErr - } - if size <= 0 { - size = int64(len(dataBytes)) - } - seekableData = bytes.NewReader(dataBytes) - } + objectParams := p.newObjectParams(upToken, key, hasKey, extra) + objectParams.FileName = "Untitled" - return p.putSeekableData(ctx, ret, upToken, key, hasKey, seekableData, size, extra, fileName) + return p.uploader.UploadReader(ctx, data, objectParams, ret) } -func (p *FormUploader) putSeekableData(ctx context.Context, ret interface{}, upToken string, - key string, hasKey bool, data io.ReadSeeker, dataSize int64, extra *PutExtra, fileName string) error { - if fileName == "" { - fileName = "Untitled" +func (p *FormUploader) newObjectParams(upToken string, key string, hasKey bool, extra *PutExtra) *uploader.ObjectOptions { + objectParams := uploader.ObjectOptions{ + ContentType: extra.MimeType, + UpToken: uptoken.NewParser(upToken), } - var fileReader io.Reader = data - if extra.OnProgress != nil { - fileReader = &readerWithProgress{reader: data, fsize: dataSize, onProgress: extra.OnProgress} + if hasKey { + objectParams.ObjectName = &key } - - request := apis.PostObjectRequest{ - ObjectName: makeKeyForUploading(key, hasKey), - UploadToken: uptoken.NewParser(upToken), - File: http_client.MultipartFormBinaryData{ - Data: internal_io.MakeReadSeekCloserFromLimitedReader(fileReader, dataSize), - Name: fileName, - ContentType: extra.MimeType, - }, - CustomData: makeCustomData(extra.Params), - ResponseBody: ret, + if extra.UpHost != "" { + objectParams.RegionsProvider = ®ion.Region{ + Up: region.Endpoints{Preferred: []string{extra.UpHost}}, + } + } else if p.Cfg.UpHost != "" { + objectParams.RegionsProvider = ®ion.Region{ + Up: region.Endpoints{Preferred: []string{p.Cfg.UpHost}}, + } + } else if region := p.Cfg.GetRegion(); region != nil { + objectParams.RegionsProvider = region } - if crc32, ok, err := crc32FromReader(data); err != nil { - return err - } else if ok { - request.Crc32 = int64(crc32) + objectParams.Metadata, objectParams.CustomVars = splitParams(extra.Params) + if extra.OnProgress != nil { + objectParams.OnUploadingProgress = func(progress *uploader.UploadingProgress) { + extra.OnProgress(int64(progress.TotalSize), int64(progress.Uploaded)) + } } - _, err := p.storage.PostObject(ctx, &request, makeApiOptionsFromUpHost(extra.UpHost)) - return err + return &objectParams } // Deprecated @@ -230,39 +210,6 @@ func (p *FormUploader) UpHost(ak, bucket string) (upHost string, err error) { return getUpHost(p.Cfg, 0, 0, ak, bucket) } -type readerWithProgress struct { - reader io.Reader - uploaded int64 - fsize int64 - onProgress func(fsize, uploaded int64) -} - -func (p *readerWithProgress) Read(b []byte) (n int, err error) { - if p.uploaded > 0 { - p.onProgress(p.fsize, p.uploaded) - } - - n, err = p.reader.Read(b) - p.uploaded += int64(n) - if p.fsize > 0 && p.uploaded > p.fsize { - p.uploaded = p.fsize - } - return -} - -func (p *readerWithProgress) Seek(offset int64, whence int) (int64, error) { - if seeker, ok := p.reader.(io.Seeker); ok { - pos, err := seeker.Seek(offset, whence) - if err != nil { - return pos, err - } - p.uploaded = pos - p.onProgress(p.fsize, p.uploaded) - return pos, nil - } - return 0, errors.New("resource not support seek") -} - func 
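
newObjectParams above routes the legacy PutExtra.Params map through splitParams (defined just below): keys prefixed "x:" become custom variables, keys prefixed "x-qn-meta-" become metadata, and empty values, as well as keys matching neither prefix, are dropped. A runnable illustration, with the function body copied from this patch:

```go
package main

import (
	"fmt"
	"strings"
)

// splitParams mirrors the helper added in this patch.
func splitParams(params map[string]string) (metadata, customVars map[string]string) {
	metadata = make(map[string]string)
	customVars = make(map[string]string)
	for k, v := range params {
		if v == "" {
			continue // empty values are silently ignored
		}
		if strings.HasPrefix(k, "x:") {
			customVars[k] = v
		} else if strings.HasPrefix(k, "x-qn-meta-") {
			metadata[k] = v
		}
		// keys matching neither prefix are discarded
	}
	return metadata, customVars
}

func main() {
	meta, vars := splitParams(map[string]string{
		"x:price":        "100",
		"x-qn-meta-name": "report.pdf",
		"x:empty":        "", // dropped: empty value
		"unrelated":      "dropped: no recognized prefix",
	})
	fmt.Println(meta) // map[x-qn-meta-name:report.pdf]
	fmt.Println(vars) // map[x:price:100]
}
```
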
makeCustomData(params map[string]string) map[string]string { customData := make(map[string]string, len(params)) for k, v := range params { @@ -273,20 +220,18 @@ func makeCustomData(params map[string]string) map[string]string { return customData } -func crc32FromReader(r io.Reader) (uint32, bool, error) { - if readSeeker, ok := r.(io.ReadSeeker); ok { - _, err := readSeeker.Seek(0, io.SeekStart) - if err != nil { - return 0, false, err - } - hasher := crc32.NewIEEE() - if _, err = io.Copy(hasher, readSeeker); err != nil { - return 0, false, err +func splitParams(params map[string]string) (metadata, customVars map[string]string) { + metadata = make(map[string]string) + customVars = make(map[string]string) + for k, v := range params { + if v == "" { + continue } - if _, err = readSeeker.Seek(0, io.SeekStart); err != nil { - return 0, false, err + if strings.HasPrefix(k, "x:") { + customVars[k] = v + } else if strings.HasPrefix(k, "x-qn-meta-") { + metadata[k] = v } - return hasher.Sum32(), true, nil } - return 0, false, nil + return metadata, customVars } diff --git a/storage/form_upload_test.go b/storage/form_upload_test.go index ef3f4b91..c79ccae8 100644 --- a/storage/form_upload_test.go +++ b/storage/form_upload_test.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "fmt" + "io" "io/ioutil" "math/rand" "os" @@ -30,8 +31,18 @@ func TestFormUploadPutFileWithoutExtra(t *testing.T) { } defer os.Remove(testLocalFile.Name()) + r := rand.New(rand.NewSource(time.Now().UnixNano())) + _, err = io.CopyN(testLocalFile, r, 10*1024*1024) + if err != nil { + t.Fatalf("ioutil.TempFile file write failed, err: %v", err) + } + _, err = testLocalFile.Seek(0, io.SeekCurrent) + if err != nil { + t.Fatalf("ioutil.TempFile file seek failed, err: %v", err) + } + upToken := putPolicy.UploadToken(mac) - testKey := "testPutFileWithoutExtra" + testKey := fmt.Sprintf("testPutFileWithoutExtra_%d", r.Int()) err = formUploader.PutFile(ctx, &putRet, upToken, testKey, testLocalFile.Name(), nil) if err != nil { @@ -51,6 +62,16 @@ func TestFormUploadPutFile(t *testing.T) { } defer os.Remove(testLocalFile.Name()) + r := rand.New(rand.NewSource(time.Now().UnixNano())) + _, err = io.CopyN(testLocalFile, r, 10*1024*1024) + if err != nil { + t.Fatalf("ioutil.TempFile file write failed, err: %v", err) + } + _, err = testLocalFile.Seek(0, io.SeekCurrent) + if err != nil { + t.Fatalf("ioutil.TempFile file seek failed, err: %v", err) + } + putPolicy := PutPolicy{ Scope: testBucket, DeleteAfterDays: 7, @@ -58,7 +79,6 @@ func TestFormUploadPutFile(t *testing.T) { upToken := putPolicy.UploadToken(mac) upHosts := []string{testUpHost, "https://" + testUpHost, ""} for _, upHost := range upHosts { - r := rand.New(rand.NewSource(time.Now().UnixNano())) testKey := fmt.Sprintf("testPutFileKey_%d", r.Int()) err = formUploader.PutFile(ctx, &putRet, upToken, testKey, testLocalFile.Name(), &PutExtra{ @@ -69,7 +89,6 @@ func TestFormUploadPutFile(t *testing.T) { } t.Logf("Key: %s, Hash:%s", putRet.Key, putRet.Hash) } - } func TestFormUploadTrafficLimit(t *testing.T) { diff --git a/storage/region.go b/storage/region.go index 9a6279f7..d381dd6e 100644 --- a/storage/region.go +++ b/storage/region.go @@ -225,9 +225,9 @@ var regionMap = map[RegionID]Region{ } const ( - defaultApiHost = "api.qiniu.com" - defaultUcHost0 = "kodo-config.qiniuapi.com" - defaultUcHost1 = "uc.qbox.me" + defaultUcHost0 = "uc.qiniuapi.com" + defaultUcHost1 = "kodo-config.qiniuapi.com" + defaultUcHost2 = "uc.qbox.me" ) // UcHost 为查询空间相关域名的 API 服务地址 @@ -235,9 +235,7 @@ const ( // 
Deprecated 使用 SetUcHosts 替换 var UcHost = "" -// 公有云包括 defaultApiHost,非 uc query api 使用时需要移除 defaultApiHost -// 用户配置时,不能配置 api 域名 -var ucHosts = []string{defaultUcHost0, defaultUcHost1, defaultApiHost} +var ucHosts = []string{defaultUcHost0, defaultUcHost1, defaultUcHost2} func init() { if defaultUcHosts, err := defaults.BucketURLs(); err == nil && len(defaultUcHosts) > 0 { diff --git a/storage/region_uc_v2.go b/storage/region_uc_v2.go index 742b7550..be89a624 100644 --- a/storage/region_uc_v2.go +++ b/storage/region_uc_v2.go @@ -87,11 +87,11 @@ type UcQueryServerInfo struct { func (io UcQueryServerInfo) toMapWithoutInfo() map[string][]string { ret := make(map[string][]string) - if io.Main != nil && len(io.Main) > 0 { + if len(io.Main) > 0 { ret["main"] = io.Main } - if io.Backup != nil && len(io.Backup) > 0 { + if len(io.Backup) > 0 { ret["backup"] = io.Backup } diff --git a/storage/resume_uploader.go b/storage/resume_uploader.go index 424e059d..69cf8593 100644 --- a/storage/resume_uploader.go +++ b/storage/resume_uploader.go @@ -44,6 +44,7 @@ func NewResumeUploaderEx(cfg *Config, clt *client.Client) *ResumeUploader { opts := http_client.Options{ BasicHTTPClient: clt.Client, UseInsecureProtocol: !cfg.UseHTTPS, + HostRetryConfig: &clientv2.RetryConfig{}, } if region := cfg.GetRegion(); region != nil { opts.Regions = region @@ -258,15 +259,14 @@ func newResumeUploaderImpl(resumeUploader *ResumeUploader, bucket, key string, h opts := http_client.Options{ BasicHTTPClient: resumeUploader.Client.Client, UseInsecureProtocol: !resumeUploader.Cfg.UseHTTPS, + HostRetryConfig: &clientv2.RetryConfig{}, } if region := resumeUploader.Cfg.GetRegion(); region != nil { opts.Regions = region } if extra != nil { if extra.TryTimes > 0 { - opts.HostRetryConfig = &clientv2.RetryConfig{ - RetryMax: extra.TryTimes, - } + opts.HostRetryConfig.RetryMax = extra.TryTimes } if extra.HostFreezeDuration > 0 { opts.HostFreezeDuration = extra.HostFreezeDuration diff --git a/storage/resume_uploader_apis.go b/storage/resume_uploader_apis.go index 936a834e..2895b679 100644 --- a/storage/resume_uploader_apis.go +++ b/storage/resume_uploader_apis.go @@ -306,10 +306,6 @@ func makeApiOptionsFromUpEndpoints(upEndpoints region.EndpointsProvider) *apis.O return nil } -func makeApiOptionsFromUpHost(upHost string) *apis.Options { - return makeApiOptionsFromUpEndpoints(makeEndpointsFromUpHost(upHost)) -} - func makeKeyForUploading(key string, hasKey bool) *string { if hasKey { return &key diff --git a/storage/resume_uploader_v2.go b/storage/resume_uploader_v2.go index 0e5fe2df..b0dec2f1 100644 --- a/storage/resume_uploader_v2.go +++ b/storage/resume_uploader_v2.go @@ -45,6 +45,7 @@ func NewResumeUploaderV2Ex(cfg *Config, clt *client.Client) *ResumeUploaderV2 { opts := http_client.Options{ BasicHTTPClient: clt.Client, UseInsecureProtocol: !cfg.UseHTTPS, + HostRetryConfig: &clientv2.RetryConfig{}, } if region := cfg.GetRegion(); region != nil { opts.Regions = region @@ -244,15 +245,14 @@ func newResumeUploaderV2Impl(resumeUploader *ResumeUploaderV2, bucket, key strin opts := http_client.Options{ BasicHTTPClient: resumeUploader.Client.Client, UseInsecureProtocol: !resumeUploader.Cfg.UseHTTPS, + HostRetryConfig: &clientv2.RetryConfig{}, } if region := resumeUploader.Cfg.GetRegion(); region != nil { opts.Regions = region } if extra != nil { if extra.TryTimes > 0 { - opts.HostRetryConfig = &clientv2.RetryConfig{ - RetryMax: extra.TryTimes, - } + opts.HostRetryConfig.RetryMax = extra.TryTimes } if extra.HostFreezeDuration > 0 { 
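
The uploader constructors and both resumable impls now install an empty clientv2.RetryConfig up front, so host retry is active by default and extra.TryTimes merely overrides RetryMax instead of deciding whether a retry config exists at all. A minimal sketch of that nil-safe default pattern; RetryConfig here is a local stand-in, and treating the zero value as "library defaults" is an assumption.

```go
package main

import "fmt"

// RetryConfig is a stand-in for clientv2.RetryConfig; a zero value is
// assumed to mean "use the library's default retry behavior".
type RetryConfig struct{ RetryMax int }

type Options struct{ HostRetryConfig *RetryConfig }

func main() {
	opts := Options{HostRetryConfig: &RetryConfig{}} // always non-nil now

	tryTimes := 5 // e.g. extra.TryTimes from the caller
	if tryTimes > 0 {
		opts.HostRetryConfig.RetryMax = tryTimes // override a single field, no nil check
	}
	fmt.Println(opts.HostRetryConfig.RetryMax) // 5
}
```
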
opts.HostFreezeDuration = extra.HostFreezeDuration diff --git a/storage/resume_uploader_v2_test.go b/storage/resume_uploader_v2_test.go index a4e16787..a8edb4c5 100644 --- a/storage/resume_uploader_v2_test.go +++ b/storage/resume_uploader_v2_test.go @@ -123,6 +123,9 @@ func TestPutWithSizeV2(t *testing.T) { if err != nil { t.Error(err) } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + if _, err = io.CopyN(tmpFile, io.TeeReader(r, md5Sumer), size); err != nil { t.Error(err) } else if err = tmpFile.Close(); err != nil { diff --git a/storage/upload_manager_test.go b/storage/upload_manager_test.go index 029169e4..33398833 100644 --- a/storage/upload_manager_test.go +++ b/storage/upload_manager_test.go @@ -58,10 +58,9 @@ func TestUploadManagerFormUpload(t *testing.T) { if err != nil { t.Fatalf("create temp file error:%v", err) } - defer func() { - tempFile.Close() - os.Remove(tempFile.Name()) - }() + defer os.Remove(tempFile.Name()) + defer tempFile.Close() + tempFile.Write(data) size := int64(len(data)) @@ -149,14 +148,14 @@ func TestUploadManagerResumeV1Upload(t *testing.T) { data := make([]byte, length, length) data[0] = 8 data[length-1] = 8 + tempFile, err := ioutil.TempFile("", "TestUploadManagerFormPut") if err != nil { t.Fatalf("create temp file error:%v", err) } - defer func() { - tempFile.Close() - os.ReadFile(tempFile.Name()) - }() + defer os.Remove(tempFile.Name()) + defer tempFile.Close() + tempFile.Write(data) size := int64(len(data)) @@ -247,14 +246,14 @@ func TestUploadManagerResumeV1UploadRecord(t *testing.T) { data := make([]byte, length, length) data[0] = 8 data[length-1] = 8 + tempFile, err := ioutil.TempFile("", "TestUploadManagerFormPut") if err != nil { t.Fatalf("create temp file error:%v", err) } - defer func() { - tempFile.Close() - os.ReadFile(tempFile.Name()) - }() + defer os.Remove(tempFile.Name()) + defer tempFile.Close() + tempFile.Write(data) params := make(map[string]string) @@ -327,14 +326,14 @@ func TestUploadManagerResumeV2Upload(t *testing.T) { data := make([]byte, length, length) data[0] = 8 data[length-1] = 8 + tempFile, err := ioutil.TempFile("", "TestUploadManagerFormPut") if err != nil { t.Fatalf("create temp file error:%v", err) } - defer func() { - tempFile.Close() - os.ReadFile(tempFile.Name()) - }() + defer os.Remove(tempFile.Name()) + defer tempFile.Close() + tempFile.Write(data) size := int64(len(data)) @@ -425,14 +424,14 @@ func TestUploadManagerResumeV2UploadRecord(t *testing.T) { data := make([]byte, length, length) data[0] = 8 data[length-1] = 8 + tempFile, err := ioutil.TempFile("", "TestUploadManagerFormPut") if err != nil { t.Fatalf("create temp file error:%v", err) } - defer func() { - tempFile.Close() - os.ReadFile(tempFile.Name()) - }() + defer os.Remove(tempFile.Name()) + defer tempFile.Close() + tempFile.Write(data) params := make(map[string]string) diff --git a/storage/upload_manager_uplog_test.go b/storage/upload_manager_uplog_test.go index e6a02849..eea2a7fb 100644 --- a/storage/upload_manager_uplog_test.go +++ b/storage/upload_manager_uplog_test.go @@ -42,14 +42,14 @@ func TestUploadManagerUplogForm(t *testing.T) { data := []byte("hello, 七牛!!!") dataLen := int64(len(data)) + tempFile, err := ioutil.TempFile("", "TestUploadManagerFormPut-*") if err != nil { t.Fatalf("create temp file error:%v", err) } - defer func() { - tempFile.Close() - os.Remove(tempFile.Name()) - }() + defer os.Remove(tempFile.Name()) + defer tempFile.Close() + tempFile.Write(data) uploadManager := getUploadManagerV2([]string{"mock03.qiniu.com", 
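
The test cleanups above also fix a typo (the old deferred closure called os.ReadFile where os.Remove was intended) and rely on defer's last-in-first-out order: os.Remove is deferred first and tmpFile.Close() second, so the file is closed before it is removed, which matters on platforms such as Windows that refuse to delete an open file. A tiny demonstration; note the closure, since a deferred call's arguments would otherwise be evaluated immediately:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	f, err := ioutil.TempFile("", "defer-order-*")
	if err != nil {
		panic(err)
	}
	// Deferred calls run LIFO: Close (registered last) runs before Remove.
	defer func() { fmt.Println("removed:", os.Remove(f.Name()) == nil) }()
	defer f.Close()
	fmt.Fprintln(f, "payload")
}
```
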
"mock04.qiniu.com"}) @@ -87,107 +87,111 @@ func TestUploadManagerUplogForm(t *testing.T) { } uplogs = append(uplogs, uplog) } - if len(uplogs) != 4 { + if len(uplogs) != 10 { t.Fatalf("unexpected uplog count:%v", len(uplogs)) } - if uplogs[0]["log_type"] != "request" { - t.Fatalf("unexpected uplog log_type:%v", uplogs[0]["log_type"]) - } - if uplogs[0]["api_type"] != "kodo" { - t.Fatalf("unexpected uplog api_type:%v", uplogs[0]["api_type"]) - } - if uplogs[0]["api_name"] != "postObject" { - t.Fatalf("unexpected uplog api_name:%v", uplogs[0]["api_name"]) - } - if uplogs[0]["error_type"] != "unknown_host" { - t.Fatalf("unexpected uplog error_type:%v", uplogs[0]["error_type"]) - } - if uplogs[0]["host"] != "mock03.qiniu.com" { - t.Fatalf("unexpected uplog host:%v", uplogs[0]["host"]) - } - if uplogs[0]["path"] != "/" { - t.Fatalf("unexpected uplog path:%v", uplogs[0]["path"]) - } - if uplogs[0]["method"] != "POST" { - t.Fatalf("unexpected uplog method:%v", uplogs[0]["method"]) - } - if uplogs[0]["target_bucket"] != testBucket { - t.Fatalf("unexpected uplog target_bucket:%v", uplogs[0]["target_bucket"]) - } - if uplogs[1]["log_type"] != "request" { - t.Fatalf("unexpected uplog log_type:%v", uplogs[1]["log_type"]) - } - if uplogs[1]["api_type"] != "kodo" { - t.Fatalf("unexpected uplog api_type:%v", uplogs[1]["api_type"]) - } - if uplogs[1]["api_name"] != "postObject" { - t.Fatalf("unexpected uplog api_name:%v", uplogs[1]["api_name"]) - } - if uplogs[1]["error_type"] != "unknown_host" { - t.Fatalf("unexpected uplog error_type:%v", uplogs[1]["error_type"]) - } - if uplogs[1]["host"] != "mock04.qiniu.com" { - t.Fatalf("unexpected uplog host:%v", uplogs[1]["host"]) - } - if uplogs[1]["target_bucket"] != testBucket { - t.Fatalf("unexpected uplog target_bucket:%v", uplogs[1]["target_bucket"]) - } - if uplogs[1]["path"] != "/" { - t.Fatalf("unexpected uplog path:%v", uplogs[1]["path"]) + for i := 0; i < 4; i++ { + if uplogs[i]["log_type"] != "request" { + t.Fatalf("unexpected uplog log_type:%v", uplogs[i]["log_type"]) + } + if uplogs[i]["api_type"] != "kodo" { + t.Fatalf("unexpected uplog api_type:%v", uplogs[i]["api_type"]) + } + if uplogs[i]["api_name"] != "postObject" { + t.Fatalf("unexpected uplog api_name:%v", uplogs[i]["api_name"]) + } + if uplogs[i]["error_type"] != "unknown_host" { + t.Fatalf("unexpected uplog error_type:%v", uplogs[i]["error_type"]) + } + if uplogs[i]["host"] != "mock03.qiniu.com" { + t.Fatalf("unexpected uplog host:%v", uplogs[i]["host"]) + } + if uplogs[i]["path"] != "/" { + t.Fatalf("unexpected uplog path:%v", uplogs[i]["path"]) + } + if uplogs[i]["method"] != "POST" { + t.Fatalf("unexpected uplog method:%v", uplogs[i]["method"]) + } + if uplogs[i]["target_bucket"] != testBucket { + t.Fatalf("unexpected uplog target_bucket:%v", uplogs[i]["target_bucket"]) + } } - if uplogs[1]["method"] != "POST" { - t.Fatalf("unexpected uplog method:%v", uplogs[1]["method"]) + for i := 4; i < 8; i++ { + if uplogs[i]["log_type"] != "request" { + t.Fatalf("unexpected uplog log_type:%v", uplogs[i]["log_type"]) + } + if uplogs[i]["api_type"] != "kodo" { + t.Fatalf("unexpected uplog api_type:%v", uplogs[i]["api_type"]) + } + if uplogs[i]["api_name"] != "postObject" { + t.Fatalf("unexpected uplog api_name:%v", uplogs[i]["api_name"]) + } + if uplogs[i]["error_type"] != "unknown_host" { + t.Fatalf("unexpected uplog error_type:%v", uplogs[i]["error_type"]) + } + if uplogs[i]["host"] != "mock04.qiniu.com" { + t.Fatalf("unexpected uplog host:%v", uplogs[i]["host"]) + } + if uplogs[i]["target_bucket"] 
!= testBucket { + t.Fatalf("unexpected uplog target_bucket:%v", uplogs[i]["target_bucket"]) + } + if uplogs[i]["path"] != "/" { + t.Fatalf("unexpected uplog path:%v", uplogs[i]["path"]) + } + if uplogs[i]["method"] != "POST" { + t.Fatalf("unexpected uplog method:%v", uplogs[i]["method"]) + } } - if uplogs[2]["log_type"] != "request" { - t.Fatalf("unexpected uplog log_type:%v", uplogs[2]["log_type"]) + if uplogs[8]["log_type"] != "request" { + t.Fatalf("unexpected uplog log_type:%v", uplogs[8]["log_type"]) } - if uplogs[2]["api_type"] != "kodo" { - t.Fatalf("unexpected uplog api_type:%v", uplogs[2]["api_type"]) + if uplogs[8]["api_type"] != "kodo" { + t.Fatalf("unexpected uplog api_type:%v", uplogs[8]["api_type"]) } - if uplogs[2]["api_name"] != "postObject" { - t.Fatalf("unexpected uplog api_name:%v", uplogs[2]["api_name"]) + if uplogs[8]["api_name"] != "postObject" { + t.Fatalf("unexpected uplog api_name:%v", uplogs[8]["api_name"]) } - if uplogs[2]["error_type"] != nil { - t.Fatalf("unexpected uplog error_type:%v", uplogs[2]["error_type"]) + if uplogs[8]["error_type"] != nil { + t.Fatalf("unexpected uplog error_type:%v", uplogs[8]["error_type"]) } - if uplogs[2]["port"] != float64(443) { - t.Fatalf("unexpected uplog port:%v", uplogs[2]["port"]) + if uplogs[8]["port"] != float64(443) { + t.Fatalf("unexpected uplog port:%v", uplogs[8]["port"]) } - if uplogs[2]["remote_ip"] == nil { - t.Fatalf("unexpected uplog remote_ip:%v", uplogs[2]["remote_ip"]) + if uplogs[8]["remote_ip"] == nil { + t.Fatalf("unexpected uplog remote_ip:%v", uplogs[8]["remote_ip"]) } - if uplogs[2]["target_bucket"] != testBucket { - t.Fatalf("unexpected uplog target_bucket:%v", uplogs[2]["target_bucket"]) + if uplogs[8]["target_bucket"] != testBucket { + t.Fatalf("unexpected uplog target_bucket:%v", uplogs[8]["target_bucket"]) } - if uplogs[2]["path"] != "/" { - t.Fatalf("unexpected uplog path:%v", uplogs[2]["path"]) + if uplogs[8]["path"] != "/" { + t.Fatalf("unexpected uplog path:%v", uplogs[8]["path"]) } - if uplogs[2]["method"] != "POST" { - t.Fatalf("unexpected uplog method:%v", uplogs[2]["method"]) + if uplogs[8]["method"] != "POST" { + t.Fatalf("unexpected uplog method:%v", uplogs[8]["method"]) } - if uplogs[2]["status_code"] != float64(200) { - t.Fatalf("unexpected uplog status_code:%v", uplogs[2]["status_code"]) + if uplogs[8]["status_code"] != float64(200) { + t.Fatalf("unexpected uplog status_code:%v", uplogs[8]["status_code"]) } - if uplogs[3]["log_type"] != "quality" { - t.Fatalf("unexpected uplog log_type:%v", uplogs[3]["log_type"]) + if uplogs[9]["log_type"] != "quality" { + t.Fatalf("unexpected uplog log_type:%v", uplogs[9]["log_type"]) } - if uplogs[3]["result"] != "ok" { - t.Fatalf("unexpected uplog result:%v", uplogs[3]["result"]) + if uplogs[9]["result"] != "ok" { + t.Fatalf("unexpected uplog result:%v", uplogs[9]["result"]) } - if uplogs[3]["up_type"] != "form" { - t.Fatalf("unexpected uplog up_type:%v", uplogs[3]["up_type"]) + if uplogs[9]["up_type"] != "form" { + t.Fatalf("unexpected uplog up_type:%v", uplogs[9]["up_type"]) } - if uplogs[3]["regions_count"] != float64(2) { - t.Fatalf("unexpected uplog regions_count:%v", uplogs[3]["regions_count"]) + if uplogs[9]["regions_count"] != float64(2) { + t.Fatalf("unexpected uplog regions_count:%v", uplogs[9]["regions_count"]) } - if uplogs[3]["api_type"] != "kodo" { - t.Fatalf("unexpected uplog api_type:%v", uplogs[3]["api_type"]) + if uplogs[9]["api_type"] != "kodo" { + t.Fatalf("unexpected uplog api_type:%v", uplogs[9]["api_type"]) } - if 
uplogs[3]["file_size"] != float64(dataLen) { - t.Fatalf("unexpected uplog file_size:%v", uplogs[3]["file_size"]) + if uplogs[9]["file_size"] != float64(dataLen) { + t.Fatalf("unexpected uplog file_size:%v", uplogs[9]["file_size"]) } - if uplogs[3]["target_bucket"] != testBucket { - t.Fatalf("unexpected uplog target_bucket:%v", uplogs[3]["target_bucket"]) + if uplogs[9]["target_bucket"] != testBucket { + t.Fatalf("unexpected uplog target_bucket:%v", uplogs[9]["target_bucket"]) } } diff --git a/storagev2/apis/api_add_bucket_event_rule.go b/storagev2/apis/api_add_bucket_event_rule.go index 220bf709..99de5faf 100644 --- a/storagev2/apis/api_add_bucket_event_rule.go +++ b/storagev2/apis/api_add_bucket_event_rule.go @@ -116,7 +116,7 @@ func (storage *Storage) AddBucketEventRule(ctx context.Context, request *AddBuck if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_add_bucket_rules.go b/storagev2/apis/api_add_bucket_rules.go index c2427a35..843869d3 100644 --- a/storagev2/apis/api_add_bucket_rules.go +++ b/storagev2/apis/api_add_bucket_rules.go @@ -96,7 +96,7 @@ func (storage *Storage) AddBucketRules(ctx context.Context, request *AddBucketRu if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: httpclient.GetFormRequestBody(body)} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: httpclient.GetFormRequestBody(body), OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_async_fetch_object.go b/storagev2/apis/api_async_fetch_object.go index 20e46f63..15e07401 100644 --- a/storagev2/apis/api_async_fetch_object.go +++ b/storagev2/apis/api_async_fetch_object.go @@ -84,7 +84,7 @@ func (storage *Storage) AsyncFetchObject(ctx context.Context, request *AsyncFetc if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, 
AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, RequestBody: body} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, RequestBody: body, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { @@ -94,9 +94,10 @@ func (storage *Storage) AsyncFetchObject(ctx context.Context, request *AsyncFetc return nil, err } } - queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()} if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { queryOptions.RetryMax = hostRetryConfig.RetryMax + queryOptions.Backoff = hostRetryConfig.Backoff } if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { return nil, err diff --git a/storagev2/apis/api_batch_ops.go b/storagev2/apis/api_batch_ops.go index cc848ac3..e08882b3 100644 --- a/storagev2/apis/api_batch_ops.go +++ b/storagev2/apis/api_batch_ops.go @@ -77,7 +77,7 @@ func (storage *Storage) BatchOps(ctx context.Context, request *BatchOpsRequest, if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, RequestBody: httpclient.GetFormRequestBody(body)} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, RequestBody: httpclient.GetFormRequestBody(body), OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { @@ -87,9 +87,10 @@ func (storage *Storage) BatchOps(ctx context.Context, request *BatchOpsRequest, return nil, err } } - queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), 
Client: storage.client.GetClient()} + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()} if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { queryOptions.RetryMax = hostRetryConfig.RetryMax + queryOptions.Backoff = hostRetryConfig.Backoff } if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { return nil, err diff --git a/storagev2/apis/api_copy_object.go b/storagev2/apis/api_copy_object.go index 43c2088a..4d48eafa 100644 --- a/storagev2/apis/api_copy_object.go +++ b/storagev2/apis/api_copy_object.go @@ -101,7 +101,7 @@ func (storage *Storage) CopyObject(ctx context.Context, request *CopyObjectReque if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { @@ -111,9 +111,10 @@ func (storage *Storage) CopyObject(ctx context.Context, request *CopyObjectReque return nil, err } } - queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()} if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { queryOptions.RetryMax = hostRetryConfig.RetryMax + queryOptions.Backoff = hostRetryConfig.Backoff } if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { return nil, err diff --git a/storagev2/apis/api_create_bucket.go b/storagev2/apis/api_create_bucket.go 
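
One of the two mechanical changes repeated across these generated API files is threading options.OnRequestProgress into every httpclient.Request, giving callers a per-call hook on request-body transfer. The field name comes from this patch; the callback signature used below (transferred and expected byte counts) is an assumption, not shown in these hunks.

```go
package sketch

import (
	"context"
	"log"

	"github.com/qiniu/go-sdk/v7/storagev2/apis"
)

// createWithProgress passes the new per-call hook through apis.Options.
// The OnRequestProgress callback signature is assumed, not taken from the patch.
func createWithProgress(ctx context.Context, storage *apis.Storage, req *apis.CreateBucketRequest) error {
	_, err := storage.CreateBucket(ctx, req, &apis.Options{
		OnRequestProgress: func(uploaded, expected uint64) {
			log.Printf("sent %d of %d bytes", uploaded, expected)
		},
	})
	return err
}
```
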
index 1cdef53f..850d4127 100644 --- a/storagev2/apis/api_create_bucket.go +++ b/storagev2/apis/api_create_bucket.go @@ -78,7 +78,7 @@ func (storage *Storage) CreateBucket(ctx context.Context, request *CreateBucketR if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_delete_bucket.go b/storagev2/apis/api_delete_bucket.go index 5b484829..16080a9c 100644 --- a/storagev2/apis/api_delete_bucket.go +++ b/storagev2/apis/api_delete_bucket.go @@ -84,7 +84,7 @@ func (storage *Storage) DeleteBucket(ctx context.Context, request *DeleteBucketR if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_delete_bucket_event_rule.go b/storagev2/apis/api_delete_bucket_event_rule.go index d77d2928..1b6a9ff6 100644 --- a/storagev2/apis/api_delete_bucket_event_rule.go +++ b/storagev2/apis/api_delete_bucket_event_rule.go @@ -90,7 +90,7 @@ func (storage *Storage) DeleteBucketEventRule(ctx context.Context, request *Dele if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_delete_bucket_rules.go b/storagev2/apis/api_delete_bucket_rules.go index 
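
The other repeated change widens BucketRegionsQueryOptions: the lazy bucket-to-region query now inherits the client's resolver, chooser, every resolve/backoff/request lifecycle callback, and the retry Backoff alongside RetryMax, so region lookups behave like ordinary requests instead of only sharing protocol, freeze duration, and HTTP client. A sketch mirroring that construction; the getter names are verbatim from the patch, while the client and bucketHosts parameter types are assumptions.

```go
package sketch

import (
	httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client"
	"github.com/qiniu/go-sdk/v7/storagev2/region"
)

// newBucketQuery wires a bucket-regions query from an existing client so
// lookups reuse its DNS resolver, host chooser, callbacks, and retry policy.
func newBucketQuery(client *httpclient.Client, bucketHosts region.Endpoints) error {
	queryOptions := region.BucketRegionsQueryOptions{
		UseInsecureProtocol: client.UseInsecureProtocol(),
		HostFreezeDuration:  client.GetHostFreezeDuration(),
		Client:              client.GetClient(),
		Resolver:            client.GetResolver(),
		Chooser:             client.GetChooser(),
		BeforeResolve:       client.GetBeforeResolveCallback(),
		AfterResolve:        client.GetAfterResolveCallback(),
		ResolveError:        client.GetResolveErrorCallback(),
		BeforeBackoff:       client.GetBeforeBackoffCallback(),
		AfterBackoff:        client.GetAfterBackoffCallback(),
		BeforeRequest:       client.GetBeforeRequestCallback(),
		AfterResponse:       client.GetAfterResponseCallback(),
	}
	if rc := client.GetHostRetryConfig(); rc != nil {
		queryOptions.RetryMax = rc.RetryMax
		queryOptions.Backoff = rc.Backoff // newly propagated by this patch
	}
	query, err := region.NewBucketRegionsQuery(bucketHosts, &queryOptions)
	if err != nil {
		return err
	}
	_ = query // would serve as the regions provider for subsequent requests
	return nil
}
```
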
b0fba15d..0836c1f4 100644 --- a/storagev2/apis/api_delete_bucket_rules.go +++ b/storagev2/apis/api_delete_bucket_rules.go @@ -89,7 +89,7 @@ func (storage *Storage) DeleteBucketRules(ctx context.Context, request *DeleteBu if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: httpclient.GetFormRequestBody(body)} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: httpclient.GetFormRequestBody(body), OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_delete_bucket_taggings.go b/storagev2/apis/api_delete_bucket_taggings.go index 0386e24a..b7f23fa8 100644 --- a/storagev2/apis/api_delete_bucket_taggings.go +++ b/storagev2/apis/api_delete_bucket_taggings.go @@ -85,7 +85,7 @@ func (storage *Storage) DeleteBucketTaggings(ctx context.Context, request *Delet if err != nil { return nil, err } - req := httpclient.Request{Method: "DELETE", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "DELETE", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_delete_object.go b/storagev2/apis/api_delete_object.go index 7052652f..e7eafeef 100644 --- a/storagev2/apis/api_delete_object.go +++ b/storagev2/apis/api_delete_object.go @@ -92,7 +92,7 @@ func (storage *Storage) DeleteObject(ctx context.Context, request *DeleteObjectR if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { @@ 
-102,9 +102,10 @@ func (storage *Storage) DeleteObject(ctx context.Context, request *DeleteObjectR return nil, err } } - queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()} if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { queryOptions.RetryMax = hostRetryConfig.RetryMax + queryOptions.Backoff = hostRetryConfig.Backoff } if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { return nil, err diff --git a/storagev2/apis/api_delete_object_after_days.go b/storagev2/apis/api_delete_object_after_days.go index 9ea4dfd2..5446821c 100644 --- a/storagev2/apis/api_delete_object_after_days.go +++ b/storagev2/apis/api_delete_object_after_days.go @@ -87,7 +87,7 @@ func (storage *Storage) DeleteObjectAfterDays(ctx context.Context, request *Dele if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { @@ -97,9 +97,10 @@ func (storage *Storage) DeleteObjectAfterDays(ctx context.Context, request *Dele return nil, err } } - queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()} if hostRetryConfig := 
storage.client.GetHostRetryConfig(); hostRetryConfig != nil { queryOptions.RetryMax = hostRetryConfig.RetryMax + queryOptions.Backoff = hostRetryConfig.Backoff } if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { return nil, err diff --git a/storagev2/apis/api_disable_bucket_index_page.go b/storagev2/apis/api_disable_bucket_index_page.go index c76bb66c..31a186a8 100644 --- a/storagev2/apis/api_disable_bucket_index_page.go +++ b/storagev2/apis/api_disable_bucket_index_page.go @@ -87,7 +87,7 @@ func (storage *Storage) DisableBucketIndexPage(ctx context.Context, request *Dis if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_fetch_object.go b/storagev2/apis/api_fetch_object.go index cc8baa6f..eb05b80c 100644 --- a/storagev2/apis/api_fetch_object.go +++ b/storagev2/apis/api_fetch_object.go @@ -100,7 +100,7 @@ func (storage *Storage) FetchObject(ctx context.Context, request *FetchObjectReq if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { @@ -110,9 +110,10 @@ func (storage *Storage) FetchObject(ctx context.Context, request *FetchObjectReq return nil, err } } - queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: 
storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()} if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { queryOptions.RetryMax = hostRetryConfig.RetryMax + queryOptions.Backoff = hostRetryConfig.Backoff } if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { return nil, err diff --git a/storagev2/apis/api_get_async_fetch_task.go b/storagev2/apis/api_get_async_fetch_task.go index fad00272..a5ab7e72 100644 --- a/storagev2/apis/api_get_async_fetch_task.go +++ b/storagev2/apis/api_get_async_fetch_task.go @@ -76,7 +76,7 @@ func (storage *Storage) GetAsyncFetchTask(ctx context.Context, request *GetAsync if err != nil { return nil, err } - req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { @@ -86,9 +86,10 @@ func (storage *Storage) GetAsyncFetchTask(ctx context.Context, request *GetAsync return nil, err } } - queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()} if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { queryOptions.RetryMax = hostRetryConfig.RetryMax + queryOptions.Backoff = hostRetryConfig.Backoff } if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { return nil, err diff --git a/storagev2/apis/api_get_bucket_cors_rules.go b/storagev2/apis/api_get_bucket_cors_rules.go index cc927054..7c3b0931 100644 --- a/storagev2/apis/api_get_bucket_cors_rules.go +++ b/storagev2/apis/api_get_bucket_cors_rules.go @@ -84,7 +84,7 @@ func (storage *Storage) GetBucketCORSRules(ctx context.Context, request *GetBuck if err != nil { return nil, err } - req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: 
[]httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_get_bucket_domains.go b/storagev2/apis/api_get_bucket_domains.go index e251a5c7..70ed62f6 100644 --- a/storagev2/apis/api_get_bucket_domains.go +++ b/storagev2/apis/api_get_bucket_domains.go @@ -85,7 +85,7 @@ func (storage *Storage) GetBucketDomains(ctx context.Context, request *GetBucket if err != nil { return nil, err } - req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_get_bucket_domains_v3.go b/storagev2/apis/api_get_bucket_domains_v3.go index 4aa52cf3..70e71b7a 100644 --- a/storagev2/apis/api_get_bucket_domains_v3.go +++ b/storagev2/apis/api_get_bucket_domains_v3.go @@ -85,7 +85,7 @@ func (storage *Storage) GetBucketDomainsV3(ctx context.Context, request *GetBuck if err != nil { return nil, err } - req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_get_bucket_event_rules.go b/storagev2/apis/api_get_bucket_event_rules.go index 0af1a466..8e6256b2 100644 --- a/storagev2/apis/api_get_bucket_event_rules.go +++ b/storagev2/apis/api_get_bucket_event_rules.go @@ -85,7 +85,7 @@ func (storage *Storage) GetBucketEventRules(ctx context.Context, request *GetBuc if err != nil { return nil, err } - req := httpclient.Request{Method: 
"GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_get_bucket_info.go b/storagev2/apis/api_get_bucket_info.go index 3e9a2340..c86da2e7 100644 --- a/storagev2/apis/api_get_bucket_info.go +++ b/storagev2/apis/api_get_bucket_info.go @@ -85,7 +85,7 @@ func (storage *Storage) GetBucketInfo(ctx context.Context, request *GetBucketInf if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_get_bucket_infos.go b/storagev2/apis/api_get_bucket_infos.go index c7b43bea..9d741a0a 100644 --- a/storagev2/apis/api_get_bucket_infos.go +++ b/storagev2/apis/api_get_bucket_infos.go @@ -78,7 +78,7 @@ func (storage *Storage) GetBucketInfos(ctx context.Context, request *GetBucketIn if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_get_bucket_quota.go b/storagev2/apis/api_get_bucket_quota.go index 7878f7db..157f151e 100644 --- a/storagev2/apis/api_get_bucket_quota.go +++ b/storagev2/apis/api_get_bucket_quota.go @@ -84,7 +84,7 @@ func (storage *Storage) GetBucketQuota(ctx 
context.Context, request *GetBucketQu if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_get_bucket_rules.go b/storagev2/apis/api_get_bucket_rules.go index 6c4b9925..5ad62cfe 100644 --- a/storagev2/apis/api_get_bucket_rules.go +++ b/storagev2/apis/api_get_bucket_rules.go @@ -76,7 +76,7 @@ func (storage *Storage) GetBucketRules(ctx context.Context, request *GetBucketRu if err != nil { return nil, err } - req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_get_bucket_taggings.go b/storagev2/apis/api_get_bucket_taggings.go index 8e05219f..161689d8 100644 --- a/storagev2/apis/api_get_bucket_taggings.go +++ b/storagev2/apis/api_get_bucket_taggings.go @@ -85,7 +85,7 @@ func (storage *Storage) GetBucketTaggings(ctx context.Context, request *GetBucke if err != nil { return nil, err } - req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_get_buckets.go b/storagev2/apis/api_get_buckets.go index 219a4556..dce22f60 100644 --- a/storagev2/apis/api_get_buckets.go +++ 
diff --git a/storagev2/apis/api_get_buckets.go b/storagev2/apis/api_get_buckets.go
index 219a4556..dce22f60 100644
--- a/storagev2/apis/api_get_buckets.go
+++ b/storagev2/apis/api_get_buckets.go
@@ -74,7 +74,7 @@ func (storage *Storage) GetBuckets(ctx context.Context, request *GetBucketsReque
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true}
+    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
diff --git a/storagev2/apis/api_get_buckets_v4.go b/storagev2/apis/api_get_buckets_v4.go
index 36b86972..4ba88ab6 100644
--- a/storagev2/apis/api_get_buckets_v4.go
+++ b/storagev2/apis/api_get_buckets_v4.go
@@ -81,7 +81,7 @@ func (storage *Storage) GetBucketsV4(ctx context.Context, request *GetBucketsV4R
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true}
+    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
diff --git a/storagev2/apis/api_get_objects.go b/storagev2/apis/api_get_objects.go
index 02180931..cf76cbbd 100644
--- a/storagev2/apis/api_get_objects.go
+++ b/storagev2/apis/api_get_objects.go
@@ -101,7 +101,7 @@ func (storage *Storage) GetObjects(ctx context.Context, request *GetObjectsReque
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true}
+    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -111,9 +111,10 @@ func (storage *Storage) GetObjects(ctx context.Context, request *GetObjectsReque
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_get_objects_v2.go b/storagev2/apis/api_get_objects_v2.go
index a0e0155e..4821324b 100644
--- a/storagev2/apis/api_get_objects_v2.go
+++ b/storagev2/apis/api_get_objects_v2.go
@@ -101,7 +101,7 @@ func (storage *Storage) GetObjectsV2(ctx context.Context, request *GetObjectsV2R
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials}
+    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -111,9 +111,10 @@ func (storage *Storage) GetObjectsV2(ctx context.Context, request *GetObjectsV2R
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_get_regions.go b/storagev2/apis/api_get_regions.go
index 25b0a71c..474fa2cb 100644
--- a/storagev2/apis/api_get_regions.go
+++ b/storagev2/apis/api_get_regions.go
@@ -61,7 +61,7 @@ func (storage *Storage) GetRegions(ctx context.Context, request *GetRegionsReque
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true}
+    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
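Note: the second hunk repeated in each file is equally mechanical — the BucketRegionsQuery built for region lookup used to inherit only the HTTP client, and now also inherits the owning client's resolver, chooser, and every request-lifecycle hook, with the retry config's Backoff copied alongside RetryMax. Condensed from the hunks above (client stands in for storage.client; bucketHosts as in the generated code):

queryOptions := region.BucketRegionsQueryOptions{
	UseInsecureProtocol: client.UseInsecureProtocol(),
	HostFreezeDuration:  client.GetHostFreezeDuration(),
	Client:              client.GetClient(),
	// Newly propagated: DNS resolution and host-selection strategies.
	Resolver: client.GetResolver(),
	Chooser:  client.GetChooser(),
	// Newly propagated: hooks for the resolve, backoff, and request phases.
	BeforeResolve: client.GetBeforeResolveCallback(),
	AfterResolve:  client.GetAfterResolveCallback(),
	ResolveError:  client.GetResolveErrorCallback(),
	BeforeBackoff: client.GetBeforeBackoffCallback(),
	AfterBackoff:  client.GetAfterBackoffCallback(),
	BeforeRequest: client.GetBeforeRequestCallback(),
	AfterResponse: client.GetAfterResponseCallback(),
}
if rc := client.GetHostRetryConfig(); rc != nil {
	queryOptions.RetryMax = rc.RetryMax
	queryOptions.Backoff = rc.Backoff // Backoff now travels with RetryMax
}
query, err := region.NewBucketRegionsQuery(bucketHosts, &queryOptions)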
diff --git a/storagev2/apis/api_modify_object_life_cycle.go b/storagev2/apis/api_modify_object_life_cycle.go
index 33399c4b..43bb4bc8 100644
--- a/storagev2/apis/api_modify_object_life_cycle.go
+++ b/storagev2/apis/api_modify_object_life_cycle.go
@@ -108,7 +108,7 @@ func (storage *Storage) ModifyObjectLifeCycle(ctx context.Context, request *Modi
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -118,9 +118,10 @@ func (storage *Storage) ModifyObjectLifeCycle(ctx context.Context, request *Modi
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_modify_object_metadata.go b/storagev2/apis/api_modify_object_metadata.go
index a482e16c..82daa387 100644
--- a/storagev2/apis/api_modify_object_metadata.go
+++ b/storagev2/apis/api_modify_object_metadata.go
@@ -102,7 +102,7 @@ func (storage *Storage) ModifyObjectMetadata(ctx context.Context, request *Modif
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -112,9 +112,10 @@ func (storage *Storage) ModifyObjectMetadata(ctx context.Context, request *Modif
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_modify_object_status.go b/storagev2/apis/api_modify_object_status.go
index 3389da6e..6db6c1f8 100644
--- a/storagev2/apis/api_modify_object_status.go
+++ b/storagev2/apis/api_modify_object_status.go
@@ -94,7 +94,7 @@ func (storage *Storage) ModifyObjectStatus(ctx context.Context, request *ModifyO
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -104,9 +104,10 @@ func (storage *Storage) ModifyObjectStatus(ctx context.Context, request *ModifyO
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_move_object.go b/storagev2/apis/api_move_object.go
index 4ea4cc87..1a8864aa 100644
--- a/storagev2/apis/api_move_object.go
+++ b/storagev2/apis/api_move_object.go
@@ -101,7 +101,7 @@ func (storage *Storage) MoveObject(ctx context.Context, request *MoveObjectReque
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -111,9 +111,10 @@ func (storage *Storage) MoveObject(ctx context.Context, request *MoveObjectReque
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_post_object.go b/storagev2/apis/api_post_object.go
index d217e1cb..ca7e0023 100644
--- a/storagev2/apis/api_post_object.go
+++ b/storagev2/apis/api_post_object.go
@@ -101,7 +101,7 @@ func (storage *Storage) PostObject(ctx context.Context, request *PostObjectReque
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, BufferResponse: true, RequestBody: httpclient.GetMultipartFormRequestBody(body)}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, BufferResponse: true, RequestBody: httpclient.GetMultipartFormRequestBody(body), OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -111,9 +111,10 @@ func (storage *Storage) PostObject(ctx context.Context, request *PostObjectReque
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
@@ -139,6 +140,7 @@ func (storage *Storage) PostObject(ctx context.Context, request *PostObjectReque
             }
         }
     }
+    ctx = httpclient.WithoutSignature(ctx)
     respBody := PostObjectResponse{Body: innerRequest.ResponseBody}
     if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil {
         return nil, err
diff --git a/storagev2/apis/api_prefetch_object.go b/storagev2/apis/api_prefetch_object.go
index 4b98cec0..43694ee3 100644
--- a/storagev2/apis/api_prefetch_object.go
+++ b/storagev2/apis/api_prefetch_object.go
@@ -92,7 +92,7 @@ func (storage *Storage) PrefetchObject(ctx context.Context, request *PrefetchObj
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -102,9 +102,10 @@ func (storage *Storage) PrefetchObject(ctx context.Context, request *PrefetchObj
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_query_bucket_v2.go b/storagev2/apis/api_query_bucket_v2.go
index f556b6c9..00292ff5 100644
--- a/storagev2/apis/api_query_bucket_v2.go
+++ b/storagev2/apis/api_query_bucket_v2.go
@@ -67,7 +67,7 @@ func (storage *Storage) QueryBucketV2(ctx context.Context, request *QueryBucketV
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, BufferResponse: true}
+    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, BufferResponse: true, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -98,6 +98,7 @@ func (storage *Storage) QueryBucketV2(ctx context.Context, request *QueryBucketV
             }
         }
     }
+    ctx = httpclient.WithoutSignature(ctx)
     var respBody QueryBucketV2Response
     if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil {
         return nil, err
diff --git a/storagev2/apis/api_query_bucket_v4.go b/storagev2/apis/api_query_bucket_v4.go
index 03f8b5c5..02af0164 100644
--- a/storagev2/apis/api_query_bucket_v4.go
+++ b/storagev2/apis/api_query_bucket_v4.go
@@ -67,7 +67,7 @@ func (storage *Storage) QueryBucketV4(ctx context.Context, request *QueryBucketV
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, BufferResponse: true}
+    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, BufferResponse: true, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -98,6 +98,7 @@ func (storage *Storage) QueryBucketV4(ctx context.Context, request *QueryBucketV
             }
         }
     }
+    ctx = httpclient.WithoutSignature(ctx)
     var respBody QueryBucketV4Response
     if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil {
         return nil, err
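Note: QueryBucketV2, QueryBucketV4, and PostObject carry no credentials, and each now tags its context so the HTTP client skips request signing. The pattern, exactly as it appears in the hunks above:

// Region queries and form uploads are anonymous endpoints; mark the context
// so DoAndAcceptJSON does not attempt to sign the request.
ctx = httpclient.WithoutSignature(ctx)
var respBody QueryBucketV4Response
if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil {
	return nil, err
}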
diff --git a/storagev2/apis/api_restore_archived_object.go b/storagev2/apis/api_restore_archived_object.go
index 190143bd..e8f800cf 100644
--- a/storagev2/apis/api_restore_archived_object.go
+++ b/storagev2/apis/api_restore_archived_object.go
@@ -98,7 +98,7 @@ func (storage *Storage) RestoreArchivedObject(ctx context.Context, request *Rest
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -108,9 +108,10 @@ func (storage *Storage) RestoreArchivedObject(ctx context.Context, request *Rest
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_resumable_upload_v1_bput.go b/storagev2/apis/api_resumable_upload_v1_bput.go
index fb0bf909..e88fed44 100644
--- a/storagev2/apis/api_resumable_upload_v1_bput.go
+++ b/storagev2/apis/api_resumable_upload_v1_bput.go
@@ -82,7 +82,7 @@ func (storage *Storage) ResumableUploadV1Bput(ctx context.Context, request *Resu
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: httpclient.GetRequestBodyFromReadSeekCloser(body)}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: httpclient.GetRequestBodyFromReadSeekCloser(body), OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -92,9 +92,10 @@ func (storage *Storage) ResumableUploadV1Bput(ctx context.Context, request *Resu
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_resumable_upload_v1_make_block.go b/storagev2/apis/api_resumable_upload_v1_make_block.go
index 33674e35..959a2031 100644
--- a/storagev2/apis/api_resumable_upload_v1_make_block.go
+++ b/storagev2/apis/api_resumable_upload_v1_make_block.go
@@ -81,7 +81,7 @@ func (storage *Storage) ResumableUploadV1MakeBlock(ctx context.Context, request 
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: httpclient.GetRequestBodyFromReadSeekCloser(body)}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: httpclient.GetRequestBodyFromReadSeekCloser(body), OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -91,9 +91,10 @@ func (storage *Storage) ResumableUploadV1MakeBlock(ctx context.Context, request 
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_resumable_upload_v1_make_file.go b/storagev2/apis/api_resumable_upload_v1_make_file.go
index 261ed1b6..c47ed99d 100644
--- a/storagev2/apis/api_resumable_upload_v1_make_file.go
+++ b/storagev2/apis/api_resumable_upload_v1_make_file.go
@@ -91,7 +91,7 @@ func (storage *Storage) ResumableUploadV1MakeFile(ctx context.Context, request *
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: httpclient.GetRequestBodyFromReadSeekCloser(body)}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: httpclient.GetRequestBodyFromReadSeekCloser(body), OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -101,9 +101,10 @@ func (storage *Storage) ResumableUploadV1MakeFile(ctx context.Context, request *
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_resumable_upload_v2_abort_multipart_upload.go b/storagev2/apis/api_resumable_upload_v2_abort_multipart_upload.go
index a6a87461..243c28e4 100644
--- a/storagev2/apis/api_resumable_upload_v2_abort_multipart_upload.go
+++ b/storagev2/apis/api_resumable_upload_v2_abort_multipart_upload.go
@@ -87,7 +87,7 @@ func (storage *Storage) ResumableUploadV2AbortMultipartUpload(ctx context.Contex
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "DELETE", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken}
+    req := httpclient.Request{Method: "DELETE", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -97,9 +97,10 @@ func (storage *Storage) ResumableUploadV2AbortMultipartUpload(ctx context.Contex
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_resumable_upload_v2_complete_multipart_upload.go b/storagev2/apis/api_resumable_upload_v2_complete_multipart_upload.go
index 1ade43d9..16109088 100644
--- a/storagev2/apis/api_resumable_upload_v2_complete_multipart_upload.go
+++ b/storagev2/apis/api_resumable_upload_v2_complete_multipart_upload.go
@@ -98,7 +98,7 @@ func (storage *Storage) ResumableUploadV2CompleteMultipartUpload(ctx context.Con
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: body}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: body, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -108,9 +108,10 @@ func (storage *Storage) ResumableUploadV2CompleteMultipartUpload(ctx context.Con
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_resumable_upload_v2_initiate_multipart_upload.go b/storagev2/apis/api_resumable_upload_v2_initiate_multipart_upload.go
index 319ef0bc..cfb2cc0f 100644
--- a/storagev2/apis/api_resumable_upload_v2_initiate_multipart_upload.go
+++ b/storagev2/apis/api_resumable_upload_v2_initiate_multipart_upload.go
@@ -83,7 +83,7 @@ func (storage *Storage) ResumableUploadV2InitiateMultipartUpload(ctx context.Con
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, BufferResponse: true}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, BufferResponse: true, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -93,9 +93,10 @@ func (storage *Storage) ResumableUploadV2InitiateMultipartUpload(ctx context.Con
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_resumable_upload_v2_list_parts.go b/storagev2/apis/api_resumable_upload_v2_list_parts.go
index 88428188..5f0bc8d8 100644
--- a/storagev2/apis/api_resumable_upload_v2_list_parts.go
+++ b/storagev2/apis/api_resumable_upload_v2_list_parts.go
@@ -104,7 +104,7 @@ func (storage *Storage) ResumableUploadV2ListParts(ctx context.Context, request 
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, BufferResponse: true}
+    req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, UpToken: innerRequest.UpToken, BufferResponse: true, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -114,9 +114,10 @@ func (storage *Storage) ResumableUploadV2ListParts(ctx context.Context, request 
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
diff --git a/storagev2/apis/api_resumable_upload_v2_upload_part.go b/storagev2/apis/api_resumable_upload_v2_upload_part.go
index 46fa01fe..cbd03f45 100644
--- a/storagev2/apis/api_resumable_upload_v2_upload_part.go
+++ b/storagev2/apis/api_resumable_upload_v2_upload_part.go
@@ -109,7 +109,7 @@ func (storage *Storage) ResumableUploadV2UploadPart(ctx context.Context, request
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "PUT", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, Header: headers, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: httpclient.GetRequestBodyFromReadSeekCloser(body)}
+    req := httpclient.Request{Method: "PUT", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, Header: headers, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: httpclient.GetRequestBodyFromReadSeekCloser(body), OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
@@ -119,9 +119,10 @@ func (storage *Storage) ResumableUploadV2UploadPart(ctx context.Context, request
                 return nil, err
             }
         }
-        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()}
+        queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()}
         if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil {
             queryOptions.RetryMax = hostRetryConfig.RetryMax
+            queryOptions.Backoff = hostRetryConfig.Backoff
         }
         if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
             return nil, err
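Note: for the resumable-upload calls above the request body is a seekable stream (httpclient.GetRequestBodyFromReadSeekCloser), so a retried request can rewind and replay it, and the progress counter may start over. Below is a throttled callback a caller might pass through the per-call OnRequestProgress option; the (uploaded, totalSize uint64) signature is an assumption, not confirmed by this diff.

package progress

import "log"

// Logger returns a progress callback that logs at most once per percentage
// point and tolerates the counter resetting when a retry replays the body.
func Logger(name string) func(uploaded, totalSize uint64) {
	lastPercent := -1
	return func(uploaded, totalSize uint64) {
		if totalSize == 0 {
			return // total size unknown; nothing sensible to report
		}
		percent := int(uploaded * 100 / totalSize)
		if percent < lastPercent {
			lastPercent = -1 // body was rewound for a retry
		}
		if percent > lastPercent {
			lastPercent = percent
			log.Printf("%s: %d%% (%d/%d bytes)", name, percent, uploaded, totalSize)
		}
	}
}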
diff --git a/storagev2/apis/api_set_bucket_access_mode.go b/storagev2/apis/api_set_bucket_access_mode.go
index 30e37a6a..1f225c1c 100644
--- a/storagev2/apis/api_set_bucket_access_mode.go
+++ b/storagev2/apis/api_set_bucket_access_mode.go
@@ -86,7 +86,7 @@ func (storage *Storage) SetBucketAccessMode(ctx context.Context, request *SetBuc
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
diff --git a/storagev2/apis/api_set_bucket_cors_rules.go b/storagev2/apis/api_set_bucket_cors_rules.go
index 9acc9e82..b1f7f726 100644
--- a/storagev2/apis/api_set_bucket_cors_rules.go
+++ b/storagev2/apis/api_set_bucket_cors_rules.go
@@ -95,7 +95,7 @@ func (storage *Storage) SetBucketCORSRules(ctx context.Context, request *SetBuck
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: body}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: body, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
diff --git a/storagev2/apis/api_set_bucket_image.go b/storagev2/apis/api_set_bucket_image.go
index 6af12041..1a90c161 100644
--- a/storagev2/apis/api_set_bucket_image.go
+++ b/storagev2/apis/api_set_bucket_image.go
@@ -93,7 +93,7 @@ func (storage *Storage) SetBucketImage(ctx context.Context, request *SetBucketIm
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
diff --git a/storagev2/apis/api_set_bucket_max_age.go b/storagev2/apis/api_set_bucket_max_age.go
index 1c849c23..6f6106fa 100644
--- a/storagev2/apis/api_set_bucket_max_age.go
+++ b/storagev2/apis/api_set_bucket_max_age.go
@@ -87,7 +87,7 @@ func (storage *Storage) SetBucketMaxAge(ctx context.Context, request *SetBucketM
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
diff --git a/storagev2/apis/api_set_bucket_private.go b/storagev2/apis/api_set_bucket_private.go
index 88ab255d..30f32609 100644
--- a/storagev2/apis/api_set_bucket_private.go
+++ b/storagev2/apis/api_set_bucket_private.go
@@ -86,7 +86,7 @@ func (storage *Storage) SetBucketPrivate(ctx context.Context, request *SetBucket
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: httpclient.GetFormRequestBody(body)}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: httpclient.GetFormRequestBody(body), OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
diff --git a/storagev2/apis/api_set_bucket_quota.go b/storagev2/apis/api_set_bucket_quota.go
index 659a3860..e7832ac1 100644
--- a/storagev2/apis/api_set_bucket_quota.go
+++ b/storagev2/apis/api_set_bucket_quota.go
@@ -91,7 +91,7 @@ func (storage *Storage) SetBucketQuota(ctx context.Context, request *SetBucketQu
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
diff --git a/storagev2/apis/api_set_bucket_refer_anti_leech.go b/storagev2/apis/api_set_bucket_refer_anti_leech.go
index 691993b9..3d0393d7 100644
--- a/storagev2/apis/api_set_bucket_refer_anti_leech.go
+++ b/storagev2/apis/api_set_bucket_refer_anti_leech.go
@@ -90,7 +90,7 @@ func (storage *Storage) SetBucketReferAntiLeech(ctx context.Context, request *Se
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials}
+    req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
diff --git a/storagev2/apis/api_set_bucket_remark.go b/storagev2/apis/api_set_bucket_remark.go
index 5fc5c8e1..83a9dd01 100644
--- a/storagev2/apis/api_set_bucket_remark.go
+++ b/storagev2/apis/api_set_bucket_remark.go
@@ -95,7 +95,7 @@ func (storage *Storage) SetBucketRemark(ctx context.Context, request *SetBucketR
     if err != nil {
         return nil, err
     }
-    req := httpclient.Request{Method: "PUT", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: body}
+    req := httpclient.Request{Method: "PUT", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: body, OnRequestProgress: options.OnRequestProgress}
     if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil {
         query := storage.client.GetBucketQuery()
         if query == nil {
options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_set_buckets_mirror.go b/storagev2/apis/api_set_buckets_mirror.go index 506dd088..ae2f470c 100644 --- a/storagev2/apis/api_set_buckets_mirror.go +++ b/storagev2/apis/api_set_buckets_mirror.go @@ -95,7 +95,7 @@ func (storage *Storage) SetBucketsMirror(ctx context.Context, request *SetBucket if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_set_object_file_type.go b/storagev2/apis/api_set_object_file_type.go index ebf1a16d..85f2e270 100644 --- a/storagev2/apis/api_set_object_file_type.go +++ b/storagev2/apis/api_set_object_file_type.go @@ -94,7 +94,7 @@ func (storage *Storage) SetObjectFileType(ctx context.Context, request *SetObjec if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { @@ -104,9 +104,10 @@ func (storage *Storage) SetObjectFileType(ctx context.Context, request *SetObjec return nil, err } } - queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: 
storage.client.GetAfterResponseCallback()} if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { queryOptions.RetryMax = hostRetryConfig.RetryMax + queryOptions.Backoff = hostRetryConfig.Backoff } if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { return nil, err diff --git a/storagev2/apis/api_stat_object.go b/storagev2/apis/api_stat_object.go index 0dda3dc0..a5b9ec91 100644 --- a/storagev2/apis/api_stat_object.go +++ b/storagev2/apis/api_stat_object.go @@ -106,7 +106,7 @@ func (storage *Storage) StatObject(ctx context.Context, request *StatObjectReque if err != nil { return nil, err } - req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { @@ -116,9 +116,10 @@ func (storage *Storage) StatObject(ctx context.Context, request *StatObjectReque return nil, err } } - queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient(), Resolver: storage.client.GetResolver(), Chooser: storage.client.GetChooser(), BeforeResolve: storage.client.GetBeforeResolveCallback(), AfterResolve: storage.client.GetAfterResolveCallback(), ResolveError: storage.client.GetResolveErrorCallback(), BeforeBackoff: storage.client.GetBeforeBackoffCallback(), AfterBackoff: storage.client.GetAfterBackoffCallback(), BeforeRequest: storage.client.GetBeforeRequestCallback(), AfterResponse: storage.client.GetAfterResponseCallback()} if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { queryOptions.RetryMax = hostRetryConfig.RetryMax + queryOptions.Backoff = hostRetryConfig.Backoff } if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { return nil, err diff --git a/storagev2/apis/api_unset_bucket_image.go b/storagev2/apis/api_unset_bucket_image.go index f1ffbad0..bfd1ea11 100644 --- a/storagev2/apis/api_unset_bucket_image.go +++ b/storagev2/apis/api_unset_bucket_image.go @@ -84,7 +84,7 @@ func (storage *Storage) UnsetBucketImage(ctx context.Context, request *UnsetBuck if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "POST", ServiceNames: 
serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_update_bucket_event_rule.go b/storagev2/apis/api_update_bucket_event_rule.go index b8ee53c2..b956eaae 100644 --- a/storagev2/apis/api_update_bucket_event_rule.go +++ b/storagev2/apis/api_update_bucket_event_rule.go @@ -116,7 +116,7 @@ func (storage *Storage) UpdateBucketEventRule(ctx context.Context, request *Upda if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/api_update_bucket_rules.go b/storagev2/apis/api_update_bucket_rules.go index dfc870bd..8626ffb0 100644 --- a/storagev2/apis/api_update_bucket_rules.go +++ b/storagev2/apis/api_update_bucket_rules.go @@ -98,7 +98,7 @@ func (storage *Storage) UpdateBucketRules(ctx context.Context, request *UpdateBu if err != nil { return nil, err } - req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: httpclient.GetFormRequestBody(body)} + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Interceptors: []httpclient.Interceptor{uplogInterceptor}, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: httpclient.GetFormRequestBody(body), OnRequestProgress: options.OnRequestProgress} if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { query := storage.client.GetBucketQuery() if query == nil { diff --git a/storagev2/apis/apis.go b/storagev2/apis/apis.go index 0771b0ed..724ccb14 100644 --- a/storagev2/apis/apis.go +++ b/storagev2/apis/apis.go @@ -23,4 +23,5 @@ type Options struct { OverwrittenBucketName string OverwrittenEndpoints region.EndpointsProvider OverwrittenRegion region.RegionsProvider + OnRequestProgress func(uint64, uint64) } diff --git a/storagev2/apis/batch_ops/api.go b/storagev2/apis/batch_ops/api.go index 22950731..29a4f62d 100644 --- a/storagev2/apis/batch_ops/api.go +++ b/storagev2/apis/batch_ops/api.go @@ -20,50 +20,57 @@ type Response struct { 
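// A minimal sketch of the new per-call progress hook: every generated API in
// this package now forwards Options.OnRequestProgress into the underlying
// httpclient.Request. The sketch assumes the package's NewStorage constructor
// and that the two uint64 arguments are (uploaded bytes, expected total);
// neither is spelled out in this hunk.
package main

import (
	"fmt"

	"github.com/qiniu/go-sdk/v7/storagev2/apis"
	"github.com/qiniu/go-sdk/v7/storagev2/credentials"
	httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client"
)

func main() {
	storage := apis.NewStorage(&httpclient.Options{
		Credentials: credentials.NewCredentials("ak", "sk"),
	})
	opts := apis.Options{
		OnRequestProgress: func(uploaded, expectedTotal uint64) {
			fmt.Printf("request body: %d/%d bytes\n", uploaded, expectedTotal)
		},
	}
	// Any generated call accepts the same options as its final argument, e.g.
	// storage.SetBucketRemark(ctx, &request, &opts); request fields elided here.
	_, _ = storage, opts
}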
OperationResponses OperationResponses // 所有管理指令的响应信息 } +// 每个分片的大小 +type PartSizes = []int64 + // 响应数据 type Data struct { - Error string // 管理指令的错误信息,仅在发生错误时才返回 - Size int64 // 对象大小,单位为字节,仅对 stat 指令才有效 - Hash string // 对象哈希值,仅对 stat 指令才有效 - MimeType string // 对象 MIME 类型,仅对 stat 指令才有效 - Type int64 // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储,仅对 stat 指令才有效 - PutTime int64 // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒,仅对 stat 指令才有效 - EndUser string // 资源内容的唯一属主标识 - RestoringStatus int64 // 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段,仅对 stat 指令才有效 - Status int64 // 文件状态。`1` 表示禁用;只有禁用状态的文件才会返回该字段,仅对 stat 指令才有效 - Md5 string // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回,仅对 stat 指令才有效 - ExpirationTime int64 // 文件过期删除日期,UNIX 时间戳格式,文件在设置过期时间后才会返回该字段,仅对 stat 指令才有效 - TransitionToIaTime int64 // 文件生命周期中转为低频存储的日期,UNIX 时间戳格式,文件在设置转低频后才会返回该字段,仅对 stat 指令才有效 - TransitionToArchiveTime int64 // 文件生命周期中转为归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段,仅对 stat 指令才有效 - TransitionToDeepArchiveTime int64 // 文件生命周期中转为深度归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段,仅对 stat 指令才有效 - TransitionToArchiveIrTime int64 // 文件生命周期中转为归档直读存储的日期,UNIX 时间戳格式,文件在设置转归档直读后才会返回该字段,仅对 stat 指令才有效 + Error string // 管理指令的错误信息,仅在发生错误时才返回 + Size int64 // 对象大小,单位为字节,仅对 stat 指令才有效 + Hash string // 对象哈希值,仅对 stat 指令才有效 + MimeType string // 对象 MIME 类型,仅对 stat 指令才有效 + Type int64 // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储,仅对 stat 指令才有效 + PutTime int64 // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒,仅对 stat 指令才有效 + EndUser string // 资源内容的唯一属主标识 + RestoringStatus int64 // 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段,仅对 stat 指令才有效 + Status int64 // 文件状态。`1` 表示禁用;只有禁用状态的文件才会返回该字段,仅对 stat 指令才有效 + Md5 string // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回,仅对 stat 指令才有效 + ExpirationTime int64 // 文件过期删除日期,UNIX 时间戳格式,文件在设置过期时间后才会返回该字段,仅对 stat 指令才有效 + TransitionToIaTime int64 // 文件生命周期中转为低频存储的日期,UNIX 时间戳格式,文件在设置转低频后才会返回该字段,仅对 stat 指令才有效 + TransitionToArchiveTime int64 // 文件生命周期中转为归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段,仅对 stat 指令才有效 + TransitionToDeepArchiveTime int64 // 文件生命周期中转为深度归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段,仅对 stat 指令才有效 + TransitionToArchiveIrTime int64 // 文件生命周期中转为归档直读存储的日期,UNIX 时间戳格式,文件在设置转归档直读后才会返回该字段,仅对 stat 指令才有效 + Metadata map[string]string // 对象存储元信息 + Parts PartSizes // 每个分片的大小,如没有指定 need_parts 参数则不返回 } // 管理指令的响应数据 type OperationResponseData = Data type jsonData struct { - Error string `json:"error,omitempty"` // 管理指令的错误信息,仅在发生错误时才返回 - Size int64 `json:"fsize,omitempty"` // 对象大小,单位为字节,仅对 stat 指令才有效 - Hash string `json:"hash,omitempty"` // 对象哈希值,仅对 stat 指令才有效 - MimeType string `json:"mimeType,omitempty"` // 对象 MIME 类型,仅对 stat 指令才有效 - Type int64 `json:"type,omitempty"` // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储,仅对 stat 指令才有效 - PutTime int64 `json:"putTime,omitempty"` // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒,仅对 stat 指令才有效 - EndUser string `json:"endUser,omitempty"` // 资源内容的唯一属主标识 - RestoringStatus int64 `json:"restoreStatus,omitempty"` // 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段,仅对 stat 指令才有效 - Status int64 `json:"status,omitempty"` // 文件状态。`1` 表示禁用;只有禁用状态的文件才会返回该字段,仅对 stat 指令才有效 - Md5 string `json:"md5,omitempty"` // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回,仅对 stat 指令才有效 - ExpirationTime int64 `json:"expiration,omitempty"` // 文件过期删除日期,UNIX 时间戳格式,文件在设置过期时间后才会返回该字段,仅对 stat 指令才有效 - TransitionToIaTime int64 `json:"transitionToIA,omitempty"` // 文件生命周期中转为低频存储的日期,UNIX 时间戳格式,文件在设置转低频后才会返回该字段,仅对 stat 指令才有效 - TransitionToArchiveTime int64 `json:"transitionToARCHIVE,omitempty"` // 文件生命周期中转为归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段,仅对 stat 指令才有效 - TransitionToDeepArchiveTime int64 
`json:"transitionToDeepArchive,omitempty"` // 文件生命周期中转为深度归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段,仅对 stat 指令才有效 - TransitionToArchiveIrTime int64 `json:"transitionToArchiveIR,omitempty"` // 文件生命周期中转为归档直读存储的日期,UNIX 时间戳格式,文件在设置转归档直读后才会返回该字段,仅对 stat 指令才有效 + Error string `json:"error,omitempty"` // 管理指令的错误信息,仅在发生错误时才返回 + Size int64 `json:"fsize,omitempty"` // 对象大小,单位为字节,仅对 stat 指令才有效 + Hash string `json:"hash,omitempty"` // 对象哈希值,仅对 stat 指令才有效 + MimeType string `json:"mimeType,omitempty"` // 对象 MIME 类型,仅对 stat 指令才有效 + Type int64 `json:"type,omitempty"` // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储,仅对 stat 指令才有效 + PutTime int64 `json:"putTime,omitempty"` // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒,仅对 stat 指令才有效 + EndUser string `json:"endUser,omitempty"` // 资源内容的唯一属主标识 + RestoringStatus int64 `json:"restoreStatus,omitempty"` // 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段,仅对 stat 指令才有效 + Status int64 `json:"status,omitempty"` // 文件状态。`1` 表示禁用;只有禁用状态的文件才会返回该字段,仅对 stat 指令才有效 + Md5 string `json:"md5,omitempty"` // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回,仅对 stat 指令才有效 + ExpirationTime int64 `json:"expiration,omitempty"` // 文件过期删除日期,UNIX 时间戳格式,文件在设置过期时间后才会返回该字段,仅对 stat 指令才有效 + TransitionToIaTime int64 `json:"transitionToIA,omitempty"` // 文件生命周期中转为低频存储的日期,UNIX 时间戳格式,文件在设置转低频后才会返回该字段,仅对 stat 指令才有效 + TransitionToArchiveTime int64 `json:"transitionToARCHIVE,omitempty"` // 文件生命周期中转为归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段,仅对 stat 指令才有效 + TransitionToDeepArchiveTime int64 `json:"transitionToDeepArchive,omitempty"` // 文件生命周期中转为深度归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段,仅对 stat 指令才有效 + TransitionToArchiveIrTime int64 `json:"transitionToArchiveIR,omitempty"` // 文件生命周期中转为归档直读存储的日期,UNIX 时间戳格式,文件在设置转归档直读后才会返回该字段,仅对 stat 指令才有效 + Metadata map[string]string `json:"x-qn-meta,omitempty"` // 对象存储元信息 + Parts PartSizes `json:"parts,omitempty"` // 每个分片的大小,如没有指定 need_parts 参数则不返回 } func (j *Data) MarshalJSON() ([]byte, error) { if err := j.validate(); err != nil { return nil, err } - return json.Marshal(&jsonData{Error: j.Error, Size: j.Size, Hash: j.Hash, MimeType: j.MimeType, Type: j.Type, PutTime: j.PutTime, EndUser: j.EndUser, RestoringStatus: j.RestoringStatus, Status: j.Status, Md5: j.Md5, ExpirationTime: j.ExpirationTime, TransitionToIaTime: j.TransitionToIaTime, TransitionToArchiveTime: j.TransitionToArchiveTime, TransitionToDeepArchiveTime: j.TransitionToDeepArchiveTime, TransitionToArchiveIrTime: j.TransitionToArchiveIrTime}) + return json.Marshal(&jsonData{Error: j.Error, Size: j.Size, Hash: j.Hash, MimeType: j.MimeType, Type: j.Type, PutTime: j.PutTime, EndUser: j.EndUser, RestoringStatus: j.RestoringStatus, Status: j.Status, Md5: j.Md5, ExpirationTime: j.ExpirationTime, TransitionToIaTime: j.TransitionToIaTime, TransitionToArchiveTime: j.TransitionToArchiveTime, TransitionToDeepArchiveTime: j.TransitionToDeepArchiveTime, TransitionToArchiveIrTime: j.TransitionToArchiveIrTime, Metadata: j.Metadata, Parts: j.Parts}) } func (j *Data) UnmarshalJSON(data []byte) error { var nj jsonData @@ -85,6 +92,8 @@ func (j *Data) UnmarshalJSON(data []byte) error { j.TransitionToArchiveTime = nj.TransitionToArchiveTime j.TransitionToDeepArchiveTime = nj.TransitionToDeepArchiveTime j.TransitionToArchiveIrTime = nj.TransitionToArchiveIrTime + j.Metadata = nj.Metadata + j.Parts = nj.Parts return nil } func (j *Data) validate() error { diff --git a/storagev2/apis/get_objects/api.go b/storagev2/apis/get_objects/api.go index 35224da3..a74db7b6 100644 --- a/storagev2/apis/get_objects/api.go +++ b/storagev2/apis/get_objects/api.go @@ -35,35 +35,39 @@ type 
PartSizes = []int64 // 对象条目,包含对象的元信息 type ListedObjectEntry struct { - Key string // 对象名称 - PutTime int64 // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒 - Hash string // 文件的哈希值 - Size int64 // 对象大小,单位为字节 - MimeType string // 对象 MIME 类型 - Type int64 // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储 - EndUser string // 资源内容的唯一属主标识 - RestoringStatus int64 // 文件的存储状态,即禁用状态和启用状态间的的互相转换,`0` 表示启用,`1`表示禁用 - Md5 string // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回 - Parts PartSizes // 每个分片的大小,如没有指定 need_parts 参数则不返回 + Key string // 对象名称 + PutTime int64 // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒 + Hash string // 文件的哈希值 + Size int64 // 对象大小,单位为字节 + MimeType string // 对象 MIME 类型 + Type int64 // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储 + EndUser string // 资源内容的唯一属主标识 + RestoringStatus int64 // 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段,仅对 stat 指令才有效 + Status int64 // 文件的存储状态,即禁用状态和启用状态间的的互相转换,`0` 表示启用,`1`表示禁用 + Md5 string // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回 + Metadata map[string]string // 对象存储元信息 + Parts PartSizes // 每个分片的大小,如没有指定 need_parts 参数则不返回 } type jsonListedObjectEntry struct { - Key string `json:"key"` // 对象名称 - PutTime int64 `json:"putTime"` // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒 - Hash string `json:"hash"` // 文件的哈希值 - Size int64 `json:"fsize,omitempty"` // 对象大小,单位为字节 - MimeType string `json:"mimeType"` // 对象 MIME 类型 - Type int64 `json:"type,omitempty"` // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储 - EndUser string `json:"endUser,omitempty"` // 资源内容的唯一属主标识 - RestoringStatus int64 `json:"status,omitempty"` // 文件的存储状态,即禁用状态和启用状态间的的互相转换,`0` 表示启用,`1`表示禁用 - Md5 string `json:"md5,omitempty"` // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回 - Parts PartSizes `json:"parts,omitempty"` // 每个分片的大小,如没有指定 need_parts 参数则不返回 + Key string `json:"key"` // 对象名称 + PutTime int64 `json:"putTime"` // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒 + Hash string `json:"hash"` // 文件的哈希值 + Size int64 `json:"fsize,omitempty"` // 对象大小,单位为字节 + MimeType string `json:"mimeType"` // 对象 MIME 类型 + Type int64 `json:"type,omitempty"` // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储 + EndUser string `json:"endUser,omitempty"` // 资源内容的唯一属主标识 + RestoringStatus int64 `json:"restoreStatus,omitempty"` // 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段,仅对 stat 指令才有效 + Status int64 `json:"status,omitempty"` // 文件的存储状态,即禁用状态和启用状态间的的互相转换,`0` 表示启用,`1`表示禁用 + Md5 string `json:"md5,omitempty"` // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回 + Metadata map[string]string `json:"x-qn-meta,omitempty"` // 对象存储元信息 + Parts PartSizes `json:"parts,omitempty"` // 每个分片的大小,如没有指定 need_parts 参数则不返回 } func (j *ListedObjectEntry) MarshalJSON() ([]byte, error) { if err := j.validate(); err != nil { return nil, err } - return json.Marshal(&jsonListedObjectEntry{Key: j.Key, PutTime: j.PutTime, Hash: j.Hash, Size: j.Size, MimeType: j.MimeType, Type: j.Type, EndUser: j.EndUser, RestoringStatus: j.RestoringStatus, Md5: j.Md5, Parts: j.Parts}) + return json.Marshal(&jsonListedObjectEntry{Key: j.Key, PutTime: j.PutTime, Hash: j.Hash, Size: j.Size, MimeType: j.MimeType, Type: j.Type, EndUser: j.EndUser, RestoringStatus: j.RestoringStatus, Status: j.Status, Md5: j.Md5, Metadata: j.Metadata, Parts: j.Parts}) } func (j *ListedObjectEntry) UnmarshalJSON(data []byte) error { var nj jsonListedObjectEntry @@ -78,7 +82,9 @@ func (j *ListedObjectEntry) UnmarshalJSON(data []byte) error { j.Type = nj.Type j.EndUser = nj.EndUser j.RestoringStatus = nj.RestoringStatus + j.Status = nj.Status j.Md5 = nj.Md5 + j.Metadata = nj.Metadata j.Parts = nj.Parts return nil } diff --git a/storagev2/apis/resumable_upload_v1_bput/api.go 
b/storagev2/apis/resumable_upload_v1_bput/api.go index ebf30d72..bbf8419e 100644 --- a/storagev2/apis/resumable_upload_v1_bput/api.go +++ b/storagev2/apis/resumable_upload_v1_bput/api.go @@ -65,12 +65,6 @@ func (j *Response) validate() error { if j.Checksum == "" { return errors.MissingRequiredFieldError{Name: "Checksum"} } - if j.Crc32 == 0 { - return errors.MissingRequiredFieldError{Name: "Crc32"} - } - if j.Offset == 0 { - return errors.MissingRequiredFieldError{Name: "Offset"} - } if j.Host == "" { return errors.MissingRequiredFieldError{Name: "Host"} } diff --git a/storagev2/apis/resumable_upload_v1_make_block/api.go b/storagev2/apis/resumable_upload_v1_make_block/api.go index 2beb30e8..f5278ab2 100644 --- a/storagev2/apis/resumable_upload_v1_make_block/api.go +++ b/storagev2/apis/resumable_upload_v1_make_block/api.go @@ -64,12 +64,6 @@ func (j *Response) validate() error { if j.Checksum == "" { return errors.MissingRequiredFieldError{Name: "Checksum"} } - if j.Crc32 == 0 { - return errors.MissingRequiredFieldError{Name: "Crc32"} - } - if j.Offset == 0 { - return errors.MissingRequiredFieldError{Name: "Offset"} - } if j.Host == "" { return errors.MissingRequiredFieldError{Name: "Host"} } diff --git a/storagev2/apis/stat_object/api.go b/storagev2/apis/stat_object/api.go index 2a8fabaf..9149870b 100644 --- a/storagev2/apis/stat_object/api.go +++ b/storagev2/apis/stat_object/api.go @@ -45,7 +45,7 @@ type jsonResponse struct { Size int64 `json:"fsize,omitempty"` // 对象大小,单位为字节 Hash string `json:"hash"` // 对象哈希值 MimeType string `json:"mimeType"` // 对象 MIME 类型 - Type int64 `json:"type"` // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储 + Type int64 `json:"type,omitempty"` // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储 PutTime int64 `json:"putTime"` // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒 EndUser string `json:"endUser,omitempty"` // 资源内容的唯一属主标识 RestoringStatus int64 `json:"restoreStatus,omitempty"` // 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段 @@ -96,9 +96,6 @@ func (j *Response) validate() error { if j.MimeType == "" { return errors.MissingRequiredFieldError{Name: "MimeType"} } - if j.Type == 0 { - return errors.MissingRequiredFieldError{Name: "Type"} - } if j.PutTime == 0 { return errors.MissingRequiredFieldError{Name: "PutTime"} } diff --git a/storagev2/chooser/ip_chooser.go b/storagev2/chooser/ip_chooser.go index 8823b03c..04dace35 100644 --- a/storagev2/chooser/ip_chooser.go +++ b/storagev2/chooser/ip_chooser.go @@ -41,15 +41,16 @@ func NewIPChooser(options *IPChooserConfig) Chooser { if options == nil { options = &IPChooserConfig{} } - if options.FreezeDuration == 0 { - options.FreezeDuration = 10 * time.Minute + freezeDuration := options.FreezeDuration + if freezeDuration == 0 { + freezeDuration = 10 * time.Minute } return &ipChooser{ blackheap: blackheap{ m: make(map[string]*blackItem, 1024), items: make([]*blackItem, 0, 1024), }, - freezeDuration: options.FreezeDuration, + freezeDuration: freezeDuration, } } diff --git a/storagev2/chooser/subnet_chooser.go b/storagev2/chooser/subnet_chooser.go index 92f622f4..033998e1 100644 --- a/storagev2/chooser/subnet_chooser.go +++ b/storagev2/chooser/subnet_chooser.go @@ -24,15 +24,16 @@ func NewSubnetChooser(options *SubnetChooserConfig) Chooser { if options == nil { options = &SubnetChooserConfig{} } - if options.FreezeDuration == 0 { - options.FreezeDuration = 10 * time.Minute + freezeDuration := options.FreezeDuration + if freezeDuration == 0 { + freezeDuration = 10 * time.Minute } return &subnetChooser{ blackheap: blackheap{ m: 
make(map[string]*blackItem, 1024), items: make([]*blackItem, 0, 1024), }, - freezeDuration: options.FreezeDuration, + freezeDuration: freezeDuration, } } diff --git a/storagev2/downloader/destination/destination.go b/storagev2/downloader/destination/destination.go new file mode 100644 index 00000000..0eb8d516 --- /dev/null +++ b/storagev2/downloader/destination/destination.go @@ -0,0 +1,290 @@ +package destination + +import ( + "errors" + "io" + "os" + "path/filepath" + + internal_io "github.com/qiniu/go-sdk/v7/internal/io" + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/downloader/resumable_recorder" +) + +type ( + // 切片选项 + SplitOptions struct { + // 只读可恢复记录仪介质 + Medium resumablerecorder.ReadableResumableRecorderMedium + } + + // 数据目标 + Destination interface { + PartWriter + io.Closer + + // 切片 + Split(totalSize, partSize uint64, options *SplitOptions) ([]Part, error) + + // 数据目标 ID + DestinationID() (string, error) + + // 获取文件,如果数据目标不是文件,则返回 nil + GetFile() *os.File + } + + // 分片 + Part interface { + PartWriter + + // 分片大小 + Size() uint64 + + // 分片偏移量 + Offset() uint64 + + // 已下载的数据量 + HaveDownloaded() uint64 + } + + // 分片写入接口 + PartWriter interface { + // 从 `io.Reader` 复制数据写入 + CopyFrom(io.Reader, func(uint64)) (uint64, error) + } + + writerAtDestination struct { + wr WriteAtCloser + destinationID string + } + + writerAtPart struct { + *internal_io.OffsetWriter + offset, totalSize, restSize uint64 + } + + writeCloserDestination struct { + wr io.WriteCloser + destinationID string + } + + writeCloserPart struct { + wr io.Writer + totalSize, restSize uint64 + } + + WriteAtCloser interface { + io.WriterAt + io.WriteSeeker + io.Closer + } +) + +// 将 io.WriteCloser 封装为数据目标 +func NewWriteCloserDestination(wr io.WriteCloser, destinationID string) Destination { + return &writeCloserDestination{wr, destinationID} +} + +func (wcd *writeCloserDestination) CopyFrom(r io.Reader, progress func(uint64)) (uint64, error) { + return copyBuffer(wcd.wr, r, progress) +} + +func (wcd *writeCloserDestination) Split(totalSize, _ uint64, _ *SplitOptions) ([]Part, error) { + return []Part{&writeCloserPart{wcd.wr, totalSize, totalSize}}, nil +} + +func (wcd *writeCloserDestination) DestinationID() (string, error) { + return wcd.destinationID, nil +} + +func (wcd *writeCloserDestination) Close() error { + return wcd.wr.Close() +} + +func (wcd *writeCloserDestination) GetFile() *os.File { + if file, ok := wcd.wr.(*os.File); ok { + return file + } else { + return nil + } +} + +func (wcp *writeCloserPart) Size() uint64 { + return wcp.totalSize +} + +func (wcp *writeCloserPart) Offset() uint64 { + return 0 +} + +func (wcp *writeCloserPart) HaveDownloaded() uint64 { + return 0 +} + +var errInvalidWrite = errors.New("invalid write result") + +func (wcp *writeCloserPart) CopyFrom(r io.Reader, progress func(uint64)) (uint64, error) { + var newProgress func(uint64) + + if wcp.restSize == 0 { + return 0, nil + } + + haveCopied := wcp.HaveDownloaded() + if progress != nil { + newProgress = func(downloaded uint64) { progress(haveCopied + downloaded) } + } + n, err := copyBuffer(wcp.wr, io.LimitReader(r, int64(wcp.restSize)), newProgress) + if n > 0 { + wcp.restSize -= n + } + return n, err +} + +// 将 io.WriterAt + io.WriteSeeker + io.Closer 封装为数据目标 +func NewWriteAtCloserDestination(wr WriteAtCloser, destinationID string) Destination { + return &writerAtDestination{wr, destinationID} +} + +func (wad *writerAtDestination) CopyFrom(r io.Reader, progress func(uint64)) (uint64, error) { + n, err := io.Copy(wad.wr, r) +
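// The progress callback goes unused on this whole-destination path: granular
// progress is reported by the per-part CopyFrom implementations below, and
// plain io.Copy works here because WriteAtCloser embeds io.WriteSeeker and
// therefore io.Writer.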
return uint64(n), err +} + +func (wad *writerAtDestination) Split(totalSize, partSize uint64, options *SplitOptions) ([]Part, error) { + var ( + parts []Part + offsetMap = make(map[uint64]uint64) + resumableRecord resumablerecorder.ResumableRecord + err error + ) + if options == nil { + options = &SplitOptions{} + } + + if medium := options.Medium; medium != nil { + for { + if err = medium.Next(&resumableRecord); err != nil { + break + } + offsetMap[resumableRecord.Offset] = resumableRecord.PartWritten + } + } + + parts = make([]Part, 0, (totalSize+partSize-1)/partSize) + for offset := uint64(0); offset < totalSize; offset += partSize { + size := partSize + if size > (totalSize - offset) { + size = totalSize - offset + } + haveWritten := offsetMap[offset] + parts = append(parts, &writerAtPart{internal_io.NewOffsetWriter(wad.wr, int64(offset+haveWritten)), offset, size, size - haveWritten}) + } + return parts, nil +} + +func (wad *writerAtDestination) DestinationID() (string, error) { + return wad.destinationID, nil +} + +func (wad *writerAtDestination) Close() error { + return wad.wr.Close() +} + +func (wad *writerAtDestination) GetFile() *os.File { + if file, ok := wad.wr.(*os.File); ok { + return file + } else { + return nil + } +} + +func (w *writerAtPart) Size() uint64 { + return w.totalSize +} + +func (w *writerAtPart) Offset() uint64 { + return w.offset +} + +func (w *writerAtPart) HaveDownloaded() uint64 { + return w.totalSize - w.restSize +} + +func (w *writerAtPart) CopyFrom(r io.Reader, progress func(uint64)) (uint64, error) { + var newProgress func(uint64) + + if w.restSize == 0 { + return 0, nil + } + + haveCopied := w.HaveDownloaded() + if progress != nil { + newProgress = func(downloaded uint64) { progress(haveCopied + downloaded) } + } + n, err := copyBuffer(w.OffsetWriter, io.LimitReader(r, int64(w.restSize)), newProgress) + if n > 0 { + w.restSize -= n + } + return n, err +} + +func copyBuffer(w io.Writer, r io.Reader, progress func(uint64)) (uint64, error) { + const BUFSIZE = 32 * 1024 + var ( + buf = make([]byte, BUFSIZE) + haveCopied uint64 + nr, nw int + er, ew, err error + ) + for { + nr, er = r.Read(buf) + if nr > 0 { + nw, ew = w.Write(buf[0:nr]) + if nw < 0 || nr < nw { + nw = 0 + if ew == nil { + ew = errInvalidWrite + } + } + haveCopied += uint64(nw) + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + if progress != nil { + progress(haveCopied) + } + } + if er != nil { + if er != io.EOF { + err = er + } + break + } + } + return haveCopied, err +} + +// 将文件封装为数据目标 +func NewFileDestination(filePath string) (Destination, error) { + file, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return nil, err + } + if !canSeekReally(file) { + return NewWriteCloserDestination(file, ""), nil + } else if absFilePath, err := filepath.Abs(filePath); err != nil { + return nil, err + } else { + return NewWriteAtCloserDestination(file, absFilePath), nil + } +} + +func canSeekReally(seeker io.Seeker) bool { + _, err := seeker.Seek(0, io.SeekCurrent) + return err == nil +} diff --git a/storagev2/downloader/destination/destination_test.go b/storagev2/downloader/destination/destination_test.go new file mode 100644 index 00000000..f9900845 --- /dev/null +++ b/storagev2/downloader/destination/destination_test.go @@ -0,0 +1,130 @@ +//go:build unit +// +build unit + +package destination_test + +import ( + "bytes" + "crypto/md5" + "errors" + "io" + "io/ioutil" + "math/rand" + "os" + "sync" + "testing" + "time" + + 
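// A sketch of driving the destination API defined above: NewFileDestination
// keys a really-seekable file by its absolute path (its DestinationID, used
// to match resumable records) and falls back to a single-part stream
// destination otherwise. A seekable destination is split into fixed-size
// parts and each part is filled via CopyFrom, which is exactly what the
// concurrent downloader does from several goroutines. Path and sizes here
// are arbitrary.
package main

import (
	"bytes"
	"log"

	"github.com/qiniu/go-sdk/v7/storagev2/downloader/destination"
)

func main() {
	dest, err := destination.NewFileDestination("/tmp/example.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer dest.Close()

	const total, partSize = 8 * 1024, 2 * 1024
	parts, err := dest.Split(total, partSize, nil) // nil: no resumable record to restore
	if err != nil {
		log.Fatal(err)
	}
	for _, part := range parts {
		payload := bytes.Repeat([]byte{0xAB}, int(part.Size()))
		if _, err := part.CopyFrom(bytes.NewReader(payload), nil); err != nil {
			log.Fatal(err)
		}
	}
}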
"github.com/qiniu/go-sdk/v7/storagev2/downloader/destination" +) + +func TestSeekableDestination(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "test-seekable-destination-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + dest := destination.NewWriteAtCloserDestination(tmpFile, tmpFile.Name()) + parts, err := dest.Split(1024*1024*1024, 1024*1024, nil) + if err != nil { + t.Fatal(err) + } + if len(parts) != 1024 { + t.Fatalf("unexpected slices") + } + + var ( + wg sync.WaitGroup + lock sync.Mutex + ) + + for i, part := range parts { + wg.Add(1) + go func(i int, part destination.Part) { + defer wg.Done() + + buf := make([]byte, 1024*1024) + for j := 0; j < 1024*1024; j++ { + buf[j] = byte(i % 256) + } + var lastDownloaded uint64 + if _, e := part.CopyFrom(bytes.NewReader(buf), func(downloaded uint64) { + if lastDownloaded > downloaded { + lock.Lock() + err = errors.New("unexpected downloading progress") + lock.Unlock() + } + lastDownloaded = downloaded + }); e != nil { + lock.Lock() + err = e + lock.Unlock() + } + }(i, part) + } + wg.Wait() + + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 1024; i++ { + buf := make([]byte, 1024*1024) + _, err := tmpFile.Read(buf) + if err != nil { + t.Fatal(err) + } + for j := 0; j < len(buf); j++ { + if buf[j] != byte(i%256) { + t.Fatalf("unexpected buffer content") + } + } + } +} + +func TestUnseekableDestination(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "test-unseekable-destination-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + dest := destination.NewWriteCloserDestination(tmpFile, tmpFile.Name()) + parts, err := dest.Split(1024*1024, 1024, nil) + if err != nil { + t.Fatal(err) + } + if len(parts) != 1 { + t.Fatalf("unexpected slices") + } + + md5Expected := md5.New() + r := rand.New(rand.NewSource(time.Now().UnixNano())) + var lastDownloaded uint64 + + if _, err = parts[0].CopyFrom(io.TeeReader(io.LimitReader(r, 1024*1024), md5Expected), func(downloaded uint64) { + if lastDownloaded > downloaded { + t.Fatalf("unexpected downloading progress") + } + lastDownloaded = downloaded + }); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + md5Actual := md5.New() + if _, err = io.Copy(md5Actual, tmpFile); err != nil { + t.Fatal(err) + } + if !bytes.Equal(md5Expected.Sum(nil), md5Actual.Sum(nil)) { + t.Fatalf("unexpected md5 checksum") + } + if lastDownloaded != 1024*1024 { + t.Fatalf("unexpected downloading progress") + } +} diff --git a/storagev2/downloader/download_manager.go b/storagev2/downloader/download_manager.go new file mode 100644 index 00000000..fc1ea38b --- /dev/null +++ b/storagev2/downloader/download_manager.go @@ -0,0 +1,308 @@ +package downloader + +import ( + "context" + "io" + "os" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/downloader/destination" + "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/objects" + "github.com/qiniu/go-sdk/v7/storagev2/region" + "golang.org/x/sync/errgroup" +) + +type ( + // 下载管理器 + DownloadManager struct { + destinationDownloader DestinationDownloader + objectsManager *objects.ObjectsManager + downloadURLsProvider DownloadURLsProvider + downloadURLsProviderOnce sync.Once + options httpclient.Options + } + + // 
下载管理器选项 + DownloadManagerOptions struct { + // HTTP 客户端选项 + httpclient.Options + + // 目标下载器 + DestinationDownloader DestinationDownloader + + // 分片列举版本,如果不填写,默认为 V1 + ListerVersion objects.ListerVersion + + // 下载 URL 生成器 + DownloadURLsProvider DownloadURLsProvider + } + + // 对象下载参数 + ObjectOptions struct { + DestinationDownloadOptions + GenerateOptions + + // 下载 URL 生成器 + DownloadURLsProvider DownloadURLsProvider + } + + // 目录下载参数 + DirectoryOptions struct { + // 是否使用 HTTP 协议,默认为不使用 + UseInsecureProtocol bool + + // 空间名称 + BucketName string + + // 对象前缀 + ObjectPrefix string + + // 下载并发度 + ObjectConcurrency int + + // 下载 URL 生成器 + DownloadURLsProvider DownloadURLsProvider + + // 下载前回调函数 + BeforeObjectDownload func(objectName string, objectOptions *ObjectOptions) + + // 下载进度 + OnDownloadingProgress func(objectName string, progress *DownloadingProgress) + + // 对象下载成功后回调 + OnObjectDownloaded func(objectName string, info *DownloadedObjectInfo) + + // 是否下载指定对象 + ShouldDownloadObject func(objectName string) bool + + // 分隔符,默认为 / + PathSeparator string + } + + // 已经下载的对象信息 + DownloadedObjectInfo struct { + Size uint64 // 对象大小 + } + + writeSeekCloser struct { + w io.Writer + } +) + +// 创建下载管理器 +func NewDownloadManager(options *DownloadManagerOptions) *DownloadManager { + if options == nil { + options = &DownloadManagerOptions{} + } + destinationDownloader := options.DestinationDownloader + if destinationDownloader == nil { + destinationDownloader = NewConcurrentDownloader(nil) + } + objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{ + Options: options.Options, + ListerVersion: options.ListerVersion, + }) + return &DownloadManager{ + destinationDownloader: destinationDownloader, + objectsManager: objectsManager, + options: options.Options, + } +} + +// 下载对象到文件 +func (downloadManager *DownloadManager) DownloadToFile(ctx context.Context, objectName, filePath string, options *ObjectOptions) (uint64, error) { + dest, err := destination.NewFileDestination(filePath) + if err != nil { + return 0, err + } + defer dest.Close() + return downloadManager.downloadToDestination(ctx, objectName, dest, options) +} + +// 下载对象到 io.Writer +func (downloadManager *DownloadManager) DownloadToWriter(ctx context.Context, objectName string, writer io.Writer, options *ObjectOptions) (uint64, error) { + var dest destination.Destination + if writeAtCloser, ok := writer.(destination.WriteAtCloser); ok { + dest = destination.NewWriteAtCloserDestination(writeAtCloser, "") + } else { + dest = destination.NewWriteCloserDestination(&writeSeekCloser{writer}, "") + } + defer dest.Close() + return downloadManager.downloadToDestination(ctx, objectName, dest, options) +} + +func (downloadManager *DownloadManager) downloadToDestination(ctx context.Context, objectName string, dest destination.Destination, options *ObjectOptions) (uint64, error) { + if options == nil { + options = &ObjectOptions{} + } + downloadURLsProvider := options.DownloadURLsProvider + if downloadURLsProvider == nil { + if err := downloadManager.initDownloadURLsProvider(ctx); err != nil { + return 0, err + } + } + if downloadURLsProvider == nil { + downloadURLsProvider = downloadManager.downloadURLsProvider + } + if downloadURLsProvider == nil { + return 0, errors.MissingRequiredFieldError{Name: "DownloadURLsProvider"} + } + urls, err := downloadURLsProvider.GetURLsIter(ctx, objectName, &options.GenerateOptions) + if err != nil { + return 0, err + } + return downloadManager.destinationDownloader.Download(ctx, urls, dest, 
&options.DestinationDownloadOptions) +} + +// 下载目录 +func (downloadManager *DownloadManager) DownloadDirectory(ctx context.Context, targetDirPath string, options *DirectoryOptions) error { + var err error + + if options == nil { + options = &DirectoryOptions{} + } + if options.BucketName == "" { + return &errors.MissingRequiredFieldError{Name: "BucketName"} + } + objectConcurrency := options.ObjectConcurrency + if objectConcurrency == 0 { + objectConcurrency = 4 + } + pathSeparator := options.PathSeparator + if pathSeparator == "" { + pathSeparator = "/" + } + + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(objectConcurrency) + + lister := downloadManager.objectsManager.Bucket(options.BucketName).List(ctx, &objects.ListObjectsOptions{ + Prefix: options.ObjectPrefix, + }) + defer lister.Close() + + var object objects.ObjectDetails + for lister.Next(&object) { + objectName := object.Name + relativePath := strings.TrimPrefix(objectName, options.ObjectPrefix) + if pathSeparator != string(filepath.Separator) { + relativePath = strings.Replace(relativePath, pathSeparator, string(filepath.Separator), -1) + } + fullPath := filepath.Join(targetDirPath, relativePath) + if strings.HasSuffix(relativePath, string(filepath.Separator)) { + if err = os.MkdirAll(fullPath, 0700); err != nil { + return err + } + } else { + if err = os.MkdirAll(filepath.Dir(fullPath), 0700); err != nil { + return err + } + g.Go(func() error { + var destinationDownloadOptions DestinationDownloadOptions + if onDownloadingProgress := options.OnDownloadingProgress; onDownloadingProgress != nil { + destinationDownloadOptions.OnDownloadingProgress = func(progress *DownloadingProgress) { + onDownloadingProgress(objectName, progress) + } + } + objectOptions := ObjectOptions{ + DestinationDownloadOptions: destinationDownloadOptions, + GenerateOptions: GenerateOptions{ + BucketName: options.BucketName, + UseInsecureProtocol: options.UseInsecureProtocol, + }, + DownloadURLsProvider: options.DownloadURLsProvider, + } + if options.ShouldDownloadObject != nil && !options.ShouldDownloadObject(objectName) { + return nil + } + if options.BeforeObjectDownload != nil { + options.BeforeObjectDownload(objectName, &objectOptions) + } + n, err := downloadManager.DownloadToFile(ctx, objectName, fullPath, &objectOptions) + if err == nil && options.OnObjectDownloaded != nil { + options.OnObjectDownloaded(objectName, &DownloadedObjectInfo{Size: n}) + } + return err + }) + } + } + if err = lister.Error(); err != nil { + return err + } + return g.Wait() +} + +func (downloadManager *DownloadManager) initDownloadURLsProvider(ctx context.Context) (err error) { + if downloadManager.downloadURLsProvider == nil { + if credentialsProvider := downloadManager.getCredentialsProvider(); credentialsProvider != nil { + downloadManager.downloadURLsProviderOnce.Do(func() { + var creds *credentials.Credentials + creds, err = credentialsProvider.Get(ctx) + if err != nil { + return + } + bucketRegionsQueryOptions := region.BucketRegionsQueryOptions{ + UseInsecureProtocol: downloadManager.options.UseInsecureProtocol, + Client: downloadManager.options.BasicHTTPClient, + Resolver: downloadManager.options.Resolver, + Chooser: downloadManager.options.Chooser, + BeforeResolve: downloadManager.options.BeforeResolve, + AfterResolve: downloadManager.options.AfterResolve, + ResolveError: downloadManager.options.ResolveError, + BeforeBackoff: downloadManager.options.BeforeBackoff, + AfterBackoff: downloadManager.options.AfterBackoff, + BeforeRequest: 
downloadManager.options.BeforeRequest, + AfterResponse: downloadManager.options.AfterResponse, + } + if hostRetryConfig := downloadManager.options.HostRetryConfig; hostRetryConfig != nil { + bucketRegionsQueryOptions.RetryMax = hostRetryConfig.RetryMax + bucketRegionsQueryOptions.Backoff = hostRetryConfig.Backoff + } + downloadManager.downloadURLsProvider = SignURLsProvider( + NewDefaultSrcURLsProvider(creds.AccessKey, &DefaultSrcURLsProviderOptions{ + BucketRegionsQueryOptions: bucketRegionsQueryOptions, + BucketHosts: httpclient.DefaultBucketHosts(), + }), + NewCredentialsSigner(creds), + &SignOptions{TTL: 3 * time.Minute}, + ) + }) + } + } + return +} + +func (downloadManager *DownloadManager) getCredentialsProvider() credentials.CredentialsProvider { + credentialsProvider := downloadManager.options.Credentials + if credentialsProvider == nil { + if defaultCreds := credentials.Default(); defaultCreds != nil { + credentialsProvider = defaultCreds + } + } + return credentialsProvider +} + +func (w *writeSeekCloser) Write(p []byte) (int, error) { + return w.w.Write(p) +} + +func (w *writeSeekCloser) Seek(offset int64, whence int) (int64, error) { + if seeker, ok := w.w.(io.Seeker); ok { + return seeker.Seek(offset, whence) + } + return 0, syscall.ESPIPE +} + +func (w *writeSeekCloser) Close() error { + if closer, ok := w.w.(io.Closer); ok { + return closer.Close() + } + return nil +} diff --git a/storagev2/downloader/download_manager_test.go b/storagev2/downloader/download_manager_test.go new file mode 100644 index 00000000..b45f899a --- /dev/null +++ b/storagev2/downloader/download_manager_test.go @@ -0,0 +1,144 @@ +//go:build unit +// +build unit + +package downloader_test + +import ( + "context" + "encoding/json" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/qiniu/go-sdk/v7/storagev2/apis/get_objects" + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/downloader" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" +) + +func TestDownloadManagerDownloadDirectory(t *testing.T) { + rsfMux := http.NewServeMux() + rsfMux.HandleFunc("/list", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + t.Fatalf("unexpected method") + } + w.Header().Set("Content-Type", "application/json") + query := r.URL.Query() + if query.Get("bucket") != "bucket1" { + t.Fatalf("unexpected bucket") + } + if query.Get("prefix") != "" { + t.Fatalf("unexpected prefix") + } + if query.Get("limit") != "" { + t.Fatalf("unexpected limit") + } + jsonData, err := json.Marshal(&get_objects.Response{ + Items: []get_objects.ListedObjectEntry{{ + Key: "test1/file1", + PutTime: time.Now().UnixNano() / 100, + Hash: "testhash1", + Size: 4 * 1024 * 1024, + MimeType: "application/json", + }, { + Key: "test2/file2", + PutTime: time.Now().UnixNano() / 100, + Hash: "testhash2", + Size: 4 * 1024 * 1024, + MimeType: "application/json", + }}, + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonData) + }) + rsfServer := httptest.NewServer(rsfMux) + defer rsfServer.Close() + + ioMux := http.NewServeMux() + ioMux.HandleFunc("/test1/file1", func(w http.ResponseWriter, r *http.Request) { + rander := rand.New(rand.NewSource(time.Now().UnixNano())) + w.Header().Set("Accept-Ranges", "bytes") + w.Header().Add("X-ReqId", "fakereqid") + switch r.Method { + case http.MethodHead: + 
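// The downloader's wire protocol, visible in downloaders.go further down: a
// HEAD for Content-Length/Accept-Ranges/ETag, then one "Range: bytes=from-to"
// GET per part, with the ETag re-checked on every response. Instead of
// hand-rolling handlers like the ones in this test, a stub can lean on
// http.ServeContent, which implements HEAD, Range and Content-Length
// semantics by itself; a sketch (payload and ETag value are arbitrary):
package main

import (
	"bytes"
	"net/http"
	"net/http/httptest"
	"time"
)

func newRangeServer(payload []byte) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("ETag", `"testetag1"`)
		// ServeContent answers HEAD and Range requests and sets Accept-Ranges.
		http.ServeContent(w, r, "testfile", time.Now(), bytes.NewReader(payload))
	}))
}

func main() {
	srv := newRangeServer(bytes.Repeat([]byte{0x5A}, 4*1024*1024))
	defer srv.Close()
	// srv.URL can then back a downloader.NewStaticDomainBasedURLsProvider.
}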
w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(4*1024*1024)) + w.Header().Set("ETag", "testetag1") + case http.MethodGet: + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(4*1024*1024)) + w.Header().Set("ETag", "testetag1") + io.CopyN(w, rander, 4*1024*1024) + default: + t.Fatalf("unexpected method") + } + }) + ioMux.HandleFunc("/test2/file2", func(w http.ResponseWriter, r *http.Request) { + rander := rand.New(rand.NewSource(time.Now().UnixNano())) + w.Header().Set("Accept-Ranges", "bytes") + w.Header().Add("X-ReqId", "fakereqid") + switch r.Method { + case http.MethodHead: + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(4*1024*1024)) + w.Header().Set("ETag", "testetag1") + case http.MethodGet: + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(4*1024*1024)) + w.Header().Set("ETag", "testetag1") + io.CopyN(w, rander, 4*1024*1024) + default: + t.Fatalf("unexpected method") + } + }) + ioServer := httptest.NewServer(ioMux) + defer ioServer.Close() + + tmpDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + downloadManager := downloader.NewDownloadManager(&downloader.DownloadManagerOptions{ + Options: http_client.Options{ + Regions: ®ion.Region{ + Rsf: region.Endpoints{Preferred: []string{rsfServer.URL}}, + }, + Credentials: credentials.NewCredentials("testaccesskey", "testsecretkey"), + UseInsecureProtocol: true, + }, + DestinationDownloader: downloader.NewConcurrentDownloader(&downloader.ConcurrentDownloaderOptions{ + Concurrency: 1, + PartSize: 10 * 1024 * 1024, + }), + }) + if err = downloadManager.DownloadDirectory(context.Background(), tmpDir, &downloader.DirectoryOptions{ + UseInsecureProtocol: true, + BucketName: "bucket1", + DownloadURLsProvider: downloader.NewStaticDomainBasedURLsProvider([]string{ioServer.URL}), + }); err != nil { + t.Fatal(err) + } + if fileInfo, err := os.Stat(filepath.Join(tmpDir, "test1", "file1")); err != nil { + t.Fatal(err) + } else if fileInfo.Size() != 4*1024*1024 { + t.Fatalf("unexpected file size: test1/file1") + } + if fileInfo, err := os.Stat(filepath.Join(tmpDir, "test2", "file2")); err != nil { + t.Fatal(err) + } else if fileInfo.Size() != 4*1024*1024 { + t.Fatalf("unexpected file size: test2/file2") + } +} diff --git a/storagev2/downloader/downloaders.go b/storagev2/downloader/downloaders.go new file mode 100644 index 00000000..54fdeb3f --- /dev/null +++ b/storagev2/downloader/downloaders.go @@ -0,0 +1,463 @@ +package downloader + +import ( + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strings" + "sync" + "time" + + clientv1 "github.com/qiniu/go-sdk/v7/client" + "github.com/qiniu/go-sdk/v7/internal/clientv2" + "github.com/qiniu/go-sdk/v7/storagev2/backoff" + "github.com/qiniu/go-sdk/v7/storagev2/chooser" + "github.com/qiniu/go-sdk/v7/storagev2/downloader/destination" + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/downloader/resumable_recorder" + "github.com/qiniu/go-sdk/v7/storagev2/resolver" + "github.com/qiniu/go-sdk/v7/storagev2/retrier" + "golang.org/x/sync/errgroup" +) + +type ( + concurrentDownloader struct { + concurrency uint + partSize uint64 + client clientv2.Client + resumableRecorder resumablerecorder.ResumableRecorder + } + + // 下载器选项 + DownloaderOptions struct { + Client clientv2.Client // HTTP 客户端,如果不配置则使用默认的 HTTP 客户端 + 
RetryMax int // 最大重试次数,如果不配置,默认为 9 + Backoff backoff.Backoff // 重试时间间隔 v2,优先级高于 RetryInterval + Resolver resolver.Resolver // 主备域名解析器 + Chooser chooser.Chooser // IP 选择器 + + BeforeResolve func(*http.Request) // 域名解析前回调函数 + AfterResolve func(*http.Request, []net.IP) // 域名解析后回调函数 + ResolveError func(*http.Request, error) // 域名解析错误回调函数 + BeforeBackoff func(*http.Request, *retrier.RetrierOptions, time.Duration) // 退避前回调函数 + AfterBackoff func(*http.Request, *retrier.RetrierOptions, time.Duration) // 退避后回调函数 + BeforeRequest func(*http.Request, *retrier.RetrierOptions) // 请求前回调函数 + AfterResponse func(*http.Response, *retrier.RetrierOptions, error) // 请求后回调函数 + } + + // 并发下载器选项 + ConcurrentDownloaderOptions struct { + DownloaderOptions + Concurrency uint // 并发度 + PartSize uint64 // 分片大小 + ResumableRecorder resumablerecorder.ResumableRecorder // 可恢复记录仪 + } +) + +func (options *DownloaderOptions) toSimpleRetryConfig() clientv2.SimpleRetryConfig { + retryMax := options.RetryMax + if retryMax <= 0 { + retryMax = 9 + } + return clientv2.SimpleRetryConfig{ + RetryMax: retryMax, + Resolver: options.Resolver, + Chooser: options.Chooser, + Backoff: options.Backoff, + ShouldRetry: func(req *http.Request, resp *http.Response, err error) bool { + if err != nil { + return retrier.IsErrorRetryable(err) + } + return resp.StatusCode >= 500 + }, + BeforeResolve: options.BeforeResolve, + AfterResolve: options.AfterResolve, + ResolveError: options.ResolveError, + BeforeBackoff: options.BeforeBackoff, + AfterBackoff: options.AfterBackoff, + BeforeRequest: options.BeforeRequest, + AfterResponse: options.AfterResponse, + } +} + +// 创建并发下载器 +func NewConcurrentDownloader(options *ConcurrentDownloaderOptions) DestinationDownloader { + if options == nil { + options = &ConcurrentDownloaderOptions{} + } + concurrency := options.Concurrency + if concurrency == 0 { + concurrency = 4 + } + partSize := options.PartSize + if partSize == 0 { + partSize = 16 * 1024 * 1024 + } + client := clientv2.NewClient(options.Client, clientv2.NewSimpleRetryInterceptor(options.toSimpleRetryConfig()), retryWhenTokenOutOfDateInterceptor{}) + return &concurrentDownloader{concurrency, partSize, client, options.ResumableRecorder} +} + +func (downloader concurrentDownloader) Download(ctx context.Context, urlsIter URLsIter, dest destination.Destination, options *DestinationDownloadOptions) (uint64, error) { + if options == nil { + options = &DestinationDownloadOptions{} + } + headResponse, err := headRequest(ctx, urlsIter, options.Header, downloader.client) + if err != nil { + return 0, err + } else if headResponse == nil { + return 0, errors.New("no url tried") + } + if onResponseHeader := options.OnResponseHeader; onResponseHeader != nil { + onResponseHeader(headResponse.Header) + } + + var offset uint64 + switch headResponse.StatusCode { + case http.StatusOK: + case http.StatusPartialContent: + var unused1, unused2 int64 + contentRange := headResponse.Header.Get("Content-Range") + if _, err = fmt.Sscanf(contentRange, "bytes %d-%d/%d", &offset, &unused1, &unused2); err != nil { + return 0, err + } + default: + return 0, clientv1.ResponseError(headResponse) + } + etag := parseEtag(headResponse.Header.Get("Etag")) + if headResponse.ContentLength < 0 || // 无法确定文件实际大小,发出一个请求下载整个文件,不再使用并行下载 + headResponse.Header.Get("Accept-Ranges") != "bytes" { // 必须返回 Accept-Ranges 头,否则不认为可以分片下载 + var progress func(uint64) + if onDownloadingProgress := options.OnDownloadingProgress; onDownloadingProgress != nil { + progress = func(downloaded uint64) { + 
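// This closure runs on the non-ranged fallback path, reached when the HEAD
// response reports no usable Content-Length or no "Accept-Ranges: bytes"
// header: the whole object is streamed through a single request, and raw
// byte counts are forwarded as DownloadingProgress without a TotalSize,
// since the total is unknown here.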
onDownloadingProgress(&DownloadingProgress{Downloaded: downloaded}) + } + } + return downloadToPartReader(ctx, urlsIter, etag, options.Header, downloader.client, dest, progress) + } + needToDownload := uint64(headResponse.ContentLength) + + var ( + readableMedium resumablerecorder.ReadableResumableRecorderMedium + writeableMedium resumablerecorder.WriteableResumableRecorderMedium + resumableRecorderOpenArgs *resumablerecorder.ResumableRecorderOpenArgs + ) + if resumableRecorder := downloader.resumableRecorder; resumableRecorder != nil { + var destinationID string + destinationID, err = dest.DestinationID() + if err == nil && destinationID != "" { + resumableRecorderOpenArgs = &resumablerecorder.ResumableRecorderOpenArgs{ + ETag: etag, + DestinationID: destinationID, + PartSize: downloader.partSize, + TotalSize: needToDownload, + Offset: offset, + } + readableMedium = resumableRecorder.OpenForReading(resumableRecorderOpenArgs) + if readableMedium != nil { + defer readableMedium.Close() + } else if file := dest.GetFile(); file != nil { + if err = file.Truncate(0); err != nil { // 无法恢复进度,目标文件清空 + return 0, err + } + } + } + } + + parts, err := dest.Split(needToDownload, downloader.partSize, &destination.SplitOptions{Medium: readableMedium}) + if err != nil { + return 0, err + } + if readableMedium != nil { + readableMedium.Close() + readableMedium = nil + } + if resumableRecorder := downloader.resumableRecorder; resumableRecorder != nil && resumableRecorderOpenArgs != nil { + writeableMedium = resumableRecorder.OpenForAppending(resumableRecorderOpenArgs) + if writeableMedium == nil { + writeableMedium = resumableRecorder.OpenForCreatingNew(resumableRecorderOpenArgs) + } + if writeableMedium != nil { + defer writeableMedium.Close() + } + } + + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(int(downloader.concurrency)) + var ( + downloadingProgress = newDownloadingPartsProgress() + downloadingProgressMutex sync.Mutex + ) + for _, part := range parts { + p := part + urlsIterClone := urlsIter.Clone() + g.Go(func() error { + n, err := downloader.downloadToPart(ctx, urlsIterClone, etag, offset, options.Header, p, writeableMedium, &downloadingProgressMutex, func(downloaded uint64) { + downloadingProgress.setPartDownloadingProgress(p.Offset(), downloaded) + if onDownloadingProgress := options.OnDownloadingProgress; onDownloadingProgress != nil { + onDownloadingProgress(&DownloadingProgress{Downloaded: downloadingProgress.totalDownloaded(), TotalSize: needToDownload}) + } + }) + if n > 0 { + downloadingProgress.partDownloaded(p.Offset(), n) + if onDownloadingProgress := options.OnDownloadingProgress; onDownloadingProgress != nil { + onDownloadingProgress(&DownloadingProgress{Downloaded: downloadingProgress.totalDownloaded(), TotalSize: needToDownload}) + } + } + return err + }) + } + err = g.Wait() + if writeableMedium != nil { + writeableMedium.Close() + writeableMedium = nil + } + if resumableRecorder := downloader.resumableRecorder; resumableRecorder != nil && resumableRecorderOpenArgs != nil && err == nil { + resumableRecorder.Delete(resumableRecorderOpenArgs) + } + return downloadingProgress.totalDownloaded(), err +} + +func (downloader concurrentDownloader) downloadToPart( + ctx context.Context, urlsIter URLsIter, etag string, originalOffset uint64, headers http.Header, + part destination.Part, writeableMedium resumablerecorder.WriteableResumableRecorderMedium, + downloadingProgressMutex *sync.Mutex, onDownloadingProgress func(downloaded uint64)) (uint64, error) { + var ( + n uint64 + err 
error + size = part.Size() + offset = part.Offset() + haveRead = part.HaveDownloaded() + downloadingProgressCallback func(uint64) + ) + if onDownloadingProgress != nil { + downloadingProgressCallback = func(downloaded uint64) { + if downloadingProgressMutex != nil { + downloadingProgressMutex.Lock() + defer downloadingProgressMutex.Unlock() + } + onDownloadingProgress(downloaded) + } + } + for size > haveRead { + n, err = downloadToPartReaderWithOffsetAndSize(ctx, urlsIter, etag, originalOffset+offset+haveRead, size-haveRead, + headers, downloader.client, part, downloadingProgressCallback) + if n > 0 { + haveRead += n + continue + } + break + } + if haveRead > 0 && writeableMedium != nil { + writeableMedium.Write(&resumablerecorder.ResumableRecord{ + Offset: offset, + PartSize: size, + PartWritten: haveRead, + }) + } + return haveRead, err +} + +func downloadToPartReaderWithOffsetAndSize( + ctx context.Context, urlsIter URLsIter, etag string, offset, size uint64, headers http.Header, + client clientv2.Client, part destination.PartWriter, onDownloadingProgress func(downloaded uint64)) (uint64, error) { + headers = cloneHeader(headers) + setRange(headers, offset, offset+size) + return _downloadToPartReader(ctx, urlsIter, headers, etag, client, part, onDownloadingProgress) +} + +func downloadToPartReader( + ctx context.Context, urlsIter URLsIter, etag string, headers http.Header, + client clientv2.Client, part destination.PartWriter, onDownloadingProgress func(downloaded uint64)) (uint64, error) { + if headers.Get("Range") == "" { + headers = cloneHeader(headers) + setAcceptGzip(headers) + } + return _downloadToPartReader(ctx, urlsIter, headers, etag, client, part, onDownloadingProgress) +} + +func _downloadToPartReader( + ctx context.Context, urlsIter URLsIter, headers http.Header, etag string, + client clientv2.Client, part destination.PartWriter, onDownloadingProgress func(downloaded uint64)) (uint64, error) { + var ( + response *http.Response + u url.URL + n uint64 + ok, haveReset bool + err, peekErr error + ) + + for { + if ok, peekErr = urlsIter.Peek(&u); peekErr != nil { + return 0, peekErr + } else if !ok { + if haveReset { + break + } else { + urlsIter.Reset() + haveReset = true + continue + } + } + req := http.Request{ + Method: http.MethodGet, + URL: &u, + Header: headers, + Body: http.NoBody, + } + ctx = context.WithValue(ctx, urlsIterContextKey{}, urlsIter) + if response, err = client.Do(req.WithContext(ctx)); err != nil { + if !retrier.IsErrorRetryable(err) { + return 0, err + } + urlsIter.Next() + continue + } + var ( + bodyReader io.Reader = response.Body + bodyCloser io.Closer = response.Body + ) + if etag == parseEtag(response.Header.Get("Etag")) { + switch response.Header.Get("Content-Encoding") { + case "gzip": + if bodyReader, err = gzip.NewReader(bodyReader); err != nil { + bodyCloser.Close() + return 0, err + } + fallthrough + case "": + n, err = part.CopyFrom(bodyReader, onDownloadingProgress) + bodyCloser.Close() + if n > 0 { + return n, err + } + default: + bodyCloser.Close() + err = errors.New("unrecognized content-encoding") + } + } else { + bodyCloser.Close() + err = errors.New("etag mismatch") + } + urlsIter.Next() + } + return 0, err +} + +func headRequest(ctx context.Context, urlsIter URLsIter, headers http.Header, client clientv2.Client) (response *http.Response, err error) { + var ( + u url.URL + ok, haveReset bool + ) + if headers.Get("Accept-Encoding") != "" { + headers = cloneHeader(headers) + headers.Del("Accept-Encoding") + } + for { + if ok, err =
urlsIter.Peek(&u); err != nil { + return + } else if !ok { + if haveReset { + break + } else { + urlsIter.Reset() + haveReset = true + continue + } + } + req := http.Request{ + Method: http.MethodHead, + URL: &u, + Header: headers, + Body: http.NoBody, + } + if response, err = client.Do(req.WithContext(ctx)); err != nil { + if !retrier.IsErrorRetryable(err) { + return + } + urlsIter.Next() + continue + } + break + } + if response != nil && response.Body != nil { + response.Body.Close() + } + return +} + +func setAcceptGzip(headers http.Header) { + headers.Set("Accept-Encoding", "gzip") +} + +func setRange(headers http.Header, from, end uint64) { + headers.Set("Range", fmt.Sprintf("bytes=%d-%d", from, end-1)) + headers.Del("Accept-Encoding") +} + +type downloadingPartsProgress struct { + downloaded uint64 + downloading map[uint64]uint64 + lock sync.Mutex +} + +func newDownloadingPartsProgress() *downloadingPartsProgress { + return &downloadingPartsProgress{ + downloading: make(map[uint64]uint64), + } +} + +func (progress *downloadingPartsProgress) setPartDownloadingProgress(offset, downloaded uint64) { + progress.lock.Lock() + defer progress.lock.Unlock() + + progress.downloading[offset] = downloaded +} + +func (progress *downloadingPartsProgress) partDownloaded(offset, partSize uint64) { + progress.lock.Lock() + defer progress.lock.Unlock() + + delete(progress.downloading, offset) + progress.downloaded += partSize +} + +func (progress *downloadingPartsProgress) totalDownloaded() uint64 { + progress.lock.Lock() + defer progress.lock.Unlock() + + downloaded := progress.downloaded + for _, b := range progress.downloading { + downloaded += b + } + return downloaded +} +func parseEtag(etag string) string { + etag = strings.TrimPrefix(etag, "\"") + etag = strings.TrimSuffix(etag, "\"") + etag = strings.TrimSuffix(etag, ".gz") + return etag +} + +func cloneHeader(h http.Header) http.Header { + if h == nil { + return make(http.Header) + } + + // Find total number of values. 
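+ // (A single backing slice is allocated for all values and sliced per key;
+ // the three-index expression sv[:n:n] below caps each per-key slice so a
+ // caller appending to one key's values cannot overwrite a neighbour's.)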
+ nv := 0 + for _, vv := range h { + nv += len(vv) + } + sv := make([]string, nv) // shared backing array for headers' values + h2 := make(http.Header, len(h)) + for k, vv := range h { + n := copy(sv, vv) + h2[k] = sv[:n:n] + sv = sv[n:] + } + return h2 +} diff --git a/storagev2/downloader/downloaders_test.go b/storagev2/downloader/downloaders_test.go new file mode 100644 index 00000000..7ea2cbda --- /dev/null +++ b/storagev2/downloader/downloaders_test.go @@ -0,0 +1,585 @@ +package downloader_test + +import ( + "bytes" + "compress/gzip" + "context" + "crypto/md5" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/qiniu/go-sdk/v7/storagev2/backoff" + "github.com/qiniu/go-sdk/v7/storagev2/chooser" + "github.com/qiniu/go-sdk/v7/storagev2/downloader" + "github.com/qiniu/go-sdk/v7/storagev2/downloader/destination" + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/downloader/resumable_recorder" + "github.com/qiniu/go-sdk/v7/storagev2/resolver" +) + +func TestConcurrentDownloaderWithSinglePart(t *testing.T) { + var ( + counts [3]uint64 + hasher = md5.New() + ) + handler := func(id int, w http.ResponseWriter, r *http.Request) { + counts[id-1] += 1 + switch id { + case 1: + w.WriteHeader(http.StatusGatewayTimeout) + case 2: + w.WriteHeader(http.StatusServiceUnavailable) + case 3: + w.Header().Set("Accept-Ranges", "bytes") + switch r.Method { + case http.MethodHead: + w.Header().Set("Etag", "testetag1") + w.Header().Set("Content-Length", strconv.Itoa(1024*1024)) + case http.MethodGet: + w.Header().Set("Etag", "testetag1") + w.Header().Set("Content-Length", strconv.Itoa(1024*1024)) + _, err := io.Copy(w, io.TeeReader(io.LimitReader(rand.New(rand.NewSource(time.Now().UnixNano())), 1024*1024), hasher)) + if err != nil { + t.Fatal(err) + } + default: + w.WriteHeader(http.StatusMethodNotAllowed) + } + default: + w.WriteHeader(http.StatusPaymentRequired) + } + } + server1 := newTestServer(1, handler) + defer server1.Close() + url1, err := url.Parse(server1.URL) + if err != nil { + t.Fatal(err) + } + url1.Path = "/testfile" + + server2 := newTestServer(2, handler) + defer server2.Close() + url2, err := url.Parse(server2.URL) + if err != nil { + t.Fatal(err) + } + url2.Path = "/testfile" + + server3 := newTestServer(3, handler) + defer server3.Close() + url3, err := url.Parse(server3.URL) + if err != nil { + t.Fatal(err) + } + url3.Path = "/testfile" + + d := downloader.NewConcurrentDownloader(&downloader.ConcurrentDownloaderOptions{ + Concurrency: 1, + PartSize: 4 * 1024 * 1024, + DownloaderOptions: downloader.DownloaderOptions{ + Backoff: backoff.NewFixedBackoff(0), + Resolver: resolver.NewDefaultResolver(), + Chooser: chooser.NewDirectChooser(), + }, + }) + var ( + buf closableBuffer + lastDownloaded uint64 + ) + n, err := d.Download( + context.Background(), + downloader.NewURLsIter([]*url.URL{url1, url2, url3}), + destination.NewWriteCloserDestination(&buf, ""), + &downloader.DestinationDownloadOptions{ + OnDownloadingProgress: func(progress *downloader.DownloadingProgress) { + if progress.Downloaded < lastDownloaded { + t.Fatalf("unexpected downloaded progress") + } + lastDownloaded = progress.Downloaded + if progress.TotalSize != 1024*1024 { + t.Fatalf("unexpected downloaded progress") + } + }, + }) + if err != nil { + t.Fatal(err) + } + if n != 1024*1024 { + t.Fatalf("unexpected downloaded size") + } + if lastDownloaded != 1024*1024 { + 
t.Fatalf("unexpected downloaded progress") + } + if counts[0] != 10 { + t.Fatalf("unexpected called count") + } + if counts[1] != 10 { + t.Fatalf("unexpected called count") + } + if counts[2] != 2 { + t.Fatalf("unexpected called count") + } + serverMD5 := hasher.Sum(nil) + clientMD5 := md5.Sum(buf.Bytes()) + if !bytes.Equal(serverMD5, clientMD5[:]) { + t.Fatalf("unexpected hash") + } +} + +func TestConcurrentDownloaderWithCompression(t *testing.T) { + var counts uint64 + hasher := md5.New() + handler := func(id int, w http.ResponseWriter, r *http.Request) { + counts += 1 + switch id { + case 1: + w.Header().Set("Accept-Ranges", "bytes") + switch r.Method { + case http.MethodHead: + if r.Header.Get("Accept-Encoding") != "" { + t.Fatalf("unexpected accept-encoding") + } + w.Header().Set("Etag", "\"testetag1\"") + case http.MethodGet: + if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { + t.Fatalf("unexpected accept-encoding") + } + w.Header().Set("Etag", "\"testetag1.gz\"") + w.Header().Set("Content-Encoding", "gzip") + var ( + r = io.TeeReader(io.LimitReader(rand.New(rand.NewSource(time.Now().UnixNano())), 1024*1024), hasher) + err error + ) + gw := gzip.NewWriter(w) + if _, err = io.Copy(gw, r); err != nil { + t.Fatal(err) + } + if err = gw.Close(); err != nil { + t.Fatal(err) + } + default: + w.WriteHeader(http.StatusMethodNotAllowed) + } + default: + w.WriteHeader(http.StatusPaymentRequired) + } + } + server1 := newTestServer(1, handler) + defer server1.Close() + url1, err := url.Parse(server1.URL) + if err != nil { + t.Fatal(err) + } + url1.Path = "/testfile" + d := downloader.NewConcurrentDownloader(&downloader.ConcurrentDownloaderOptions{ + Concurrency: 1, + PartSize: 4 * 1024 * 1024, + DownloaderOptions: downloader.DownloaderOptions{ + Backoff: backoff.NewFixedBackoff(0), + Resolver: resolver.NewDefaultResolver(), + Chooser: chooser.NewDirectChooser(), + }, + }) + var ( + buf closableBuffer + lastDownloaded uint64 + ) + n, err := d.Download( + context.Background(), + downloader.NewURLsIter([]*url.URL{url1}), + destination.NewWriteCloserDestination(&buf, ""), &downloader.DestinationDownloadOptions{ + OnDownloadingProgress: func(progress *downloader.DownloadingProgress) { + if progress.Downloaded < lastDownloaded { + t.Fatalf("unexpected downloaded progress") + } + lastDownloaded = progress.Downloaded + if progress.TotalSize != 0 { + t.Fatalf("unexpected downloaded progress") + } + }, + }) + if err != nil { + t.Fatal(err) + } + if n != 1024*1024 { + t.Fatalf("unexpected downloaded size") + } + if lastDownloaded != 1024*1024 { + t.Fatalf("unexpected downloaded progress") + } + if counts != 2 { + t.Fatalf("unexpected called count") + } + serverMD5 := hasher.Sum(nil) + clientMD5 := md5.Sum(buf.Bytes()) + if !bytes.Equal(serverMD5, clientMD5[:]) { + t.Fatalf("unexpected hash") + } +} + +func TestConcurrentDownloaderWithMultipleParts(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + srcFile, err := ioutil.TempFile(tmpDir, "testfile") + if err != nil { + t.Fatal(err) + } + defer srcFile.Close() + + dstFile, err := ioutil.TempFile(tmpDir, "testfile2") + if err != nil { + t.Fatal(err) + } + defer dstFile.Close() + + const SIZE = 1024 * 1024 * 127 + if _, err = io.CopyN(srcFile, rand.New(rand.NewSource(time.Now().UnixNano())), SIZE); err != nil { + t.Fatal(err) + } + + mux := http.NewServeMux() + handler := http.FileServer(http.Dir(tmpDir)) + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) 
{ + w.Header().Set("Etag", "testetag1") + handler.ServeHTTP(w, r) + }) + server1 := httptest.NewServer(mux) + defer server1.Close() + url1, err := url.Parse(server1.URL) + if err != nil { + t.Fatal(err) + } + url1.Path = "/" + filepath.Base(srcFile.Name()) + d := downloader.NewConcurrentDownloader(&downloader.ConcurrentDownloaderOptions{ + Concurrency: 16, + PartSize: 4 * 1024 * 1024, + DownloaderOptions: downloader.DownloaderOptions{ + Backoff: backoff.NewFixedBackoff(0), + Resolver: resolver.NewDefaultResolver(), + Chooser: chooser.NewDirectChooser(), + }, + }) + dest, err := destination.NewFileDestination(dstFile.Name()) + if err != nil { + t.Fatal(err) + } + var lastDownloaded uint64 + n, err := d.Download( + context.Background(), + downloader.NewURLsIter([]*url.URL{url1}), + dest, + &downloader.DestinationDownloadOptions{ + OnDownloadingProgress: func(progress *downloader.DownloadingProgress) { + if progress.Downloaded < atomic.LoadUint64(&lastDownloaded) { + t.Fatalf("unexpected downloaded progress") + } + atomic.StoreUint64(&lastDownloaded, progress.Downloaded) + if progress.TotalSize != SIZE { + t.Fatalf("unexpected downloaded progress") + } + }, + }) + if err != nil { + t.Fatal(err) + } + if n != SIZE { + t.Fatalf("unexpected downloaded size") + } + if lastDownloaded != SIZE { + t.Fatalf("unexpected downloaded progress") + } + hasher := md5.New() + if _, err = srcFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + if _, err = io.Copy(hasher, srcFile); err != nil { + t.Fatal(err) + } + serverMD5 := hasher.Sum(nil) + hasher.Reset() + if _, err = dstFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + if _, err = io.Copy(hasher, dstFile); err != nil { + t.Fatal(err) + } + clientMD5 := hasher.Sum(nil) + if !bytes.Equal(serverMD5, clientMD5) { + t.Fatalf("unexpected hash") + } +} + +func TestConcurrentDownloaderWithMultiplePartsAndRange(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + srcFile, err := ioutil.TempFile(tmpDir, "testfile") + if err != nil { + t.Fatal(err) + } + defer srcFile.Close() + + dstFile, err := ioutil.TempFile(tmpDir, "testfile2") + if err != nil { + t.Fatal(err) + } + defer dstFile.Close() + + const SIZE = 1024 * 1024 * 127 + if _, err = io.CopyN(srcFile, rand.New(rand.NewSource(time.Now().UnixNano())), SIZE); err != nil { + t.Fatal(err) + } + const REQUEST_SIZE = 1024 * 1024 * 126 + + mux := http.NewServeMux() + handler := http.FileServer(http.Dir(tmpDir)) + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Etag", "testetag1") + handler.ServeHTTP(w, r) + }) + server1 := httptest.NewServer(mux) + defer server1.Close() + url1, err := url.Parse(server1.URL) + if err != nil { + t.Fatal(err) + } + url1.Path = "/" + filepath.Base(srcFile.Name()) + d := downloader.NewConcurrentDownloader(&downloader.ConcurrentDownloaderOptions{ + Concurrency: 16, + PartSize: 4 * 1024 * 1024, + DownloaderOptions: downloader.DownloaderOptions{ + Backoff: backoff.NewFixedBackoff(0), + Resolver: resolver.NewDefaultResolver(), + Chooser: chooser.NewDirectChooser(), + }, + }) + dest, err := destination.NewFileDestination(dstFile.Name()) + if err != nil { + t.Fatal(err) + } + var lastDownloaded uint64 + n, err := d.Download( + context.Background(), + downloader.NewURLsIter([]*url.URL{url1}), + dest, + &downloader.DestinationDownloadOptions{ + Header: http.Header{"Range": []string{fmt.Sprintf("bytes=%d-", SIZE-REQUEST_SIZE)}}, + OnDownloadingProgress: func(progress 
*downloader.DownloadingProgress) { + if progress.Downloaded < atomic.LoadUint64(&lastDownloaded) { + t.Fatalf("unexpected downloaded progress") + } + atomic.StoreUint64(&lastDownloaded, progress.Downloaded) + if progress.TotalSize != REQUEST_SIZE { + t.Fatalf("unexpected downloaded progress") + } + }, + }) + if err != nil { + t.Fatal(err) + } + if n != REQUEST_SIZE { + t.Fatalf("unexpected downloaded size") + } + if lastDownloaded != REQUEST_SIZE { + t.Fatalf("unexpected downloaded progress") + } + hasher := md5.New() + if _, err = srcFile.Seek(SIZE-REQUEST_SIZE, io.SeekStart); err != nil { + t.Fatal(err) + } + if _, err = io.Copy(hasher, srcFile); err != nil { + t.Fatal(err) + } + serverMD5 := hasher.Sum(nil) + hasher.Reset() + if _, err = dstFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + if _, err = io.Copy(hasher, dstFile); err != nil { + t.Fatal(err) + } + clientMD5 := hasher.Sum(nil) + if !bytes.Equal(serverMD5, clientMD5) { + t.Fatalf("unexpected hash") + } +} + +func TestConcurrentDownloaderWithResumableRecorder(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + srcFile, err := ioutil.TempFile(tmpDir, "srcFile") + if err != nil { + t.Fatal(err) + } + defer srcFile.Close() + + dstFile, err := ioutil.TempFile(tmpDir, "dstFile") + if err != nil { + t.Fatal(err) + } + defer dstFile.Close() + + if _, err = io.CopyN(srcFile, rand.New(rand.NewSource(time.Now().UnixNano())), 10*1024*1024); err != nil { + t.Fatal(err) + } + + ranges := make(map[uint64]uint64) + var rangesMutex sync.Mutex + handler := func(id int, w http.ResponseWriter, r *http.Request) { + switch id { + case 1: + w.Header().Set("Accept-Ranges", "bytes") + switch r.Method { + case http.MethodHead: + w.Header().Set("Etag", "testetag1") + w.Header().Set("Content-Length", strconv.Itoa(10*1024*1024)) + case http.MethodGet: + w.Header().Set("Etag", "testetag1") + + var fromOffset, toOffset uint64 + if _, err = fmt.Sscanf(r.Header.Get("Range"), "bytes=%d-%d", &fromOffset, &toOffset); err != nil { + t.Fatal(err) + } + rangesMutex.Lock() + ranges[fromOffset] = toOffset - fromOffset + 1 + rangesMutex.Unlock() + + w.Header().Set("Content-Length", strconv.FormatUint(toOffset-fromOffset+1, 10)) + w.WriteHeader(http.StatusPartialContent) + if _, err = io.Copy(w, io.NewSectionReader(srcFile, int64(fromOffset), int64(toOffset-fromOffset+1))); err != nil { + t.Fatal(err) + } + } + default: + w.WriteHeader(http.StatusPaymentRequired) + } + } + server1 := newTestServer(1, handler) + defer server1.Close() + url1, err := url.Parse(server1.URL) + if err != nil { + t.Fatal(err) + } + url1.Path = "/testfile" + + resumableRecorder := resumablerecorder.NewJsonFileSystemResumableRecorder(tmpDir) + options := resumablerecorder.ResumableRecorderOpenArgs{ + ETag: "testetag1", + DestinationID: dstFile.Name(), + PartSize: 1024 * 1024, + TotalSize: 10 * 1024 * 1024, + } + writableMedium := resumableRecorder.OpenForCreatingNew(&options) + defer writableMedium.Close() + + if err = writableMedium.Write(&resumablerecorder.ResumableRecord{ + Offset: 0, + PartSize: 1024 * 1024, + PartWritten: 1024, + }); err != nil { + t.Fatal(err) + } + if err = writableMedium.Write(&resumablerecorder.ResumableRecord{ + Offset: 1024 * 1024, + PartSize: 1024 * 1024, + PartWritten: 1024 * 1024, + }); err != nil { + t.Fatal(err) + } + if err = writableMedium.Close(); err != nil { + t.Fatal(err) + } + + d := downloader.NewConcurrentDownloader(&downloader.ConcurrentDownloaderOptions{ + 
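// NOTE: PartSize must equal the PartSize of the records written above; the JSON recorder derives its file name from (DestinationID, ETag, Offset, PartSize, TotalSize), so a mismatch means no record is found and no resume happens +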
Concurrency: 10, + PartSize: 1024 * 1024, + ResumableRecorder: resumableRecorder, + DownloaderOptions: downloader.DownloaderOptions{ + Backoff: backoff.NewFixedBackoff(0), + Resolver: resolver.NewDefaultResolver(), + Chooser: chooser.NewDirectChooser(), + }, + }) + dest, err := destination.NewFileDestination(dstFile.Name()) + if err != nil { + t.Fatal(err) + } + var lastDownloaded uint64 + n, err := d.Download( + context.Background(), + downloader.NewURLsIter([]*url.URL{url1}), + dest, + &downloader.DestinationDownloadOptions{ + OnDownloadingProgress: func(progress *downloader.DownloadingProgress) { + if progress.Downloaded < lastDownloaded { + t.Fatalf("unexpected downloaded progress") + } + lastDownloaded = progress.Downloaded + if progress.TotalSize != 10*1024*1024 { + t.Fatalf("unexpected downloaded progress") + } + }, + }) + if err != nil { + t.Fatal(err) + } + if n != 10*1024*1024 { + t.Fatal(err) + } + if lastDownloaded != 10*1024*1024 { + t.Fatalf("unexpected downloaded progress") + } + if len(ranges) != 9 { + t.Fatalf("unexpected ranges") + } + if ranges[1024] != 1024*1023 { + t.Fatalf("unexpected ranges") + } + if ranges[1024*1024] != 0 { + t.Fatalf("unexpected ranges") + } + + readableMedium := resumableRecorder.OpenForReading(&options) + if readableMedium != nil { + t.Fatalf("medium is not expected to be found") + } +} + +func newTestServer(id int, handler func(int, http.ResponseWriter, *http.Request)) *httptest.Server { + mux := http.NewServeMux() + mux.HandleFunc("/testfile", func(w http.ResponseWriter, r *http.Request) { + handler(id, w, r) + }) + return httptest.NewServer(mux) +} + +type closableBuffer struct { + bytes.Buffer +} + +func (w closableBuffer) Close() error { + return nil +} diff --git a/storagev2/downloader/interceptor.go b/storagev2/downloader/interceptor.go new file mode 100644 index 00000000..1e2da753 --- /dev/null +++ b/storagev2/downloader/interceptor.go @@ -0,0 +1,26 @@ +package downloader + +import ( + "net/http" + + "github.com/qiniu/go-sdk/v7/internal/clientv2" +) + +type ( + retryWhenTokenOutOfDateInterceptor struct{} + urlsIterContextKey struct{} +) + +func (interceptor retryWhenTokenOutOfDateInterceptor) Priority() clientv2.InterceptorPriority { + return clientv2.InterceptorPriorityAuth +} + +func (interceptor retryWhenTokenOutOfDateInterceptor) Intercept(req *http.Request, handler clientv2.Handler) (resp *http.Response, err error) { + if urlsIter, ok := req.Context().Value(urlsIterContextKey{}).(URLsIter); ok { + if _, err = urlsIter.Peek(req.URL); err != nil { + return + } + } + resp, err = handler(req) + return +} diff --git a/storagev2/downloader/interfaces.go b/storagev2/downloader/interfaces.go new file mode 100644 index 00000000..205a109e --- /dev/null +++ b/storagev2/downloader/interfaces.go @@ -0,0 +1,73 @@ +package downloader + +import ( + "context" + "net/http" + "net/url" + "time" + + "github.com/qiniu/go-sdk/v7/storagev2/downloader/destination" +) + +type ( + // 获取 URL 迭代器 + URLsIter interface { + // 获取首个 URL + Peek(*url.URL) (bool, error) + // 切换到下一个 URL + Next() + // 重置迭代器 + Reset() + // 复制迭代器 + Clone() URLsIter + } + + // 获取对象下载 URL 接口 + DownloadURLsProvider interface { + GetURLsIter(context.Context, string, *GenerateOptions) (URLsIter, error) + } + + // 对下载 URL 签名 + Signer interface { + Sign(context.Context, *url.URL, *SignOptions) error + } + + // 下载进度 + DownloadingProgress struct { + Downloaded uint64 // 已经下载的数据量,单位为字节 + TotalSize uint64 // 总数据量,单位为字节 + } + + // 目标下载选项 + DestinationDownloadOptions struct { + // 对象下载附加 HTTP Header 
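+ // (optional; e.g. a caller may set a Range header here to fetch only a
+ // byte range of the object, as TestConcurrentDownloaderWithMultiplePartsAndRange above exercises)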
+ Header http.Header + // 对象下载进度 + OnDownloadingProgress func(*DownloadingProgress) + // 对象 Header 获取回调 + OnResponseHeader func(http.Header) + } + + // 目标下载器 + DestinationDownloader interface { + Download(context.Context, URLsIter, destination.Destination, *DestinationDownloadOptions) (uint64, error) + } + + // 对象下载 URL 生成选项 + GenerateOptions struct { + // 空间名称,可选 + BucketName string + + // 文件处理命令,可选 + Command string + + // 是否使用 HTTP 协议,默认为不使用 + UseInsecureProtocol bool + } + + // 对象签名选项 + SignOptions struct { + // 签名有效期,如果不填写,默认为 3 分钟 + TTL time.Duration + } +) diff --git a/storagev2/downloader/resumable_recorder/dummy.go b/storagev2/downloader/resumable_recorder/dummy.go new file mode 100644 index 00000000..1fef430f --- /dev/null +++ b/storagev2/downloader/resumable_recorder/dummy.go @@ -0,0 +1,30 @@ +package resumablerecorder + +import "time" + +type dummyResumableRecorder struct{} + +// 创建假的可恢复记录仪 +func NewDummyResumableRecorder() ResumableRecorder { + return dummyResumableRecorder{} +} + +func (dummyResumableRecorder) OpenForReading(*ResumableRecorderOpenArgs) ReadableResumableRecorderMedium { + return nil +} + +func (dummyResumableRecorder) OpenForAppending(*ResumableRecorderOpenArgs) WriteableResumableRecorderMedium { + return nil +} + +func (dummyResumableRecorder) OpenForCreatingNew(*ResumableRecorderOpenArgs) WriteableResumableRecorderMedium { + return nil +} + +func (dummyResumableRecorder) Delete(*ResumableRecorderOpenArgs) error { + return nil +} + +func (dummyResumableRecorder) ClearOutdated(createdBefore time.Duration) error { + return nil +} diff --git a/storagev2/downloader/resumable_recorder/json_file_system.go b/storagev2/downloader/resumable_recorder/json_file_system.go new file mode 100644 index 00000000..6eb543e3 --- /dev/null +++ b/storagev2/downloader/resumable_recorder/json_file_system.go @@ -0,0 +1,243 @@ +package resumablerecorder + +import ( + "crypto/sha1" + "encoding/binary" + "encoding/hex" + "encoding/json" + "errors" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "time" + + "github.com/gofrs/flock" + "modernc.org/fileutil" +) + +type ( + jsonFileSystemResumableRecorder struct { + dirPath string + } + jsonFileSystemResumableRecorderReadableMedium struct { + file *os.File + decoder *json.Decoder + } + jsonFileSystemResumableRecorderWritableMedium struct { + file *os.File + encoder *json.Encoder + } +) + +const jsonFileSystemResumableRecorderLock = "json_file_system_resumable_recorder_01.lock" + +// 创建记录文件系统的可恢复记录仪 +func NewJsonFileSystemResumableRecorder(dirPath string) ResumableRecorder { + return jsonFileSystemResumableRecorder{dirPath} +} + +func (frr jsonFileSystemResumableRecorder) OpenForReading(options *ResumableRecorderOpenArgs) ReadableResumableRecorderMedium { + if options == nil { + options = &ResumableRecorderOpenArgs{} + } + if options.DestinationID == "" { + return nil + } + + err := os.MkdirAll(frr.dirPath, 0700) + if err != nil { + return nil + } + file, err := os.Open(frr.getFilePath(options)) + if err != nil { + return nil + } + _ = fileutil.Fadvise(file, 0, 0, fileutil.POSIX_FADV_SEQUENTIAL) + decoder := json.NewDecoder(file) + if verified, err := jsonFileSystemResumableRecorderVerifyHeaderLine(decoder, options); err != nil || !verified { + return nil + } + return jsonFileSystemResumableRecorderReadableMedium{file, decoder} +} + +func (frr jsonFileSystemResumableRecorder) OpenForAppending(options *ResumableRecorderOpenArgs) WriteableResumableRecorderMedium { + if options == nil { + options = &ResumableRecorderOpenArgs{} + } + if 
options.DestinationID == "" { + return nil + } + + file, err := os.OpenFile(frr.getFilePath(options), os.O_WRONLY|os.O_APPEND, 0600) + if err != nil { + return nil + } + return jsonFileSystemResumableRecorderWritableMedium{file, json.NewEncoder(file)} +} + +func (frr jsonFileSystemResumableRecorder) OpenForCreatingNew(options *ResumableRecorderOpenArgs) WriteableResumableRecorderMedium { + if options == nil { + options = &ResumableRecorderOpenArgs{} + } + if options.DestinationID == "" { + return nil + } + + file, err := os.OpenFile(frr.getFilePath(options), os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return nil + } + encoder := json.NewEncoder(file) + if err := jsonFileSystemResumableRecorderWriteHeaderLine(encoder, options); err != nil { + return nil + } + return jsonFileSystemResumableRecorderWritableMedium{file, encoder} +} + +func (frr jsonFileSystemResumableRecorder) Delete(options *ResumableRecorderOpenArgs) error { + return os.Remove(frr.getFilePath(options)) +} + +func (frr jsonFileSystemResumableRecorder) ClearOutdated(createdBefore time.Duration) error { + jsonFileSystemResumableRecorderLockFilePath := filepath.Join(frr.dirPath, jsonFileSystemResumableRecorderLock) + lock := flock.New(jsonFileSystemResumableRecorderLockFilePath) + locked, err := lock.TryLock() + if err != nil { + return err + } else if !locked { + return nil + } + defer lock.Unlock() + + fileInfos, err := ioutil.ReadDir(frr.dirPath) + if err != nil { + return err + } + for _, fileInfo := range fileInfos { + if !fileInfo.Mode().IsRegular() { + continue + } + if fileInfo.Name() == jsonFileSystemResumableRecorderLock { + continue + } + filePath := filepath.Join(frr.dirPath, fileInfo.Name()) + if err = frr.tryToClearPath(createdBefore, filePath); err != nil { + os.Remove(filePath) + } + } + return nil +} + +func (frr jsonFileSystemResumableRecorder) tryToClearPath(createdBefore time.Duration, filePath string) error { + file, err := os.Open(filePath) + if err != nil { + return err + } + defer file.Close() + + decoder := json.NewDecoder(file) + var lineOptions jsonBasedResumableRecorderOpenArgs + if err = decoder.Decode(&lineOptions); err != nil { + return nil + } + if lineOptions.Version != fileSystemResumableRecorderVersion { + return nil + } + if time.Now().Before(time.Unix(lineOptions.CreatedAt, 0).Add(createdBefore)) { + return nil + } + return errors.New("resumable recorder is expired") +} + +func (frr jsonFileSystemResumableRecorder) fileName(options *ResumableRecorderOpenArgs) string { + hasher := sha1.New() + hasher.Write([]byte(options.DestinationID)) + hasher.Write([]byte{0}) + hasher.Write([]byte(options.ETag)) + hasher.Write([]byte{0}) + hasher.Write([]byte{0}) + binary.Write(hasher, binary.LittleEndian, options.Offset) + binary.Write(hasher, binary.LittleEndian, options.PartSize) + binary.Write(hasher, binary.LittleEndian, options.TotalSize) + return hex.EncodeToString(hasher.Sum(nil)) +} + +func (frr jsonFileSystemResumableRecorder) getFilePath(options *ResumableRecorderOpenArgs) string { + return filepath.Join(frr.dirPath, frr.fileName(options)) +} + +type ( + jsonBasedResumableRecorderOpenArgs struct { + ETag string `json:"e,omitempty"` + DestinationID string `json:"d,omitempty"` + PartSize uint64 `json:"p,omitempty"` + TotalSize uint64 `json:"t,omitempty"` + Offset uint64 `json:"o,omitempty"` + CreatedAt int64 `json:"c,omitempty"` + Version uint32 `json:"v,omitempty"` + } + + jsonBasedResumableRecord struct { + Offset uint64 `json:"o,omitempty"` + PartSize uint64 
`json:"s,omitempty"` + PartWritten uint64 `json:"w,omitempty"` + } +) + +const fileSystemResumableRecorderVersion uint32 = 1 + +func jsonFileSystemResumableRecorderWriteHeaderLine(encoder *json.Encoder, options *ResumableRecorderOpenArgs) error { + return encoder.Encode(&jsonBasedResumableRecorderOpenArgs{ + ETag: options.ETag, + DestinationID: options.DestinationID, + PartSize: options.PartSize, + TotalSize: options.TotalSize, + Offset: options.Offset, + CreatedAt: time.Now().Unix(), + Version: fileSystemResumableRecorderVersion, + }) +} + +func jsonFileSystemResumableRecorderVerifyHeaderLine(decoder *json.Decoder, options *ResumableRecorderOpenArgs) (bool, error) { + var lineOptions jsonBasedResumableRecorderOpenArgs + err := decoder.Decode(&lineOptions) + if err != nil { + return false, err + } + return reflect.DeepEqual(lineOptions, jsonBasedResumableRecorderOpenArgs{ + ETag: options.ETag, + DestinationID: options.DestinationID, + PartSize: options.PartSize, + TotalSize: options.TotalSize, + Offset: options.Offset, + CreatedAt: lineOptions.CreatedAt, + Version: fileSystemResumableRecorderVersion, + }), nil +} + +func (medium jsonFileSystemResumableRecorderReadableMedium) Next(rr *ResumableRecord) error { + var jrr jsonBasedResumableRecord + for { + if err := medium.decoder.Decode(&jrr); err != nil { + return err + } else { + break + } + } + + *rr = ResumableRecord(jrr) + return nil +} + +func (medium jsonFileSystemResumableRecorderReadableMedium) Close() error { + return medium.file.Close() +} + +func (medium jsonFileSystemResumableRecorderWritableMedium) Write(rr *ResumableRecord) error { + return medium.encoder.Encode(jsonBasedResumableRecord(*rr)) +} + +func (medium jsonFileSystemResumableRecorderWritableMedium) Close() error { + return medium.file.Close() +} diff --git a/storagev2/downloader/resumable_recorder/json_file_system_test.go b/storagev2/downloader/resumable_recorder/json_file_system_test.go new file mode 100644 index 00000000..001a9b8a --- /dev/null +++ b/storagev2/downloader/resumable_recorder/json_file_system_test.go @@ -0,0 +1,105 @@ +//go:build unit +// +build unit + +package resumablerecorder_test + +import ( + "io/ioutil" + "os" + "testing" + "time" + + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/downloader/resumable_recorder" +) + +func TestJsonFileSystemResumableRecorder(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + options := resumablerecorder.ResumableRecorderOpenArgs{ + ETag: "testetag1", + DestinationID: "/tmp/fakeFile", + PartSize: 16 * 1024 * 1024, + TotalSize: 100 * 1024 * 1024, + } + fs := resumablerecorder.NewJsonFileSystemResumableRecorder(tmpDir) + writableMedium := fs.OpenForCreatingNew(&options) + for i := uint64(0); i < 3; i++ { + if err = writableMedium.Write(&resumablerecorder.ResumableRecord{ + Offset: i * 16 * 1024 * 1024, + PartSize: 16 * 1024 * 1024, + PartWritten: 16 * 1024 * 1024, + }); err != nil { + t.Fatal(err) + } + } + if err = writableMedium.Close(); err != nil { + t.Fatal(err) + } + writableMedium = fs.OpenForAppending(&options) + if err = writableMedium.Write(&resumablerecorder.ResumableRecord{ + Offset: 3 * 16 * 1024 * 1024, + PartSize: 16 * 1024 * 1024, + PartWritten: 16 * 1024 * 1024, + }); err != nil { + t.Fatal(err) + } + if err = writableMedium.Close(); err != nil { + t.Fatal(err) + } + + options2 := options + options2.ETag = "testetag2" + writableMedium = fs.OpenForCreatingNew(&options2) + for i := uint64(0); i < 4; i++ { + if err = 
writableMedium.Write(&resumablerecorder.ResumableRecord{ + Offset: i * 16 * 1024 * 1024, + PartSize: 16 * 1024 * 1024, + PartWritten: 8 * 1024 * 1024, + }); err != nil { + t.Fatal(err) + } + } + if err = writableMedium.Close(); err != nil { + t.Fatal(err) + } + + readableMedium := fs.OpenForReading(&options) + for i := uint64(0); i < 4; i++ { + var rr resumablerecorder.ResumableRecord + + if err = readableMedium.Next(&rr); err != nil { + t.Fatal(err) + } + + if rr.Offset != i*16*1024*1024 { + t.Fatalf("unexpected offset: %d", rr.Offset) + } + if rr.PartSize != 16*1024*1024 { + t.Fatalf("unexpected partSize: %d", rr.PartSize) + } + if rr.PartWritten != 16*1024*1024 { + t.Fatalf("unexpected partWritten: %d", rr.PartWritten) + } + } + if err = readableMedium.Close(); err != nil { + t.Fatal(err) + } + + time.Sleep(11 * time.Second) + if err = fs.ClearOutdated(10 * time.Second); err != nil { + t.Fatal(err) + } + + readableMedium = fs.OpenForReading(&options) + if readableMedium != nil { + t.Fatalf("unexpected readable medium") + } + + readableMedium = fs.OpenForReading(&options2) + if readableMedium != nil { + t.Fatalf("unexpected readable medium") + } +} diff --git a/storagev2/downloader/resumable_recorder/resumable_recorder.go b/storagev2/downloader/resumable_recorder/resumable_recorder.go new file mode 100644 index 00000000..54b2e45c --- /dev/null +++ b/storagev2/downloader/resumable_recorder/resumable_recorder.go @@ -0,0 +1,72 @@ +package resumablerecorder + +import ( + "io" + "time" +) + +type ( + // 可恢复记录仪选项 + ResumableRecorderOpenArgs struct { + // 数据源 ETag + ETag string + + // 数据目标 ID + DestinationID string + + // 分片大小 + PartSize uint64 + + // 数据源大小 + TotalSize uint64 + + // 数据源偏移量 + Offset uint64 + } + + // 可恢复记录仪接口 + ResumableRecorder interface { + // 打开记录仪介质以读取记录 + OpenForReading(*ResumableRecorderOpenArgs) ReadableResumableRecorderMedium + + // 打开记录仪介质以追加记录 + OpenForAppending(*ResumableRecorderOpenArgs) WriteableResumableRecorderMedium + + // 新建记录仪介质以追加记录 + OpenForCreatingNew(*ResumableRecorderOpenArgs) WriteableResumableRecorderMedium + + // 删除记录仪介质 + Delete(*ResumableRecorderOpenArgs) error + + // 清理过期的记录仪介质 + ClearOutdated(createdBefore time.Duration) error + } + + // 只读的可恢复记录仪介质接口 + ReadableResumableRecorderMedium interface { + io.Closer + + // 读取下一条记录 + Next(*ResumableRecord) error + } + + // 只追加的可恢复记录仪介质接口 + WriteableResumableRecorderMedium interface { + io.Closer + + // 写入下一条记录 + Write(*ResumableRecord) error + } + + // 可恢复记录 + ResumableRecord struct { + // 分片偏移量 + Offset uint64 + + // 分片大小 + PartSize uint64 + + // 分片写入量 + PartWritten uint64 + } +) diff --git a/storagev2/downloader/signers.go b/storagev2/downloader/signers.go new file mode 100644 index 00000000..35c7f350 --- /dev/null +++ b/storagev2/downloader/signers.go @@ -0,0 +1,62 @@ +package downloader + +import ( + "context" + "fmt" + "net/url" + "strings" + "time" + + "github.com/qiniu/go-sdk/v7/storagev2/credentials" +) + +type credentialsSigner struct { + credentials credentials.CredentialsProvider +} + +// 创建基于七牛鉴权的下载 URL 签名 +func NewCredentialsSigner(credentials credentials.CredentialsProvider) Signer { + return &credentialsSigner{credentials} +} + +func (signer credentialsSigner) Sign(ctx context.Context, u *url.URL, options *SignOptions) error { + if options == nil { + options = &SignOptions{} + } + ttl := options.TTL + if ttl == 0 { + ttl = 3 * time.Minute + } + + cred, err := signer.credentials.Get(ctx) + if err != nil { + return err + } + u.RawQuery += signURL(u.String(), cred, time.Now().Add(ttl).Unix()) + 
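// signURL returns the query fragment to append: "e=<deadline>&token=<signature>", prefixed with "&" when the URL already carries a query string, or "" if the URL is already signed +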
return nil +} + +func signURL(url string, cred *credentials.Credentials, deadline int64) string { + var appendUrl string + + if isURLSigned(url) { + return "" + } + + urlToSign := url + if strings.Contains(url, "?") { + appendUrl = fmt.Sprintf("&e=%d", deadline) + urlToSign += appendUrl + } else { + appendUrl = fmt.Sprintf("e=%d", deadline) + urlToSign += "?" + urlToSign += appendUrl + } + token := cred.Sign([]byte(urlToSign)) + return fmt.Sprintf("%s&token=%s", appendUrl, token) +} + +func isURLSigned(url string) bool { + return (strings.Contains(url, "&e=") || strings.Contains(url, "?e=")) && + strings.Contains(url, "&token=") +} diff --git a/storagev2/downloader/urls_provider.go b/storagev2/downloader/urls_provider.go new file mode 100644 index 00000000..60ff7e11 --- /dev/null +++ b/storagev2/downloader/urls_provider.go @@ -0,0 +1,477 @@ +package downloader + +import ( + "context" + "fmt" + "hash/crc64" + "net/url" + "os" + "path/filepath" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/qiniu/go-sdk/v7/internal/cache" + "github.com/qiniu/go-sdk/v7/internal/log" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/errors" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" +) + +type ( + simpleURLsIter struct { + urls, used []*url.URL + } + + signedURLsIter struct { + ctx context.Context + urlsIter URLsIter + signer Signer + options *SignOptions + cache *cache.Cache + } + + staticDomainBasedURLsProvider struct { + domains []string + } + + defaultSrcURLsProvider struct { + accessKey string + query region.BucketRegionsQuery + queryOnce sync.Once + bucketHosts region.Endpoints + options *DefaultSrcURLsProviderOptions + } + + domainsQueryURLsProvider struct { + storage *apis.Storage + cache *cache.Cache + credentials credentials.CredentialsProvider + cacheTTL time.Duration + } + + combinedDownloadURLsProviders struct { + providers []DownloadURLsProvider + } + + combinedURLsIter struct { + iters, used []URLsIter + } + + signedDownloadURLsProviders struct { + provider DownloadURLsProvider + signer Signer + options *SignOptions + } + + // 默认源站域名下载 URL 生成器选项 + DefaultSrcURLsProviderOptions struct { + region.BucketRegionsQueryOptions + + // Bucket 服务器地址 + BucketHosts region.Endpoints + } + + // 基于域名查询的下载 URL 生成器选项 + DomainsQueryURLsProviderOptions struct { + http_client.Options + + // 压缩周期(默认:60s) + CompactInterval time.Duration + + // 持久化路径(默认:$TMPDIR/qiniu-golang-sdk/domain_v2_01.cache.json) + PersistentFilePath string + + // 持久化周期(默认:60s) + PersistentDuration time.Duration + + // 缓存有效周期(默认:3600s) + CacheTTL time.Duration + } + + domainCacheValue struct { + Domains []string `json:"domains"` + ExpiredAt time.Time `json:"expired_at"` + } + + signingCacheValue struct { + url *url.URL + expiredAt time.Time + } +) + +// 将 URL 列表转换为迭代器 +func NewURLsIter(urls []*url.URL) URLsIter { + return &simpleURLsIter{urls: urls, used: make([]*url.URL, 0, len(urls))} +} + +func (s *simpleURLsIter) Peek(u *url.URL) (bool, error) { + if len(s.urls) > 0 { + *u = *s.urls[0] + return true, nil + } + return false, nil +} + +func (s *simpleURLsIter) Next() { + if len(s.urls) > 0 { + s.used = append(s.used, s.urls[0]) + s.urls = s.urls[1:] + } +} + +func (s *simpleURLsIter) Reset() { + s.urls = append(s.used, s.urls...) 
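+ // prepend the already-consumed URLs, restoring the original iteration order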
+ s.used = make([]*url.URL, 0, cap(s.urls)) +} + +func (s *simpleURLsIter) Clone() URLsIter { + return &simpleURLsIter{ + urls: append(make([]*url.URL, 0, cap(s.urls)), s.urls...), + used: append(make([]*url.URL, 0, cap(s.used)), s.used...), + } +} + +// 为 URL 列表签名 +func SignURLs(ctx context.Context, urlsIter URLsIter, signer Signer, options *SignOptions) URLsIter { + return &signedURLsIter{ctx: ctx, urlsIter: urlsIter, signer: signer, options: options, cache: cache.NewCache(1 * time.Second)} +} + +func (s *signedURLsIter) Peek(u *url.URL) (bool, error) { + var unsignedURL url.URL + if ok, err := s.urlsIter.Peek(&unsignedURL); err != nil { + return ok, err + } else if ok { + var err error + cacheValue, status := s.cache.Get(unsignedURL.String(), func() (cache.CacheValue, error) { + signedURL := unsignedURL + if err = s.signer.Sign(s.ctx, &signedURL, s.options); err != nil { + return nil, err + } + return signingCacheValue{&signedURL, time.Now().Add(1 * time.Second)}, nil + }) + if status == cache.NoResultGot { + return false, err + } + *u = *cacheValue.(signingCacheValue).url + return true, nil + } + return false, nil +} + +func (s *signedURLsIter) Next() { + s.urlsIter.Next() +} + +func (s *signedURLsIter) Reset() { + s.urlsIter.Reset() +} + +func (s *signedURLsIter) Clone() URLsIter { + return &signedURLsIter{ + ctx: s.ctx, + urlsIter: s.urlsIter.Clone(), + signer: s.signer, + options: s.options, + cache: s.cache, + } +} + +func (scv signingCacheValue) IsEqual(cv cache.CacheValue) bool { + return scv.url.String() == cv.(signingCacheValue).url.String() +} + +func (scv signingCacheValue) IsValid() bool { + return scv.expiredAt.After(time.Now()) +} + +// 创建静态域名下载 URL 生成器 +func NewStaticDomainBasedURLsProvider(domains []string) DownloadURLsProvider { + return &staticDomainBasedURLsProvider{domains} +} + +func (g *staticDomainBasedURLsProvider) GetURLsIter(_ context.Context, objectName string, options *GenerateOptions) (URLsIter, error) { + if options == nil { + options = &GenerateOptions{} + } + urls := make([]*url.URL, 0, len(g.domains)) + for _, domain := range g.domains { + if !strings.Contains(domain, "://") { + if options.UseInsecureProtocol { + domain = "http://" + domain + } else { + domain = "https://" + domain + } + } + u, err := url.Parse(domain) + if err != nil { + return nil, err + } + u.Path = "/" + objectName + u.RawPath = "" + u.RawQuery = options.Command + urls = append(urls, u) + } + return NewURLsIter(urls), nil +} + +// 创建默认源站域名下载 URL 生成器 +func NewDefaultSrcURLsProvider(accessKey string, options *DefaultSrcURLsProviderOptions) DownloadURLsProvider { + if options == nil { + options = &DefaultSrcURLsProviderOptions{} + } + bucketHosts := options.BucketHosts + if bucketHosts.IsEmpty() { + bucketHosts = http_client.DefaultBucketHosts() + } + return &defaultSrcURLsProvider{accessKey: accessKey, bucketHosts: bucketHosts, options: options} +} + +func (g *defaultSrcURLsProvider) GetURLsIter(ctx context.Context, objectName string, options *GenerateOptions) (URLsIter, error) { + if options == nil { + options = &GenerateOptions{} + } + if options.BucketName == "" { + return nil, errors.MissingRequiredFieldError{Name: "BucketName"} + } + + var err error + g.queryOnce.Do(func() { + g.query, err = region.NewBucketRegionsQuery(g.bucketHosts, &g.options.BucketRegionsQueryOptions) + }) + if err != nil { + return nil, err + } + + accessKey := g.accessKey + if accessKey == "" { + if defaultCreds := credentials.Default(); defaultCreds != nil { + accessKey = defaultCreds.AccessKey + } + } + + 
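// query the bucket's region, then serve its io_src domains (preferred first, then alternative) through the static domain provider +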
regions, err := g.query.Query(accessKey, options.BucketName).GetRegions(ctx) + if err != nil { + return nil, err + } + if len(regions) == 0 { + return nil, http_client.ErrNoRegion + } + region := regions[0] + ioSrcDomains := make([]string, 0, len(region.IoSrc.Preferred)+len(region.IoSrc.Alternative)) + ioSrcDomains = append(ioSrcDomains, region.IoSrc.Preferred...) + ioSrcDomains = append(ioSrcDomains, region.IoSrc.Alternative...) + return NewStaticDomainBasedURLsProvider(ioSrcDomains).GetURLsIter(ctx, objectName, options) +} + +const cacheFileName = "domain_v2_01.cache.json" + +var ( + persistentCaches map[uint64]*cache.Cache + persistentCachesLock sync.Mutex +) + +// 创建基于域名查询的下载 URL 生成器 +func NewDomainsQueryURLsProvider(options *DomainsQueryURLsProviderOptions) (DownloadURLsProvider, error) { + if options == nil { + options = &DomainsQueryURLsProviderOptions{} + } + creds := options.Credentials + if creds == nil { + if defaultCreds := credentials.Default(); defaultCreds != nil { + creds = defaultCreds + } + } + if creds == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + compactInterval := options.CompactInterval + if compactInterval == time.Duration(0) { + compactInterval = time.Minute + } + persistentFilePath := options.PersistentFilePath + if persistentFilePath == "" { + persistentFilePath = filepath.Join(os.TempDir(), "qiniu-golang-sdk", cacheFileName) + } + persistentDuration := options.PersistentDuration + if persistentDuration == time.Duration(0) { + persistentDuration = time.Minute + } + cacheTTL := options.CacheTTL + if cacheTTL == time.Duration(0) { + cacheTTL = time.Hour + } + persistentCache, err := getPersistentCache(persistentFilePath, compactInterval, persistentDuration) + if err != nil { + return nil, err + } + + storage := apis.NewStorage(&options.Options) + return &domainsQueryURLsProvider{storage, persistentCache, creds, cacheTTL}, nil +} + +func (g *domainsQueryURLsProvider) GetURLsIter(ctx context.Context, objectName string, options *GenerateOptions) (URLsIter, error) { + var ( + creds *credentials.Credentials + err error + ) + + if options == nil { + options = &GenerateOptions{} + } + if options.BucketName == "" { + return nil, errors.MissingRequiredFieldError{Name: "BucketName"} + } + if creds, err = g.credentials.Get(ctx); err != nil { + return nil, err + } + cacheKey := fmt.Sprintf("%s:%s", creds.AccessKey, options.BucketName) + + cacheValue, status := g.cache.Get(cacheKey, func() (cache.CacheValue, error) { + response, err := g.storage.GetBucketDomains(ctx, &apis.GetBucketDomainsRequest{BucketName: options.BucketName}, nil) + if err != nil { + return nil, err + } else { + return &domainCacheValue{Domains: response.Domains, ExpiredAt: time.Now().Add(g.cacheTTL)}, nil + } + }) + if status == cache.NoResultGot { + return nil, err + } + domains := cacheValue.(*domainCacheValue).Domains + return NewStaticDomainBasedURLsProvider(domains).GetURLsIter(ctx, objectName, options) +} + +func (left *domainCacheValue) IsEqual(rightValue cache.CacheValue) bool { + if right, ok := rightValue.(*domainCacheValue); ok { + if len(left.Domains) != len(right.Domains) { + return false + } + for idx := range left.Domains { + if left.Domains[idx] != right.Domains[idx] { + return false + } + } + return true + } + return false +} + +func (left *domainCacheValue) IsValid() bool { + return time.Now().Before(left.ExpiredAt) +} + +func getPersistentCache(persistentFilePath string, compactInterval, persistentDuration time.Duration) (*cache.Cache, error) { + var ( 
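+ // persistent caches are shared process-wide, keyed by a CRC-64 over (persistent file path, compact interval, persistent duration)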
+ persistentCache *cache.Cache + ok bool + err error + ) + + crc64Value := calcPersistentCacheCrc64(persistentFilePath, compactInterval, persistentDuration) + persistentCachesLock.Lock() + defer persistentCachesLock.Unlock() + + if persistentCaches == nil { + persistentCaches = make(map[uint64]*cache.Cache) + } + if persistentCache, ok = persistentCaches[crc64Value]; !ok { + persistentCache, err = cache.NewPersistentCache( + reflect.TypeOf(&domainCacheValue{}), + persistentFilePath, + compactInterval, + persistentDuration, + func(err error) { + log.Warn(fmt.Sprintf("DomainsURLsProvider persist error: %s", err)) + }) + if err != nil { + return nil, err + } + persistentCaches[crc64Value] = persistentCache + } + return persistentCache, nil +} + +func calcPersistentCacheCrc64(persistentFilePath string, compactInterval, persistentDuration time.Duration) uint64 { + bytes := make([]byte, 0, 1024) + bytes = strconv.AppendInt(bytes, int64(compactInterval), 36) + bytes = append(bytes, []byte(persistentFilePath)...) + bytes = append(bytes, byte(0)) + bytes = strconv.AppendInt(bytes, int64(persistentDuration), 36) + return crc64.Checksum(bytes, crc64.MakeTable(crc64.ISO)) +} + +// 合并多个下载 URL 生成器 +func CombineDownloadURLsProviders(providers ...DownloadURLsProvider) DownloadURLsProvider { + return combinedDownloadURLsProviders{providers} +} + +func (g combinedDownloadURLsProviders) GetURLsIter(ctx context.Context, objectName string, options *GenerateOptions) (URLsIter, error) { + urlIters := make([]URLsIter, 0, len(g.providers)) + for _, downloadURLsProvider := range g.providers { + urlsIter, err := downloadURLsProvider.GetURLsIter(ctx, objectName, options) + if err != nil { + return nil, err + } + urlIters = append(urlIters, urlsIter) + } + return &combinedURLsIter{iters: urlIters, used: make([]URLsIter, 0, len(urlIters))}, nil +} + +func (c *combinedURLsIter) Peek(u *url.URL) (bool, error) { + for len(c.iters) > 0 { + iter := c.iters[0] + if ok, err := iter.Peek(u); err != nil { + return ok, err + } else if ok { + return true, nil + } else { + c.used = append(c.used, c.iters[0]) + c.iters = c.iters[1:] + } + } + return false, nil +} + +func (c *combinedURLsIter) Next() { + if len(c.iters) > 0 { + iter := c.iters[0] + iter.Next() + } +} + +func (c *combinedURLsIter) Reset() { + c.iters = append(c.used, c.iters...) 
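+ // move the exhausted iterators back in front of the remaining ones, restoring the original order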
+ c.used = make([]URLsIter, 0, cap(c.iters)) +} + +func (c *combinedURLsIter) Clone() URLsIter { + new := combinedURLsIter{ + iters: make([]URLsIter, 0, cap(c.iters)), + used: make([]URLsIter, 0, cap(c.used)), + } + for _, iter := range c.iters { + new.iters = append(new.iters, iter.Clone()) + } + for _, iter := range c.used { + new.used = append(new.used, iter.Clone()) + } + return &new +} + +// 为下载 URL 获取结果签名 +func SignURLsProvider(provider DownloadURLsProvider, signer Signer, options *SignOptions) DownloadURLsProvider { + return signedDownloadURLsProviders{provider, signer, options} +} + +func (provider signedDownloadURLsProviders) GetURLsIter(ctx context.Context, objectName string, options *GenerateOptions) (URLsIter, error) { + if urlsIter, err := provider.provider.GetURLsIter(ctx, objectName, options); err == nil { + return SignURLs(ctx, urlsIter, provider.signer, provider.options), nil + } else { + return urlsIter, err + } +} diff --git a/storagev2/downloader/urls_provider_test.go b/storagev2/downloader/urls_provider_test.go new file mode 100644 index 00000000..836c4171 --- /dev/null +++ b/storagev2/downloader/urls_provider_test.go @@ -0,0 +1,190 @@ +//go:build unit +// +build unit + +package downloader_test + +import ( + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "sync/atomic" + "testing" + + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/downloader" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" +) + +func TestStaticDomainBasedURLsProvider(t *testing.T) { + generator := downloader.NewStaticDomainBasedURLsProvider([]string{ + "http://testa.com/", + "https://b.testb.com/", + "testc.com", + }) + + urlsIter, err := generator.GetURLsIter(context.Background(), "/!@#$%^&*()?", &downloader.GenerateOptions{ + Command: "test1|test2", + }) + if err != nil { + t.Fatal(err) + } + if peekURLsIter(t, urlsIter) != "http://testa.com//%21@%23$%25%5E&%2A%28%29%3F?test1|test2" { + t.Fatalf("unexpected generated url") + } + urlsIter.Next() + if peekURLsIter(t, urlsIter) != "https://b.testb.com//%21@%23$%25%5E&%2A%28%29%3F?test1|test2" { + t.Fatalf("unexpected generated url") + } + urlsIter.Next() + if peekURLsIter(t, urlsIter) != "https://testc.com//%21@%23$%25%5E&%2A%28%29%3F?test1|test2" { + t.Fatalf("unexpected generated url") + } + urlsIter.Next() + assertURLsIterIsConsumed(t, urlsIter) +} + +func TestDefaultSrcURLsProvider(t *testing.T) { + const accessKey = "fakeaccesskey" + const secretKey = "fakesecretkey" + const bucketName = "fakeBucketName" + mux := http.NewServeMux() + mux.HandleFunc("/v4/query", func(w http.ResponseWriter, r *http.Request) { + if gotAk := r.URL.Query().Get("ak"); gotAk != accessKey { + t.Fatalf("Unexpected ak: %s", gotAk) + } + if gotBucketName := r.URL.Query().Get("bucket"); gotBucketName != bucketName { + t.Fatalf("Unexpected bucket: %s", gotBucketName) + } + w.Header().Add("x-reqid", "fakereqid") + if _, err := io.WriteString(w, ` +{ + "hosts": [ + { + "region": "z0", + "ttl": 86400, + "io_src": { + "domains": ["fakebucket.cn-east-1.qiniucs.com"] + } + } + ] +} + `); err != nil { + t.Fatal(err) + } + }) + server := httptest.NewServer(mux) + defer server.Close() + + cacheFile, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(cacheFile.Name()) + defer cacheFile.Close() + + urlsProvider := downloader.NewDefaultSrcURLsProvider( + accessKey, + 
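// BucketHosts below points the bucket-regions query at the local test server +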
&downloader.DefaultSrcURLsProviderOptions{ + BucketRegionsQueryOptions: region.BucketRegionsQueryOptions{PersistentFilePath: cacheFile.Name()}, + BucketHosts: region.Endpoints{Preferred: []string{server.URL}}, + }, + ) + urlsIter, err := urlsProvider.GetURLsIter(context.Background(), "/!@#$%^&*()?", &downloader.GenerateOptions{ + BucketName: bucketName, + Command: "test1|test2", + }) + if err != nil { + t.Fatal(err) + } + if peekURLsIter(t, urlsIter) != "https://fakebucket.cn-east-1.qiniucs.com//%21@%23$%25%5E&%2A%28%29%3F?test1|test2" { + t.Fatalf("unexpected generated url") + } + urlsIter.Next() + assertURLsIterIsConsumed(t, urlsIter) +} + +func TestDomainsQueryURLsProvider(t *testing.T) { + const accessKey = "fakeaccesskey" + const secretKey = "fakesecretkey" + const bucketName = "fakeBucketName" + var callCount uint64 + mux := http.NewServeMux() + mux.HandleFunc("/v2/domains", func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint64(&callCount, 1) + if r.URL.String() != "/v2/domains?tbl="+bucketName { + t.Fatalf("unexpected request url") + } + bytes, err := json.Marshal([]string{"domain1.com", "domain2.com"}) + if err != nil { + t.Fatal(err) + } + w.Header().Add("x-reqid", "fakereqid") + w.Write(bytes) + }) + server := httptest.NewServer(mux) + defer server.Close() + + cacheFile, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(cacheFile.Name()) + defer cacheFile.Close() + + generator, err := downloader.NewDomainsQueryURLsProvider(&downloader.DomainsQueryURLsProviderOptions{ + Options: http_client.Options{ + Regions: ®ion.Region{Bucket: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials(accessKey, secretKey), + }, + PersistentFilePath: cacheFile.Name(), + }) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 2; i++ { + urlsIter, err := generator.GetURLsIter(context.Background(), "/!@#$%^&*()?", &downloader.GenerateOptions{ + BucketName: bucketName, + Command: "test1|test2", + }) + if err != nil { + t.Fatal(err) + } + if peekURLsIter(t, urlsIter) != "https://domain1.com//%21@%23$%25%5E&%2A%28%29%3F?test1|test2" { + t.Fatalf("unexpected generated url") + } + urlsIter.Next() + if peekURLsIter(t, urlsIter) != "https://domain2.com//%21@%23$%25%5E&%2A%28%29%3F?test1|test2" { + t.Fatalf("unexpected generated url") + } + urlsIter.Next() + assertURLsIterIsConsumed(t, urlsIter) + } + if atomic.LoadUint64(&callCount) != 1 { + t.Fatalf("unexpected call count") + } +} + +func peekURLsIter(t *testing.T, urlsIter downloader.URLsIter) string { + var u url.URL + if ok, err := urlsIter.Peek(&u); err != nil { + t.Fatal(err) + } else if !ok { + t.Fatalf("unexpected empty urls iter") + } + return u.String() +} + +func assertURLsIterIsConsumed(t *testing.T, urlsIter downloader.URLsIter) { + var u url.URL + if ok, err := urlsIter.Peek(&u); err != nil { + t.Fatal(err) + } else if ok { + t.Fatalf("urls iter should be consumed") + } +} diff --git a/storagev2/http_client/context.go b/storagev2/http_client/context.go new file mode 100644 index 00000000..60ff17c8 --- /dev/null +++ b/storagev2/http_client/context.go @@ -0,0 +1,14 @@ +package http_client + +import "context" + +type noSignatureContextKey struct{} + +func WithoutSignature(ctx context.Context) context.Context { + return context.WithValue(ctx, noSignatureContextKey{}, struct{}{}) +} + +func isSignatureDisabled(ctx context.Context) bool { + _, ok := ctx.Value(noSignatureContextKey{}).(struct{}) + return ok +} diff --git a/storagev2/http_client/http_client.go 
b/storagev2/http_client/http_client.go index c2ec085b..a330af07 100644 --- a/storagev2/http_client/http_client.go +++ b/storagev2/http_client/http_client.go @@ -7,6 +7,7 @@ import ( "net" "net/http" "net/url" + "strconv" "strings" "time" @@ -166,7 +167,7 @@ type ( // 授权类型 AuthType auth.TokenType - // 上传凭证提供者 + // 上传凭证接口 UpToken uptoken.UpTokenProvider // 是否缓存响应 @@ -174,6 +175,9 @@ type ( // 拦截器追加列表 Interceptors []Interceptor + + // 请求进度回调函数 + OnRequestProgress func(uint64, uint64) } ) @@ -185,15 +189,18 @@ func NewClient(options *Options) *Client { options.UseInsecureProtocol = isDisabled } } - if options.HostFreezeDuration < time.Millisecond { - options.HostFreezeDuration = 600 * time.Second + hostFreezeDuration := options.HostFreezeDuration + if hostFreezeDuration < time.Millisecond { + hostFreezeDuration = 600 * time.Second } - if options.ShouldFreezeHost == nil { - options.ShouldFreezeHost = defaultShouldFreezeHost + shouldFreezeHost := options.ShouldFreezeHost + if shouldFreezeHost == nil { + shouldFreezeHost = defaultShouldFreezeHost } - if options.Credentials == nil { - if defaultAuth := auth.Default(); defaultAuth != nil { - options.Credentials = defaultAuth + creds := options.Credentials + if creds == nil { + if defaultCreds := credentials.Default(); defaultCreds != nil { + creds = defaultCreds } } @@ -202,13 +209,13 @@ func NewClient(options *Options) *Client { basicHTTPClient: clientv2.NewClient(options.BasicHTTPClient, options.Interceptors...), bucketQuery: options.BucketQuery, regions: options.Regions, - credentials: options.Credentials, + credentials: creds, resolver: options.Resolver, chooser: options.Chooser, hostRetryConfig: options.HostRetryConfig, hostsRetryConfig: options.HostsRetryConfig, - hostFreezeDuration: options.HostFreezeDuration, - shouldFreezeHost: options.ShouldFreezeHost, + hostFreezeDuration: hostFreezeDuration, + shouldFreezeHost: shouldFreezeHost, beforeSign: options.BeforeSign, afterSign: options.AfterSign, signError: options.SignError, @@ -228,28 +235,34 @@ func (httpClient *Client) Do(ctx context.Context, request *Request) (*http.Respo if err != nil { return nil, err } - if upTokenProvider := request.UpToken; upTokenProvider != nil { - if upToken, err := upTokenProvider.GetUpToken(ctx); err != nil { - return nil, err + req = clientv2.WithInterceptors(req, clientv2.NewAntiHijackingInterceptor()) + if !isSignatureDisabled(ctx) { + if upTokenProvider := request.UpToken; upTokenProvider != nil { + req = clientv2.WithInterceptors(req, clientv2.NewUpTokenInterceptor(clientv2.UpTokenConfig{ + UpToken: upTokenProvider, + })) } else { - req.Header.Set("Authorization", "UpToken "+upToken) - } - } else { - credentialsProvider := request.Credentials - if credentialsProvider == nil { - credentialsProvider = httpClient.credentials - } - if credentialsProvider != nil { - if credentials, err := credentialsProvider.Get(ctx); err != nil { - return nil, err - } else { - req = clientv2.WithInterceptors(req, clientv2.NewAuthInterceptor(clientv2.AuthConfig{ - Credentials: credentials, - TokenType: request.AuthType, - BeforeSign: httpClient.beforeSign, - AfterSign: httpClient.afterSign, - SignError: httpClient.signError, - })) + credentialsProvider := request.Credentials + if credentialsProvider == nil { + credentialsProvider = httpClient.credentials + } + if credentialsProvider == nil { + if defaultCreds := credentials.Default(); defaultCreds != nil { + credentialsProvider = defaultCreds + } + } + if credentialsProvider != nil { + if creds, err := 
credentialsProvider.Get(ctx); err != nil { + return nil, err + } else { + req = clientv2.WithInterceptors(req, clientv2.NewAuthInterceptor(clientv2.AuthConfig{ + Credentials: creds, + TokenType: request.AuthType, + BeforeSign: httpClient.beforeSign, + AfterSign: httpClient.afterSign, + SignError: httpClient.signError, + })) + } } } } @@ -300,6 +313,42 @@ func (httpClient *Client) GetHostsRetryConfig() *RetryConfig { return httpClient.hostsRetryConfig } +func (httpClient *Client) GetResolver() resolver.Resolver { + return httpClient.resolver +} + +func (httpClient *Client) GetChooser() chooser.Chooser { + return httpClient.chooser +} + +func (httpClient *Client) GetBeforeResolveCallback() func(*http.Request) { + return httpClient.beforeResolve +} + +func (httpClient *Client) GetAfterResolveCallback() func(*http.Request, []net.IP) { + return httpClient.afterResolve +} + +func (httpClient *Client) GetResolveErrorCallback() func(*http.Request, error) { + return httpClient.resolveError +} + +func (httpClient *Client) GetBeforeBackoffCallback() func(*http.Request, *retrier.RetrierOptions, time.Duration) { + return httpClient.beforeBackoff +} + +func (httpClient *Client) GetAfterBackoffCallback() func(*http.Request, *retrier.RetrierOptions, time.Duration) { + return httpClient.afterBackoff +} + +func (httpClient *Client) GetBeforeRequestCallback() func(*http.Request, *retrier.RetrierOptions) { + return httpClient.beforeRequest +} + +func (httpClient *Client) GetAfterResponseCallback() func(*http.Response, *retrier.RetrierOptions, error) { + return httpClient.afterResponse +} + func (httpClient *Client) getEndpoints(ctx context.Context, request *Request) (region.Endpoints, error) { getEndpointsFromEndpointsProvider := func(ctx context.Context, endpoints region.EndpointsProvider) (region.Endpoints, error) { return endpoints.GetEndpoints(ctx) @@ -336,58 +385,63 @@ func (httpClient *Client) makeReq(ctx context.Context, request *Request) (*http. 
} interceptors := make([]Interceptor, 0, 3) - hostsRetryConfig := httpClient.hostsRetryConfig - if hostsRetryConfig == nil { - hostsRetryConfig = &RetryConfig{ - RetryMax: len(endpoints.Preferred) + len(endpoints.Alternative), - } + + var hostsRetryConfig, hostRetryConfig clientv2.RetryConfig + if httpClient.hostsRetryConfig != nil { + hostsRetryConfig = *httpClient.hostsRetryConfig } - r := httpClient.resolver - if r == nil { - if r, err = resolver.NewCacheResolver(nil, nil); err != nil { - return nil, err - } + if hostsRetryConfig.RetryMax <= 0 { + hostsRetryConfig.RetryMax = len(endpoints.Preferred) + len(endpoints.Alternative) + } + if hostsRetryConfig.Retrier == nil { + hostsRetryConfig.Retrier = retrier.NewErrorRetrier() } - cs := httpClient.chooser - if cs == nil { - cs = chooser.NewShuffleChooser(chooser.NewSmartIPChooser(nil)) + + if httpClient.hostRetryConfig != nil { + hostRetryConfig = *httpClient.hostRetryConfig } + if hostRetryConfig.RetryMax <= 0 { + hostRetryConfig.RetryMax = 3 + } + if hostRetryConfig.Retrier == nil { + hostRetryConfig.Retrier = retrier.NewErrorRetrier() + } + interceptors = append(interceptors, clientv2.NewBufferResponseInterceptor()) interceptors = append(interceptors, clientv2.NewHostsRetryInterceptor(clientv2.HostsRetryConfig{ RetryMax: hostsRetryConfig.RetryMax, ShouldRetry: hostsRetryConfig.ShouldRetry, Retrier: hostsRetryConfig.Retrier, HostFreezeDuration: httpClient.hostFreezeDuration, - HostProvider: hostProvider, ShouldFreezeHost: httpClient.shouldFreezeHost, + HostProvider: hostProvider, })) - if httpClient.hostRetryConfig != nil { - interceptors = append(interceptors, clientv2.NewSimpleRetryInterceptor( - clientv2.SimpleRetryConfig{ - RetryMax: httpClient.hostRetryConfig.RetryMax, - RetryInterval: httpClient.hostRetryConfig.RetryInterval, - Backoff: httpClient.hostRetryConfig.Backoff, - ShouldRetry: httpClient.hostRetryConfig.ShouldRetry, - Resolver: r, - Chooser: cs, - Retrier: httpClient.hostRetryConfig.Retrier, - BeforeResolve: httpClient.beforeResolve, - AfterResolve: httpClient.afterResolve, - ResolveError: httpClient.resolveError, - BeforeBackoff: httpClient.beforeBackoff, - AfterBackoff: httpClient.afterBackoff, - BeforeRequest: httpClient.beforeRequest, - AfterResponse: httpClient.afterResponse, - }, - )) - } + interceptors = append(interceptors, clientv2.NewSimpleRetryInterceptor( + clientv2.SimpleRetryConfig{ + RetryMax: hostRetryConfig.RetryMax, + RetryInterval: hostRetryConfig.RetryInterval, + Backoff: hostRetryConfig.Backoff, + ShouldRetry: hostRetryConfig.ShouldRetry, + Retrier: hostRetryConfig.Retrier, + Resolver: httpClient.resolver, + Chooser: httpClient.chooser, + BeforeResolve: httpClient.beforeResolve, + AfterResolve: httpClient.afterResolve, + ResolveError: httpClient.resolveError, + BeforeBackoff: httpClient.beforeBackoff, + AfterBackoff: httpClient.afterBackoff, + BeforeRequest: httpClient.beforeRequest, + AfterResponse: httpClient.afterResponse, + }, + )) req, err := clientv2.NewRequest(clientv2.RequestParams{ - Context: ctx, - Method: request.Method, - Url: url, - Header: request.Header, - GetBody: request.RequestBody, - BufferResponse: request.BufferResponse, + Context: ctx, + Method: request.Method, + Url: url, + Header: request.Header, + GetBody: request.RequestBody, + BufferResponse: request.BufferResponse, + OnRequestProgress: request.OnRequestProgress, }) if err != nil { return nil, err @@ -457,8 +511,14 @@ func GetMultipartFormRequestBody(info *MultipartForm) GetRequestBody { // GetMultipartFormRequestBody 
将二进制数据请求 Body 发送 func GetRequestBodyFromReadSeekCloser(r compatible_io.ReadSeekCloser) GetRequestBody { - return func(*clientv2.RequestParams) (io.ReadCloser, error) { - _, err := r.Seek(0, io.SeekStart) + return func(params *clientv2.RequestParams) (io.ReadCloser, error) { + params.Header.Set("Content-Type", "application/octet-stream") + totalSize, err := r.Seek(0, io.SeekEnd) + if err != nil { + return r, err + } + params.Header.Set("Content-Length", strconv.FormatInt(totalSize, 10)) + _, err = r.Seek(0, io.SeekStart) return r, err } } diff --git a/storagev2/http_client/http_client_test.go b/storagev2/http_client/http_client_test.go index b092599d..27b11820 100644 --- a/storagev2/http_client/http_client_test.go +++ b/storagev2/http_client/http_client_test.go @@ -29,6 +29,7 @@ func TestHttpClient(t *testing.T) { if auth := r.Header.Get("Authorization"); !strings.HasPrefix(auth, "Qiniu TestAk:") { t.Fatalf("Unexpected authorization: %s", auth) } + w.Header().Add("X-ReqId", "fakereqid") w.WriteHeader(http.StatusInternalServerError) io.WriteString(w, "test error") }) @@ -41,6 +42,7 @@ func TestHttpClient(t *testing.T) { if auth := r.Header.Get("Authorization"); !strings.HasPrefix(auth, "Qiniu TestAk:") { t.Fatalf("Unexpected authorization: %s", auth) } + w.Header().Add("X-ReqId", "fakereqid") w.WriteHeader(http.StatusInternalServerError) io.WriteString(w, "test error") }) @@ -53,6 +55,7 @@ func TestHttpClient(t *testing.T) { if auth := r.Header.Get("Authorization"); !strings.HasPrefix(auth, "Qiniu TestAk:") { t.Fatalf("Unexpected authorization: %s", auth) } + w.Header().Add("X-ReqId", "fakereqid") w.WriteHeader(http.StatusInternalServerError) io.WriteString(w, "test error") }) @@ -60,6 +63,7 @@ func TestHttpClient(t *testing.T) { defer server_3.Close() httpClient := NewClient(&Options{ + HostRetryConfig: &RetryConfig{}, Regions: ®ion.Region{ Api: region.Endpoints{ Preferred: []string{server_1.URL, server_2.URL}, @@ -90,12 +94,12 @@ func TestHttpClient(t *testing.T) { t.Fatalf("Unexpected status code: %d", clientErr.Code) } } - if len(reqs) != 3 { + if len(reqs) != 12 { t.Fatalf("Unexpected reqs: %#v", reqs) } for i, req := range reqs { - if i+1 != req.id || req.url.String() != "/test?fakeRawQuery&x-query-1=x-value-1&x-query-2=x-value-2" { - t.Fatalf("Unexpected req: %#v", req) + if i/4+1 != req.id || req.url.String() != "/test?fakeRawQuery&x-query-1=x-value-1&x-query-2=x-value-2" { + t.Fatalf("Unexpected req: %d, %d, %s", i, req.id, req.url) } } } @@ -106,6 +110,7 @@ func TestHttpClientJson(t *testing.T) { if auth := r.Header.Get("Authorization"); !strings.HasPrefix(auth, "Qiniu TestAk:") { t.Fatalf("Unexpected authorization: %s", auth) } + w.Header().Add("X-ReqId", "fakereqid") io.WriteString(w, "{\"Test\":\"AccessKey\"}") }) server_1 := httptest.NewServer(mux_1) diff --git a/storagev2/internal/api-generator/client.go b/storagev2/internal/api-generator/client.go index 5e49e655..4ad33bd3 100644 --- a/storagev2/internal/api-generator/client.go +++ b/storagev2/internal/api-generator/client.go @@ -328,7 +328,7 @@ func (description *ApiDetailedDescription) generatePackage(group *jen.Group, opt getUpTokenFunc = jen.Func().Params().Params(jen.String(), jen.Error()).BlockFunc(func(group *jen.Group) { group.Return(jen.Id("innerRequest").Dot("UpToken").Dot("GetUpToken").Call(jen.Id("ctx"))) }) - default: + case AuthorizationNone: getUpTokenFunc = jen.Nil() } @@ -406,6 +406,7 @@ func (description *ApiDetailedDescription) generatePackage(group *jen.Group, opt ) } } + 
group.Add(jen.Id("OnRequestProgress").Op(":").Id("options").Dot("OnRequestProgress")) }), ) group.Add( @@ -455,6 +456,15 @@ func (description *ApiDetailedDescription) generatePackage(group *jen.Group, opt group.Add(jen.Id("UseInsecureProtocol").Op(":").Id("storage").Dot("client").Dot("UseInsecureProtocol").Call()) group.Add(jen.Id("HostFreezeDuration").Op(":").Id("storage").Dot("client").Dot("GetHostFreezeDuration").Call()) group.Add(jen.Id("Client").Op(":").Id("storage").Dot("client").Dot("GetClient").Call()) + group.Add(jen.Id("Resolver").Op(":").Id("storage").Dot("client").Dot("GetResolver").Call()) + group.Add(jen.Id("Chooser").Op(":").Id("storage").Dot("client").Dot("GetChooser").Call()) + group.Add(jen.Id("BeforeResolve").Op(":").Id("storage").Dot("client").Dot("GetBeforeResolveCallback").Call()) + group.Add(jen.Id("AfterResolve").Op(":").Id("storage").Dot("client").Dot("GetAfterResolveCallback").Call()) + group.Add(jen.Id("ResolveError").Op(":").Id("storage").Dot("client").Dot("GetResolveErrorCallback").Call()) + group.Add(jen.Id("BeforeBackoff").Op(":").Id("storage").Dot("client").Dot("GetBeforeBackoffCallback").Call()) + group.Add(jen.Id("AfterBackoff").Op(":").Id("storage").Dot("client").Dot("GetAfterBackoffCallback").Call()) + group.Add(jen.Id("BeforeRequest").Op(":").Id("storage").Dot("client").Dot("GetBeforeRequestCallback").Call()) + group.Add(jen.Id("AfterResponse").Op(":").Id("storage").Dot("client").Dot("GetAfterResponseCallback").Call()) }), ) group.Add( @@ -463,6 +473,7 @@ func (description *ApiDetailedDescription) generatePackage(group *jen.Group, opt jen.Id("hostRetryConfig").Op("!=").Nil(), ).BlockFunc(func(group *jen.Group) { group.Id("queryOptions").Dot("RetryMax").Op("=").Id("hostRetryConfig").Dot("RetryMax") + group.Id("queryOptions").Dot("Backoff").Op("=").Id("hostRetryConfig").Dot("Backoff") }), ) group.Add( @@ -525,6 +536,9 @@ func (description *ApiDetailedDescription) generatePackage(group *jen.Group, opt ) }), ) + if description.Request.Authorization.ToAuthorization() == AuthorizationNone { + group.Add(jen.Id("ctx").Op("=").Qual(PackageNameHTTPClient, "WithoutSignature").Call(jen.Id("ctx"))) + } if body := description.Response.Body; body != nil { if json := body.Json; json != nil { if description.Request.responseTypeRequired { diff --git a/storagev2/internal/api-generator/main.go b/storagev2/internal/api-generator/main.go index c3a9a4e8..310dd31e 100644 --- a/storagev2/internal/api-generator/main.go +++ b/storagev2/internal/api-generator/main.go @@ -163,6 +163,10 @@ func generateApiClient(group *jen.Group) { group.Add(jen.Id("OverwrittenBucketName").String()) group.Add(jen.Id("OverwrittenEndpoints").Qual(PackageNameRegion, "EndpointsProvider")) group.Add(jen.Id("OverwrittenRegion").Qual(PackageNameRegion, "RegionsProvider")) + group.Add(jen.Id("OnRequestProgress").Func().Params( + jen.Uint64(), + jen.Uint64(), + )) }), ) } diff --git a/storagev2/internal/uplog/quality_uplog.go b/storagev2/internal/uplog/quality_uplog.go index 71f3f19a..34dc4350 100644 --- a/storagev2/internal/uplog/quality_uplog.go +++ b/storagev2/internal/uplog/quality_uplog.go @@ -102,6 +102,8 @@ func detectLogResult(err error) LogResult { return LogResultTimeout case ErrorTypeUnknownHost: return LogResultUnknownHost + case ErrorTypeMaliciousResponse: + return LogResultMaliciousResponse case ErrorTypeCannotConnectToHost: return LogResultCannotConnectToHost case ErrorTypeUserCanceled: diff --git a/storagev2/internal/uplog/uplog.go b/storagev2/internal/uplog/uplog.go index 9a99aa5f..84bb401f 
100644 --- a/storagev2/internal/uplog/uplog.go +++ b/storagev2/internal/uplog/uplog.go @@ -9,7 +9,8 @@ import ( "sync" "syscall" - "github.com/matishsiao/goInfo" + sysinfo "github.com/elastic/go-sysinfo" + "github.com/qiniu/go-sdk/v7/storagev2/retrier" ) type ( @@ -30,6 +31,7 @@ const ( ErrorTypeUnknownError ErrorType = "unknown_error" ErrorTypeTimeout ErrorType = "timeout" ErrorTypeUnknownHost ErrorType = "unknown_host" + ErrorTypeMaliciousResponse ErrorType = "malicious_response" ErrorTypeCannotConnectToHost ErrorType = "cannot_connect_to_host" ErrorTypeSSLError ErrorType = "ssl_error" ErrorTypeTransmissionError ErrorType = "transmission_error" @@ -49,6 +51,7 @@ const ( LogResultUnknownError LogResult = "unknown_error" LogResultTimeout LogResult = "timeout" LogResultUnknownHost LogResult = "unknown_host" + LogResultMaliciousResponse LogResult = "malicious_response" LogResultCannotConnectToHost LogResult = "cannot_connect_to_host" LogResultSSLError LogResult = "ssl_error" LogResultTransmissionError LogResult = "transmission_error" @@ -66,8 +69,8 @@ var ( func getOsVersion() string { osVersionOnce.Do(func() { - if osInfo, err := goInfo.GetInfo(); err == nil { - osVersion = osInfo.Core + if hostInfo, err := sysinfo.Host(); err == nil { + osVersion = hostInfo.Info().KernelVersion } }) return osVersion @@ -98,7 +101,9 @@ func detectErrorType(err error) ErrorType { } unwrapedErr := unwrapUnderlyingError(err) - if os.IsTimeout(unwrapedErr) { + if unwrapedErr == retrier.ErrMaliciousResponse { + return ErrorTypeMaliciousResponse + } else if os.IsTimeout(unwrapedErr) { return ErrorTypeTimeout } else if dnsError, ok := unwrapedErr.(*net.DNSError); ok && isDnsNotFoundError(dnsError) { return ErrorTypeUnknownHost @@ -111,6 +116,13 @@ func detectErrorType(err error) ErrorType { default: return ErrorTypeUnexpectedSyscallError } + } else if errno, ok := unwrapedErr.(syscall.Errno); ok { + switch errno { + case syscall.ECONNREFUSED, syscall.ECONNABORTED, syscall.ECONNRESET: + return ErrorTypeCannotConnectToHost + default: + return ErrorTypeUnexpectedSyscallError + } } else if unwrapedErr == context.Canceled { return ErrorTypeUserCanceled } else { diff --git a/storagev2/internal/uplog/uplog_buffer.go b/storagev2/internal/uplog/uplog_buffer.go index f6a1a237..2699e484 100644 --- a/storagev2/internal/uplog/uplog_buffer.go +++ b/storagev2/internal/uplog/uplog_buffer.go @@ -9,6 +9,7 @@ import ( "path/filepath" "strings" "sync" + "sync/atomic" "time" "github.com/gofrs/flock" @@ -24,7 +25,8 @@ var ( uplogFileBuffer *os.File uplogFileBufferFileLocker *flock.Flock uplogFileBufferLock sync.Mutex - uplogFileBufferThreshold int64 = 4 * 1024 * 1024 + uplogFileBufferThreshold uint64 = 4 * 1024 * 1024 + uplogMaxStorageBytes uint64 = 100 * 1024 * 1024 uplogWriteFileBufferTicker *time.Ticker uplogWriteFileBufferInterval time.Duration = 1 * time.Minute uplogWriteFileBufferTimerLock sync.Mutex @@ -93,6 +95,14 @@ func IsUplogEnabled() bool { return !uplogDisabled } +func GetUplogMaxStorageBytes() uint64 { + return atomic.LoadUint64(&uplogMaxStorageBytes) +} + +func SetUplogMaxStorageBytes(max uint64) { + atomic.StoreUint64(&uplogMaxStorageBytes, max) +} + func SetUplogFileBufferDirPath(path string) { uplogFileBufferDirPathMutex.Lock() defer uplogFileBufferDirPathMutex.Unlock() @@ -111,7 +121,7 @@ func getUplogFileBufferPath(current bool) string { uplogFileBufferPath = filepath.Join(uplogFileBufferDirPath, UPLOG_FILE_BUFFER_NAME) } if !current { - uplogFileBufferPath = uplogFileBufferPath + "." 
+ time.Now().UTC().Format(time.RFC3339Nano) + uplogFileBufferPath = uplogFileBufferPath + "." + time.Now().UTC().Format("20060102150405.999999999") } return uplogFileBufferPath } @@ -129,13 +139,13 @@ func getUplogFileDirectoryLock() *flock.Flock { } func FlushBuffer() error { - return withUploadFileBuffer(func(io.Writer) (bool, error) { - return true, nil + return withUploadFileBuffer(func(w io.WriteCloser) error { + return w.Close() }) } func writeMemoryBufferToFileBuffer(data []byte) (n int, err error) { - if err = withUploadFileBuffer(func(w io.Writer) (shouldClose bool, e error) { + if err = withUploadFileBuffer(func(w io.WriteCloser) (e error) { for len(data) > 0 { n, e = w.Write(data) if e != nil { @@ -148,7 +158,7 @@ func writeMemoryBufferToFileBuffer(data []byte) (n int, err error) { return } - if fi, serr := os.Stat(getUplogFileBufferPath(true)); serr == nil && fi.Size() >= uplogFileBufferThreshold { + if fi, serr := os.Stat(getUplogFileBufferPath(true)); serr == nil && uint64(fi.Size()) >= uplogFileBufferThreshold { tryToArchiveFileBuffer(false) } return @@ -176,16 +186,14 @@ func tryToArchiveFileBuffer(force bool) { } defer locker.Close() - if err = withUploadFileBuffer(func(io.Writer) (shouldClose bool, renameErr error) { + if err = withUploadFileBuffer(func(w io.WriteCloser) error { currentFilePath := getUplogFileBufferPath(true) if fileInfo, fileInfoErr := os.Stat(currentFilePath); fileInfoErr == nil && fileInfo.Size() == 0 { - return + return nil } archivedFilePath := getUplogFileBufferPath(false) - if renameErr = os.Rename(currentFilePath, archivedFilePath); renameErr == nil { - shouldClose = true - } - return + w.Close() + return os.Rename(currentFilePath, archivedFilePath) }); err != nil { return } @@ -194,9 +202,17 @@ func tryToArchiveFileBuffer(force bool) { go uploadAllClosedFileBuffers() } -func withUploadFileBuffer(fn func(io.Writer) (bool, error)) (err error) { - var shouldClose bool +type uplogFileBufferWrapper struct{} +func (uplogFileBufferWrapper) Write(p []byte) (int, error) { + return uplogFileBuffer.Write(p) +} + +func (uplogFileBufferWrapper) Close() error { + return closeUplogFileBufferWithoutLock() +} + +func withUploadFileBuffer(fn func(io.WriteCloser) error) (err error) { uplogFileBufferLock.Lock() defer uplogFileBufferLock.Unlock() @@ -216,25 +232,34 @@ func withUploadFileBuffer(fn func(io.Writer) (bool, error)) (err error) { } else if uplogFileBuffer, err = os.OpenFile(uplogFileBufferPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644); err != nil { return } - uplogFileBufferFileLocker = flock.New(uplogFileBufferPath) + uplogFileBufferLockPath := uplogFileBufferPath + ".lock" + uplogFileBufferFileLocker = flock.New(uplogFileBufferLockPath) } if err = uplogFileBufferFileLocker.Lock(); err != nil { return } - shouldClose, err = fn(uplogFileBuffer) - _ = uplogFileBufferFileLocker.Unlock() - if shouldClose { - closeUplogFileBufferWithoutLock() + err = fn(uplogFileBufferWrapper{}) + if uplogFileBufferFileLocker != nil && uplogFileBufferFileLocker.Locked() { + _ = uplogFileBufferFileLocker.Unlock() } return } -func closeUplogFileBufferWithoutLock() { - uplogFileBuffer.Close() - uplogFileBuffer = nil - uplogFileBufferFileLocker.Close() - uplogFileBufferFileLocker = nil +func closeUplogFileBufferWithoutLock() error { + var err1, err2 error + if uplogFileBuffer != nil { + err1 = uplogFileBuffer.Close() + uplogFileBuffer = nil + } + if uplogFileBufferFileLocker != nil { + err2 = uplogFileBufferFileLocker.Close() + uplogFileBufferFileLocker = nil + } + if err1 != 
nil { + return err1 + } + return err2 } func SetWriteFileBufferInterval(d time.Duration) { @@ -295,18 +320,25 @@ func (r *multipleFileReader) readAllAsync() { defer r.w.CloseWithError(io.EOF) defer r.compressor.Close() for _, path := range r.paths { - file, err := os.Open(path) - if err != nil { - r.setError(err) - return - } - if _, err = io.Copy(r.compressor, file); err != nil && err != io.EOF { + if err := r.readAllForPathAsync(path); err != nil { r.setError(err) return } } } +func (r *multipleFileReader) readAllForPathAsync(path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + if _, err = io.Copy(r.compressor, file); err != nil && err != io.EOF { + return err + } + return nil +} + func (r *multipleFileReader) getError() error { r.errLock.Lock() defer r.errLock.Unlock() @@ -340,10 +372,9 @@ func getArchivedUplogFileBufferPaths(dirPath string) ([]string, error) { archivedPaths := make([]string, 0, len(dirEntries)) for _, dirEntry := range dirEntries { - if !dirEntry.Mode().IsRegular() { - continue - } - if !strings.HasPrefix(dirEntry.Name(), UPLOG_FILE_BUFFER_NAME+".") { + if !dirEntry.Mode().IsRegular() || + !strings.HasPrefix(dirEntry.Name(), UPLOG_FILE_BUFFER_NAME+".") || + strings.HasSuffix(dirEntry.Name(), ".lock") { continue } archivedPaths = append(archivedPaths, filepath.Join(dirPath, dirEntry.Name())) diff --git a/storagev2/internal/uplog/uplog_buffer_test.go b/storagev2/internal/uplog/uplog_buffer_test.go index 47fbf4a9..a7945b65 100644 --- a/storagev2/internal/uplog/uplog_buffer_test.go +++ b/storagev2/internal/uplog/uplog_buffer_test.go @@ -15,6 +15,7 @@ import ( "net/http/httptest" "os" "path/filepath" + "strings" "sync" "sync/atomic" "testing" @@ -163,4 +164,112 @@ func TestUplogArchiveFileBuffer(t *testing.T) { if !bytes.Equal(md5HasherClient.Sum(nil), md5HasherServer.Sum(nil)) { t.Fatal("unexpected request body") } + entries, err := ioutil.ReadDir(tmpDir) + if err != nil { + t.Fatal(err) + } + if len(entries) != 2 && len(entries) != 3 { + t.Fatalf("unexpected uplog buffer files count") + } + for _, entry := range entries { + if !strings.HasSuffix(entry.Name(), ".lock") && entry.Name() != UPLOG_FILE_BUFFER_NAME { + t.Fatalf("unexpected uplog buffer file: %s", entry.Name()) + } + } +} + +func TestUplogArchiveFileBufferFailed(t *testing.T) { + testLock.Lock() + defer testLock.Unlock() + + tmpDir, err := ioutil.TempDir("", "test-uplog-*") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + defer func() { + if err := FlushBuffer(); err != nil { + t.Fatal(err) + } + }() + + var called int32 + httpServerMux := http.NewServeMux() + httpServerMux.Handle("/log/4", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + t.Fatalf("Unexpected method: %s", r.Method) + } + if r.URL.Query().Get("compressed") != "gzip" { + t.Fatalf("Unexpected compressed: %s", r.URL.Query().Get("compressed")) + } + if r.Header.Get("Authorization") != "UpToken fakeuptoken" { + t.Fatalf("Unexpected Authorization: %s", r.Header.Get("Authorization")) + } + if atomic.AddInt32(&called, 1) > 1 { + if r.Header.Get(X_LOG_CLIENT_ID) != "fake-x-log-client-id" { + t.Fatalf("Unexpected X-Log-Client-Id: %s", r.Header.Get("X_LOG_CLIENT_ID")) + } + } + w.Header().Add(X_LOG_CLIENT_ID, "fake-x-log-client-id") + w.WriteHeader(http.StatusInternalServerError) + })) + httpServer := httptest.NewServer(httpServerMux) + defer httpServer.Close() + + SetUplogUrl(httpServer.URL) + defer SetUplogUrl("") + + getUpToken 
= func() (string, error) { return "fakeuptoken", nil } + defer func() { getUpToken = nil }() + + SetUplogFileBufferDirPath(tmpDir) + defer SetUplogFileBufferDirPath("") + + DisableUplog() + defer EnableUplog() + + originalUplogMaxStorageBytes := GetUplogMaxStorageBytes() + SetUplogMaxStorageBytes(48 * 1024) + defer SetUplogMaxStorageBytes(originalUplogMaxStorageBytes) + + originalUplogFileBufferThreshold := uplogFileBufferThreshold + uplogFileBufferThreshold = 24 * 1024 + defer func() { + uplogFileBufferThreshold = originalUplogFileBufferThreshold + }() + + uplogBuffer := bytes.NewBuffer(make([]byte, 0, 4*1024)) + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + for i := 0; i < 4*24; i++ { + n, err := io.CopyN(uplogBuffer, r, 1024) + if err != nil { + t.Fatal(err) + } else if n != 1024 { + t.Fatalf("unexpected n: %d", n) + } + + writeMemoryBufferToFileBuffer(uplogBuffer.Bytes()) + uplogBuffer.Reset() + time.Sleep(10 * time.Nanosecond) + } + tryToArchiveFileBuffer(true) + time.Sleep(100 * time.Millisecond) + c := atomic.LoadInt32(&called) + if c == 0 { + t.Fatal("unexpected upload count") + } + + entries, err := ioutil.ReadDir(tmpDir) + if err != nil { + t.Fatal(err) + } + totalSize := uint64(0) + for _, entry := range entries { + totalSize += uint64(entry.Size()) + } + if totalSize > 48*1024 { + t.Fatalf("unexpected uplog buffer file size: %d", totalSize) + } } diff --git a/storagev2/internal/uplog/uplog_upload.go b/storagev2/internal/uplog/uplog_upload.go index 4202125b..e687006d 100644 --- a/storagev2/internal/uplog/uplog_upload.go +++ b/storagev2/internal/uplog/uplog_upload.go @@ -6,6 +6,7 @@ import ( "net/http" "os" "path/filepath" + "sort" "sync" clientv1 "github.com/qiniu/go-sdk/v7/client" @@ -72,8 +73,32 @@ func uploadAllClosedFileBuffers() { } if err = uploadUplogLog(archivedPaths); err == nil { - for _, archarchivedPath := range archivedPaths { - os.Remove(archarchivedPath) + for _, archivedPath := range archivedPaths { + os.Remove(archivedPath) + } + } else { + sort.Strings(archivedPaths) + var ( + archivedPathsLen = len(archivedPaths) + totalSize uint64 = 0 + deleteAllRest bool = false + ) + for i := range archivedPaths { + archivedPath := archivedPaths[archivedPathsLen-i-1] + if !deleteAllRest { + fileInfo, err := os.Stat(archivedPath) + if err != nil { + return + } + if totalSize+uint64(fileInfo.Size()) > GetUplogMaxStorageBytes() { + deleteAllRest = true + } else { + totalSize += uint64(fileInfo.Size()) + } + } + if deleteAllRest { + os.Remove(archivedPath) + } } } } diff --git a/storagev2/objects/batch.go b/storagev2/objects/batch.go new file mode 100644 index 00000000..450963e8 --- /dev/null +++ b/storagev2/objects/batch.go @@ -0,0 +1,803 @@ +package objects + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "net" + "net/url" + "os" + "sort" + "sync" + "time" + + "github.com/gammazero/toposort" + clientv1 "github.com/qiniu/go-sdk/v7/client" + internal_context "github.com/qiniu/go-sdk/v7/internal/context" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/apis/batch_ops" + "github.com/qiniu/go-sdk/v7/storagev2/apis/stat_object" + "github.com/qiniu/go-sdk/v7/storagev2/retrier" +) + +type ( + // 批处理执行器 + BatchOpsExecutor interface { + ExecuteBatchOps(context.Context, []Operation, *apis.Storage) error + } + + // 串行批处理执行器选项 + SerialBatchOpsExecutorOptions struct { + RetryMax uint // 最大重试次数,默认为 10 + BatchSize uint // 批次大小,默认为 1000 + } + + serialBatchOpsExecutor struct { + options *SerialBatchOpsExecutorOptions + } + + // 
并行批处理执行器选项
+    ConcurrentBatchOpsExecutorOptions struct {
+        RetryMax uint // 最大重试次数,默认为 10
+        InitBatchSize uint // 初始批次大小,默认为 250
+        MaxBatchSize uint // 最大批次大小,默认为 250
+        MinBatchSize uint // 最小批次大小,默认为 50
+        DoublingFactor uint // 批次大小翻倍系数,默认为 2
+        DoublingInterval time.Duration // 翻倍时间间隔,默认为 1 分钟
+        InitWorkers uint // 初始化并发数,默认为 20
+        MaxWorkers uint // 最大并发数,默认为 20
+        MinWorkers uint // 最小并发数,默认为 1
+        AddWorkerInterval time.Duration // 增加并发数时间间隔,默认为 1 分钟
+    }
+
+    concurrentBatchOpsExecutor struct {
+        options *ConcurrentBatchOpsExecutorOptions
+    }
+
+    operation struct {
+        Operation
+        tries uint
+    }
+
+    requestsManager struct {
+        storage *apis.Storage
+        lock sync.Mutex
+        operations [][]*operation
+        batchSize, minBatchSize, maxBatchSize, doublingFactor, maxTries uint
+        doublingInterval time.Duration
+        ticker *time.Ticker
+        resetTicker chan struct{}
+        cancelTicker internal_context.CancelCauseFunc
+        lastDecreaseBatchSizeTime time.Time
+        lastDecreaseBatchSizeTimeMutex sync.Mutex
+        waitGroup sync.WaitGroup
+    }
+
+    workersManager struct {
+        lock sync.Mutex
+        parentCtx internal_context.Context
+        parentCancelFunc internal_context.CancelCauseFunc
+        cancels []internal_context.CancelCauseFunc
+        requestsManager *requestsManager
+        maxWorkers, minWorkers uint
+        addWorkerInterval time.Duration
+        ticker *time.Ticker
+        resetTicker chan struct{}
+        cancelTickerFunc internal_context.CancelCauseFunc
+        lastResetTickerTime time.Time
+        lastResetTickerTimeMutex sync.Mutex
+        timerWaitGroup, asyncWorkersWaitGroup sync.WaitGroup
+    }
+)
+
+// 创建串行批处理执行器
+func NewSerialBatchOpsExecutor(options *SerialBatchOpsExecutorOptions) BatchOpsExecutor {
+    if options == nil {
+        options = &SerialBatchOpsExecutorOptions{}
+    }
+    return &serialBatchOpsExecutor{options}
+}
+
+func (executor *serialBatchOpsExecutor) ExecuteBatchOps(ctx context.Context, operations []Operation, storage *apis.Storage) error {
+    ops := make([]*operation, len(operations))
+    for i, op := range operations {
+        ops[i] = &operation{Operation: op}
+    }
+    _, err := doOperations(ctx, ops, storage, executor.options.BatchSize, executor.options.RetryMax)
+    return err
+}
+
+// 创建并行批处理执行器
+func NewConcurrentBatchOpsExecutor(options *ConcurrentBatchOpsExecutorOptions) BatchOpsExecutor {
+    if options == nil {
+        options = &ConcurrentBatchOpsExecutorOptions{}
+    }
+    return &concurrentBatchOpsExecutor{options}
+}
+
+func (executor *concurrentBatchOpsExecutor) ExecuteBatchOps(ctx context.Context, operations []Operation, storage *apis.Storage) error {
+    rm, err := newRequestsManager(
+        storage,
+        executor.options.InitBatchSize,
+        executor.options.MinBatchSize,
+        executor.options.MaxBatchSize,
+        executor.options.DoublingFactor,
+        executor.options.RetryMax,
+        executor.options.DoublingInterval,
+        operations,
+    )
+    if err != nil {
+        return err
+    }
+    defer rm.done()
+    wm := newWorkersManager(
+        ctx,
+        executor.options.InitWorkers,
+        executor.options.MinWorkers,
+        executor.options.MaxWorkers,
+        executor.options.AddWorkerInterval,
+        rm,
+    )
+    return wm.wait()
+}
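Both constructors accept nil options and fall back to the defaults noted on the option structs. A usage sketch with placeholder keys and bucket names, assuming the ObjectsManager / Bucket / Object / Delete chain used by the tests later in this patch:

package main

import (
    "context"

    "github.com/qiniu/go-sdk/v7/storagev2/apis"
    "github.com/qiniu/go-sdk/v7/storagev2/credentials"
    "github.com/qiniu/go-sdk/v7/storagev2/http_client"
    "github.com/qiniu/go-sdk/v7/storagev2/objects"
)

func main() {
    // "ak"/"sk" and the bucket name are placeholders.
    storage := apis.NewStorage(&http_client.Options{
        Credentials: credentials.NewCredentials("ak", "sk"),
    })
    bucket := objects.NewObjectsManager(nil).Bucket("bucket1")

    ops := []objects.Operation{
        bucket.Object("object1").Delete(),
        bucket.Object("object2").Delete(),
    }

    // With nil options the executor starts at 250 operations per batch and
    // 20 workers, shrinking the batch on timeouts, shedding workers on 573,
    // and growing both back on the configured intervals.
    executor := objects.NewConcurrentBatchOpsExecutor(nil)
    if err := executor.ExecuteBatchOps(context.Background(), ops, storage); err != nil {
        panic(err)
    }
}

+
+func newRequestsManager(storage *apis.Storage, initBatchSize, minBatchSize, maxBatchSize, doublingFactor, maxTries uint, doublingInterval time.Duration, operations []Operation) (*requestsManager, error) {
+    if initBatchSize == 0 {
+        initBatchSize = 250
+    }
+    if minBatchSize == 0 {
+        minBatchSize = 50
+    }
+    if maxBatchSize == 0 {
+        maxBatchSize = 250
+    }
+    if maxBatchSize < minBatchSize {
+        maxBatchSize = minBatchSize
+    }
+    if initBatchSize < minBatchSize {
+        initBatchSize = minBatchSize
+    }
+    if initBatchSize > maxBatchSize {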
+        initBatchSize = maxBatchSize
+    }
+    if doublingFactor < 2 {
+        doublingFactor = 2
+    }
+    if doublingInterval == 0 {
+        doublingInterval = 1 * time.Minute
+    }
+    if maxTries == 0 {
+        maxTries = 10
+    }
+
+    sortedOperations, err := topoSort(operations)
+    if err != nil {
+        return nil, err
+    }
+
+    ctx, cancelFunc := internal_context.WithCancelCause(internal_context.Background())
+    rm := requestsManager{
+        storage: storage,
+        operations: wrapOperations(filterOperations(sortedOperations)),
+        batchSize: initBatchSize,
+        minBatchSize: minBatchSize,
+        maxBatchSize: maxBatchSize,
+        doublingFactor: doublingFactor,
+        doublingInterval: doublingInterval,
+        ticker: time.NewTicker(doublingInterval),
+        resetTicker: make(chan struct{}, 1024),
+        cancelTicker: cancelFunc,
+    }
+    sortOperations(rm.operations)
+
+    rm.waitGroup.Add(1)
+    go rm.asyncLoop(ctx)
+    return &rm, nil
+}
+
+func (rm *requestsManager) asyncLoop(ctx internal_context.Context) {
+    defer rm.waitGroup.Done()
+
+    for {
+        select {
+        case <-rm.resetTicker:
+            // do nothing
+        case <-rm.ticker.C:
+            rm.increaseBatchSize()
+        case <-ctx.Done():
+            return
+        }
+    }
+}
+
+func (rm *requestsManager) done() {
+    rm.cancelTicker(nil)
+    rm.ticker.Stop()
+    rm.waitGroup.Wait()
+}
+
+func (rm *requestsManager) takeOperations() []*operation {
+    rm.lock.Lock()
+    defer rm.lock.Unlock()
+
+    needed := int(rm.batchSize)
+    got := make([]*operation, 0, needed)
+    for needed > 0 && len(rm.operations) > 0 {
+        foundOperations, restOperations := findBestMatches(needed, rm.operations)
+        if len(foundOperations) == 0 {
+            break
+        }
+        rm.operations = restOperations
+        got = append(got, foundOperations...)
+        needed -= len(foundOperations)
+    }
+    if len(got) == 0 && needed > 0 && len(rm.operations) > 0 { // 一个都没获得,但依然有剩余的操作还没能获取,说明所有剩余的操作组的大小都大于 batchSize
+        got = rm.operations[0]
+        rm.operations = rm.operations[1:]
+    }
+    return got
+}
+
+func (rm *requestsManager) putBackOperations(operations []*operation) {
+    if len(operations) == 0 {
+        return
+    }
+
+    rm.lock.Lock()
+    defer rm.lock.Unlock()
+
+    rm.operations = append(rm.operations, operations)
+    sortOperations(rm.operations)
+}
+
+func (rm *requestsManager) isOperationsEmpty() bool {
+    rm.lock.Lock()
+    defer rm.lock.Unlock()
+
+    return len(rm.operations) == 0
+}
+
+func (rm *requestsManager) handleTimeoutError() {
+    rm.lastDecreaseBatchSizeTimeMutex.Lock()
+    defer rm.lastDecreaseBatchSizeTimeMutex.Unlock()
+
+    canDecrease := time.Since(rm.lastDecreaseBatchSizeTime) > time.Second
+    if canDecrease {
+        rm.decreaseBatchSize()
+        rm.lastDecreaseBatchSizeTime = time.Now()
+    }
+}
+
+func (rm *requestsManager) decreaseBatchSize() {
+    rm.lock.Lock()
+    defer rm.lock.Unlock()
+
+    batchSize := rm.batchSize / rm.doublingFactor
+    if batchSize < rm.minBatchSize {
+        batchSize = rm.minBatchSize
+    }
+    rm.batchSize = batchSize
+    rm.ticker.Stop()
+    rm.ticker = time.NewTicker(rm.doublingInterval)
+    rm.resetTicker <- struct{}{}
+}
+
+func (rm *requestsManager) increaseBatchSize() {
+    rm.lock.Lock()
+    defer rm.lock.Unlock()
+
+    batchSize := rm.batchSize * rm.doublingFactor
+    if batchSize > rm.maxBatchSize {
+        batchSize = rm.maxBatchSize
+    }
+    rm.batchSize = batchSize
+}
+
+func newWorkersManager(ctx internal_context.Context, initWorkers, minWorkers, maxWorkers uint, addWorkerInterval time.Duration, requestsManager *requestsManager) *workersManager {
+    if initWorkers == 0 {
+        initWorkers = 20
+    }
+    if minWorkers == 0 {
+        minWorkers = 1
+    }
+    if maxWorkers == 0 {
+        maxWorkers = 20
+    }
+    if maxWorkers < minWorkers {
+        maxWorkers = minWorkers
+    }
+    if initWorkers < 
minWorkers { + initWorkers = minWorkers + } + if initWorkers > maxWorkers { + initWorkers = maxWorkers + } + if addWorkerInterval == 0 { + addWorkerInterval = 1 * time.Minute + } + wm := new(workersManager) + wm.parentCtx, wm.parentCancelFunc = internal_context.WithCancelCause(ctx) + wm.requestsManager = requestsManager + wm.cancels = make([]internal_context.CancelCauseFunc, initWorkers) + wm.maxWorkers = maxWorkers + wm.minWorkers = minWorkers + wm.addWorkerInterval = addWorkerInterval + wm.ticker = time.NewTicker(addWorkerInterval) + wm.resetTicker = make(chan struct{}, 1024) + + var timerTickerCtx internal_context.Context + timerTickerCtx, wm.cancelTickerFunc = internal_context.WithCancelCause(wm.parentCtx) + + wm.timerWaitGroup.Add(1) + go wm.asyncAddWorkersLoop(timerTickerCtx) + + for i := uint(0); i < initWorkers; i++ { + workerCtx, workerCancelFunc := internal_context.WithCancelCause(wm.parentCtx) + wm.cancels[i] = workerCancelFunc + wm.asyncWorkersWaitGroup.Add(1) + go wm.asyncWorker(workerCtx, i) + } + return wm +} + +func (wm *workersManager) asyncAddWorkersLoop(ctx internal_context.Context) { + defer wm.timerWaitGroup.Done() + + for { + select { + case <-wm.resetTicker: + // do nothing + case _, ok := <-wm.ticker.C: + if !ok { + return + } + if wm.getWorkersCount() < wm.maxWorkers { + wm.spawnWorker() + } + case <-ctx.Done(): + return + } + } +} + +func (wm *workersManager) wait() error { + wm.asyncWorkersWaitGroup.Wait() + wm.ticker.Stop() + wm.cancelTickerFunc(nil) + wm.timerWaitGroup.Wait() + if wm.requestsManager.isOperationsEmpty() { + return wm.parentCtx.Err() + } + return wm.doOperationsSync() +} + +func (wm *workersManager) doOperationsSync() error { + for { + if err := getCtxError(wm.parentCtx); err != nil { + return err + } + if operations := wm.requestsManager.takeOperations(); len(operations) > 0 { + if operations, err := wm.doOperations(wm.parentCtx, operations); err != nil { + wm.requestsManager.putBackOperations(operations) + if isTimeoutError(err) { + wm.requestsManager.handleTimeoutError() + } else { + wm.setError(err) + return err + } + } + } else { + return nil + } + } +} + +func (wm *workersManager) asyncWorker(ctx internal_context.Context, id uint) { + defer wm.asyncWorkersWaitGroup.Done() + for getCtxError(wm.parentCtx) == nil { + if operations := wm.requestsManager.takeOperations(); len(operations) > 0 { + if operations, err := wm.doOperations(ctx, operations); err != nil { + // 确定错误是否可以重试 + wm.requestsManager.putBackOperations(operations) + if isTimeoutError(err) { // 超时,说明 batchSize 过大 + wm.requestsManager.handleTimeoutError() + } else if isOutOfQuotaError(err) { + // 并发度过高,自杀减少并发度 + if wm.handleOutOfQuotaError(id, err) { + return + } + } else { + wm.setError(err) + return + } + } + } else { + break + } + } +} + +func (wm *workersManager) handleOutOfQuotaError(id uint, err error) bool { + wm.lastResetTickerTimeMutex.Lock() + defer wm.lastResetTickerTimeMutex.Unlock() + + canReset := time.Since(wm.lastResetTickerTime) > time.Second + if canReset { // 这里禁止并行杀死 worker,防止杀死速度过快 + wm.ticker.Stop() + wm.ticker = time.NewTicker(wm.addWorkerInterval) + wm.resetTicker <- struct{}{} + wm.killWorker(id, err) + wm.lastResetTickerTime = time.Now() + } + return canReset +} + +func (wm *workersManager) doOperations(ctx internal_context.Context, operations []*operation) ([]*operation, error) { + return doOperations(ctx, operations, wm.requestsManager.storage, wm.requestsManager.batchSize, wm.requestsManager.maxTries) +} + +func (wm *workersManager) setError(err error) { 
+ wm.parentCancelFunc(err) +} + +func (wm *workersManager) getWorkersCount() (count uint) { + wm.lock.Lock() + defer wm.lock.Unlock() + + for _, c := range wm.cancels { + if c != nil { + count += 1 + } + } + return +} + +func (wm *workersManager) killWorker(id uint, err error) { + wm.lock.Lock() + defer wm.lock.Unlock() + + cancelFunc := wm.cancels[id] + cancelFunc(err) + wm.cancels[id] = nil +} + +func (wm *workersManager) spawnWorker() { + wm.lock.Lock() + defer wm.lock.Unlock() + + workerCtx, workerCancelFunc := internal_context.WithCancelCause(wm.parentCtx) + for id := range wm.cancels { + if wm.cancels[id] == nil { + wm.cancels[id] = workerCancelFunc + go wm.asyncWorker(workerCtx, uint(id)) + return + } + } + wm.cancels = append(wm.cancels, workerCancelFunc) + go wm.asyncWorker(workerCtx, uint(len(wm.cancels)-1)) +} + +func doOperations(ctx internal_context.Context, operations []*operation, storage *apis.Storage, batchSize, maxTries uint) ([]*operation, error) { + if batchSize == 0 { + batchSize = 1000 + } + if maxTries == 0 { + maxTries = 10 + } + for len(operations) > 0 { + thisBatchSize := batchSize + if thisBatchSize > uint(len(operations)) { + thisBatchSize = uint(len(operations)) + } + toDoThisLoop := operations[:thisBatchSize] + willDoNextLoop := make([]*operation, 0, thisBatchSize) + bucketName := toDoThisLoop[0].relatedEntries()[0].bucketName + + operationsStrings := make([]string, len(toDoThisLoop)) + for i, operation := range toDoThisLoop { + operationsStrings[i] = operation.String() + } + + response, err := storage.BatchOps(ctx, &apis.BatchOpsRequest{ + Operations: operationsStrings, + }, &apis.Options{ + OverwrittenBucketName: bucketName, + }) + if err != nil { + return operations, err + } + for i, operationResponse := range response.OperationResponses { + operation := toDoThisLoop[i] + if operationResponse.Code == 200 { + var object ObjectDetails + if err = object.fromOperationResponseData(operation.relatedEntries()[0].objectName, &operationResponse.Data); err != nil { + operation.handleResponse(nil, err) + continue + } + operation.handleResponse(&object, nil) + } else { + operation.handleResponse(nil, errors.New(operationResponse.Data.Error)) + operation.tries += 1 + if retrier.IsStatusCodeRetryable(int(operationResponse.Code)) && operation.tries < maxTries { + willDoNextLoop = append(willDoNextLoop, operation) + } + } + } + if thisBatchSize >= batchSize { + willDoNextLoop = append(willDoNextLoop, operations[thisBatchSize:]...) + } + operations = willDoNextLoop + } + return nil, nil +} + +func (object *ObjectDetails) fromOperationResponseData(key string, data *batch_ops.OperationResponseData) error { + var ( + md5 []byte + err error + ) + object.Name = key + object.UploadedAt = time.Unix(data.PutTime/1e7, (data.PutTime%1e7)*1e2) + object.ETag = data.Hash + object.Size = data.Size + object.MimeType = data.MimeType + object.StorageClass = StorageClass(data.Type) + object.EndUser = data.EndUser + object.Status = Status(data.Status) + object.RestoreStatus = RestoreStatus(data.RestoringStatus) + object.Metadata = data.Metadata + if data.Md5 != "" { + md5, err = hex.DecodeString(data.Md5) + if err != nil { + return err + } + } + if len(md5) > 0 { + copy(object.MD5[:], md5) + } + if len(data.Parts) > 0 { + object.Parts = append(make(stat_object.PartSizes, 0, len(data.Parts)), data.Parts...) 
+ } + if data.TransitionToIaTime > 0 { + transitionToIA := time.Unix(data.TransitionToIaTime, 0) + object.TransitionToIA = &transitionToIA + } + if data.TransitionToArchiveIrTime > 0 { + transitionToArchiveIR := time.Unix(data.TransitionToArchiveIrTime, 0) + object.TransitionToArchiveIR = &transitionToArchiveIR + } + if data.TransitionToArchiveTime > 0 { + transitionToArchive := time.Unix(data.TransitionToArchiveTime, 0) + object.TransitionToArchive = &transitionToArchive + } + if data.TransitionToDeepArchiveTime > 0 { + transitionToDeepArchive := time.Unix(data.TransitionToDeepArchiveTime, 0) + object.TransitionToDeepArchive = &transitionToDeepArchive + } + if data.ExpirationTime > 0 { + expireAt := time.Unix(data.ExpirationTime, 0) + object.ExpireAt = &expireAt + } + return nil +} + +type groups struct { + keyGroups []map[string]struct{} + keyIndexes map[string]int + emptyGroupIndexes []int +} + +func findKeyFromGroups(g *groups, key string) (int, bool) { + idx, ok := g.keyIndexes[key] + return idx, ok +} + +func addKeyToGroup(g *groups, key string) { + if _, ok := findKeyFromGroups(g, key); ok { + return + } + appendKeyToGroup(g, key) +} + +func appendKeyToGroup(g *groups, keys ...string) { + foundIndex := -1 + if len(g.emptyGroupIndexes) > 0 { + lastIdx := len(g.emptyGroupIndexes) - 1 + foundIndex = g.emptyGroupIndexes[lastIdx] + g.emptyGroupIndexes = g.emptyGroupIndexes[:lastIdx] + } + newKeyGroup := make(map[string]struct{}, len(keys)) + for _, key := range keys { + newKeyGroup[key] = struct{}{} + } + + if foundIndex < 0 { + foundIndex = len(g.keyGroups) + g.keyGroups = append(g.keyGroups, newKeyGroup) + } else { + g.keyGroups[foundIndex] = newKeyGroup + } + for _, key := range keys { + g.keyIndexes[key] = foundIndex + } +} + +func connectGroup(g *groups, key1, key2 string) { + k1, ok := findKeyFromGroups(g, key1) + if !ok { + k1 = -1 + } + k2, ok := findKeyFromGroups(g, key2) + if !ok { + k2 = -1 + } + if k1 == k2 { + if k1 < 0 { + appendKeyToGroup(g, key1, key2) + } + } else if k1 < 0 { + g.keyGroups[k2][key1] = struct{}{} + g.keyIndexes[key1] = k2 + } else if k2 < 0 { + g.keyGroups[k1][key2] = struct{}{} + g.keyIndexes[key2] = k1 + } else { + for k := range g.keyGroups[k2] { + g.keyGroups[k1][k] = struct{}{} + g.keyIndexes[k] = k1 + } + g.keyGroups[k2] = nil + g.emptyGroupIndexes = append(g.emptyGroupIndexes, k2) + } +} + +func topoSort(operations []Operation) ([][]Operation, error) { + var ( + edges = make([]toposort.Edge, 0, len(operations)) + rootNodesMap = make(map[string]int, len(operations)*2) + g = groups{keyIndexes: make(map[string]int)} + ) + + for operationId, operation := range operations { + if operation == nil { + continue + } + edges = append(edges, toposort.Edge{nil, operationId}) + + var firstKey string + for _, relatedEntry := range operation.relatedEntries() { + key := relatedEntry.String() + if oldRootOperationId, ok := rootNodesMap[key]; ok { + edges = append(edges, toposort.Edge{oldRootOperationId, operationId}) + } + rootNodesMap[key] = operationId + + addKeyToGroup(&g, key) + if firstKey == "" { + firstKey = key + } else { + connectGroup(&g, firstKey, key) + } + } + } + + sortedOperationIds, err := toposort.Toposort(edges) + if err != nil { + return nil, err + } + + groupedOperations := make([][]Operation, len(g.keyGroups)) + for _, sortedOperationId := range sortedOperationIds { + operation := operations[sortedOperationId.(int)] + operationKey := operation.relatedEntries()[0].String() + index, ok := findKeyFromGroups(&g, operationKey) + if !ok { + 
panic(fmt.Sprintf("failed to find key `%s`, which is unexpected", operationKey)) + } + groupedOperations[index] = append(groupedOperations[index], operation) + } + return groupedOperations, nil +} + +func sortOperations(operations [][]*operation) { + sort.Slice(operations, func(i, j int) bool { + return len(operations[i]) < len(operations[j]) + }) +} + +func filterOperations(operationsGroups [][]Operation) [][]Operation { + results := make([][]Operation, 0, len(operationsGroups)) + for _, operationsGroup := range operationsGroups { + if len(operationsGroup) == 0 { + continue + } + results = append(results, operationsGroup) + } + return results +} + +func wrapOperations(operationsGroups [][]Operation) [][]*operation { + results := make([][]*operation, len(operationsGroups)) + for groupId, operationsGroup := range operationsGroups { + groupResults := make([]*operation, len(operationsGroup)) + for opId, op := range operationsGroup { + groupResults[opId] = &operation{Operation: op} + } + results[groupId] = groupResults + } + return results +} + +func findBestMatches(size int, operations [][]*operation) ([]*operation, [][]*operation) { + var lastIdx int = -1 + sort.Search(len(operations), func(idx int) bool { + if len(operations[idx]) <= size { + lastIdx = idx + } + return size <= len(operations[idx]) + }) + if lastIdx < 0 { + return nil, operations + } + bestMatches := operations[lastIdx] + if len(bestMatches) > size { + return nil, operations + } + return bestMatches, append(operations[:lastIdx], operations[lastIdx+1:]...) +} + +func getCtxError(ctx internal_context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + return nil + } +} + +func isTimeoutError(err error) bool { + if err == context.DeadlineExceeded { + return false + } else if os.IsTimeout(err) { + return true + } else if clientErr, ok := unwrapUnderlyingError(err).(*clientv1.ErrorInfo); ok { + if clientErr.Code == 504 { + return true + } + } + return false +} + +func isOutOfQuotaError(err error) bool { + if clientErr, ok := unwrapUnderlyingError(err).(*clientv1.ErrorInfo); ok { + if clientErr.Code == 573 { + return true + } + } + return false +} + +func tryToUnwrapUnderlyingError(err error) (error, bool) { + switch err := err.(type) { + case *os.PathError: + return err.Err, true + case *os.LinkError: + return err.Err, true + case *os.SyscallError: + return err.Err, true + case *url.Error: + return err.Err, true + case *net.OpError: + return err.Err, true + } + return err, false +} + +func unwrapUnderlyingError(err error) error { + ok := true + for ok { + err, ok = tryToUnwrapUnderlyingError(err) + } + return err +} diff --git a/storagev2/objects/batch_test.go b/storagev2/objects/batch_test.go new file mode 100644 index 00000000..99363f3b --- /dev/null +++ b/storagev2/objects/batch_test.go @@ -0,0 +1,493 @@ +//go:build unit +// +build unit + +package objects + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/apis/batch_ops" + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" +) + +func TestTopoSort(t *testing.T) { + objectsManager := NewObjectsManager(nil) + object1 := objectsManager.Bucket("bucket1").Object("object1") + object2 := objectsManager.Bucket("bucket2").Object("object2") + object3 := 
objectsManager.Bucket("bucket3").Object("object3") + object4 := objectsManager.Bucket("bucket4").Object("object4") + operations := []Operation{ + object4.Stat(), + object1.Stat(), + object2.Stat(), + object3.Stat(), + } + assertTopoSort(t, operations, [][]Operation{{operations[0]}, {operations[1]}, {operations[2]}, {operations[3]}}) + operations = []Operation{ + object4.Stat(), + object1.Stat(), + object2.Stat(), + object3.Stat(), + object1.SetLifeCycle().DeleteAfterDays(1), + object2.SetLifeCycle().DeleteAfterDays(1), + object1.CopyTo("bucket2", "object2"), + object2.CopyTo("bucket3", "object3"), + } + assertTopoSort(t, operations, [][]Operation{{operations[0]}, operations[1:]}) + operations = []Operation{ + object1.Stat(), + object2.Stat(), + object3.Stat(), + object1.SetLifeCycle().DeleteAfterDays(1), + object2.SetLifeCycle().DeleteAfterDays(2), + object4.CopyTo("bucket2", "object2"), + object2.CopyTo("bucket3", "object3"), + } + assertTopoSort(t, operations, + [][]Operation{ + {operations[0], operations[3]}, + {operations[1], operations[2], operations[4], operations[5], operations[6]}, + }) + operations = []Operation{ + object4.Stat(), + object1.Stat(), + object2.Stat(), + object3.Stat(), + object1.SetLifeCycle().DeleteAfterDays(1), + object2.SetLifeCycle().DeleteAfterDays(1), + object1.CopyTo("bucket2", "object2"), + object2.CopyTo("bucket3", "object3"), + object4.CopyTo("bucket3", "object3"), + } + assertTopoSort(t, operations, [][]Operation{operations}) +} + +func assertTopoSort(t *testing.T, operations []Operation, operationsGroups [][]Operation) { + sortedGroups, err := topoSort(operations) + if err != nil { + t.Fatal(err) + } +next: + for _, actual := range filterOperations(sortedGroups) { + for _, expected := range operationsGroups { + if isOperationsEqual(actual, expected) { + continue next + } + } + t.Fatalf("failed to match topo sort result") + } +} + +func isOperationsEqual(operations1, operations2 []Operation) bool { + m := make(map[string]struct{}, len(operations1)) + for _, operation := range operations1 { + m[operation.String()] = struct{}{} + } + for _, operation := range operations2 { + if _, ok := m[operation.String()]; ok { + delete(m, operation.String()) + } else { + return false + } + } + return true +} + +func TestDoOperations(t *testing.T) { + objectNamesDeleteCounts := make(map[string]uint) + objectNamesResponsedCounts := make(map[string]uint) + mux := http.NewServeMux() + mux.HandleFunc("/batch", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + t.Fatalf("unexpected method") + } + if err := r.ParseForm(); err != nil { + t.Fatal(err) + } + responses := make([]batch_ops.OperationResponse, 0, len(r.PostForm["op"])) + for _, op := range r.PostForm["op"] { + if !strings.HasPrefix(op, "delete/") { + t.Fatalf("unexpected op: %s", op) + } + op = strings.TrimPrefix(op, "delete/") + entryBytes, err := base64.URLEncoding.DecodeString(op) + if err != nil { + t.Fatal(err) + } + if !strings.HasPrefix(string(entryBytes), "bucket1:") { + t.Fatalf("unexpected op entry: %s", entryBytes) + } + objectName := strings.TrimPrefix(string(entryBytes), "bucket1:") + objectNamesDeleteCounts[objectName] += 1 + responses = append(responses, batch_ops.OperationResponse{Code: 200}) + } + respBody, err := json.Marshal(&batch_ops.Response{ + OperationResponses: responses, + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(respBody) + }) + server := httptest.NewServer(mux) + defer server.Close() + + objectsManager := 
NewObjectsManager(nil)
+    bucket1 := objectsManager.Bucket("bucket1")
+
+    operations := make([]*operation, 20)
+    for i := 0; i < 20; i++ {
+        objectName := fmt.Sprintf("object_%02d", i)
+        operations[i] = &operation{Operation: bucket1.Object(objectName).Delete().OnResponse(func() {
+            objectNamesResponsedCounts[objectName] += 1
+        }).OnError(func(err error) {
+            t.Fatal(err)
+        })}
+    }
+
+    operations, err := doOperations(context.Background(), operations, apis.NewStorage(&http_client.Options{
+        Credentials: credentials.NewCredentials("testak", "testsk"),
+        Regions: &region.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}},
+    }), 10, 3)
+    if err != nil {
+        t.Fatal(err)
+    } else if len(operations) > 0 {
+        t.Fatalf("unexpected operations returned")
+    }
+    if len(objectNamesDeleteCounts) != 20 {
+        t.Fatalf("unexpected object names deleted count map")
+    }
+    for _, count := range objectNamesDeleteCounts {
+        if count != 1 {
+            t.Fatalf("unexpected objects deleted count")
+        }
+    }
+    if len(objectNamesResponsedCounts) != 20 {
+        t.Fatalf("unexpected object names responded map")
+    }
+    for _, count := range objectNamesResponsedCounts {
+        if count != 1 {
+            t.Fatalf("unexpected objects responded count")
+        }
+    }
+}
+
+func TestDoOperationsRetries(t *testing.T) {
+    objectNames := make([]string, 0, 20)
+    for i := 0; i < 20; i++ {
+        objectNames = append(objectNames, fmt.Sprintf("object%02d", i))
+    }
+    objectNamesDeleteCounts := make(map[string]uint)
+    objectNamesResponsedCounts := make(map[string]uint)
+    mux := http.NewServeMux()
+    mux.HandleFunc("/batch", func(w http.ResponseWriter, r *http.Request) {
+        if r.Method != http.MethodPost {
+            t.Fatalf("unexpected method")
+        }
+        if err := r.ParseForm(); err != nil {
+            t.Fatal(err)
+        }
+        responses := make([]batch_ops.OperationResponse, 0, len(r.PostForm["op"]))
+        for _, op := range r.PostForm["op"] {
+            if !strings.HasPrefix(op, "delete/") {
+                t.Fatalf("unexpected op: %s", op)
+            }
+            op = strings.TrimPrefix(op, "delete/")
+            entryBytes, err := base64.URLEncoding.DecodeString(op)
+            if err != nil {
+                t.Fatal(err)
+            }
+            if !strings.HasPrefix(string(entryBytes), "bucket1:") {
+                t.Fatalf("unexpected op entry: %s", entryBytes)
+            }
+            objectName := strings.TrimPrefix(string(entryBytes), "bucket1:")
+            objectNamesDeleteCounts[objectName] += 1
+            responses = append(responses, batch_ops.OperationResponse{Code: 599, Data: batch_ops.OperationResponseData{Error: "test error"}})
+        }
+        respBody, err := json.Marshal(&batch_ops.Response{
+            OperationResponses: responses,
+        })
+        if err != nil {
+            t.Fatal(err)
+        }
+        w.Header().Add("X-ReqId", "fakereqid")
+        w.Write(respBody)
+    })
+    server := httptest.NewServer(mux)
+    defer server.Close()
+
+    objectsManager := NewObjectsManager(nil)
+    bucket1 := objectsManager.Bucket("bucket1")
+
+    operations := make([]*operation, len(objectNames))
+    for i, objectName := range objectNames {
+        thisObjectName := objectName
+        operations[i] = &operation{Operation: bucket1.Object(thisObjectName).Delete().OnResponse(func() {
+            t.Fatalf("unexpected response")
+        }).OnError(func(err error) {
+            objectNamesResponsedCounts[thisObjectName] += 1
+        })}
+    }
+
+    operations, err := doOperations(context.Background(), operations, apis.NewStorage(&http_client.Options{
+        Credentials: credentials.NewCredentials("testak", "testsk"),
+        Regions: &region.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}},
+    }), 10, 3)
+    if err != nil {
+        t.Fatal(err)
+    } else if len(operations) > 0 {
+        t.Fatalf("unexpected operations returned")
+    }
+    if len(objectNamesDeleteCounts) != 20 {
+        t.Fatalf("unexpected object names delete count map")
+    }
+    for _, count := range objectNamesDeleteCounts {
+        if count != 3 {
+            t.Fatalf("unexpected objects delete count")
+        }
+    }
+    if len(objectNamesResponsedCounts) != 20 {
+        t.Fatalf("unexpected object names responded count map")
+    }
+    for _, count := range objectNamesResponsedCounts {
+        if count != 3 {
+            t.Fatalf("unexpected objects responded count")
+        }
+    }
+}
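The test above relies on 599 being classified as retryable: doOperations re-queues a failed operation until its tries counter reaches maxTries, so with maxTries set to 3 every object is sent exactly three times and OnError fires on each attempt. A small runnable sketch of that accounting (the retryable flag stands in for what retrier.IsStatusCodeRetryable reports for 599):

package main

import "fmt"

func main() {
    // Mirrors the bookkeeping in doOperations: a failed operation is
    // re-queued while its error code is retryable and tries < maxTries.
    const maxTries = 3
    retryable := true // stand-in for retrier.IsStatusCodeRetryable(599)

    attempts, tries := 0, uint(0)
    for {
        attempts++ // one /batch round trip containing this operation
        tries++
        if !(retryable && tries < maxTries) {
            break
        }
    }
    fmt.Println(attempts) // 3 — matching the per-object counts asserted above
}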
t.Fatalf("unexpected object names delete count map") + } + for _, count := range objectNamesDeleteCounts { + if count != 3 { + t.Fatalf("unexpected objects delete count") + } + } + if len(objectNamesResponsedCounts) != 20 { + t.Fatalf("unexpected object names responsed count map") + } + for _, count := range objectNamesResponsedCounts { + if count != 3 { + t.Fatalf("unexpected objects responsed count") + } + } +} + +func TestDoOperationsDontRetry(t *testing.T) { + objectNames := make([]string, 0, 20) + for i := 0; i < 20; i++ { + objectNames = append(objectNames, fmt.Sprintf("object%02d", i)) + } + objectNamesDeleteCounts := make(map[string]uint) + objectNamesResponsedCounts := make(map[string]uint) + mux := http.NewServeMux() + mux.HandleFunc("/batch", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + t.Fatalf("unexpected method") + } + if err := r.ParseForm(); err != nil { + t.Fatal(err) + } + responses := make([]batch_ops.OperationResponse, 0, len(r.PostForm["op"])) + for _, op := range r.PostForm["op"] { + if !strings.HasPrefix(op, "delete/") { + t.Fatalf("unexpected op: %s", op) + } + op = strings.TrimPrefix(op, "delete/") + entryBytes, err := base64.URLEncoding.DecodeString(op) + if err != nil { + t.Fatal(err) + } + if !strings.HasPrefix(string(entryBytes), "bucket1:") { + t.Fatalf("unexpected op entry: %s", entryBytes) + } + objectName := strings.TrimPrefix(string(entryBytes), "bucket1:") + objectNamesDeleteCounts[objectName] += 1 + responses = append(responses, batch_ops.OperationResponse{Code: 614}) + } + respBody, err := json.Marshal(&batch_ops.Response{ + OperationResponses: responses, + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(respBody) + }) + server := httptest.NewServer(mux) + defer server.Close() + + objectsManager := NewObjectsManager(nil) + bucket1 := objectsManager.Bucket("bucket1") + + operations := make([]*operation, len(objectNames)) + for i, objectName := range objectNames { + thisObjectName := objectName + operations[i] = &operation{Operation: bucket1.Object(thisObjectName).Delete().OnResponse(func() { + t.Fatalf("unexpected responsed") + }).OnError(func(err error) { + objectNamesResponsedCounts[thisObjectName] += 1 + })} + } + + operations, err := doOperations(context.Background(), operations, apis.NewStorage(&http_client.Options{ + Credentials: credentials.NewCredentials("testak", "testsk"), + Regions: ®ion.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}}, + }), 10, 3) + if err != nil { + t.Fatal(err) + } else if len(operations) > 0 { + t.Fatalf("unexpected operations returned") + } + if len(objectNamesDeleteCounts) != 20 { + t.Fatalf("unexpected object names deleted count map") + } + for _, count := range objectNamesDeleteCounts { + if count != 1 { + t.Fatalf("unexpected objects deleted count") + } + } + if len(objectNamesResponsedCounts) != 20 { + t.Fatalf("unexpected object names responsed count map") + } + for _, count := range objectNamesResponsedCounts { + if count != 1 { + t.Fatalf("unexpected objects responsed count") + } + } +} + +func TestRequestManagerGetOperations(t *testing.T) { + objectsManager := NewObjectsManager(nil) + bucket1 := objectsManager.Bucket("bucket1") + object1 := bucket1.Object("object1") + object2 := bucket1.Object("object2") + object3 := bucket1.Object("object3") + + requestsManager, err := newRequestsManager(apis.NewStorage(nil), 4, 4, 4, 2, 1, 1*time.Minute, []Operation{ + object1.Stat(), + object1.Stat(), + object1.Stat(), + 
+
+func TestRequestManagerGetOperations(t *testing.T) {
+	objectsManager := NewObjectsManager(nil)
+	bucket1 := objectsManager.Bucket("bucket1")
+	object1 := bucket1.Object("object1")
+	object2 := bucket1.Object("object2")
+	object3 := bucket1.Object("object3")
+
+	requestsManager, err := newRequestsManager(apis.NewStorage(nil), 4, 4, 4, 2, 1, 1*time.Minute, []Operation{
+		object1.Stat(),
+		object1.Stat(),
+		object1.Stat(),
+		object1.Stat(),
+		object1.Stat(),
+		object2.Stat(),
+		object2.Stat(),
+		object2.Stat(),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if operations := requestsManager.takeOperations(); len(operations) != 3 {
+		t.Fatalf("unexpected operations count")
+	}
+	if operations := requestsManager.takeOperations(); len(operations) != 5 {
+		t.Fatalf("unexpected operations count")
+	}
+
+	requestsManager, err = newRequestsManager(apis.NewStorage(nil), 4, 4, 4, 2, 1, 1*time.Minute, []Operation{
+		object1.Stat(),
+		object1.Stat(),
+		object1.Stat(),
+		object1.Stat(),
+		object1.Stat(),
+		object2.Stat(),
+		object2.Stat(),
+		object2.Stat(),
+		object3.Stat(),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if operations := requestsManager.takeOperations(); len(operations) != 4 {
+		t.Fatalf("unexpected operations count")
+	}
+	if operations := requestsManager.takeOperations(); len(operations) != 5 {
+		t.Fatalf("unexpected operations count")
+	}
+}
+
+func TestRequestManagerBatchSize(t *testing.T) {
+	objectsManager := NewObjectsManager(nil)
+	bucket1 := objectsManager.Bucket("bucket1")
+	operations := make([]Operation, 0, 10000)
+	for i := 0; i < 10000; i++ {
+		operations = append(operations, bucket1.Object(fmt.Sprintf("object_%04d", i)).Stat())
+	}
+
+	const interval = 1 * time.Second
+	requestsManager, err := newRequestsManager(apis.NewStorage(nil), 256, 1, 1000, 2, 1, interval, operations)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer requestsManager.done()
+	time.Sleep(100 * time.Millisecond)
+
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	if size := len(requestsManager.takeOperations()); size != 256 {
+		t.Fatalf("unexpected operations count, actual: %d, expected: %d", size, 256)
+	}
+	<-ticker.C
+	requestsManager.decreaseBatchSize()
+	ticker = time.NewTicker(interval)
+	if size := len(requestsManager.takeOperations()); size != 256 {
+		t.Fatalf("unexpected operations count, actual: %d, expected: %d", size, 256)
+	}
+	for i := 0; i < 3; i++ {
+		<-ticker.C
+	}
+	if size := len(requestsManager.takeOperations()); size != 1000 {
+		t.Fatalf("unexpected operations count, actual: %d, expected: %d", size, 1000)
+	}
+	for i := 0; i < 10; i++ {
+		requestsManager.decreaseBatchSize()
+	}
+	ticker = time.NewTicker(interval)
+	if size := len(requestsManager.takeOperations()); size != 1 {
+		t.Fatalf("unexpected operations count, actual: %d, expected: %d", size, 1)
+	}
+	for i := 0; i < 11; i++ {
+		<-ticker.C
+	}
+	if size := len(requestsManager.takeOperations()); size != 1000 {
+		t.Fatalf("unexpected operations count, actual: %d, expected: %d", size, 1000)
+	}
+}
+
+func TestWorkersManagerDoOperations573(t *testing.T) {
+	objectNames := make([]string, 100000)
+	for i := 0; i < 100000; i++ {
+		objectNames[i] = fmt.Sprintf("object%05d", i)
+	}
+	mux := http.NewServeMux()
+	mux.HandleFunc("/batch", func(w http.ResponseWriter, r *http.Request) {
+		if r.Method != http.MethodPost {
+			t.Fatalf("unexpected method")
+		}
+		if err := r.ParseForm(); err != nil {
+			t.Fatal(err)
+		}
+		w.Header().Add("X-ReqId", "fakereqid")
+		w.WriteHeader(573)
+	})
+	server := httptest.NewServer(mux)
+	defer server.Close()
+
+	objectsManager := NewObjectsManager(&ObjectsManagerOptions{})
+	bucket1 := objectsManager.Bucket("bucket1")
+
+	operations := make([]Operation, 100000)
+	for i := 0; i < 100000; i++ {
+		objectName := fmt.Sprintf("object_%05d", i)
+		operations[i] = bucket1.Object(objectName).Delete()
+	}
+
+	requestsManager, err := newRequestsManager(apis.NewStorage(&http_client.Options{
+		Credentials: credentials.NewCredentials("ak", "sk"),
+		Regions:     &region.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}},
+	}), 100, 100, 100, 2, 1, 1*time.Minute, operations)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer requestsManager.done()
+
+	workersManager := newWorkersManager(context.Background(), 10, 10, 10, 1*time.Minute, requestsManager)
+	workersManager.wait()
+}
diff --git a/storagev2/objects/bucket.go b/storagev2/objects/bucket.go
new file mode 100644
index 00000000..e41c7134
--- /dev/null
+++ b/storagev2/objects/bucket.go
@@ -0,0 +1,57 @@
+package objects
+
+import (
+	"context"
+	"strings"
+)
+
+type (
+	// Storage bucket
+	Bucket struct {
+		name           string
+		objectsManager *ObjectsManager
+	}
+
+	// Options for listing objects
+	ListObjectsOptions struct {
+		Limit     *uint64 // Maximum number of objects to list
+		Prefix    string  // Key prefix
+		Marker    string  // Marker to resume listing from
+		NeedParts bool    // Whether part sizes are needed
+	}
+)
+
+// Name returns the bucket name
+func (bucket *Bucket) Name() string {
+	return bucket.name
+}
+
+// Object gets an object of the bucket
+func (bucket *Bucket) Object(name string) *Object {
+	return &Object{bucket, name}
+}
+
+// Directory gets a directory of the bucket
+func (bucket *Bucket) Directory(prefix, pathSeparator string) *Directory {
+	if pathSeparator == "" {
+		pathSeparator = "/"
+	}
+	if prefix != "" && !strings.HasSuffix(prefix, pathSeparator) {
+		prefix += pathSeparator
+	}
+	return &Directory{bucket, prefix, pathSeparator}
+}
+
+// List lists the objects of the bucket
+func (bucket *Bucket) List(ctx context.Context, options *ListObjectsOptions) Lister {
+	if options == nil {
+		options = &ListObjectsOptions{}
+	}
+
+	switch bucket.objectsManager.listerVersion {
+	case ListerVersionV1:
+		fallthrough
+	default:
+		return newListerV1(ctx, bucket, options)
+	}
+}
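Taken together with the `Lister` interface added below, `Bucket.List` gives a pull-style iterator over a bucket. A minimal usage sketch of the new API (access key, secret key, bucket name, and prefix are placeholders; error handling is abbreviated):

package main

import (
	"context"
	"fmt"

	"github.com/qiniu/go-sdk/v7/storagev2/credentials"
	"github.com/qiniu/go-sdk/v7/storagev2/http_client"
	"github.com/qiniu/go-sdk/v7/storagev2/objects"
)

func main() {
	objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{
		Options: http_client.Options{
			Credentials: credentials.NewCredentials("<ACCESS_KEY>", "<SECRET_KEY>"),
		},
	})
	bucket := objectsManager.Bucket("<BUCKET_NAME>")

	// Iterate over all objects under a prefix.
	lister := bucket.List(context.Background(), &objects.ListObjectsOptions{Prefix: "logs/"})
	defer lister.Close()

	var object objects.ObjectDetails
	for lister.Next(&object) {
		fmt.Println(object.Name, object.Size)
	}
	if err := lister.Error(); err != nil {
		fmt.Println("list failed:", err)
	}
}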
diff --git a/storagev2/objects/directory.go b/storagev2/objects/directory.go
new file mode 100644
index 00000000..640be508
--- /dev/null
+++ b/storagev2/objects/directory.go
@@ -0,0 +1,167 @@
+package objects
+
+import (
+	"container/list"
+	"path/filepath"
+	"strings"
+
+	"github.com/qiniu/go-sdk/v7/internal/context"
+	"github.com/qiniu/go-sdk/v7/storagev2/apis"
+)
+
+type (
+	// Virtual directory
+	Directory struct {
+		bucket                *Bucket
+		prefix, pathSeparator string
+	}
+
+	// Listed entry
+	Entry struct {
+		// Directory name, valid only when the entry is a directory
+		DirectoryName string
+
+		// Object details, valid only when the entry is an object
+		Object *ObjectDetails
+	}
+
+	// Options for listing entries
+	ListEntriesOptions struct {
+		NeedParts bool // Whether part sizes are needed
+		Recursive bool // Whether to list recursively
+	}
+)
+
+var SkipDir = filepath.SkipDir
+
+// MoveTo moves the directory
+func (directory *Directory) MoveTo(ctx context.Context, toBucketName, toPrefix string) error {
+	if !strings.HasSuffix(toPrefix, directory.pathSeparator) {
+		toPrefix += directory.pathSeparator
+	}
+	operations := make([]Operation, 0, 16)
+	if err := directory.forEachObject(ctx, func(objectDetails *ObjectDetails) {
+		toObjectName := toPrefix + strings.TrimPrefix(objectDetails.Name, directory.prefix)
+		operations = append(operations, directory.bucket.Object(objectDetails.Name).MoveTo(toBucketName, toObjectName))
+	}); err != nil {
+		return err
+	}
+	return directory.bucket.objectsManager.Batch(ctx, operations, nil)
+}
+
+// CopyTo copies the directory
+func (directory *Directory) CopyTo(ctx context.Context, toBucketName, toPrefix string) error {
+	if !strings.HasSuffix(toPrefix, directory.pathSeparator) {
+		toPrefix += directory.pathSeparator
+	}
+	operations := make([]Operation, 0, 16)
+	if err := directory.forEachObject(ctx, func(objectDetails *ObjectDetails) {
+		toObjectName := toPrefix + strings.TrimPrefix(objectDetails.Name, directory.prefix)
+		operations = append(operations, directory.bucket.Object(objectDetails.Name).CopyTo(toBucketName, toObjectName))
+	}); err != nil {
+		return err
+	}
+	return directory.bucket.objectsManager.Batch(ctx, operations, nil)
+}
+
+// Delete deletes the directory
+func (directory *Directory) Delete(ctx context.Context) error {
+	operations := make([]Operation, 0, 16)
+	if err := directory.forEachObject(ctx, func(objectDetails *ObjectDetails) {
+		operations = append(operations, directory.bucket.Object(objectDetails.Name).Delete())
+	}); err != nil {
+		return err
+	}
+	return directory.bucket.objectsManager.Batch(ctx, operations, nil)
+}
+
+func (directory *Directory) forEachObject(ctx context.Context, each func(*ObjectDetails)) error {
+	lister := directory.bucket.List(ctx, &ListObjectsOptions{Prefix: directory.prefix})
+	defer lister.Close()
+
+	var objectDetails ObjectDetails
+	for lister.Next(&objectDetails) {
+		each(&objectDetails)
+	}
+
+	return lister.Error()
+}
+
+// ListEntries lists the entries of the directory
+func (directory *Directory) ListEntries(ctx context.Context, options *ListEntriesOptions, f func(*Entry) error) error {
+	if options == nil {
+		options = &ListEntriesOptions{}
+	}
+
+	directories := list.New()
+	directories.PushBack(directory)
+	return consumeEntries(ctx, directories, options, f)
+}
+
+func (directory *Directory) listEntries(ctx context.Context, marker string, needParts bool) ([]*Entry, string, error) {
+	entries := make([]*Entry, 0, 1024)
+	request := apis.GetObjectsRequest{
+		Bucket:    directory.bucket.name,
+		Prefix:    directory.prefix,
+		Delimiter: directory.pathSeparator,
+		Marker:    marker,
+		NeedParts: needParts,
+	}
+	response, err := directory.bucket.objectsManager.storage.GetObjects(ctx, &request, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	for _, commonPrefix := range response.CommonPrefixes {
+		entries = append(entries, &Entry{DirectoryName: commonPrefix})
+	}
+	for _, item := range response.Items {
+		objectDetails := new(ObjectDetails)
+		if err = objectDetails.fromListedObjectEntry(&item); err != nil {
+			return nil, "", err
+		}
+		entries = append(entries, &Entry{Object: objectDetails})
+	}
+	return entries, response.Marker, nil
+}
+
+func consumeEntries(ctx context.Context, directories *list.List, options *ListEntriesOptions, f func(*Entry) error) error {
+	var firstElement *list.Element
+	for {
+		if firstElement = directories.Front(); firstElement == nil {
+			break
+		}
+		directories.Remove(firstElement)
+		currentDirectory := firstElement.Value.(*Directory)
+
+		var (
+			entries   []*Entry
+			firstPage = true
+			marker    string
+			err       error
+		)
+	nextPage:
+		for firstPage || marker != "" {
+			firstPage = false
+			if entries, marker, err = currentDirectory.listEntries(ctx, marker, options.NeedParts); err != nil {
+				return err
+			} else {
+				for _, entry := range entries {
+					err := f(entry)
+					switch err {
+					case nil:
+						if options.Recursive && entry.DirectoryName != "" {
+							directories.PushBack(&Directory{bucket: currentDirectory.bucket, prefix: entry.DirectoryName, pathSeparator: currentDirectory.pathSeparator})
+						}
+					case SkipDir:
+						if entry.DirectoryName == "" {
+							continue nextPage
+						}
+					default:
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
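`ListEntries` walks a virtual directory tree much as `filepath.Walk` walks a file system: returning `SkipDir` from the callback prunes the directory that was just reported, and, for an object entry, skips the rest of the current page. A sketch reusing the `bucket` built in the previous example ("photos/" and "photos/tmp/" are illustrative names):

directory := bucket.Directory("photos/", "/")
err := directory.ListEntries(context.Background(), &objects.ListEntriesOptions{Recursive: true}, func(entry *objects.Entry) error {
	if entry.DirectoryName != "" {
		if entry.DirectoryName == "photos/tmp/" {
			return objects.SkipDir // do not descend into this subtree
		}
		return nil
	}
	fmt.Println(entry.Object.Name)
	return nil
})
if err != nil {
	fmt.Println("walk failed:", err)
}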
"github.com/qiniu/go-sdk/v7/storagev2/objects" + "github.com/qiniu/go-sdk/v7/storagev2/region" +) + +func TestDirectoryListEntriesWithoutRecurse(t *testing.T) { + counted := 0 + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + if r.URL.Path == "/list" { + rw.Header().Add("X-ReqId", "fakereqid") + rw.Header().Set("Content-Type", "application/json") + var ( + jsonData []byte + err error + query = r.URL.Query() + ) + if query.Get("bucket") != "bucket1" { + t.Fatalf("unexpected bucket") + } + if query.Get("prefix") != "" { + t.Fatalf("unexpected prefix") + } + if query.Get("delimiter") != "/" { + t.Fatalf("unexpected delimiter") + } + if query.Get("limit") != "" { + t.Fatalf("unexpected limit") + } + switch counted { + case 0: + if query.Get("marker") != "" { + t.Fatalf("unexpected marker") + } + jsonData, err = json.Marshal(&get_objects.Response{ + Marker: "testmarker1", + CommonPrefixes: []string{"test1/", "test2/"}, + Items: []get_objects.ListedObjectEntry{{ + Key: "file1", + PutTime: time.Now().UnixNano() / 100, + Hash: "testhash1", + Size: 4 * 1024 * 1024, + MimeType: "application/json", + }}, + }) + case 1: + if query.Get("marker") != "testmarker1" { + t.Fatalf("unexpected marker") + } + jsonData, err = json.Marshal(&get_objects.Response{ + CommonPrefixes: []string{"test3/", "test4/"}, + Items: []get_objects.ListedObjectEntry{{ + Key: "file2", + PutTime: time.Now().UnixNano() / 100, + Hash: "testhash1", + Size: 4 * 1024 * 1024, + MimeType: "application/json", + }}, + }) + default: + t.Fatalf("unexpected request") + } + counted += 1 + if err != nil { + t.Fatal(err) + } + rw.Write(jsonData) + } else { + t.Fatalf("unexpected path") + } + if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") { + t.Fatalf("unexpected authorization") + } + default: + t.Fatalf("unexpected method: %s", r.Method) + } + })) + defer server.Close() + + objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{ + Options: http_client.Options{ + Credentials: credentials.NewCredentials("testak", "testsk"), + Regions: ®ion.Region{Rsf: region.Endpoints{Preferred: []string{server.URL}}}, + }, + }) + directory := objectsManager.Bucket("bucket1").Directory("", "") + listed := make(map[string]*objects.ObjectDetails) + err := directory.ListEntries(context.Background(), nil, func(de *objects.Entry) error { + if de.DirectoryName != "" { + listed[de.DirectoryName] = nil + } else { + listed[de.Object.Name] = de.Object + } + return nil + }) + if err != nil { + t.Fatal(err) + } + if len(listed) != 6 { + t.Fatalf("unexpected listed length") + } + if obj, ok := listed["test1/"]; !ok || obj != nil { + t.Fatalf("unexpected directory list1") + } + if obj, ok := listed["test2/"]; !ok || obj != nil { + t.Fatalf("unexpected directory list2") + } + if obj, ok := listed["test3/"]; !ok || obj != nil { + t.Fatalf("unexpected directory list3") + } + if obj, ok := listed["test4/"]; !ok || obj != nil { + t.Fatalf("unexpected directory list4") + } + if obj, ok := listed["file1"]; !ok || obj == nil { + t.Fatalf("unexpected object file1") + } + if obj, ok := listed["file2"]; !ok || obj == nil { + t.Fatalf("unexpected object file2") + } +} + +func TestDirectoryListEntriesWithRecurse(t *testing.T) { + counted := 0 + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + if r.URL.Path == "/list" { + rw.Header().Set("Content-Type", 
"application/json") + rw.Header().Add("X-ReqId", "fakereqid") + var ( + jsonData []byte + err error + query = r.URL.Query() + ) + if query.Get("bucket") != "bucket1" { + t.Fatalf("unexpected bucket") + } + if query.Get("delimiter") != "/" { + t.Fatalf("unexpected delimiter") + } + if query.Get("limit") != "" { + t.Fatalf("unexpected limit") + } + switch counted { + case 0: + if query.Get("prefix") != "" { + t.Fatalf("unexpected prefix") + } + if query.Get("marker") != "" { + t.Fatalf("unexpected marker") + } + jsonData, err = json.Marshal(&get_objects.Response{ + Marker: "testmarker1", + CommonPrefixes: []string{"test1/"}, + Items: []get_objects.ListedObjectEntry{{ + Key: "file1", + PutTime: time.Now().UnixNano() / 100, + Hash: "testhash1", + Size: 4 * 1024 * 1024, + MimeType: "application/json", + }}, + }) + case 1: + if query.Get("prefix") != "" { + t.Fatalf("unexpected prefix") + } + if query.Get("marker") != "testmarker1" { + t.Fatalf("unexpected marker") + } + jsonData, err = json.Marshal(&get_objects.Response{ + CommonPrefixes: []string{"test2/"}, + Items: []get_objects.ListedObjectEntry{{ + Key: "file2", + PutTime: time.Now().UnixNano() / 100, + Hash: "testhash1", + Size: 4 * 1024 * 1024, + MimeType: "application/json", + }}, + }) + case 2: + if query.Get("prefix") != "test1/" { + t.Fatalf("unexpected prefix") + } + if query.Get("marker") != "" { + t.Fatalf("unexpected marker") + } + jsonData, err = json.Marshal(&get_objects.Response{ + Items: []get_objects.ListedObjectEntry{{ + Key: "test1/file1", + PutTime: time.Now().UnixNano() / 100, + Hash: "testhash1", + Size: 4 * 1024 * 1024, + MimeType: "application/json", + }}, + }) + case 3: + if query.Get("prefix") != "test2/" { + t.Fatalf("unexpected prefix") + } + if query.Get("marker") != "" { + t.Fatalf("unexpected marker") + } + jsonData, err = json.Marshal(&get_objects.Response{ + Items: []get_objects.ListedObjectEntry{{ + Key: "test2/file2", + PutTime: time.Now().UnixNano() / 100, + Hash: "testhash1", + Size: 4 * 1024 * 1024, + MimeType: "application/json", + }}, + }) + default: + t.Fatalf("unexpected request") + } + counted += 1 + if err != nil { + t.Fatal(err) + } + rw.Write(jsonData) + } else { + t.Fatalf("unexpected path") + } + if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") { + t.Fatalf("unexpected authorization") + } + default: + t.Fatalf("unexpected method: %s", r.Method) + } + })) + defer server.Close() + + objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{ + Options: http_client.Options{ + Credentials: credentials.NewCredentials("testak", "testsk"), + Regions: ®ion.Region{Rsf: region.Endpoints{Preferred: []string{server.URL}}}, + }, + }) + directory := objectsManager.Bucket("bucket1").Directory("", "") + listed := make(map[string]*objects.ObjectDetails) + err := directory.ListEntries(context.Background(), &objects.ListEntriesOptions{ + Recursive: true, + }, func(de *objects.Entry) error { + if de.DirectoryName != "" { + listed[de.DirectoryName] = nil + } else { + listed[de.Object.Name] = de.Object + } + return nil + }) + if err != nil { + t.Fatal(err) + } + if len(listed) != 6 { + t.Fatalf("unexpected listed length") + } + if obj, ok := listed["test1/"]; !ok || obj != nil { + t.Fatalf("unexpected directory list1") + } + if obj, ok := listed["test2/"]; !ok || obj != nil { + t.Fatalf("unexpected directory list2") + } + if obj, ok := listed["file1"]; !ok || obj == nil { + t.Fatalf("unexpected object file1") + } + if obj, ok := listed["file2"]; !ok || obj == nil { + 
t.Fatalf("unexpected object file2") + } + if obj, ok := listed["test1/file1"]; !ok || obj == nil { + t.Fatalf("unexpected object file1") + } + if obj, ok := listed["test2/file2"]; !ok || obj == nil { + t.Fatalf("unexpected object file2") + } +} + +func TestDirectoryListEntriesWithSkipDir(t *testing.T) { + counted := 0 + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + if r.URL.Path == "/list" { + rw.Header().Set("Content-Type", "application/json") + rw.Header().Add("X-ReqId", "fakereqid") + var ( + jsonData []byte + err error + query = r.URL.Query() + ) + if query.Get("bucket") != "bucket1" { + t.Fatalf("unexpected bucket") + } + if query.Get("delimiter") != "/" { + t.Fatalf("unexpected delimiter") + } + if query.Get("limit") != "" { + t.Fatalf("unexpected limit") + } + switch counted { + case 0: + if query.Get("prefix") != "" { + t.Fatalf("unexpected prefix") + } + if query.Get("marker") != "" { + t.Fatalf("unexpected marker") + } + jsonData, err = json.Marshal(&get_objects.Response{ + Marker: "testmarker1", + CommonPrefixes: []string{"test1/"}, + Items: []get_objects.ListedObjectEntry{{ + Key: "file1", + PutTime: time.Now().UnixNano() / 100, + Hash: "testhash1", + Size: 4 * 1024 * 1024, + MimeType: "application/json", + }}, + }) + case 1: + if query.Get("prefix") != "" { + t.Fatalf("unexpected prefix") + } + if query.Get("marker") != "testmarker1" { + t.Fatalf("unexpected marker") + } + jsonData, err = json.Marshal(&get_objects.Response{ + CommonPrefixes: []string{"test2/"}, + Items: []get_objects.ListedObjectEntry{{ + Key: "file2", + PutTime: time.Now().UnixNano() / 100, + Hash: "testhash1", + Size: 4 * 1024 * 1024, + MimeType: "application/json", + }}, + }) + case 2: + if query.Get("prefix") != "test1/" { + t.Fatalf("unexpected prefix") + } + if query.Get("marker") != "" { + t.Fatalf("unexpected marker") + } + jsonData, err = json.Marshal(&get_objects.Response{ + Items: []get_objects.ListedObjectEntry{{ + Key: "test1/file1", + PutTime: time.Now().UnixNano() / 100, + Hash: "testhash1", + Size: 4 * 1024 * 1024, + MimeType: "application/json", + }, { + Key: "test1/file2", + PutTime: time.Now().UnixNano() / 100, + Hash: "testhash1", + Size: 4 * 1024 * 1024, + MimeType: "application/json", + }}, + }) + default: + t.Fatalf("unexpected request") + } + counted += 1 + if err != nil { + t.Fatal(err) + } + rw.Write(jsonData) + } else { + t.Fatalf("unexpected path") + } + if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") { + t.Fatalf("unexpected authorization") + } + default: + t.Fatalf("unexpected method: %s", r.Method) + } + })) + defer server.Close() + + objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{ + Options: http_client.Options{ + Credentials: credentials.NewCredentials("testak", "testsk"), + Regions: ®ion.Region{Rsf: region.Endpoints{Preferred: []string{server.URL}}}, + }, + }) + directory := objectsManager.Bucket("bucket1").Directory("", "") + listed := make(map[string]*objects.ObjectDetails) + err := directory.ListEntries(context.Background(), &objects.ListEntriesOptions{ + Recursive: true, + }, func(de *objects.Entry) error { + if de.DirectoryName != "" { + listed[de.DirectoryName] = nil + if de.DirectoryName == "test2/" { + return objects.SkipDir + } + } else { + listed[de.Object.Name] = de.Object + if de.Object.Name == "test1/file1" { + return objects.SkipDir + } + } + return nil + }) + if err != nil { + t.Fatal(err) + } + if len(listed) != 5 { + 
t.Fatalf("unexpected listed length") + } + if obj, ok := listed["test1/"]; !ok || obj != nil { + t.Fatalf("unexpected directory list1") + } + if obj, ok := listed["test2/"]; !ok || obj != nil { + t.Fatalf("unexpected directory list2") + } + if obj, ok := listed["file1"]; !ok || obj == nil { + t.Fatalf("unexpected object file1") + } + if obj, ok := listed["file2"]; !ok || obj == nil { + t.Fatalf("unexpected object file2") + } + if obj, ok := listed["test1/file1"]; !ok || obj == nil { + t.Fatalf("unexpected object file1") + } +} diff --git a/storagev2/objects/lister.go b/storagev2/objects/lister.go new file mode 100644 index 00000000..f8dbda25 --- /dev/null +++ b/storagev2/objects/lister.go @@ -0,0 +1,140 @@ +package objects + +import ( + "encoding/hex" + "io" + "time" + + "github.com/qiniu/go-sdk/v7/internal/context" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/apis/get_objects" + "github.com/qiniu/go-sdk/v7/storagev2/apis/stat_object" +) + +type ( + // 对象列举接口 + Lister interface { + io.Closer + + // 读取下一条记录 + Next(*ObjectDetails) bool + + // 获取错误信息 + Error() error + + // 获取位置标记 + Marker() string + } + + listerV1 struct { + ctx context.Context + bucket *Bucket + rest *uint64 + marker string + entries get_objects.ListedObjects + options *ListObjectsOptions + firstCall bool + err error + } +) + +const listerV1DefaultLimit = 1000 + +func newListerV1(ctx context.Context, bucket *Bucket, options *ListObjectsOptions) Lister { + if options == nil { + options = &ListObjectsOptions{} + } + return &listerV1{ctx: ctx, bucket: bucket, rest: options.Limit, marker: options.Marker, options: options, firstCall: true} +} + +func (v1 *listerV1) Next(object *ObjectDetails) bool { + if len(v1.entries) == 0 { + if err := v1.callListApi(); err != nil { + v1.err = err + return false + } + } + if len(v1.entries) == 0 { + return false + } + entry := v1.entries[0] + v1.entries = v1.entries[1:] + if err := object.fromListedObjectEntry(&entry); err != nil { + v1.err = err + return false + } + return true +} + +func (v1 *listerV1) Marker() string { + return v1.marker +} + +func (v1 *listerV1) callListApi() error { + if v1.marker == "" && !v1.firstCall { + return nil + } + v1.firstCall = false + + request := apis.GetObjectsRequest{ + Bucket: v1.bucket.name, + Marker: v1.marker, + Prefix: v1.options.Prefix, + NeedParts: v1.options.NeedParts, + } + if v1.rest != nil && *v1.rest < listerV1DefaultLimit { + if *v1.rest == 0 { + return nil + } + request.Limit = int64(*v1.rest) + } + response, err := v1.bucket.objectsManager.storage.GetObjects(v1.ctx, &request, nil) + if err != nil { + return err + } + v1.entries = response.Items + v1.marker = response.Marker + request.Marker = response.Marker + if v1.rest != nil { + *v1.rest -= uint64(len(response.Items)) + } + return nil +} + +func (v1 *listerV1) Error() error { + return v1.err +} + +func (v1 *listerV1) Close() error { + return v1.err +} + +func (object *ObjectDetails) fromListedObjectEntry(entry *get_objects.ListedObjectEntry) error { + var ( + md5 []byte + err error + ) + object.Name = entry.Key + object.UploadedAt = time.Unix(entry.PutTime/1e7, (entry.PutTime%1e7)*1e2) + object.ETag = entry.Hash + object.Size = entry.Size + object.MimeType = entry.MimeType + object.StorageClass = StorageClass(entry.Type) + object.EndUser = entry.EndUser + object.Status = Status(entry.Status) + object.RestoreStatus = RestoreStatus(entry.RestoringStatus) + object.Metadata = entry.Metadata + if entry.Md5 != "" { + md5, err = hex.DecodeString(entry.Md5) 
diff --git a/storagev2/objects/lister_test.go b/storagev2/objects/lister_test.go
new file mode 100644
index 00000000..5e6ed95e
--- /dev/null
+++ b/storagev2/objects/lister_test.go
@@ -0,0 +1,200 @@
+//go:build unit
+// +build unit
+
+package objects_test
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/qiniu/go-sdk/v7/storagev2/apis/get_objects"
+	"github.com/qiniu/go-sdk/v7/storagev2/credentials"
+	"github.com/qiniu/go-sdk/v7/storagev2/http_client"
+	"github.com/qiniu/go-sdk/v7/storagev2/objects"
+	"github.com/qiniu/go-sdk/v7/storagev2/region"
+)
+
+func TestObjectLister(t *testing.T) {
+	counted := 0
+	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodGet:
+			if r.URL.Path == "/list" {
+				rw.Header().Set("Content-Type", "application/json")
+				rw.Header().Add("X-ReqId", "fakereqid")
+				var (
+					jsonData []byte
+					err      error
+					query    = r.URL.Query()
+				)
+				if query.Get("bucket") != "bucket1" {
+					t.Fatalf("unexpected bucket")
+				}
+				if query.Get("prefix") != "test/" {
+					t.Fatalf("unexpected prefix")
+				}
+				if query.Get("delimiter") != "" {
+					t.Fatalf("unexpected delimiter")
+				}
+				if query.Get("limit") != "" {
+					t.Fatalf("unexpected limit")
+				}
+				switch counted {
+				case 0:
+					if query.Get("marker") != "" {
+						t.Fatalf("unexpected marker")
+					}
+					jsonData, err = json.Marshal(&get_objects.Response{
+						Marker: "testmarker1",
+						Items: []get_objects.ListedObjectEntry{{
+							Key:      "test/1",
+							PutTime:  time.Now().UnixNano() / 100,
+							Hash:     "testhash1",
+							Size:     4 * 1024 * 1024,
+							MimeType: "application/json",
+							Parts:    []int64{4 * 1024 * 1024},
+						}, {
+							Key:      "test/2",
+							PutTime:  time.Now().UnixNano() / 100,
+							Hash:     "testhash2",
+							Size:     4 * 1024 * 1024,
+							MimeType: "application/json",
+							Parts:    []int64{4 * 1024 * 1024},
+						}, {
+							Key:      "test/3",
+							PutTime:  time.Now().UnixNano() / 100,
+							Hash:     "testhash3",
+							Size:     4 * 1024 * 1024,
+							MimeType: "application/json",
+							Parts:    []int64{4 * 1024 * 1024},
+						}},
+					})
+				case 1:
+					if query.Get("marker") != "testmarker1" {
+						t.Fatalf("unexpected marker")
+					}
+					jsonData, err = json.Marshal(&get_objects.Response{
+						Items: []get_objects.ListedObjectEntry{{
+							Key:      "test/4",
+							PutTime:  time.Now().UnixNano() / 100,
+							Hash:     "testhash4",
+							Size:     4 * 1024 * 1024,
+							MimeType: "application/json",
+							Parts:    []int64{4 * 1024 * 1024},
+						}, {
+							Key:      "test/5",
+							PutTime:  time.Now().UnixNano() / 100,
+							Hash:     "testhash5",
+							Size:     4 * 1024 * 1024,
+							MimeType: "application/json",
+							Parts:    []int64{4 * 1024 * 1024},
+						}},
+					})
+				default:
+					t.Fatalf("unexpected request")
+				}
+				counted += 1
+				if err != nil {
+					t.Fatal(err)
+				}
+				rw.Write(jsonData)
+			} else {
+				t.Fatalf("unexpected path")
+			}
+			if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") {
+				t.Fatalf("unexpected authorization")
+			}
+		default:
+			t.Fatalf("unexpected method: %s", r.Method)
+		}
+	}))
+	defer server.Close()
+
+	objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{
+		Options: http_client.Options{
+			Credentials: credentials.NewCredentials("testak", "testsk"),
+			Regions:     &region.Region{Rsf: region.Endpoints{Preferred: []string{server.URL}}},
+		},
+	})
+	bucket := objectsManager.Bucket("bucket1")
+	lister := bucket.List(context.Background(), &objects.ListObjectsOptions{
+		Prefix:    "test/",
+		NeedParts: true,
+	})
+	defer lister.Close()
+
+	var objectDetails objects.ObjectDetails
+	if !lister.Next(&objectDetails) {
+		t.Fatalf("unexpected eof: %s", lister.Error())
+	}
+	if objectDetails.Name != "test/1" {
+		t.Fatalf("unexpected object name")
+	}
+	if objectDetails.Size != 4*1024*1024 {
+		t.Fatalf("unexpected size")
+	}
+	if objectDetails.UploadedAt.Unix()-time.Now().Unix() >= 10 {
+		t.Fatalf("unexpected putTime")
+	}
+
+	if !lister.Next(&objectDetails) {
+		t.Fatalf("unexpected eof: %s", lister.Error())
+	}
+	if objectDetails.Name != "test/2" {
+		t.Fatalf("unexpected object name")
+	}
+	if objectDetails.Size != 4*1024*1024 {
+		t.Fatalf("unexpected size")
+	}
+	if objectDetails.UploadedAt.Unix()-time.Now().Unix() >= 10 {
+		t.Fatalf("unexpected putTime")
+	}
+
+	if !lister.Next(&objectDetails) {
+		t.Fatalf("unexpected eof: %s", lister.Error())
+	}
+	if objectDetails.Name != "test/3" {
+		t.Fatalf("unexpected object name")
+	}
+	if objectDetails.Size != 4*1024*1024 {
+		t.Fatalf("unexpected size")
+	}
+	if objectDetails.UploadedAt.Unix()-time.Now().Unix() >= 10 {
+		t.Fatalf("unexpected putTime")
+	}
+
+	if !lister.Next(&objectDetails) {
+		t.Fatalf("unexpected eof: %s", lister.Error())
+	}
+	if objectDetails.Name != "test/4" {
+		t.Fatalf("unexpected object name")
+	}
+	if objectDetails.Size != 4*1024*1024 {
+		t.Fatalf("unexpected size")
+	}
+	if objectDetails.UploadedAt.Unix()-time.Now().Unix() >= 10 {
+		t.Fatalf("unexpected putTime")
+	}
+
+	if !lister.Next(&objectDetails) {
+		t.Fatalf("unexpected eof: %s", lister.Error())
+	}
+	if objectDetails.Name != "test/5" {
+		t.Fatalf("unexpected object name")
+	}
+	if objectDetails.Size != 4*1024*1024 {
+		t.Fatalf("unexpected size")
+	}
+	if objectDetails.UploadedAt.Unix()-time.Now().Unix() >= 10 {
+		t.Fatalf("unexpected putTime")
+	}
+
+	if err := lister.Error(); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/storagev2/objects/object.go b/storagev2/objects/object.go
new file mode 100644
index 00000000..f44e53e8
--- /dev/null
+++ b/storagev2/objects/object.go
@@ -0,0 +1,76 @@
+package objects
+
+// Object represents an object in a bucket
+type Object struct {
+	bucket *Bucket
+	name   string
+}
+
+// Stat gets the object metadata
+func (object *Object) Stat() *StatObjectOperation {
+	return &StatObjectOperation{
+		object: *object,
+	}
+}
+
+// MoveTo moves the object
+func (object *Object) MoveTo(toBucketName, toObjectName string) *MoveObjectOperation {
+	return &MoveObjectOperation{
+		fromObject: *object,
+		toObject:   entry{toBucketName, toObjectName},
+	}
+}
+
+// CopyTo copies the object
+func (object *Object) CopyTo(toBucketName, toObjectName string) *CopyObjectOperation {
+	return &CopyObjectOperation{
+		fromObject: *object,
+		toObject:   entry{toBucketName, toObjectName},
+	}
+}
+
+// Delete deletes the object
+func (object *Object) Delete() *DeleteObjectOperation {
+	return &DeleteObjectOperation{
+		object: *object,
+	}
+}
+
+// Restore restores (unfreezes) the object
+func (object *Object) Restore(freezeAfterDays int64) *RestoreObjectOperation {
+	return &RestoreObjectOperation{
+		object:          *object,
+		freezeAfterDays: freezeAfterDays,
+	}
+}
+
+// SetStorageClass sets the storage class of the object
+func (object *Object) SetStorageClass(storageClass StorageClass) *SetObjectStorageClassOperation {
+	return &SetObjectStorageClassOperation{
+		object:       *object,
+		storageClass: storageClass,
+	}
+}
+
+// SetStatus sets the status of the object
+func (object *Object) SetStatus(status Status) *SetObjectStatusOperation {
+	return &SetObjectStatusOperation{
+		object: *object,
+		status: status,
+	}
+}
+
+// SetMetadata sets the metadata of the object
+func (object *Object) SetMetadata(mimeType string) *SetObjectMetadataOperation {
+	return &SetObjectMetadataOperation{
+		object:   *object,
+		mimeType: mimeType,
+	}
+}
+
+// SetLifeCycle sets the life cycle of the object
+func (object *Object) SetLifeCycle() *SetObjectLifeCycleOperation {
+	return &SetObjectLifeCycleOperation{
+		object: *object,
+	}
+}
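Each builder returned by `Object` can either be sent on its own via `Call` or collected into a batch (see `ObjectsManager.Batch` below). A sketch of standalone calls, with `bucket` as in the earlier example and object names illustrative:

ctx := context.Background()

// Stat one object, including its part sizes.
details, err := bucket.Object("report.json").Stat().NeedParts(true).Call(ctx)
if err == nil {
	fmt.Println(details.MimeType, details.Parts)
}

// Configure a life cycle for the same object.
err = bucket.Object("report.json").
	SetLifeCycle().
	ToIAAfterDays(30).
	ToArchiveAfterDays(90).
	DeleteAfterDays(365).
	Call(ctx)
if err != nil {
	fmt.Println("set life cycle failed:", err)
}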
diff --git a/storagev2/objects/object_details.go b/storagev2/objects/object_details.go
new file mode 100644
index 00000000..06fca8ed
--- /dev/null
+++ b/storagev2/objects/object_details.go
@@ -0,0 +1,83 @@
+package objects
+
+import (
+	"crypto/md5"
+	"time"
+)
+
+type (
+	// Storage class
+	StorageClass int64
+
+	// Restore status
+	RestoreStatus int64
+
+	// Object status (enabled/disabled)
+	Status int64
+
+	// Lister API version
+	ListerVersion int64
+
+	// Object details
+	ObjectDetails struct {
+		Name                    string        // Object name
+		UploadedAt              time.Time     // Upload time
+		ETag                    string        // Hash value
+		Size                    int64         // Object size in bytes
+		MimeType                string        // Object MIME type
+		StorageClass            StorageClass  // Storage class
+		EndUser                 string        // Unique owner identifier
+		Status                  Status        // Object status
+		RestoreStatus           RestoreStatus // Restore status, effective only for objects in archive or deep archive storage
+		TransitionToIA          *time.Time    // Date when the object transitions to IA storage in its life cycle
+		TransitionToArchiveIR   *time.Time    // Date when the object transitions to archive IR storage in its life cycle
+		TransitionToArchive     *time.Time    // Date when the object transitions to archive storage in its life cycle
+		TransitionToDeepArchive *time.Time    // Date when the object transitions to deep archive storage in its life cycle
+		ExpireAt                *time.Time    // Date when the object expires and is deleted
+		MD5                     [md5.Size]byte // Object MD5 checksum
+		Metadata                map[string]string
+		Parts                   []int64 // Part sizes
+	}
+)
+
+const (
+	// Standard storage class
+	StandardStorageClass StorageClass = iota
+
+	// IA (infrequent access) storage class
+	IAStorageClass
+
+	// Archive storage class
+	ArchiveStorageClass
+
+	// Deep archive storage class
+	DeepArchiveStorageClass
+
+	// Archive IR (instant retrieval) storage class
+	ArchiveIRStorageClass
+)
+
+const (
+	// Enabled status
+	EnabledStatus Status = iota
+
+	// Disabled status
+	DisabledStatus
+)
+
+const (
+	// Frozen
+	FrozenStatus RestoreStatus = iota
+
+	// Restoring
+	RestoringStatus
+
+	// Restored
+	RestoredStatus
+)
+
+const (
+	// Lister V1
+	ListerVersionV1 ListerVersion = iota
+	// ListerVersionV2
+)
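One detail worth calling out: `PutTime` in the stat and list responses is expressed in 100-nanosecond units since the Unix epoch, which is why the conversions in this PR divide by 1e7 for the seconds part and multiply the remainder by 1e2 for nanoseconds. A worked conversion (the raw value is made up):

// putTime is in units of 100ns, as returned in stat/list responses.
putTime := int64(16_000_000_000_000_000) // hypothetical raw value
uploadedAt := time.Unix(putTime/1e7, (putTime%1e7)*1e2)
fmt.Println(uploadedAt.UTC())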
diff --git a/storagev2/objects/object_test.go b/storagev2/objects/object_test.go
new file mode 100644
index 00000000..585dd23d
--- /dev/null
+++ b/storagev2/objects/object_test.go
@@ -0,0 +1,352 @@
+//go:build unit
+// +build unit
+
+package objects_test
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/qiniu/go-sdk/v7/storagev2/apis/stat_object"
+	"github.com/qiniu/go-sdk/v7/storagev2/credentials"
+	"github.com/qiniu/go-sdk/v7/storagev2/http_client"
+	"github.com/qiniu/go-sdk/v7/storagev2/objects"
+	"github.com/qiniu/go-sdk/v7/storagev2/region"
+)
+
+func TestObjectStat(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodGet:
+			if r.URL.RequestURI() == "/stat/"+base64.URLEncoding.EncodeToString([]byte("bucket1:testobject"))+"?needparts=true" {
+				rw.Header().Set("Content-Type", "application/json")
+				jsonData, err := json.Marshal(&stat_object.Response{
+					Size:                        4 * 1024 * 1024,
+					Hash:                        "testhash1",
+					MimeType:                    "application/json",
+					Type:                        0,
+					PutTime:                     time.Now().UnixNano() / 100,
+					RestoringStatus:             0,
+					Status:                      0,
+					TransitionToIaTime:          time.Now().Add(24 * time.Hour).Unix(),
+					TransitionToArchiveIrTime:   time.Now().Add(2 * 24 * time.Hour).Unix(),
+					TransitionToArchiveTime:     time.Now().Add(3 * 24 * time.Hour).Unix(),
+					TransitionToDeepArchiveTime: time.Now().Add(4 * 24 * time.Hour).Unix(),
+					ExpirationTime:              time.Now().Add(5 * 24 * time.Hour).Unix(),
+					Metadata:                    map[string]string{"x-qn-meta-a": "b"},
+					Parts:                       []int64{4 * 1024 * 1024},
+				})
+				if err != nil {
+					t.Fatal(err)
+				}
+				rw.Header().Add("X-ReqId", "fakereqid")
+				rw.Write(jsonData)
+			} else {
+				t.Fatalf("unexpected path")
+			}
+			if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") {
+				t.Fatalf("unexpected authorization")
+			}
+		default:
+			t.Fatalf("unexpected method: %s", r.Method)
+		}
+	}))
+	defer server.Close()
+
+	objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{
+		Options: http_client.Options{
+			Credentials: credentials.NewCredentials("testak", "testsk"),
+			Regions:     &region.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}},
+		},
+	})
+	bucket := objectsManager.Bucket("bucket1")
+	if obj, err := bucket.Object("testobject").Stat().NeedParts(true).Call(context.Background()); err != nil {
+		t.Fatal(err)
+	} else {
+		if obj.Size != 4*1024*1024 {
+			t.Fatalf("unexpected fsize")
+		}
+		if obj.ETag != "testhash1" {
+			t.Fatalf("unexpected etag")
+		}
+		if obj.MimeType != "application/json" {
+			t.Fatalf("unexpected mimeType")
+		}
+		if obj.UploadedAt.Unix()-time.Now().Unix() >= 10 {
+			t.Fatalf("unexpected putTime")
+		}
+		if obj.TransitionToIA.Unix()-time.Now().Add(24*time.Hour).Unix() >= 10 {
+			t.Fatalf("unexpected transitionToIA")
+		}
+		if obj.TransitionToArchiveIR.Unix()-time.Now().Add(2*24*time.Hour).Unix() >= 10 {
+			t.Fatalf("unexpected transitionToArchiveIR")
+		}
+		if obj.TransitionToArchive.Unix()-time.Now().Add(3*24*time.Hour).Unix() >= 10 {
+			t.Fatalf("unexpected transitionToArchive")
+		}
+		if obj.TransitionToDeepArchive.Unix()-time.Now().Add(4*24*time.Hour).Unix() >= 10 {
+			t.Fatalf("unexpected transitionToDeepArchive")
+		}
+		if obj.ExpireAt.Unix()-time.Now().Add(5*24*time.Hour).Unix() >= 10 {
+			t.Fatalf("unexpected expiration")
+		}
+		if len(obj.Metadata) != 1 || obj.Metadata["x-qn-meta-a"] != "b" {
+			t.Fatalf("unexpected metadata")
+		}
+		if len(obj.Parts) != 1 || obj.Parts[0] != 4*1024*1024 {
+			t.Fatalf("unexpected parts")
+		}
+	}
+}
+
+func TestObjectMoveTo(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodPost:
+			if r.URL.RequestURI() != "/move/"+base64.URLEncoding.EncodeToString([]byte("bucket1:testobject"))+"/"+base64.URLEncoding.EncodeToString([]byte("bucket2:testobject")) {
+				t.Fatalf("unexpected path")
+			}
+			if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") {
+				t.Fatalf("unexpected authorization")
+			}
+			rw.Header().Add("X-ReqId", "fakereqid")
+		default:
+			t.Fatalf("unexpected method: %s", r.Method)
+		}
+	}))
+	defer server.Close()
+
+	objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{
+		Options: http_client.Options{
+			Credentials: credentials.NewCredentials("testak", "testsk"),
+			Regions:     &region.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}},
+		},
+	})
+	bucket := objectsManager.Bucket("bucket1")
+	if err := bucket.Object("testobject").MoveTo("bucket2", "testobject").Call(context.Background()); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestObjectCopyTo(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodPost:
+			if r.URL.RequestURI() != "/copy/"+base64.URLEncoding.EncodeToString([]byte("bucket1:testobject"))+"/"+base64.URLEncoding.EncodeToString([]byte("bucket2:testobject")) {
+				t.Fatalf("unexpected path")
+			}
+			if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") {
+				t.Fatalf("unexpected authorization")
+			}
+			rw.Header().Add("X-ReqId", "fakereqid")
+		default:
+			t.Fatalf("unexpected method: %s", r.Method)
+		}
+	}))
+	defer server.Close()
+
+	objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{
+		Options: http_client.Options{
+			Credentials: credentials.NewCredentials("testak", "testsk"),
+			Regions:     &region.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}},
+		},
+	})
+	bucket := objectsManager.Bucket("bucket1")
+	if err := bucket.Object("testobject").CopyTo("bucket2", "testobject").Call(context.Background()); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestObjectDelete(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodPost:
+			if r.URL.RequestURI() != "/delete/"+base64.URLEncoding.EncodeToString([]byte("bucket1:testobject")) {
+				t.Fatalf("unexpected path")
+			}
+			if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") {
+				t.Fatalf("unexpected authorization")
+			}
+			rw.Header().Add("X-ReqId", "fakereqid")
+		default:
+			t.Fatalf("unexpected method: %s", r.Method)
+		}
+	}))
+	defer server.Close()
+
+	objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{
+		Options: http_client.Options{
+			Credentials: credentials.NewCredentials("testak", "testsk"),
+			Regions:     &region.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}},
+		},
+	})
+	bucket := objectsManager.Bucket("bucket1")
+	if err := bucket.Object("testobject").Delete().Call(context.Background()); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestObjectRestore(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodPost:
+			if r.URL.RequestURI() != "/restoreAr/"+base64.URLEncoding.EncodeToString([]byte("bucket1:testobject"))+"/freezeAfterDays/7" {
+				t.Fatalf("unexpected path")
+			}
+			if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") {
+				t.Fatalf("unexpected authorization")
+			}
+			rw.Header().Add("X-ReqId", "fakereqid")
+		default:
+			t.Fatalf("unexpected method: %s", r.Method)
+		}
+	}))
+	defer server.Close()
+
+	objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{
+		Options: http_client.Options{
+			Credentials: credentials.NewCredentials("testak", "testsk"),
+			Regions:     &region.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}},
+		},
+	})
+	bucket := objectsManager.Bucket("bucket1")
+	if err := bucket.Object("testobject").Restore(7).Call(context.Background()); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestObjectSetStorageClass(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodPost:
+			if r.URL.RequestURI() != "/chtype/"+base64.URLEncoding.EncodeToString([]byte("bucket1:testobject"))+"/type/4" {
+				t.Fatalf("unexpected path")
+			}
+			if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") {
+				t.Fatalf("unexpected authorization")
+			}
+			rw.Header().Add("X-ReqId", "fakereqid")
+		default:
+			t.Fatalf("unexpected method: %s", r.Method)
+		}
+	}))
+	defer server.Close()
+
+	objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{
+		Options: http_client.Options{
+			Credentials: credentials.NewCredentials("testak", "testsk"),
+			Regions:     &region.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}},
+		},
+	})
+	bucket := objectsManager.Bucket("bucket1")
+	if err := bucket.Object("testobject").SetStorageClass(objects.ArchiveIRStorageClass).Call(context.Background()); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestObjectSetStatus(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodPost:
+			if r.URL.RequestURI() != "/chstatus/"+base64.URLEncoding.EncodeToString([]byte("bucket1:testobject"))+"/status/1" {
+				t.Fatalf("unexpected path")
+			}
+			if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") {
+				t.Fatalf("unexpected authorization")
+			}
+			rw.Header().Add("X-ReqId", "fakereqid")
+		default:
+			t.Fatalf("unexpected method: %s", r.Method)
+		}
+	}))
+	defer server.Close()
+
+	objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{
+		Options: http_client.Options{
+			Credentials: credentials.NewCredentials("testak", "testsk"),
+			Regions:     &region.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}},
+		},
+	})
+	bucket := objectsManager.Bucket("bucket1")
+	if err := bucket.Object("testobject").SetStatus(objects.DisabledStatus).Call(context.Background()); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestObjectSetMetadata(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodPost:
+			if r.URL.RequestURI() != "/chgm/"+base64.URLEncoding.EncodeToString([]byte("bucket1:testobject"))+
+				"/mime/"+base64.URLEncoding.EncodeToString([]byte("application/json"))+
+				"/cond/"+base64.URLEncoding.EncodeToString([]byte("fsize=1"))+
+				"/x-qn-meta-a/"+base64.URLEncoding.EncodeToString([]byte("b")) {
+				t.Fatalf("unexpected path: %s", r.URL.RequestURI())
+			}
+			if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") {
+				t.Fatalf("unexpected authorization")
+			}
+			rw.Header().Add("X-ReqId", "fakereqid")
+		default:
+			t.Fatalf("unexpected method: %s", r.Method)
+		}
+	}))
+	defer server.Close()
+
+	objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{
+		Options: http_client.Options{
+			Credentials: credentials.NewCredentials("testak", "testsk"),
+			Regions:     &region.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}},
+		},
+	})
+	bucket := objectsManager.Bucket("bucket1")
+	if err := bucket.Object("testobject").SetMetadata("application/json").
+		Metadata(map[string]string{"a": "b"}).
+		Conditions(map[string]string{"fsize": "1"}).
+		Call(context.Background()); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestObjectSetLifeCycle(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodPost:
+			if r.URL.RequestURI() != "/lifecycle/"+base64.URLEncoding.EncodeToString([]byte("bucket1:testobject"))+
+				"/toIAAfterDays/1/toArchiveAfterDays/3/toDeepArchiveAfterDays/4/toArchiveIRAfterDays/2/deleteAfterDays/5" {
+				t.Fatalf("unexpected path: %s", r.URL.RequestURI())
+			}
+			if !strings.HasPrefix(r.Header.Get("Authorization"), "Qiniu testak:") {
+				t.Fatalf("unexpected authorization")
+			}
+			rw.Header().Add("X-ReqId", "fakereqid")
+		default:
+			t.Fatalf("unexpected method: %s", r.Method)
+		}
+	}))
+	defer server.Close()
+
+	objectsManager := objects.NewObjectsManager(&objects.ObjectsManagerOptions{
+		Options: http_client.Options{
+			Credentials: credentials.NewCredentials("testak", "testsk"),
+			Regions:     &region.Region{Rs: region.Endpoints{Preferred: []string{server.URL}}},
+		},
+	})
+	bucket := objectsManager.Bucket("bucket1")
+	if err := bucket.Object("testobject").
+		SetLifeCycle().
+		ToIAAfterDays(1).
+		ToArchiveIRAfterDays(2).
+		ToArchiveAfterDays(3).
+		ToDeepArchiveAfterDays(4).
+		DeleteAfterDays(5).
+		Call(context.Background()); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/storagev2/objects/objects_manager.go b/storagev2/objects/objects_manager.go
new file mode 100644
index 00000000..5651cbad
--- /dev/null
+++ b/storagev2/objects/objects_manager.go
@@ -0,0 +1,73 @@
+package objects
+
+import (
+	"context"
+
+	"github.com/qiniu/go-sdk/v7/storagev2/apis"
+	httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client"
+)
+
+type (
+	// Objects manager
+	ObjectsManager struct {
+		storage          *apis.Storage
+		options          httpclient.Options
+		listerVersion    ListerVersion
+		batchOpsExecutor BatchOpsExecutor
+	}
+
+	// Objects manager options
+	ObjectsManagerOptions struct {
+		// HTTP client options
+		httpclient.Options
+
+		// Lister API version; defaults to V1 if unset
+		ListerVersion ListerVersion
+
+		// Batch operations executor; defaults to the concurrent batch operations executor if unset
+		BatchOpsExecutor BatchOpsExecutor
+	}
+
+	// Batch options
+	BatchOptions struct {
+		// Batch operations executor; defaults to the executor of the ObjectsManager if unset
+		BatchOpsExecutor BatchOpsExecutor
+	}
+)
+
+// NewObjectsManager creates an objects manager
+func NewObjectsManager(options *ObjectsManagerOptions) *ObjectsManager {
+	if options == nil {
+		options = &ObjectsManagerOptions{}
+	}
+	batchOpsExecutor := options.BatchOpsExecutor
+	if batchOpsExecutor == nil {
+		batchOpsExecutor = NewConcurrentBatchOpsExecutor(nil)
+	}
+	return &ObjectsManager{
+		storage:          apis.NewStorage(&options.Options),
+		options:          options.Options,
+		listerVersion:    options.ListerVersion,
+		batchOpsExecutor: batchOpsExecutor,
+	}
+}
+
+// Bucket gets a bucket by name
+func (objectsManager *ObjectsManager) Bucket(name string) *Bucket {
+	return &Bucket{name: name, objectsManager: objectsManager}
+}
+
+// Batch executes batch operations
+func (objectsManager *ObjectsManager) Batch(ctx context.Context, operations []Operation, options *BatchOptions) error {
+	if len(operations) == 0 {
+		return nil
+	}
+	if options == nil {
+		options = &BatchOptions{}
+	}
+	batchOpsExecutor := options.BatchOpsExecutor
+	if batchOpsExecutor == nil {
+		batchOpsExecutor = objectsManager.batchOpsExecutor
+	}
+	return batchOpsExecutor.ExecuteBatchOps(ctx, operations, objectsManager.storage)
+}
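`Batch` pairs naturally with the per-operation `OnResponse`/`OnError` hooks defined in operations.go below, since a batch-level result alone does not say which commands succeeded. A sketch, with `objectsManager` and `bucket` as in the earlier examples and object names illustrative:

operations := []objects.Operation{
	bucket.Object("a.txt").Delete().
		OnResponse(func() { fmt.Println("a.txt deleted") }).
		OnError(func(err error) { fmt.Println("a.txt:", err) }),
	bucket.Object("b.txt").SetStorageClass(objects.IAStorageClass),
}
// Passing nil options falls back to the manager's configured executor.
if err := objectsManager.Batch(context.Background(), operations, nil); err != nil {
	fmt.Println("batch failed:", err)
}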
diff --git a/storagev2/objects/operations.go b/storagev2/objects/operations.go
new file mode 100644
index 00000000..204faf21
--- /dev/null
+++ b/storagev2/objects/operations.go
@@ -0,0 +1,676 @@
+package objects
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/qiniu/go-sdk/v7/storagev2/apis"
+	"github.com/qiniu/go-sdk/v7/storagev2/apis/stat_object"
+)
+
+type (
+	// Operation is the interface for sending a single request or one command of a batch request
+	Operation interface {
+		// Command of a batch request
+		fmt.Stringer
+		// Entries being operated on; at least one must be returned
+		relatedEntries() []entry
+		// Handle the returned result
+		handleResponse(*ObjectDetails, error)
+	}
+
+	entry struct {
+		bucketName string
+		objectName string
+	}
+
+	// Operation to get object metadata
+	StatObjectOperation struct {
+		object     Object
+		needParts  bool
+		onResponse func(*ObjectDetails)
+		onError    func(error)
+	}
+
+	// Operation to move an object
+	MoveObjectOperation struct {
+		fromObject Object
+		toObject   entry
+		force      bool
+		onResponse func()
+		onError    func(error)
+	}
+
+	// Operation to copy an object
+	CopyObjectOperation struct {
+		fromObject Object
+		toObject   entry
+		force      bool
+		onResponse func()
+		onError    func(error)
+	}
+
+	// Operation to delete an object
+	DeleteObjectOperation struct {
+		object     Object
+		onResponse func()
+		onError    func(error)
+	}
+
+	// Operation to restore an object
+	RestoreObjectOperation struct {
+		object          Object
+		freezeAfterDays int64
+		onResponse      func()
+		onError         func(error)
+	}
+
+	// Operation to set the storage class of an object
+	SetObjectStorageClassOperation struct {
+		object       Object
+		storageClass StorageClass
+		onResponse   func()
+		onError      func(error)
+	}
+
+	// Operation to set the status of an object
+	SetObjectStatusOperation struct {
+		object     Object
+		status     Status
+		onResponse func()
+		onError    func(error)
+	}
+
+	// Operation to set the metadata of an object
+	SetObjectMetadataOperation struct {
+		object     Object
+		mimeType   string
+		metadata   map[string]string
+		conditions map[string]string
+		onResponse func()
+		onError    func(error)
+	}
+
+	// Operation to set the life cycle of an object
+	SetObjectLifeCycleOperation struct {
+		object                 Object
+		toIAAfterDays          int64
+		toArchiveIRAfterDays   int64
+		toArchiveAfterDays     int64
+		toDeepArchiveAfterDays int64
+		deleteAfterDays        int64
+		onResponse             func()
+		onError                func(error)
+	}
+)
+
+func (operation *StatObjectOperation) NeedParts(needParts bool) *StatObjectOperation {
+	copy := *operation
+	copy.needParts = needParts
+	return &copy
+}
+
+func (operation *StatObjectOperation) OnResponse(fn func(*ObjectDetails)) *StatObjectOperation {
+	copy := *operation
+	copy.onResponse = fn
+	return &copy
+}
+
+func (operation *StatObjectOperation) OnError(fn func(error)) *StatObjectOperation {
+	copy := *operation
+	copy.onError = fn
+	return &copy
+}
+
+func (operation *StatObjectOperation) relatedEntries() []entry {
+	return []entry{{operation.object.bucket.name, operation.object.name}}
+}
+
+func (operation *StatObjectOperation) parseResponse(response *stat_object.Response, err error) (*ObjectDetails, error) {
+	if err != nil {
+		if operation.onError != nil {
+			operation.onError(err)
+		}
+		return nil, err
+	}
+
+	object := ObjectDetails{
+		Name:          operation.object.name,
+		UploadedAt:    time.Unix(response.PutTime/1e7, (response.PutTime%1e7)*1e2),
+		ETag:          response.Hash,
+		Size:          response.Size,
+		MimeType:      response.MimeType,
+		StorageClass:  StorageClass(response.Type),
+		EndUser:       response.EndUser,
+		Status:        Status(response.Status),
+		RestoreStatus: RestoreStatus(response.RestoringStatus),
+		Metadata:      response.Metadata,
+	}
+	var md5 []byte
+	if response.Md5 != "" {
+		md5, err = hex.DecodeString(response.Md5)
+		if err != nil {
+			if operation.onError != nil {
+				operation.onError(err)
+			}
+			return nil, err
+		}
+	}
+	if len(md5) > 0 {
+		copy(object.MD5[:], md5)
+	}
+	if len(response.Parts) > 0 {
+		object.Parts = append(make(stat_object.PartSizes, 0, len(response.Parts)), response.Parts...)
+	}
+	if response.TransitionToIaTime > 0 {
+		transitionToIA := time.Unix(response.TransitionToIaTime, 0)
+		object.TransitionToIA = &transitionToIA
+	}
+	if response.TransitionToArchiveIrTime > 0 {
+		transitionToArchiveIR := time.Unix(response.TransitionToArchiveIrTime, 0)
+		object.TransitionToArchiveIR = &transitionToArchiveIR
+	}
+	if response.TransitionToArchiveTime > 0 {
+		transitionToArchive := time.Unix(response.TransitionToArchiveTime, 0)
+		object.TransitionToArchive = &transitionToArchive
+	}
+	if response.TransitionToDeepArchiveTime > 0 {
+		transitionToDeepArchive := time.Unix(response.TransitionToDeepArchiveTime, 0)
+		object.TransitionToDeepArchive = &transitionToDeepArchive
+	}
+	if response.ExpirationTime > 0 {
+		expireAt := time.Unix(response.ExpirationTime, 0)
+		object.ExpireAt = &expireAt
+	}
+	if operation.onResponse != nil {
+		operation.onResponse(&object)
+	}
+	return &object, nil
+}
+
+func (operation *StatObjectOperation) handleResponse(object *ObjectDetails, err error) {
+	if err != nil && operation.onError != nil {
+		operation.onError(err)
+	} else if operation.onResponse != nil {
+		operation.onResponse(object)
+	}
+}
+
+func (operation *StatObjectOperation) String() string {
+	s := "stat/" + operation.object.encode()
+	if operation.needParts {
+		s += "/needparts/true"
+	}
+	return s
+}
+
+func (operation *StatObjectOperation) Call(ctx context.Context) (*ObjectDetails, error) {
+	response, err := operation.object.bucket.objectsManager.storage.StatObject(ctx, &apis.StatObjectRequest{
+		Entry:     operation.object.String(),
+		NeedParts: operation.needParts,
+	}, &apis.Options{
+		OverwrittenBucketName: operation.object.bucket.name,
+	})
+	return operation.parseResponse(response, err)
+}
+
+var _ Operation = (*StatObjectOperation)(nil)
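Note the pattern every chainable setter here follows: the receiver is copied (`copy := *operation; return &copy`), so configuring an operation never mutates the value it was derived from, and a partially configured operation can serve as a reusable template. For example (with `bucket` as in the earlier sketches):

base := bucket.Object("a.txt").Stat() // reusable template
withParts := base.NeedParts(true)     // a modified copy
_ = withParts
// base itself still has needParts == false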
+
+func (operation *MoveObjectOperation) Force(force bool) *MoveObjectOperation {
+	copy := *operation
+	copy.force = force
+	return &copy
+}
+
+func (operation *MoveObjectOperation) OnResponse(fn func()) *MoveObjectOperation {
+	copy := *operation
+	copy.onResponse = fn
+	return &copy
+}
+
+func (operation *MoveObjectOperation) OnError(fn func(error)) *MoveObjectOperation {
+	copy := *operation
+	copy.onError = fn
+	return &copy
+}
+
+func (operation *MoveObjectOperation) relatedEntries() []entry {
+	return []entry{{operation.fromObject.bucket.name, operation.fromObject.name}, operation.toObject}
+}
+
+func (operation *MoveObjectOperation) handleResponse(_ *ObjectDetails, err error) {
+	if err != nil && operation.onError != nil {
+		operation.onError(err)
+	} else if operation.onResponse != nil {
+		operation.onResponse()
+	}
+}
+
+func (operation *MoveObjectOperation) String() string {
+	s := "move/" + operation.fromObject.encode() + "/" + operation.toObject.encode()
+	if operation.force {
+		s += "/force/true"
+	}
+	return s
+}
+
+func (operation *MoveObjectOperation) Call(ctx context.Context) error {
+	_, err := operation.fromObject.bucket.objectsManager.storage.MoveObject(ctx, &apis.MoveObjectRequest{
+		SrcEntry:  operation.fromObject.String(),
+		DestEntry: operation.toObject.String(),
+		IsForce:   operation.force,
+	}, &apis.Options{
+		OverwrittenBucketName: operation.fromObject.bucket.name,
+	})
+	operation.handleResponse(nil, err)
+	return err
+}
+
+var _ Operation = (*MoveObjectOperation)(nil)
+
+func (operation *CopyObjectOperation) Force(force bool) *CopyObjectOperation {
+	copy := *operation
+	copy.force = force
+	return &copy
+}
+
+func (operation *CopyObjectOperation) OnResponse(fn func()) *CopyObjectOperation {
+	copy := *operation
+	copy.onResponse = fn
+	return &copy
+}
+
+func (operation *CopyObjectOperation) OnError(fn func(error)) *CopyObjectOperation {
+	copy := *operation
+	copy.onError = fn
+	return &copy
+}
+
+func (operation *CopyObjectOperation) relatedEntries() []entry {
+	return []entry{{operation.fromObject.bucket.name, operation.fromObject.name}, operation.toObject}
+}
+
+func (operation *CopyObjectOperation) handleResponse(_ *ObjectDetails, err error) {
+	if err != nil && operation.onError != nil {
+		operation.onError(err)
+	} else if operation.onResponse != nil {
+		operation.onResponse()
+	}
+}
+
+func (operation *CopyObjectOperation) String() string {
+	s := "copy/" + operation.fromObject.encode() + "/" + operation.toObject.encode()
+	if operation.force {
+		s += "/force/true"
+	}
+	return s
+}
+
+func (operation *CopyObjectOperation) Call(ctx context.Context) error {
+	_, err := operation.fromObject.bucket.objectsManager.storage.CopyObject(ctx, &apis.CopyObjectRequest{
+		SrcEntry:  operation.fromObject.String(),
+		DestEntry: operation.toObject.String(),
+		IsForce:   operation.force,
+	}, &apis.Options{
+		OverwrittenBucketName: operation.fromObject.bucket.name,
+	})
+	operation.handleResponse(nil, err)
+	return err
+}
+
+var _ Operation = (*CopyObjectOperation)(nil)
+
+func (operation *DeleteObjectOperation) OnResponse(fn func()) *DeleteObjectOperation {
+	copy := *operation
+	copy.onResponse = fn
+	return &copy
+}
+
+func (operation *DeleteObjectOperation) OnError(fn func(error)) *DeleteObjectOperation {
+	copy := *operation
+	copy.onError = fn
+	return &copy
+}
+
+func (operation *DeleteObjectOperation) handleResponse(_ *ObjectDetails, err error) {
+	if err != nil && operation.onError != nil {
+		operation.onError(err)
+	} else if operation.onResponse != nil {
+		operation.onResponse()
+	}
+}
+
+func (operation *DeleteObjectOperation) relatedEntries() []entry {
+	return []entry{{operation.object.bucket.name, operation.object.name}}
+}
+
+func (operation *DeleteObjectOperation) String() string {
+	return "delete/" + operation.object.encode()
+}
+
+func (operation *DeleteObjectOperation) Call(ctx context.Context) error {
+	_, err := operation.object.bucket.objectsManager.storage.DeleteObject(ctx, &apis.DeleteObjectRequest{
+		Entry: operation.object.String(),
+	}, &apis.Options{
+		OverwrittenBucketName: operation.object.bucket.name,
+	})
+	operation.handleResponse(nil, err)
+	return err
+}
+
+var _ Operation = (*DeleteObjectOperation)(nil)
+
+func (operation *RestoreObjectOperation) OnResponse(fn func()) *RestoreObjectOperation {
+	copy := *operation
+	copy.onResponse = fn
+	return &copy
+}
+
+func (operation *RestoreObjectOperation) OnError(fn func(error)) *RestoreObjectOperation {
+	copy := *operation
+	copy.onError = fn
+	return &copy
+}
+
+func (operation *RestoreObjectOperation) relatedEntries() []entry {
+	return []entry{{operation.object.bucket.name, operation.object.name}}
+}
+
+func (operation *RestoreObjectOperation) handleResponse(_ *ObjectDetails, err error) {
+	if err != nil && operation.onError != nil {
+		operation.onError(err)
+	} else if operation.onResponse != nil {
+		operation.onResponse()
+	}
+}
+
+func (operation *RestoreObjectOperation) String() string {
+	return "restoreAr/" + operation.object.encode() + "/freezeAfterDays/" + strconv.FormatInt(operation.freezeAfterDays, 10)
+}
+
+func (operation *RestoreObjectOperation) Call(ctx context.Context) error {
+	_, err := operation.object.bucket.objectsManager.storage.RestoreArchivedObject(ctx, &apis.RestoreArchivedObjectRequest{
+		Entry:           operation.object.String(),
+		FreezeAfterDays: int64(operation.freezeAfterDays),
+	}, &apis.Options{
+		OverwrittenBucketName: operation.object.bucket.name,
+	})
+	operation.handleResponse(nil, err)
+	return err
+}
+
+var _ Operation = (*RestoreObjectOperation)(nil)
+func (operation *SetObjectStorageClassOperation) OnResponse(fn func()) *SetObjectStorageClassOperation { + copy := *operation + copy.onResponse = fn + return &copy +} + +func (operation *SetObjectStorageClassOperation) OnError(fn func(error)) *SetObjectStorageClassOperation { + copy := *operation + copy.onError = fn + return &copy +} + +func (operation *SetObjectStorageClassOperation) relatedEntries() []entry { + return []entry{{operation.object.bucket.name, operation.object.name}} +} + +func (operation *SetObjectStorageClassOperation) handleResponse(_ *ObjectDetails, err error) { + if err != nil && operation.onError != nil { + operation.onError(err) + } else if operation.onResponse != nil { + operation.onResponse() + } +} + +func (operation *SetObjectStorageClassOperation) String() string { + return "chtype/" + operation.object.encode() + "/type/" + strconv.Itoa(int(operation.storageClass)) +} + +func (operation *SetObjectStorageClassOperation) Call(ctx context.Context) error { + _, err := operation.object.bucket.objectsManager.storage.SetObjectFileType(ctx, &apis.SetObjectFileTypeRequest{ + Entry: operation.object.String(), + Type: int64(operation.storageClass), + }, &apis.Options{ + OverwrittenBucketName: operation.object.bucket.name, + }) + operation.handleResponse(nil, err) + return err +} + +var _ Operation = (*SetObjectStorageClassOperation)(nil) + +func (operation *SetObjectStatusOperation) OnResponse(fn func()) *SetObjectStatusOperation { + copy := *operation + copy.onResponse = fn + return &copy +} + +func (operation *SetObjectStatusOperation) OnError(fn func(error)) *SetObjectStatusOperation { + copy := *operation + copy.onError = fn + return &copy +} + +func (operation *SetObjectStatusOperation) relatedEntries() []entry { + return []entry{{operation.object.bucket.name, operation.object.name}} +} + +func (operation *SetObjectStatusOperation) handleResponse(_ *ObjectDetails, err error) { + if err != nil && operation.onError != nil { + operation.onError(err) + } else if operation.onResponse != nil { + operation.onResponse() + } +} + +func (operation *SetObjectStatusOperation) String() string { + return "chstatus/" + operation.object.encode() + "/status/" + strconv.Itoa(int(operation.status)) +} + +func (operation *SetObjectStatusOperation) Call(ctx context.Context) error { + _, err := operation.object.bucket.objectsManager.storage.ModifyObjectStatus(ctx, &apis.ModifyObjectStatusRequest{ + Entry: operation.object.String(), + Status: int64(operation.status), + }, &apis.Options{ + OverwrittenBucketName: operation.object.bucket.name, + }) + operation.handleResponse(nil, err) + return err +} + +var _ Operation = (*SetObjectStatusOperation)(nil) + +func (operation *SetObjectMetadataOperation) Metadata(metadata map[string]string) *SetObjectMetadataOperation { + copy := *operation + copy.metadata = metadata + return &copy +} + +func (operation *SetObjectMetadataOperation) Conditions(conds map[string]string) *SetObjectMetadataOperation { + copy := *operation + copy.conditions = conds + return &copy +} + +func (operation *SetObjectMetadataOperation) OnResponse(fn func()) *SetObjectMetadataOperation { + copy := *operation + copy.onResponse = fn + return &copy +} + +func (operation *SetObjectMetadataOperation) OnError(fn func(error)) *SetObjectMetadataOperation { + copy := *operation + copy.onError = fn + return &copy +} + +func (operation *SetObjectMetadataOperation) relatedEntries() []entry { + return []entry{{operation.object.bucket.name, operation.object.name}} +} + +func (operation *SetObjectMetadataOperation) handleResponse(_ *ObjectDetails, err error) { + if err != nil && operation.onError != nil { + operation.onError(err) + } else if operation.onResponse != nil { + operation.onResponse() + } +} + +func (operation *SetObjectMetadataOperation) String() string { + s := "chgm/" + operation.object.encode() + "/mime/" + base64.URLEncoding.EncodeToString([]byte(operation.mimeType)) + for k, v := range operation.metadata { + s += "/" + normalizeMetadataKey(k) + "/" + base64.URLEncoding.EncodeToString([]byte(v)) + } + conds := []string{} + for k, v := range operation.conditions { + conds = append(conds, k+"="+v) + } + if len(conds) > 0 { + s += "/cond/" + base64.URLEncoding.EncodeToString([]byte(strings.Join(conds, "&"))) + } + return s +} + +func (operation *SetObjectMetadataOperation) Call(ctx context.Context) error { + conds := make([]string, 0, len(operation.conditions)) + for k, v := range operation.conditions { + conds = append(conds, k+"="+v) + } + metadata := make(map[string]string, len(operation.metadata)) + for k, v := range operation.metadata { + metadata[normalizeMetadataKey(k)] = v + } + _, err := operation.object.bucket.objectsManager.storage.ModifyObjectMetadata(ctx, &apis.ModifyObjectMetadataRequest{ + Entry: operation.object.String(), + MimeType: operation.mimeType, + Condition: strings.Join(conds, "&"), + MetaData: metadata, + }, &apis.Options{ + OverwrittenBucketName: operation.object.bucket.name, + }) + operation.handleResponse(nil, err) + return err +} + +var _ Operation = (*SetObjectMetadataOperation)(nil) +
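// Aside: the String() methods above serialize each operation into a batch
// command: an entry is "bucket:objectName" encoded with URL-safe base64, so a
// copy becomes "copy/<src>/<dst>[/force/true]" and a metadata change becomes
// "chgm/<entry>/mime/<base64 mime>/...". A runnable sketch of the entry
// encoding (the bucket and key names are made up):

package main

import (
	"encoding/base64"
	"fmt"
)

// encodeEntry mirrors the entry.encode() helper in the hunk above.
func encodeEntry(bucket, key string) string {
	return base64.URLEncoding.EncodeToString([]byte(bucket + ":" + key))
}

func main() {
	src := encodeEntry("src-bucket", "a.txt")
	dst := encodeEntry("dst-bucket", "b.txt")
	fmt.Println("copy/" + src + "/" + dst + "/force/true")
	fmt.Println("delete/" + encodeEntry("src-bucket", "a.txt"))
}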
+func (operation *SetObjectLifeCycleOperation) ToIAAfterDays(afterDays int64) *SetObjectLifeCycleOperation { + copy := *operation + copy.toIAAfterDays = afterDays + return &copy +} + +func (operation *SetObjectLifeCycleOperation) ToArchiveIRAfterDays(afterDays int64) *SetObjectLifeCycleOperation { + copy := *operation + copy.toArchiveIRAfterDays = afterDays + return &copy +} + +func (operation *SetObjectLifeCycleOperation) ToArchiveAfterDays(afterDays int64) *SetObjectLifeCycleOperation { + copy := *operation + copy.toArchiveAfterDays = afterDays + return &copy +} + +func (operation *SetObjectLifeCycleOperation) ToDeepArchiveAfterDays(afterDays int64) *SetObjectLifeCycleOperation { + copy := *operation + copy.toDeepArchiveAfterDays = afterDays + return &copy +} + +func (operation *SetObjectLifeCycleOperation) DeleteAfterDays(afterDays int64) *SetObjectLifeCycleOperation { + copy := *operation + copy.deleteAfterDays = afterDays + return &copy +} + +func (operation *SetObjectLifeCycleOperation) OnResponse(fn func()) *SetObjectLifeCycleOperation { + copy := *operation + copy.onResponse = fn + return &copy +} + +func (operation *SetObjectLifeCycleOperation) OnError(fn func(error)) *SetObjectLifeCycleOperation { + copy := *operation + copy.onError = fn + return &copy +} + +func (operation *SetObjectLifeCycleOperation) relatedEntries() []entry { + return []entry{{operation.object.bucket.name, operation.object.name}} +} + +func (operation *SetObjectLifeCycleOperation) handleResponse(_ *ObjectDetails, err error) { + if err != nil && operation.onError != nil { + operation.onError(err) + } else if operation.onResponse != nil { + operation.onResponse() + } +} +
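// Aside: a usage sketch of the lifecycle builder above. Each *AfterDays call
// returns a modified copy, and String() (next hunk) only serializes fields
// with a positive day count, so unset transitions are omitted. Assuming op is
// a *SetObjectLifeCycleOperation obtained from the objects manager (its
// constructor is not part of this hunk):
//
//	op = op.ToIAAfterDays(30).ToArchiveAfterDays(90).DeleteAfterDays(365)
//	// op.String() == "lifecycle/<base64 of bucket:key>" +
//	//	"/toIAAfterDays/30/toArchiveAfterDays/90/deleteAfterDays/365"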
+func (operation *SetObjectLifeCycleOperation) String() string { + s := "lifecycle/" + operation.object.encode() + if operation.toIAAfterDays > 0 { + s += "/toIAAfterDays/" + strconv.FormatInt(operation.toIAAfterDays, 10) + } + if operation.toArchiveIRAfterDays > 0 { + s += "/toArchiveIRAfterDays/" + strconv.FormatInt(operation.toArchiveIRAfterDays, 10) + } + if operation.toArchiveAfterDays > 0 { + s += "/toArchiveAfterDays/" + strconv.FormatInt(operation.toArchiveAfterDays, 10) + } + if operation.toDeepArchiveAfterDays > 0 { + s += "/toDeepArchiveAfterDays/" + strconv.FormatInt(operation.toDeepArchiveAfterDays, 10) + } + if operation.deleteAfterDays > 0 { + s += "/deleteAfterDays/" + strconv.FormatInt(operation.deleteAfterDays, 10) + } + return s +} + +func (operation *SetObjectLifeCycleOperation) Call(ctx context.Context) error { + _, err := operation.object.bucket.objectsManager.storage.ModifyObjectLifeCycle(ctx, &apis.ModifyObjectLifeCycleRequest{ + Entry: operation.object.String(), + ToIaAfterDays: operation.toIAAfterDays, + ToArchiveAfterDays: operation.toArchiveAfterDays, + ToDeepArchiveAfterDays: operation.toDeepArchiveAfterDays, + ToArchiveIrAfterDays: operation.toArchiveIRAfterDays, + DeleteAfterDays: operation.deleteAfterDays, + }, &apis.Options{ + OverwrittenBucketName: operation.object.bucket.name, + }) + operation.handleResponse(nil, err) + return err +} + +var _ Operation = (*SetObjectLifeCycleOperation)(nil) + +func (entry Object) String() string { + return entry.bucket.name + ":" + entry.name +} + +func (entry Object) encode() string { + return base64.URLEncoding.EncodeToString([]byte(entry.String())) +} + +func (entry entry) String() string { + return entry.bucketName + ":" + entry.objectName +} + +func (entry entry) encode() string { + return base64.URLEncoding.EncodeToString([]byte(entry.String())) +} + +func normalizeMetadataKey(k string) string { + if !strings.HasPrefix(k, "x-qn-meta-") { + k = "x-qn-meta-" + k + } + return k +} diff --git a/storagev2/region/query.go b/storagev2/region/query.go index 9116ca88..9bcca509 100644 --- a/storagev2/region/query.go +++ b/storagev2/region/query.go @@ -133,8 +133,6 @@ const cacheFileName = "query_v4_01.cache.json" var ( persistentCaches map[uint64]*cache.Cache persistentCachesLock sync.Mutex - defaultResolver = resolver.NewDefaultResolver() - defaultChooser = chooser.NewShuffleChooser(chooser.NewSmartIPChooser(nil)) ) // NewBucketRegionsQuery creates a bucket regions query @@ -142,33 +140,28 @@ func NewBucketRegionsQuery(bucketHosts Endpoints, opts *BucketRegionsQueryOption if opts == nil { opts = &BucketRegionsQueryOptions{} } - if opts.RetryMax <= 0 { - opts.RetryMax = 2 + retryMax := opts.RetryMax + if retryMax <= 0 { + retryMax = 2 } - if opts.CompactInterval == time.Duration(0) { - opts.CompactInterval = time.Minute + compactInterval := opts.CompactInterval + if compactInterval == time.Duration(0) { + compactInterval = time.Minute } - if opts.PersistentFilePath == "" { - opts.PersistentFilePath = filepath.Join(os.TempDir(), "qiniu-golang-sdk", cacheFileName) + persistentFilePath := opts.PersistentFilePath + if persistentFilePath == "" { + persistentFilePath = filepath.Join(os.TempDir(), "qiniu-golang-sdk", cacheFileName) } - if opts.PersistentDuration == time.Duration(0) { - opts.PersistentDuration = time.Minute + persistentDuration := opts.PersistentDuration + if persistentDuration == time.Duration(0) { + persistentDuration = time.Minute } - persistentCache, err := getPersistentCache(opts) + persistentCache, err := getPersistentCache(persistentFilePath, compactInterval, persistentDuration) if err != nil { return nil, err } - r := opts.Resolver - cs := opts.Chooser - bf := opts.Backoff - if r
== nil { - r = defaultResolver - } - if cs == nil { - cs = defaultChooser - } return &bucketRegionsQuery{ bucketHosts: bucketHosts, cache: persistentCache, @@ -176,11 +169,11 @@ func NewBucketRegionsQuery(bucketHosts Endpoints, opts *BucketRegionsQueryOption opts.Client, bucketHosts, !opts.UseInsecureProtocol, - opts.RetryMax, + retryMax, opts.HostFreezeDuration, - r, - cs, - bf, + opts.Resolver, + opts.Chooser, + opts.Backoff, opts.BeforeResolve, opts.AfterResolve, opts.ResolveError, @@ -193,14 +186,14 @@ func NewBucketRegionsQuery(bucketHosts Endpoints, opts *BucketRegionsQueryOption }, nil } -func getPersistentCache(opts *BucketRegionsQueryOptions) (*cache.Cache, error) { +func getPersistentCache(persistentFilePath string, compactInterval, persistentDuration time.Duration) (*cache.Cache, error) { var ( persistentCache *cache.Cache ok bool err error ) - crc64Value := calcPersistentCacheCrc64(opts) + crc64Value := calcPersistentCacheCrc64(persistentFilePath, compactInterval, persistentDuration) persistentCachesLock.Lock() defer persistentCachesLock.Unlock() @@ -210,9 +203,9 @@ func getPersistentCache(opts *BucketRegionsQueryOptions) (*cache.Cache, error) { if persistentCache, ok = persistentCaches[crc64Value]; !ok { persistentCache, err = cache.NewPersistentCache( reflect.TypeOf(&v4QueryCacheValue{}), - opts.PersistentFilePath, - opts.CompactInterval, - opts.PersistentDuration, + persistentFilePath, + compactInterval, + persistentDuration, func(err error) { log.Warn(fmt.Sprintf("BucketRegionsQuery persist error: %s", err)) }) @@ -342,6 +335,7 @@ func makeBucketQueryClient( afterResponse func(*http.Response, *retrier.RetrierOptions, error), ) clientv2.Client { is := []clientv2.Interceptor{ + clientv2.NewAntiHijackingInterceptor(), clientv2.NewHostsRetryInterceptor(clientv2.HostsRetryConfig{ RetryMax: len(bucketHosts.Preferred) + len(bucketHosts.Alternative), HostFreezeDuration: hostFreezeDuration, @@ -365,15 +359,11 @@ func makeBucketQueryClient( return clientv2.NewClient(client, is...) } -func (opts *BucketRegionsQueryOptions) toBytes() []byte { +func calcPersistentCacheCrc64(persistentFilePath string, compactInterval, persistentDuration time.Duration) uint64 { bytes := make([]byte, 0, 1024) - bytes = strconv.AppendInt(bytes, int64(opts.CompactInterval), 36) - bytes = append(bytes, []byte(opts.PersistentFilePath)...) + bytes = strconv.AppendInt(bytes, int64(compactInterval), 36) + bytes = append(bytes, []byte(persistentFilePath)...) 
bytes = append(bytes, byte(0)) - bytes = strconv.AppendInt(bytes, int64(opts.PersistentDuration), 36) - return bytes -} - -func calcPersistentCacheCrc64(opts *BucketRegionsQueryOptions) uint64 { - return crc64.Checksum(opts.toBytes(), crc64.MakeTable(crc64.ISO)) + bytes = strconv.AppendInt(bytes, int64(persistentDuration), 36) + return crc64.Checksum(bytes, crc64.MakeTable(crc64.ISO)) } diff --git a/storagev2/region/query_test.go b/storagev2/region/query_test.go index 0c4abb21..197e75dc 100644 --- a/storagev2/region/query_test.go +++ b/storagev2/region/query_test.go @@ -27,6 +27,7 @@ func TestBucketRegionsQuery(t *testing.T) { if gotBucketName := r.URL.Query().Get("bucket"); gotBucketName != bucketName { t.Fatalf("Unexpected bucket: %s", gotBucketName) } + w.Header().Add("X-ReqId", "fakereqid") if _, err := io.WriteString(w, mockUcQueryResponseBody()); err != nil { t.Fatal(err) } diff --git a/storagev2/resolver/resolver.go b/storagev2/resolver/resolver.go index 6197fa10..58a249aa 100644 --- a/storagev2/resolver/resolver.go +++ b/storagev2/resolver/resolver.go @@ -96,41 +96,45 @@ func NewCacheResolver(resolver Resolver, opts *CacheResolverConfig) (Resolver, e if opts == nil { opts = &CacheResolverConfig{} } - if opts.CompactInterval == time.Duration(0) { - opts.CompactInterval = 60 * time.Second + compactInterval := opts.CompactInterval + if compactInterval == time.Duration(0) { + compactInterval = 60 * time.Second } - if opts.PersistentFilePath == "" { - opts.PersistentFilePath = filepath.Join(os.TempDir(), "qiniu-golang-sdk", cacheFileName) + persistentFilePath := opts.PersistentFilePath + if persistentFilePath == "" { + persistentFilePath = filepath.Join(os.TempDir(), "qiniu-golang-sdk", cacheFileName) } - if opts.PersistentDuration == time.Duration(0) { - opts.PersistentDuration = 60 * time.Second + persistentDuration := opts.PersistentDuration + if persistentDuration == time.Duration(0) { + persistentDuration = 60 * time.Second } - if opts.CacheLifetime == time.Duration(0) { - opts.CacheLifetime = 120 * time.Second + cacheLifetime := opts.CacheLifetime + if cacheLifetime == time.Duration(0) { + cacheLifetime = 120 * time.Second } if resolver == nil { resolver = staticDefaultResolver } - persistentCache, err := getPersistentCache(opts) + persistentCache, err := getPersistentCache(persistentFilePath, compactInterval, persistentDuration) if err != nil { return nil, err } return &cacheResolver{ cache: persistentCache, resolver: resolver, - cacheLifetime: opts.CacheLifetime, + cacheLifetime: cacheLifetime, }, nil } -func getPersistentCache(opts *CacheResolverConfig) (*cache.Cache, error) { +func getPersistentCache(persistentFilePath string, compactInterval, persistentDuration time.Duration) (*cache.Cache, error) { var ( persistentCache *cache.Cache ok bool err error ) - crc64Value := calcPersistentCacheCrc64(opts) + crc64Value := calcPersistentCacheCrc64(persistentFilePath, compactInterval, persistentDuration) persistentCachesLock.Lock() defer persistentCachesLock.Unlock() @@ -140,9 +144,9 @@ func getPersistentCache(opts *CacheResolverConfig) (*cache.Cache, error) { if persistentCache, ok = persistentCaches[crc64Value]; !ok { persistentCache, err = cache.NewPersistentCache( reflect.TypeOf(&resolverCacheValue{}), - opts.PersistentFilePath, - opts.CompactInterval, - opts.PersistentDuration, + persistentFilePath, + compactInterval, + persistentDuration, func(err error) { log.Warn(fmt.Sprintf("BucketRegionsQuery persist error: %s", err)) }) @@ -202,16 +206,11 @@ func (*cacheResolver) 
localIp(host string) (string, error) { return conn.LocalAddr().(*net.UDPAddr).IP.String(), nil } -func (opts *CacheResolverConfig) toBytes() []byte { +func calcPersistentCacheCrc64(persistentFilePath string, compactInterval, persistentDuration time.Duration) uint64 { bytes := make([]byte, 0, 1024) - bytes = strconv.AppendInt(bytes, int64(opts.CompactInterval), 36) - bytes = strconv.AppendInt(bytes, int64(opts.PersistentDuration), 36) - bytes = strconv.AppendInt(bytes, int64(opts.CacheLifetime), 36) - bytes = append(bytes, []byte(opts.PersistentFilePath)...) + bytes = strconv.AppendInt(bytes, int64(compactInterval), 36) + bytes = strconv.AppendInt(bytes, int64(persistentDuration), 36) + bytes = append(bytes, []byte(persistentFilePath)...) bytes = append(bytes, byte(0)) - return bytes -} - -func calcPersistentCacheCrc64(opts *CacheResolverConfig) uint64 { - return crc64.Checksum(opts.toBytes(), crc64.MakeTable(crc64.ISO)) + return crc64.Checksum(bytes, crc64.MakeTable(crc64.ISO)) } diff --git a/storagev2/resolver/resolver_test.go b/storagev2/resolver/resolver_test.go index 0c5bc433..9d2baa74 100644 --- a/storagev2/resolver/resolver_test.go +++ b/storagev2/resolver/resolver_test.go @@ -5,7 +5,9 @@ package resolver_test import ( "context" + "io/ioutil" "net" + "os" "testing" "github.com/qiniu/go-sdk/v7/storagev2/resolver" @@ -31,8 +33,18 @@ func (mr *mockResolver) Resolve(ctx context.Context, host string) ([]net.IP, err } func TestCacheResolver(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + mr := &mockResolver{m: map[string][]net.IP{"upload.qiniup.com": {net.IPv4(1, 1, 1, 1)}}, c: make(map[string]int)} - resolver, err := resolver.NewCacheResolver(mr, nil) + resolver, err := resolver.NewCacheResolver(mr, &resolver.CacheResolverConfig{ + PersistentFilePath: tmpFile.Name(), + }) + if err != nil { t.Fatal(err) } diff --git a/storagev2/retrier/retrier.go b/storagev2/retrier/retrier.go index 1738c2dc..5055865b 100644 --- a/storagev2/retrier/retrier.go +++ b/storagev2/retrier/retrier.go @@ -2,6 +2,7 @@ package retrier import ( "context" + "errors" "net" "net/http" "net/url" @@ -78,15 +79,15 @@ func isResponseRetryable(resp *http.Response) bool { if resp == nil { return false } - return isStatusCodeRetryable(resp.StatusCode) + return IsStatusCodeRetryable(resp.StatusCode) } -func isStatusCodeRetryable(statusCode int) bool { +func IsStatusCodeRetryable(statusCode int) bool { if statusCode < 500 { return false } - if statusCode == 501 || statusCode == 509 || statusCode == 573 || statusCode == 579 || + if statusCode == 501 || statusCode == 509 || statusCode == 579 || statusCode == 608 || statusCode == 612 || statusCode == 614 || statusCode == 616 || statusCode == 618 || statusCode == 630 || statusCode == 631 || statusCode == 632 || statusCode == 640 || statusCode == 701 { return false @@ -108,6 +109,8 @@ func IsErrorRetryable(err error) bool { } } +var ErrMaliciousResponse = errors.New("malicious response") + func getRetryDecisionForError(err error) RetryDecision { if err == nil { return DontRetry @@ -137,7 +140,11 @@ func getRetryDecisionForError(err error) RetryDecision { } unwrapedErr := unwrapUnderlyingError(err) - if os.IsTimeout(unwrapedErr) { + if unwrapedErr == context.DeadlineExceeded { + return DontRetry + } else if unwrapedErr == ErrMaliciousResponse { + return RetryRequest + } else if os.IsTimeout(unwrapedErr) { return RetryRequest } else if dnsError, ok := unwrapedErr.(*net.DNSError); 
ok && isDnsNotFoundError(dnsError) { return TryNextHost @@ -148,10 +155,17 @@ default: return DontRetry } + } else if errno, ok := unwrapedErr.(syscall.Errno); ok { + switch errno { + case syscall.ECONNREFUSED, syscall.ECONNABORTED, syscall.ECONNRESET: + return TryNextHost + default: + return DontRetry + } } else if unwrapedErr == context.Canceled { return DontRetry } else if clientErr, ok := unwrapedErr.(*clientv1.ErrorInfo); ok { - if isStatusCodeRetryable(clientErr.Code) { + if IsStatusCodeRetryable(clientErr.Code) { return RetryRequest } else { return DontRetry diff --git a/storagev2/uploader/credentials_uptoken_signer.go b/storagev2/uploader/credentials_uptoken_signer.go new file mode 100644 index 00000000..f42e43a1 --- /dev/null +++ b/storagev2/uploader/credentials_uptoken_signer.go @@ -0,0 +1,83 @@ +package uploader + +import ( + "context" + "sync" + "time" + + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +type credentialsUpTokenSigner struct { + credentials credentials.CredentialsProvider + bucketName string + tokenTtl time.Duration + cacheTtl time.Duration + + cacheMutex sync.Mutex + cachedPolicy uptoken.PutPolicy + cachedCredentials *credentials.Credentials + policyCachedAt time.Time + credentialsCachedAt time.Time +} + +func (signer *credentialsUpTokenSigner) GetPutPolicy(ctx context.Context) (uptoken.PutPolicy, error) { + var err error + + signer.cacheMutex.Lock() + defer signer.cacheMutex.Unlock() + + now := time.Now() + if signer.cachedPolicy == nil || signer.policyCachedAt.Add(signer.cacheTtl).Before(now) { + signer.cachedPolicy, err = uptoken.NewPutPolicy(signer.bucketName, now.Add(signer.tokenTtl)) + if err != nil { + return nil, err + } + signer.policyCachedAt = now + } + + return signer.cachedPolicy, nil +} + +func (signer *credentialsUpTokenSigner) GetAccessKey(ctx context.Context) (string, error) { + cred, err := signer.getCredentials(ctx) + if err != nil { + return "", err + } + return cred.AccessKey, nil +} + +func (signer *credentialsUpTokenSigner) GetUpToken(ctx context.Context) (string, error) { + putPolicy, err := signer.GetPutPolicy(ctx) + if err != nil { + return "", err + } + cred, err := signer.getCredentials(ctx) + if err != nil { + return "", err + } + return uptoken.NewSigner(putPolicy, cred).GetUpToken(ctx) +} + +func (signer *credentialsUpTokenSigner) getCredentials(ctx context.Context) (*credentials.Credentials, error) { + var err error + + signer.cacheMutex.Lock() + defer signer.cacheMutex.Unlock() + + now := time.Now() + if signer.cachedCredentials == nil || signer.credentialsCachedAt.Add(signer.cacheTtl).Before(now) { + signer.cachedCredentials, err = signer.credentials.Get(ctx) + if err != nil { + return nil, err + } + signer.credentialsCachedAt = now + } + + return signer.cachedCredentials, nil +} + +func newCredentialsUpTokenSigner(credentials credentials.CredentialsProvider, bucketName string, tokenTtl, cacheTtl time.Duration) uptoken.Provider { + return &credentialsUpTokenSigner{credentials: credentials, bucketName: bucketName, tokenTtl: tokenTtl, cacheTtl: cacheTtl} +}
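// Aside: credentialsUpTokenSigner above caches the put policy and the
// credentials for cacheTtl so that repeated GetUpToken calls do not rebuild
// them every time. A minimal uncached sketch using the public uptoken API the
// signer wraps (the access key, secret key, and bucket name are placeholders):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/qiniu/go-sdk/v7/storagev2/credentials"
	"github.com/qiniu/go-sdk/v7/storagev2/uptoken"
)

func main() {
	cred := credentials.NewCredentials("testak", "testsk")
	// A put policy scoped to one bucket, valid for one hour.
	putPolicy, err := uptoken.NewPutPolicy("testbucket", time.Now().Add(time.Hour))
	if err != nil {
		panic(err)
	}
	upToken, err := uptoken.NewSigner(putPolicy, cred).GetUpToken(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(upToken)
}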
"net/http" + "net/http/httptest" + "os" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/qiniu/go-sdk/v7/client" + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" +) + +func TestFormUploader(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "form-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + hasher := md5.New() + if _, err = io.CopyN(tmpFile, io.TeeReader(r, hasher), 1024*1024); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + expectedMd5 := hasher.Sum(nil) + + serveMux := http.NewServeMux() + serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + t.Fatalf("unexpected method: %s", r.Method) + } + if err := r.ParseMultipartForm(2 * 1024 * 1024); err != nil { + t.Fatal(err) + } + if values := r.MultipartForm.Value["key"]; len(values) != 1 || values[0] != "testkey" { + t.Fatalf("unexpected key") + } + if values := r.MultipartForm.Value["token"]; len(values) != 1 || !strings.HasPrefix(values[0], "testak:") { + t.Fatalf("unexpected token") + } + if values := r.MultipartForm.Value["x-qn-meta-a"]; len(values) != 1 || values[0] != "b" { + t.Fatalf("unexpected x-qn-meta-a") + } + if values := r.MultipartForm.Value["x-qn-meta-c"]; len(values) != 1 || values[0] != "d" { + t.Fatalf("unexpected x-qn-meta-c") + } + if values := r.MultipartForm.Value["x:a"]; len(values) != 1 || values[0] != "b" { + t.Fatalf("unexpected x:a") + } + if values := r.MultipartForm.Value["x:c"]; len(values) != 1 || values[0] != "d" { + t.Fatalf("unexpected x:c") + } + if files := r.MultipartForm.File["file"]; len(files) != 1 || files[0].Filename != "testfilename" || files[0].Size != 1024*1024 { + t.Fatalf("unexpected file") + } else if contentType := files[0].Header.Get("Content-Type"); contentType != "application/json" { + t.Fatalf("unexpected file content-type") + } else if file, err := files[0].Open(); err != nil { + t.Fatal(err) + } else { + defer file.Close() + hasher := md5.New() + if _, err = io.Copy(hasher, file); err != nil { + t.Fatal(err) + } + if !bytes.Equal(hasher.Sum(nil), expectedMd5) { + t.Fatalf("unexpected file content") + } + } + w.Header().Add("x-reqid", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }) + server := httptest.NewServer(serveMux) + defer server.Close() + + formUploader := NewFormUploader(&FormUploaderOptions{ + Options: http_client.Options{ + Regions: ®ion.Region{Up: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }) + var ( + returnValue struct { + Ok bool `json:"ok"` + } + key = "testkey" + lastUploaded uint64 + ) + if err = formUploader.UploadFile(context.Background(), tmpFile.Name(), &ObjectOptions{ + BucketName: "testbucket", + ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": "b", "c": "d"}, + OnUploadingProgress: func(progress *UploadingProgress) { + if progress.TotalSize != 1024*1024 { + t.Fatalf("unexpected file size") + } else if progress.Uploaded > progress.TotalSize { + t.Fatalf("unexpected uploaded") + } else if lu := atomic.SwapUint64(&lastUploaded, progress.Uploaded); lu > progress.Uploaded || lu > progress.TotalSize { + 
t.Fatalf("unexpected uploaded") + } + }, + }, &returnValue); err != nil { + t.Fatal(err) + } + if !returnValue.Ok { + t.Fatalf("unexpected response value") + } +} + +func TestFormUploaderRetry(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "form-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + hasher := md5.New() + if _, err = io.CopyN(tmpFile, io.TeeReader(r, hasher), 1024*1024); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + expectedMd5 := hasher.Sum(nil) + + var handlerCalled_1, handlerCalled_2, handlerCalled_3 uint64 + + serveMux_1 := http.NewServeMux() + serveMux_1.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint64(&handlerCalled_1, 1) + if r.Method != http.MethodPost { + t.Fatalf("unexpected method: %s", r.Method) + } + if err := r.ParseMultipartForm(2 * 1024 * 1024); err != nil { + t.Fatal(err) + } + if values := r.MultipartForm.Value["key"]; len(values) != 1 || values[0] != "testkey" { + t.Fatalf("unexpected key") + } + if values := r.MultipartForm.Value["token"]; len(values) != 1 || !strings.HasPrefix(values[0], "testak:") { + t.Fatalf("unexpected token") + } + if values := r.MultipartForm.Value["x-qn-meta-a"]; len(values) != 1 || values[0] != "b" { + t.Fatalf("unexpected x-qn-meta-a") + } + if values := r.MultipartForm.Value["x-qn-meta-c"]; len(values) != 1 || values[0] != "d" { + t.Fatalf("unexpected x-qn-meta-c") + } + if values := r.MultipartForm.Value["x:a"]; len(values) != 1 || values[0] != "b" { + t.Fatalf("unexpected x:a") + } + if values := r.MultipartForm.Value["x:c"]; len(values) != 1 || values[0] != "d" { + t.Fatalf("unexpected x:c") + } + if files := r.MultipartForm.File["file"]; len(files) != 1 || files[0].Filename != "testfilename" || files[0].Size != 1024*1024 { + t.Fatalf("unexpected file") + } else if contentType := files[0].Header.Get("Content-Type"); contentType != "application/json" { + t.Fatalf("unexpected file content-type") + } else if file, err := files[0].Open(); err != nil { + t.Fatal(err) + } else { + defer file.Close() + hasher := md5.New() + if _, err = io.Copy(hasher, file); err != nil { + t.Fatal(err) + } + if !bytes.Equal(hasher.Sum(nil), expectedMd5) { + t.Fatalf("unexpected file content") + } + } + w.Header().Add("x-reqid", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }) + server_1 := httptest.NewServer(serveMux_1) + defer server_1.Close() + + serveMux_2 := http.NewServeMux() + serveMux_2.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint64(&handlerCalled_2, 1) + if r.Method != http.MethodPost { + t.Fatalf("unexpected method: %s", r.Method) + } + w.Header().Add("x-reqid", "fakereqid") + w.WriteHeader(599) + }) + server_2 := httptest.NewServer(serveMux_2) + defer server_2.Close() + + serveMux_3 := http.NewServeMux() + serveMux_3.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint64(&handlerCalled_3, 1) + if r.Method != http.MethodPost { + t.Fatalf("unexpected method: %s", r.Method) + } + w.Header().Add("x-reqid", "fakereqid") + w.WriteHeader(504) + }) + server_3 := httptest.NewServer(serveMux_3) + defer server_3.Close() + + handlerCalled_4 := uint64(0) + serveMux_4 := http.NewServeMux() + serveMux_4.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint64(&handlerCalled_4, 1) + if r.Method != http.MethodPost { + t.Fatalf("unexpected method: 
%s", r.Method) + } + w.Header().Add("x-reqid", "fakereqid") + w.WriteHeader(612) + }) + server_4 := httptest.NewServer(serveMux_4) + defer server_4.Close() + + var ( + returnValue struct { + Ok bool `json:"ok"` + } + key = "testkey" + ) + + formUploader := NewFormUploader(&FormUploaderOptions{ + Options: http_client.Options{ + Regions: regions{[]*region.Region{ + {Up: region.Endpoints{Preferred: []string{server_3.URL}}}, + {Up: region.Endpoints{Preferred: []string{server_2.URL}}}, + {Up: region.Endpoints{Preferred: []string{server_1.URL}}}, + }}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }) + if err = formUploader.UploadFile(context.Background(), tmpFile.Name(), &ObjectOptions{ + BucketName: "testbucket", + ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": "b", "c": "d"}, + }, &returnValue); err != nil { + t.Fatal(err) + } + if !returnValue.Ok { + t.Fatalf("unexpected response value") + } + if count := atomic.LoadUint64(&handlerCalled_1); count != 1 { + t.Fatalf("unexpected handler call count: %d", count) + } + if count := atomic.LoadUint64(&handlerCalled_2); count != 4 { + t.Fatalf("unexpected handler call count: %d", count) + } + if count := atomic.LoadUint64(&handlerCalled_3); count != 4 { + t.Fatalf("unexpected handler call count: %d", count) + } + atomic.StoreUint64(&handlerCalled_1, 0) + atomic.StoreUint64(&handlerCalled_2, 0) + atomic.StoreUint64(&handlerCalled_3, 0) + + formUploader = NewFormUploader(&FormUploaderOptions{ + Options: http_client.Options{ + Regions: regions{[]*region.Region{ + {Up: region.Endpoints{Preferred: []string{server_3.URL}}}, + {Up: region.Endpoints{Preferred: []string{server_2.URL}}}, + {Up: region.Endpoints{Preferred: []string{server_4.URL}}}, + }}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }) + if err = formUploader.UploadFile(context.Background(), tmpFile.Name(), &ObjectOptions{ + RegionsProvider: nil, + BucketName: "testbucket", + ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": "b", "c": "d"}, + }, &returnValue); err != nil { + if errInfo, ok := err.(*client.ErrorInfo); !ok || errInfo.Code != 612 { + t.Fatal(err) + } + } + if count := atomic.LoadUint64(&handlerCalled_4); count != 1 { + t.Fatalf("unexpected handler call count: %d", count) + } + if count := atomic.LoadUint64(&handlerCalled_2); count != 4 { + t.Fatalf("unexpected handler call count: %d", count) + } + if count := atomic.LoadUint64(&handlerCalled_3); count != 4 { + t.Fatalf("unexpected handler call count: %d", count) + } +} + +type regions struct { + regions []*region.Region +} + +func (group regions) GetRegions(context.Context) ([]*region.Region, error) { + return group.regions, nil +} diff --git a/storagev2/uploader/interfaces.go b/storagev2/uploader/interfaces.go new file mode 100644 index 00000000..1bcce80e --- /dev/null +++ b/storagev2/uploader/interfaces.go @@ -0,0 +1,81 @@ +package uploader + +import ( + "context" + "io" + + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/uploader/resumable_recorder" + "github.com/qiniu/go-sdk/v7/storagev2/uploader/source" + "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +type ( + // 上传对象接口 + Uploader interface { + // 上传文件 + UploadFile(context.Context, string, *ObjectOptions, 
diff --git a/storagev2/uploader/interfaces.go b/storagev2/uploader/interfaces.go new file mode 100644 index 00000000..1bcce80e --- /dev/null +++ b/storagev2/uploader/interfaces.go @@ -0,0 +1,81 @@ +package uploader + +import ( + "context" + "io" + + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/uploader/resumable_recorder" + "github.com/qiniu/go-sdk/v7/storagev2/uploader/source" + "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +type ( + // Interface for uploading objects + Uploader interface { + // Uploads a file + UploadFile(context.Context, string, *ObjectOptions, interface{}) error + + // Uploads from an io.Reader + UploadReader(context.Context, io.Reader, *ObjectOptions, interface{}) error + } + + // Interface for multi-parts uploaders + MultiPartsUploader interface { + // Initializes a multi-parts upload + InitializeParts(context.Context, source.Source, *MultiPartsObjectOptions) (InitializedParts, error) + + // Tries to resume previously uploaded parts; a nil return means resumption failed + TryToResume(context.Context, source.Source, *MultiPartsObjectOptions) InitializedParts + + // Uploads a part + UploadPart(context.Context, InitializedParts, source.Part, *UploadPartOptions) (UploadedPart, error) + + // Completes the multi-parts upload and creates the object + CompleteParts(context.Context, InitializedParts, []UploadedPart, interface{}) error + + // Returns the multi-parts uploader options + MultiPartsUploaderOptions() *MultiPartsUploaderOptions + } + + // A multi-parts upload that has been initialized + InitializedParts interface { + // Closes the multi-parts upload; once an InitializedParts is no longer needed, successful or not, this method should be called to close it + io.Closer + } + + // A part that has already been uploaded + UploadedPart interface { + // Part number + PartNumber() uint64 + + // Part offset + Offset() uint64 + + // Part size + PartSize() uint64 + } + + // Multi-parts upload scheduler + multiPartsUploaderScheduler interface { + // Uploads all parts of the data source + UploadParts(context.Context, InitializedParts, source.Source, *UploadPartsOptions) ([]UploadedPart, error) + + // Returns the multi-parts uploader instance + MultiPartsUploader() MultiPartsUploader + + // Returns the maximum part size + PartSize() uint64 + } + // Multi-parts uploader options + MultiPartsUploaderOptions struct { + // HTTP client options + httpclient.Options + + // Upload token provider interface + UpTokenProvider uptoken.Provider + + // Resumable recorder; if unset, interrupted uploads cannot be resumed + ResumableRecorder resumablerecorder.ResumableRecorder + } +) diff --git a/storagev2/uploader/multi_parts_uploader_test.go b/storagev2/uploader/multi_parts_uploader_test.go new file mode 100644 index 00000000..5fddc90f --- /dev/null +++ b/storagev2/uploader/multi_parts_uploader_test.go @@ -0,0 +1,671 @@ +//go:build unit +// +build unit + +package uploader + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/http/httptest" + "os" + "strconv" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/gorilla/mux" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/uploader/resumable_recorder" +) + +func TestMultiPartsUploader(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + if _, err = io.CopyN(tmpFile, r, 5*1024*1024); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + var server *httptest.Server + serveMux := mux.NewRouter() + serveMux.HandleFunc("/mkblk/4194304", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + expectedBody, err := internal_io.ReadAll(io.NewSectionReader(tmpFile, 0, 4*1024*1024)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(actualBody, expectedBody) { + t.Fatalf("unexpected body") + } + jsonBody, err := json.Marshal(&apis.ResumableUploadV1MakeBlockResponse{ + Ctx: "testctx1", + Checksum: "testchecksum1", + Crc32: int64(crc32.ChecksumIEEE(actualBody)), + Host: server.URL,
ExpiredAt: time.Now().Add(1 * time.Hour).Unix(), + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBody) + }).Methods(http.MethodPost) + serveMux.HandleFunc("/mkblk/1048576", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + expectedBody, err := internal_io.ReadAll(io.NewSectionReader(tmpFile, 4*1024*1024, 1*1024*1024)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(actualBody, expectedBody) { + t.Fatalf("unexpected body") + } + jsonBody, err := json.Marshal(&apis.ResumableUploadV1MakeBlockResponse{ + Ctx: "testctx2", + Checksum: "testchecksum2", + Crc32: int64(crc32.ChecksumIEEE(actualBody)), + Host: server.URL, + ExpiredAt: time.Now().Add(1 * time.Hour).Unix(), + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBody) + }).Methods(http.MethodPost) + serveMux.PathPrefix("/mkfile/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + components := strings.Split(strings.TrimPrefix(r.URL.Path, "/mkfile/"), "/") + if components[0] != strconv.FormatInt(5*1024*1024, 10) { + t.Fatalf("unexpected fileSize") + } + components = components[1:] + for len(components) > 0 { + switch components[0] { + case "key": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "testkey" { + t.Fatalf("unexpected key") + } + case "fname": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "testfilename" { + t.Fatalf("unexpected fname") + } + case "mimeType": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "application/json" { + t.Fatalf("unexpected mimeType") + } + case "x-qn-meta-a": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "b" { + t.Fatalf("unexpected x-qn-meta-a") + } + case "x-qn-meta-c": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "d" { + t.Fatalf("unexpected x-qn-meta-c") + } + case "x:a": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "b" { + t.Fatalf("unexpected x:a") + } + case "x:c": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "d" { + t.Fatalf("unexpected x:c") + } + default: + t.Fatalf("unexpected component key: %s", components[0]) + } + components = components[2:] + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(actualBody) != "testctx1,testctx2" { + t.Fatalf("unexpected body") + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }).Methods(http.MethodPost) + server = httptest.NewServer(serveMux) + defer server.Close() + + multiPartsUploader := 
newMultiPartsUploader(newConcurrentMultiPartsUploaderScheduler( + NewMultiPartsUploaderV1(&MultiPartsUploaderOptions{ + Options: http_client.Options{ + Regions: &region.Region{Up: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }), &concurrentMultiPartsUploaderSchedulerOptions{PartSize: 1 << 22, Concurrency: 2}, + )) + + var ( + key = "testkey" + returnValue struct { + Ok bool `json:"ok"` + } + lastUploaded uint64 + ) + if err = multiPartsUploader.UploadFile(context.Background(), tmpFile.Name(), &ObjectOptions{ + BucketName: "testbucket", + ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": "b", "c": "d"}, + OnUploadingProgress: func(progress *UploadingProgress) { + if progress.TotalSize != 5*1024*1024 { + t.Fatalf("unexpected file size") + } else if progress.Uploaded > progress.TotalSize { + t.Fatalf("unexpected uploaded") + } else if lu := atomic.SwapUint64(&lastUploaded, progress.Uploaded); lu > progress.Uploaded || lu > progress.TotalSize { + t.Fatalf("unexpected uploaded") + } + }, + }, &returnValue); err != nil { + t.Fatal(err) + } else if !returnValue.Ok { + t.Fatalf("unexpected response body") + } +} +
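// Aside: the mock handlers in these tests mirror the v1 resumable upload wire
// protocol. The source is cut into blocks of at most 4 MB; each block is sent
// as "POST /mkblk/<blockSize>" with an "Authorization: UpToken <token>" header
// and answered with an opaque ctx plus a crc32. Once all blocks are uploaded,
// "POST /mkfile/<totalSize>/key/<base64 key>/..." is called with the
// comma-joined ctxs as the body, which assembles the final object. For the
// 5 MB file above, schematically:
//
//	POST /mkblk/4194304                 -> {"ctx":"testctx1", ...}
//	POST /mkblk/1048576                 -> {"ctx":"testctx2", ...}
//	POST /mkfile/5242880/key/<b64 key>  with body "testctx1,testctx2"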
+func TestMultiPartsUploaderResuming(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + tmpFile, err := ioutil.TempFile("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + if _, err = io.CopyN(tmpFile, r, 5*1024*1024); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + var server *httptest.Server + serveMux := mux.NewRouter() + serveMux.PathPrefix("/mkfile/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + components := strings.Split(strings.TrimPrefix(r.URL.Path, "/mkfile/"), "/") + if components[0] != strconv.FormatInt(5*1024*1024, 10) { + t.Fatalf("unexpected fileSize") + } + components = components[1:] + for len(components) > 0 { + switch components[0] { + case "key": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "testkey" { + t.Fatalf("unexpected key") + } + case "fname": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "testfilename" { + t.Fatalf("unexpected fname") + } + case "mimeType": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "application/json" { + t.Fatalf("unexpected mimeType") + } + case "x-qn-meta-a": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "b" { + t.Fatalf("unexpected x-qn-meta-a") + } + case "x-qn-meta-c": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "d" { + t.Fatalf("unexpected x-qn-meta-c") + } + case "x:a": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "b" { + t.Fatalf("unexpected x:a") + } + case "x:c": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "d" { + t.Fatalf("unexpected x:c") + } + default: + t.Fatalf("unexpected component key: %s", components[0]) + } + components = components[2:] + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(actualBody) != "testctx1,testctx2" { + t.Fatalf("unexpected body") + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }).Methods(http.MethodPost) + server = httptest.NewServer(serveMux) + defer server.Close() + + tmpFileStat, err := tmpFile.Stat() + if err != nil { + t.Fatal(err) + } + tmpFileSourceID := fmt.Sprintf("%d:%d:%s", tmpFileStat.Size(), tmpFileStat.ModTime().UnixNano(), tmpFile.Name()) + + resumableRecorder := resumablerecorder.NewJsonFileSystemResumableRecorder(tmpDir) + medium := resumableRecorder.OpenForCreatingNew(&resumablerecorder.ResumableRecorderOpenArgs{ + AccessKey: "testak", + BucketName: "testbucket", + ObjectName: "testkey", + SourceID: tmpFileSourceID, + PartSize: 4 * 1024 * 1024, + TotalSize: 5 * 1024 * 1024, + UpEndpoints: region.Endpoints{Preferred: []string{server.URL}}, + }) + if err = medium.Write(&resumablerecorder.ResumableRecord{ + PartID: "testctx1", + Offset: 0, + PartSize: 4 * 1024 * 1024, + PartNumber: 1, + ExpiredAt: time.Now().Add(1 * time.Hour), + }); err != nil { + t.Fatal(err) + } + if err = medium.Write(&resumablerecorder.ResumableRecord{ + PartID: "testctx2", + Offset: 4 * 1024 * 1024, + PartSize: 1 * 1024 * 1024, + PartNumber: 2, + ExpiredAt: time.Now().Add(1 * time.Hour), + }); err != nil { + t.Fatal(err) + } + if err = medium.Close(); err != nil { + t.Fatal(err) + } + + multiPartsUploader := newMultiPartsUploader(newConcurrentMultiPartsUploaderScheduler( + NewMultiPartsUploaderV1(&MultiPartsUploaderOptions{ + ResumableRecorder: resumableRecorder, + Options: http_client.Options{ + Regions: &region.Region{Up: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }), &concurrentMultiPartsUploaderSchedulerOptions{PartSize: 1 << 22, Concurrency: 2}, + )) + + var ( + key = "testkey" + returnValue struct { + Ok bool `json:"ok"` + } + lastUploaded uint64 + ) + if err = multiPartsUploader.UploadFile(context.Background(), tmpFile.Name(), &ObjectOptions{ + BucketName: "testbucket", + ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": "b", "c": "d"}, + OnUploadingProgress: func(progress *UploadingProgress) { + if progress.TotalSize != 5*1024*1024 { + t.Fatalf("unexpected file size") + } else if progress.Uploaded > progress.TotalSize { + t.Fatalf("unexpected uploaded") + } else if lu := atomic.SwapUint64(&lastUploaded, progress.Uploaded); lu > progress.Uploaded || lu > progress.TotalSize { + t.Fatalf("unexpected uploaded") + } + }, + }, &returnValue); err != nil { + t.Fatal(err) + } else if !returnValue.Ok { + t.Fatalf("unexpected response body") + } +} + +func TestMultiPartsUploaderRetry(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + r :=
rand.New(rand.NewSource(time.Now().UnixNano())) + if _, err = io.CopyN(tmpFile, r, 5*1024*1024); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + var handlerCalled_1, handlerCalled_2, handlerCalled_3 uint64 + serveMux_1 := mux.NewRouter() + serveMux_1.HandleFunc("/mkblk/{blockSize}", func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint64(&handlerCalled_1, 1) + w.Header().Add("X-ReqId", "fakereqid") + w.WriteHeader(599) + }).Methods(http.MethodPost) + server_1 := httptest.NewServer(serveMux_1) + defer server_1.Close() + + serveMux_2 := mux.NewRouter() + serveMux_2.HandleFunc("/mkblk/{blockSize}", func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint64(&handlerCalled_2, 1) + w.WriteHeader(200) + }).Methods(http.MethodPost) + server_2 := httptest.NewServer(serveMux_2) + defer server_2.Close() + + var server_3 *httptest.Server + serveMux_3 := mux.NewRouter() + serveMux_3.HandleFunc("/mkblk/4194304", func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint64(&handlerCalled_3, 1) + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + expectedBody, err := internal_io.ReadAll(io.NewSectionReader(tmpFile, 0, 4*1024*1024)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(actualBody, expectedBody) { + t.Fatalf("unexpected body") + } + jsonBody, err := json.Marshal(&apis.ResumableUploadV1MakeBlockResponse{ + Ctx: "testctx1", + Checksum: "testchecksum1", + Crc32: int64(crc32.ChecksumIEEE(actualBody)), + Host: server_3.URL, + ExpiredAt: time.Now().Add(1 * time.Hour).Unix(), + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBody) + }).Methods(http.MethodPost) + serveMux_3.HandleFunc("/mkblk/1048576", func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint64(&handlerCalled_3, 1) + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + expectedBody, err := internal_io.ReadAll(io.NewSectionReader(tmpFile, 4*1024*1024, 1*1024*1024)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(actualBody, expectedBody) { + t.Fatalf("unexpected body") + } + jsonBody, err := json.Marshal(&apis.ResumableUploadV1MakeBlockResponse{ + Ctx: "testctx2", + Checksum: "testchecksum2", + Crc32: int64(crc32.ChecksumIEEE(actualBody)), + Host: server_3.URL, + ExpiredAt: time.Now().Add(1 * time.Hour).Unix(), + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBody) + }).Methods(http.MethodPost) + serveMux_3.PathPrefix("/mkfile/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint64(&handlerCalled_3, 1) + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + components := strings.Split(strings.TrimPrefix(r.URL.Path, "/mkfile/"), "/") + if components[0] != strconv.FormatInt(5*1024*1024, 10) { + t.Fatalf("unexpected fileSize") + } + components = components[1:] + for len(components) > 0 { + switch components[0] { + case "key": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "testkey" { + t.Fatalf("unexpected key") + } + case "fname": + value := 
components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "testfilename" { + t.Fatalf("unexpected fname") + } + case "mimeType": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "application/json" { + t.Fatalf("unexpected mimeType") + } + case "x-qn-meta-a": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "b" { + t.Fatalf("unexpected x-qn-meta-a") + } + case "x-qn-meta-c": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "d" { + t.Fatalf("unexpected x-qn-meta-c") + } + case "x:a": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "b" { + t.Fatalf("unexpected x:a") + } + case "x:c": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "d" { + t.Fatalf("unexpected x:c") + } + default: + t.Fatalf("unexpected component key: %s", components[0]) + } + components = components[2:] + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(actualBody) != "testctx1,testctx2" { + t.Fatalf("unexpected body") + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }).Methods(http.MethodPost) + server_3 = httptest.NewServer(serveMux_3) + defer server_3.Close() + + multiPartsUploader := newMultiPartsUploader(newSerialMultiPartsUploaderScheduler( + NewMultiPartsUploaderV1(&MultiPartsUploaderOptions{ + Options: http_client.Options{ + Regions: regions{[]*region.Region{ + {Up: region.Endpoints{Preferred: []string{server_1.URL}}}, + {Up: region.Endpoints{Preferred: []string{server_2.URL}}}, + {Up: region.Endpoints{Preferred: []string{server_3.URL}}}, + }}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }), &serialMultiPartsUploaderSchedulerOptions{PartSize: 1 << 22}, + )) + + var ( + key = "testkey" + returnValue struct { + Ok bool `json:"ok"` + } + lastUploaded uint64 + ) + if err = multiPartsUploader.UploadFile(context.Background(), tmpFile.Name(), &ObjectOptions{ + BucketName: "testbucket", + ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": "b", "c": "d"}, + OnUploadingProgress: func(progress *UploadingProgress) { + if progress.TotalSize != 5*1024*1024 { + t.Fatalf("unexpected file size") + } + atomic.StoreUint64(&lastUploaded, progress.Uploaded) + }, + }, &returnValue); err != nil { + t.Fatal(err) + } else if !returnValue.Ok { + t.Fatalf("unexpected response body") + } + + if fileSize := atomic.LoadUint64(&lastUploaded); fileSize != 5*1024*1024 { + t.Fatalf("unexpected file size: %d", fileSize) + } + if count := atomic.LoadUint64(&handlerCalled_1); count != 4 { + t.Fatalf("unexpected handler call count: %d", count) + } + if count := atomic.LoadUint64(&handlerCalled_2); count != 4 { + t.Fatalf("unexpected handler call count: %d", count) + } + if count := atomic.LoadUint64(&handlerCalled_3); count != 3 { + t.Fatalf("unexpected handler call count: %d", count) + } +} diff --git a/storagev2/uploader/multi_parts_uploader_v1.go 
b/storagev2/uploader/multi_parts_uploader_v1.go new file mode 100644 index 00000000..f0756d55 --- /dev/null +++ b/storagev2/uploader/multi_parts_uploader_v1.go @@ -0,0 +1,250 @@ +package uploader + +import ( + "context" + "errors" + "io" + "strings" + "time" + + internal_io "github.com/qiniu/go-sdk/v7/internal/io" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/retrier" + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/uploader/resumable_recorder" + "github.com/qiniu/go-sdk/v7/storagev2/uploader/source" +) + +type ( + multiPartsUploaderV1 struct { + storage *apis.Storage + options *MultiPartsUploaderOptions + } + + multiPartsUploaderV1InitializedParts struct { + multiPartsObjectOptions *MultiPartsObjectOptions + records map[uint64]resumedMultiPartsUploaderV1Record + medium resumablerecorder.WriteableResumableRecorderMedium + src source.Source + } + + multiPartsUploaderV1UploadedPart struct { + ctx string + crc32 uint32 + partNumber, offset, size uint64 + } + + resumedMultiPartsUploaderV1Record struct { + ctx string + crc32 uint32 + offset, size uint64 + expiredAt time.Time + } +) + +// NewMultiPartsUploaderV1 creates a V1 multi-parts uploader +func NewMultiPartsUploaderV1(options *MultiPartsUploaderOptions) MultiPartsUploader { + if options == nil { + options = &MultiPartsUploaderOptions{} + } + return &multiPartsUploaderV1{apis.NewStorage(&options.Options), options} +} + +func (uploader *multiPartsUploaderV1) InitializeParts(ctx context.Context, src source.Source, multiPartsObjectOptions *MultiPartsObjectOptions) (InitializedParts, error) { + if multiPartsObjectOptions == nil { + multiPartsObjectOptions = &MultiPartsObjectOptions{} + } + if multiPartsObjectOptions.PartSize == 0 { + multiPartsObjectOptions.PartSize = 1 << 22 + } + medium := tryToOpenResumableRecorderForAppending(ctx, src, multiPartsObjectOptions, uploader.options) + return &multiPartsUploaderV1InitializedParts{multiPartsObjectOptions, nil, medium, src}, nil +} + +func (uploader *multiPartsUploaderV1) TryToResume(ctx context.Context, src source.Source, multiPartsObjectOptions *MultiPartsObjectOptions) InitializedParts { + if multiPartsObjectOptions == nil { + multiPartsObjectOptions = &MultiPartsObjectOptions{} + } + if multiPartsObjectOptions.PartSize == 0 { + multiPartsObjectOptions.PartSize = 1 << 22 + } + + readableMedium := tryToOpenResumableRecorderForReading(ctx, src, multiPartsObjectOptions, uploader.options) + if readableMedium == nil { + return nil + } + defer readableMedium.Close() + + records := make(map[uint64]resumedMultiPartsUploaderV1Record) + for { + var record resumablerecorder.ResumableRecord + if err := readableMedium.Next(&record); err != nil { + break + } + records[record.PartNumber] = resumedMultiPartsUploaderV1Record{ + ctx: record.PartID, + crc32: record.CRC32, + offset: record.Offset, + size: record.PartSize, + expiredAt: record.ExpiredAt, + } + } + readableMedium.Close() + + medium := tryToOpenResumableRecorderForAppending(ctx, src, multiPartsObjectOptions, uploader.options) + return &multiPartsUploaderV1InitializedParts{multiPartsObjectOptions, records, medium, src} +} + +func (uploader *multiPartsUploaderV1) UploadPart(ctx context.Context, initialized InitializedParts, part source.Part, options *UploadPartOptions) (UploadedPart, error) { + initializedParts, ok := initialized.(*multiPartsUploaderV1InitializedParts) + if !ok { + return nil, errors.New("unrecognized initialized parts") + } + if len(initializedParts.records) > 0 { + if record, ok := initializedParts.records[part.PartNumber()]; ok { +
if record.offset == part.Offset() && record.size == part.Size() { + if options != nil && options.OnUploadingProgress != nil { + options.OnUploadingProgress(&UploadingPartProgress{Uploaded: record.size, PartSize: record.size}) + } + return multiPartsUploaderV1UploadedPart{ + ctx: record.ctx, + crc32: record.crc32, + offset: record.offset, + size: record.size, + partNumber: part.PartNumber(), + }, nil + } + } + } + return uploader.uploadPart(ctx, initializedParts, part, options) +} + +func (uploader *multiPartsUploaderV1) uploadPart(ctx context.Context, initialized *multiPartsUploaderV1InitializedParts, part source.Part, options *UploadPartOptions) (UploadedPart, error) { + apisOptions := apis.Options{ + OverwrittenRegion: initialized.multiPartsObjectOptions.RegionsProvider, + } + if options != nil && options.OnUploadingProgress != nil { + apisOptions.OnRequestProgress = func(uploaded, totalSize uint64) { + options.OnUploadingProgress(&UploadingPartProgress{Uploaded: uploaded, PartSize: totalSize}) + } + } + upToken, err := getUpToken(uploader.options.Credentials, &initialized.multiPartsObjectOptions.ObjectOptions, uploader.options.UpTokenProvider) + if err != nil { + return nil, err + } + + response, err := uploader.storage.ResumableUploadV1MakeBlock(ctx, &apis.ResumableUploadV1MakeBlockRequest{ + BlockSize: int64(part.Size()), + UpToken: upToken, + Body: internal_io.MakeReadSeekCloserFromReader(part), + }, &apisOptions) + if err != nil { + return nil, err + } else if response.Crc32 > 0 { + if _, err = part.Seek(0, io.SeekStart); err != nil { + return nil, err + } + crc32, err := crc32FromReadSeeker(part) + if err != nil { + return nil, err + } + if crc32 != uint32(response.Crc32) { + return nil, errors.New("unexpected crc32") + } + } + + if medium := initialized.medium; medium != nil { + medium.Write(&resumablerecorder.ResumableRecord{ + PartID: response.Ctx, + Offset: part.Offset(), + PartSize: part.Size(), + PartNumber: part.PartNumber(), + ExpiredAt: time.Unix(response.ExpiredAt, 0), + CRC32: uint32(response.Crc32), + }) + } + + return multiPartsUploaderV1UploadedPart{ + ctx: response.Ctx, + crc32: uint32(response.Crc32), + offset: part.Offset(), + size: part.Size(), + partNumber: part.PartNumber(), + }, nil +} + +func (uploader *multiPartsUploaderV1) CompleteParts(ctx context.Context, initialized InitializedParts, parts []UploadedPart, returnValue interface{}) error { + initializedParts, ok := initialized.(*multiPartsUploaderV1InitializedParts) + if !ok { + return errors.New("unrecognized initialized parts") + } + options := apis.Options{ + OverwrittenRegion: initializedParts.multiPartsObjectOptions.RegionsProvider, + } + upToken, err := getUpToken(uploader.options.Credentials, &initializedParts.multiPartsObjectOptions.ObjectOptions, uploader.options.UpTokenProvider) + if err != nil { + return err + } + + var ( + ctxs = make([]string, 0, len(parts)) + size uint64 + ) + for _, part := range parts { + uploadedPart, ok := part.(multiPartsUploaderV1UploadedPart) + if !ok { + return errors.New("unrecognized uploaded part") + } + ctxs = append(ctxs, uploadedPart.ctx) + size += uploadedPart.size + } + + _, err = uploader.storage.ResumableUploadV1MakeFile(ctx, &apis.ResumableUploadV1MakeFileRequest{ + Size: int64(size), + ObjectName: initializedParts.multiPartsObjectOptions.ObjectName, + FileName: initializedParts.multiPartsObjectOptions.FileName, + MimeType: initializedParts.multiPartsObjectOptions.ContentType, + CustomData: 
mergeCustomVarsAndMetadata(initializedParts.multiPartsObjectOptions.Metadata, initializedParts.multiPartsObjectOptions.CustomVars), + UpToken: upToken, + Body: internal_io.NewBytesNopCloser([]byte(strings.Join(ctxs, ","))), + ResponseBody: returnValue, + }, &options) + if err == nil || !retrier.IsErrorRetryable(err) { + if medium := initializedParts.medium; medium != nil { + medium.Close() + } + initializedParts.medium = nil + tryToDeleteResumableRecorderMedium(ctx, initializedParts.src, initializedParts.multiPartsObjectOptions, uploader.options) + } + return err +} + +func (uploader *multiPartsUploaderV1) MultiPartsUploaderOptions() *MultiPartsUploaderOptions { + return uploader.options +} + +func (initialized *multiPartsUploaderV1InitializedParts) Close() error { + if initialized.medium != nil { + return initialized.medium.Close() + } + return nil +} + +func (uploadedPart multiPartsUploaderV1UploadedPart) Offset() uint64 { + return uploadedPart.offset +} + +func (uploadedPart multiPartsUploaderV1UploadedPart) PartNumber() uint64 { + return uploadedPart.partNumber +} + +func (uploadedPart multiPartsUploaderV1UploadedPart) PartSize() uint64 { + return uploadedPart.size +} + +func (uploadedPart multiPartsUploaderV1UploadedPart) Crc32() uint32 { + return uploadedPart.crc32 +} + +func (uploadedPart multiPartsUploaderV1UploadedPart) Ctx() string { + return uploadedPart.ctx +} diff --git a/storagev2/uploader/multi_parts_uploader_v1_test.go b/storagev2/uploader/multi_parts_uploader_v1_test.go new file mode 100644 index 00000000..c3a6c105 --- /dev/null +++ b/storagev2/uploader/multi_parts_uploader_v1_test.go @@ -0,0 +1,508 @@ +//go:build unit +// +build unit + +package uploader_test + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/http/httptest" + "os" + "strconv" + "strings" + "testing" + "time" + + "github.com/gorilla/mux" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" + "github.com/qiniu/go-sdk/v7/storagev2/uploader" + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/uploader/resumable_recorder" + "github.com/qiniu/go-sdk/v7/storagev2/uploader/source" +) + +func TestMultiPartsUploaderV1(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + if _, err = io.CopyN(tmpFile, r, 5*1024*1024); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + var server *httptest.Server + serveMux := mux.NewRouter() + serveMux.HandleFunc("/mkblk/4194304", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + expectedBody, err := internal_io.ReadAll(io.NewSectionReader(tmpFile, 0, 4*1024*1024)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(actualBody, expectedBody) { + t.Fatalf("unexpected body") + } + jsonBody, err := json.Marshal(&apis.ResumableUploadV1MakeBlockResponse{ + Ctx: "testctx1", + Checksum: "testchecksum1", + Crc32: 
int64(crc32.ChecksumIEEE(actualBody)), + Host: server.URL, + ExpiredAt: time.Now().Add(1 * time.Hour).Unix(), + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBody) + }).Methods(http.MethodPost) + serveMux.HandleFunc("/mkblk/1048576", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + expectedBody, err := internal_io.ReadAll(io.NewSectionReader(tmpFile, 4*1024*1024, 1*1024*1024)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(actualBody, expectedBody) { + t.Fatalf("unexpected body") + } + jsonBody, err := json.Marshal(&apis.ResumableUploadV1MakeBlockResponse{ + Ctx: "testctx2", + Checksum: "testchecksum2", + Crc32: int64(crc32.ChecksumIEEE(actualBody)), + Host: server.URL, + ExpiredAt: time.Now().Add(1 * time.Hour).Unix(), + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBody) + }).Methods(http.MethodPost) + serveMux.PathPrefix("/mkfile/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + components := strings.Split(strings.TrimPrefix(r.URL.Path, "/mkfile/"), "/") + if components[0] != strconv.FormatInt(5*1024*1024, 10) { + t.Fatalf("unexpected fileSize") + } + components = components[1:] + for len(components) > 0 { + switch components[0] { + case "key": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "testkey" { + t.Fatalf("unexpected key") + } + case "fname": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "testfilename" { + t.Fatalf("unexpected fname") + } + case "mimeType": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "application/json" { + t.Fatalf("unexpected mimeType") + } + case "x-qn-meta-a": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "b" { + t.Fatalf("unexpected x-qn-meta-a") + } + case "x-qn-meta-c": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "d" { + t.Fatalf("unexpected x-qn-meta-c") + } + case "x:a": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "b" { + t.Fatalf("unexpected x:a") + } + case "x:c": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "d" { + t.Fatalf("unexpected x:c") + } + default: + t.Fatalf("unexpected component key: %s", components[0]) + } + components = components[2:] + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(actualBody) != "testctx1,testctx2" { + t.Fatalf("unexpected body") + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }).Methods(http.MethodPost) + server = httptest.NewServer(serveMux) + defer server.Close() + + 
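For orientation, the call sequence the new v1 uploader expects is: construct the uploader, InitializeParts, then Slice/UploadPart per part, then CompleteParts. A minimal sketch follows, with hypothetical credentials, bucket, object key, and file path; it assumes src.Slice signals an exhausted source via a nil part and that the final response carries the usual hash/key fields, neither of which is spelled out in this diff. Error handling is reduced to panics for brevity.

package main

import (
	"context"

	"github.com/qiniu/go-sdk/v7/storagev2/credentials"
	"github.com/qiniu/go-sdk/v7/storagev2/http_client"
	"github.com/qiniu/go-sdk/v7/storagev2/uploader"
	"github.com/qiniu/go-sdk/v7/storagev2/uploader/source"
)

func main() {
	// Regions is left unset here; the tests below pin Regions to a mock server instead.
	v1 := uploader.NewMultiPartsUploaderV1(&uploader.MultiPartsUploaderOptions{
		Options: http_client.Options{
			Credentials: credentials.NewCredentials("ACCESS_KEY", "SECRET_KEY"), // hypothetical keys
		},
	})

	src, err := source.NewFileSource("/path/to/file") // hypothetical path
	if err != nil {
		panic(err)
	}
	objectName := "object-key" // hypothetical object name
	initialized, err := v1.InitializeParts(context.Background(), src, &uploader.MultiPartsObjectOptions{
		uploader.ObjectOptions{BucketName: "bucket", ObjectName: &objectName}, // hypothetical bucket
		4 * 1024 * 1024, // PartSize; 0 falls back to the 1 << 22 default above
	})
	if err != nil {
		panic(err)
	}
	defer initialized.Close()

	var parts []uploader.UploadedPart
	for {
		part, err := src.Slice(4 * 1024 * 1024)
		if err != nil {
			panic(err)
		}
		if part == nil { // assumption: Slice yields a nil part once the source is exhausted
			break
		}
		uploaded, err := v1.UploadPart(context.Background(), initialized, part, nil)
		if err != nil {
			panic(err)
		}
		parts = append(parts, uploaded)
	}

	var ret struct { // assumption: the v1 mkfile response carries hash/key
		Hash string `json:"hash"`
		Key  string `json:"key"`
	}
	if err := v1.CompleteParts(context.Background(), initialized, parts, &ret); err != nil {
		panic(err)
	}
}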
multiPartsUploaderV1 := uploader.NewMultiPartsUploaderV1(&uploader.MultiPartsUploaderOptions{ + Options: http_client.Options{ + Regions: ®ion.Region{Up: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }) + + src, err := source.NewFileSource(tmpFile.Name()) + if err != nil { + t.Fatal(err) + } + key := "testkey" + initializedPart, err := multiPartsUploaderV1.InitializeParts(context.Background(), src, &uploader.MultiPartsObjectOptions{ + uploader.ObjectOptions{ + BucketName: "testbucket", + ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": "b", "c": "d"}, + }, + 4 * 1024 * 1024, + }) + if err != nil { + t.Fatal(err) + } + defer initializedPart.Close() + + part, err := src.Slice(4 * 1024 * 1024) + if err != nil { + t.Fatal(err) + } + lastUploaded := uint64(0) + uploadedPart_1, err := multiPartsUploaderV1.UploadPart(context.Background(), initializedPart, part, &uploader.UploadPartOptions{ + OnUploadingProgress: func(progress *uploader.UploadingPartProgress) { + if progress.PartSize != 4*1024*1024 { + t.Fatalf("unexpected partSize") + } + if progress.Uploaded < lastUploaded || progress.Uploaded > progress.PartSize { + t.Fatalf("unexpected uploaded") + } + lastUploaded = progress.Uploaded + }, + }) + if err != nil { + t.Fatal(err) + } + + part, err = src.Slice(4 * 1024 * 1024) + if err != nil { + t.Fatal(err) + } + lastUploaded = 0 + uploadedPart_2, err := multiPartsUploaderV1.UploadPart(context.Background(), initializedPart, part, &uploader.UploadPartOptions{ + OnUploadingProgress: func(progress *uploader.UploadingPartProgress) { + if progress.PartSize != 1*1024*1024 { + t.Fatalf("unexpected partSize") + } + if progress.Uploaded < lastUploaded || progress.Uploaded > progress.PartSize { + t.Fatalf("unexpected uploaded") + } + lastUploaded = progress.Uploaded + }, + }) + if err != nil { + t.Fatal(err) + } + + var returnValue struct { + Ok bool `json:"ok"` + } + err = multiPartsUploaderV1.CompleteParts(context.Background(), initializedPart, []uploader.UploadedPart{uploadedPart_1, uploadedPart_2}, &returnValue) + if err != nil { + t.Fatal(err) + } else if !returnValue.Ok { + t.Fatalf("unexpected response body") + } +} + +func TestMultiPartsUploaderV1Resuming(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + tmpFile, err := ioutil.TempFile("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + if _, err = io.CopyN(tmpFile, r, 5*1024*1024); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + var server *httptest.Server + serveMux := mux.NewRouter() + serveMux.PathPrefix("/mkfile/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + components := strings.Split(strings.TrimPrefix(r.URL.Path, "/mkfile/"), "/") + if components[0] != strconv.FormatInt(5*1024*1024, 10) { + t.Fatalf("unexpected fileSize") + } + components = components[1:] + for len(components) > 0 { + switch components[0] { + case "key": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != 
nil { + t.Fatal(err) + } + if string(valueBytes) != "testkey" { + t.Fatalf("unexpected key") + } + case "fname": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "testfilename" { + t.Fatalf("unexpected fname") + } + case "mimeType": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "application/json" { + t.Fatalf("unexpected mimeType") + } + case "x-qn-meta-a": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "b" { + t.Fatalf("unexpected x-qn-meta-a") + } + case "x-qn-meta-c": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "d" { + t.Fatalf("unexpected x-qn-meta-c") + } + case "x:a": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "b" { + t.Fatalf("unexpected x:a") + } + case "x:c": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "d" { + t.Fatalf("unexpected x:c") + } + default: + t.Fatalf("unexpected component key: %s", components[0]) + } + components = components[2:] + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(actualBody) != "testctx1,testctx2" { + t.Fatalf("unexpected body") + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }).Methods(http.MethodPost) + server = httptest.NewServer(serveMux) + defer server.Close() + + tmpFileStat, err := tmpFile.Stat() + if err != nil { + t.Fatal(err) + } + tmpFileSourceID := fmt.Sprintf("%d:%d:%s", tmpFileStat.Size(), tmpFileStat.ModTime().UnixNano(), tmpFile.Name()) + + resumableRecorder := resumablerecorder.NewJsonFileSystemResumableRecorder(tmpDir) + medium := resumableRecorder.OpenForCreatingNew(&resumablerecorder.ResumableRecorderOpenArgs{ + AccessKey: "testak", + BucketName: "testbucket", + ObjectName: "testkey", + SourceID: tmpFileSourceID, + PartSize: 4 * 1024 * 1024, + TotalSize: 5 * 1024 * 1024, + UpEndpoints: region.Endpoints{Preferred: []string{server.URL}}, + }) + if err = medium.Write(&resumablerecorder.ResumableRecord{ + PartID: "testctx1", + Offset: 0, + PartSize: 4 * 1024 * 1024, + PartNumber: 1, + ExpiredAt: time.Now().Add(1 * time.Hour), + }); err != nil { + t.Fatal(err) + } + if err = medium.Write(&resumablerecorder.ResumableRecord{ + PartID: "testctx2", + Offset: 4 * 1024 * 1024, + PartSize: 1 * 1024 * 1024, + PartNumber: 2, + ExpiredAt: time.Now().Add(1 * time.Hour), + }); err != nil { + t.Fatal(err) + } + if err = medium.Close(); err != nil { + t.Fatal(err) + } + + multiPartsUploaderV1 := uploader.NewMultiPartsUploaderV1(&uploader.MultiPartsUploaderOptions{ + ResumableRecorder: resumableRecorder, + Options: http_client.Options{ + Regions: ®ion.Region{Up: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }) + + src, err := source.NewFileSource(tmpFile.Name()) + if err != nil { + t.Fatal(err) + } + key := "testkey" + initializedPart := multiPartsUploaderV1.TryToResume(context.Background(), src, &uploader.MultiPartsObjectOptions{ + uploader.ObjectOptions{ + BucketName: "testbucket", + 
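// These options must match the ResumableRecorderOpenArgs written above, or the recorder lookup (and thus TryToResume) will not find the records. +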
ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": "b", "c": "d"}, + }, + 4 * 1024 * 1024, + }) + if initializedPart == nil { + t.Fatalf("initializedPart is nil") + } + defer initializedPart.Close() + + part, err := src.Slice(4 * 1024 * 1024) + if err != nil { + t.Fatal(err) + } + uploadedPart_1, err := multiPartsUploaderV1.UploadPart(context.Background(), initializedPart, part, &uploader.UploadPartOptions{ + OnUploadingProgress: func(progress *uploader.UploadingPartProgress) { + if progress.PartSize != 4*1024*1024 { + t.Fatalf("unexpected partSize") + } + if progress.Uploaded != 4*1024*1024 { + t.Fatalf("unexpected uploaded") + } + }, + }) + if err != nil { + t.Fatal(err) + } + + part, err = src.Slice(4 * 1024 * 1024) + if err != nil { + t.Fatal(err) + } + uploadedPart_2, err := multiPartsUploaderV1.UploadPart(context.Background(), initializedPart, part, &uploader.UploadPartOptions{ + OnUploadingProgress: func(progress *uploader.UploadingPartProgress) { + if progress.PartSize != 1024*1024 { + t.Fatalf("unexpected partSize") + } + if progress.Uploaded != 1024*1024 { + t.Fatalf("unexpected uploaded") + } + }, + }) + if err != nil { + t.Fatal(err) + } + + var returnValue struct { + Ok bool `json:"ok"` + } + err = multiPartsUploaderV1.CompleteParts(context.Background(), initializedPart, []uploader.UploadedPart{uploadedPart_1, uploadedPart_2}, &returnValue) + if err != nil { + t.Fatal(err) + } else if !returnValue.Ok { + t.Fatalf("unexpected response body") + } +} diff --git a/storagev2/uploader/multi_parts_uploader_v2.go b/storagev2/uploader/multi_parts_uploader_v2.go new file mode 100644 index 00000000..240597a1 --- /dev/null +++ b/storagev2/uploader/multi_parts_uploader_v2.go @@ -0,0 +1,346 @@ +package uploader + +import ( + "context" + "crypto/md5" + "encoding/hex" + "errors" + "io" + "time" + + internal_io "github.com/qiniu/go-sdk/v7/internal/io" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/apis/resumable_upload_v2_complete_multipart_upload" + "github.com/qiniu/go-sdk/v7/storagev2/retrier" + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/uploader/resumable_recorder" + "github.com/qiniu/go-sdk/v7/storagev2/uploader/source" +) + +type ( + multiPartsUploaderV2 struct { + storage *apis.Storage + options *MultiPartsUploaderOptions + } + + multiPartsUploaderV2InitializedParts struct { + bucketName, uploadID string + multiPartsObjectOptions *MultiPartsObjectOptions + expiredAt time.Time + records map[uint64]resumedMultiPartsUploaderV2Record + medium resumablerecorder.WriteableResumableRecorderMedium + src source.Source + } + + multiPartsUploaderV2UploadedPart struct { + partNumber, offset, size uint64 + etag string + md5 [md5.Size]byte + } + + resumedMultiPartsUploaderV2Record struct { + uploadID, etag string + md5 [md5.Size]byte + partNumber, offset, size uint64 + expiredAt time.Time + } +) + +// Creates a multi-parts uploader V2 +func NewMultiPartsUploaderV2(options *MultiPartsUploaderOptions) MultiPartsUploader { + if options == nil { + options = &MultiPartsUploaderOptions{} + } + return &multiPartsUploaderV2{apis.NewStorage(&options.Options), options} +} + +func (uploader *multiPartsUploaderV2) InitializeParts(ctx context.Context, src source.Source, multiPartsObjectOptions *MultiPartsObjectOptions) (InitializedParts, error) { + if multiPartsObjectOptions == nil { + multiPartsObjectOptions = &MultiPartsObjectOptions{} + } + if
multiPartsObjectOptions.PartSize == 0 { + multiPartsObjectOptions.PartSize = 1 << 22 + } + + upToken, err := getUpToken(uploader.options.Credentials, &multiPartsObjectOptions.ObjectOptions, uploader.options.UpTokenProvider) + if err != nil { + return nil, err + } + + bucketName, err := guessBucketName(ctx, multiPartsObjectOptions.BucketName, upToken) + if err != nil { + return nil, err + } else if multiPartsObjectOptions.BucketName == "" { + multiPartsObjectOptions.BucketName = bucketName + } + + response, err := uploader.storage.ResumableUploadV2InitiateMultipartUpload(ctx, &apis.ResumableUploadV2InitiateMultipartUploadRequest{ + BucketName: bucketName, + ObjectName: multiPartsObjectOptions.ObjectName, + UpToken: upToken, + }, &apis.Options{ + OverwrittenRegion: multiPartsObjectOptions.RegionsProvider, + }) + if err != nil { + return nil, err + } + + medium := tryToOpenResumableRecorderForAppending(ctx, src, multiPartsObjectOptions, uploader.options) + return &multiPartsUploaderV2InitializedParts{ + bucketName: bucketName, + uploadID: response.UploadId, + multiPartsObjectOptions: multiPartsObjectOptions, + expiredAt: time.Unix(response.ExpiredAt, 0), + medium: medium, + src: src, + }, nil +} + +func (uploader *multiPartsUploaderV2) TryToResume(ctx context.Context, src source.Source, multiPartsObjectOptions *MultiPartsObjectOptions) InitializedParts { + if multiPartsObjectOptions == nil { + multiPartsObjectOptions = &MultiPartsObjectOptions{} + } + if multiPartsObjectOptions.PartSize == 0 { + multiPartsObjectOptions.PartSize = 1 << 22 + } + + upToken, err := getUpToken(uploader.options.Credentials, &multiPartsObjectOptions.ObjectOptions, uploader.options.UpTokenProvider) + if err != nil { + return nil + } + + bucketName, err := guessBucketName(ctx, multiPartsObjectOptions.BucketName, upToken) + if err != nil { + return nil + } else if multiPartsObjectOptions.BucketName == "" { + multiPartsObjectOptions.BucketName = bucketName + } + + readableMedium := tryToOpenResumableRecorderForReading(ctx, src, multiPartsObjectOptions, uploader.options) + if readableMedium == nil { + return nil + } + defer readableMedium.Close() + + var ( + records = make(map[uint64]resumedMultiPartsUploaderV2Record) + uploadID string + expiredAt time.Time + ) + for { + var record resumablerecorder.ResumableRecord + if err := readableMedium.Next(&record); err != nil { + break + } + records[record.PartNumber] = resumedMultiPartsUploaderV2Record{ + uploadID: record.UploadID, + etag: record.PartID, + md5: record.MD5, + partNumber: record.PartNumber, + offset: record.Offset, + size: record.PartSize, + expiredAt: record.ExpiredAt, + } + if uploadID == "" { + uploadID = record.UploadID + expiredAt = record.ExpiredAt + } + } + readableMedium.Close() + if uploadID == "" { + return nil + } + + medium := tryToOpenResumableRecorderForAppending(ctx, src, multiPartsObjectOptions, uploader.options) + return &multiPartsUploaderV2InitializedParts{ + bucketName: bucketName, + uploadID: uploadID, + multiPartsObjectOptions: multiPartsObjectOptions, + expiredAt: expiredAt, + records: records, + medium: medium, + src: src, + } +} + +func (uploader *multiPartsUploaderV2) UploadPart(ctx context.Context, initialized InitializedParts, part source.Part, options *UploadPartOptions) (UploadedPart, error) { + initializedParts, ok := initialized.(*multiPartsUploaderV2InitializedParts) + if !ok { + return nil, errors.New("unrecognized initialized parts") + } + if len(initializedParts.records) > 0 { + if record, ok := 
initializedParts.records[part.PartNumber()]; ok { + if record.offset == part.Offset() && record.size == part.Size() { + if options != nil && options.OnUploadingProgress != nil { + options.OnUploadingProgress(&UploadingPartProgress{Uploaded: record.size, PartSize: record.size}) + } + return multiPartsUploaderV2UploadedPart{ + partNumber: record.partNumber, + offset: record.offset, + size: record.size, + etag: record.etag, + md5: record.md5, + }, nil + } + } + } + return uploader.uploadPart(ctx, initializedParts, part, options) +} + +func (uploader *multiPartsUploaderV2) uploadPart(ctx context.Context, initialized *multiPartsUploaderV2InitializedParts, part source.Part, options *UploadPartOptions) (UploadedPart, error) { + apisOptions := apis.Options{ + OverwrittenRegion: initialized.multiPartsObjectOptions.RegionsProvider, + } + if options != nil && options.OnUploadingProgress != nil { + apisOptions.OnRequestProgress = func(uploaded, partSize uint64) { + options.OnUploadingProgress(&UploadingPartProgress{Uploaded: uploaded, PartSize: partSize}) + } + } + upToken, err := getUpToken(uploader.options.Credentials, &initialized.multiPartsObjectOptions.ObjectOptions, uploader.options.UpTokenProvider) + if err != nil { + return nil, err + } + + md5, err := md5FromReadSeeker(part) + if err != nil { + return nil, err + } + + response, err := uploader.storage.ResumableUploadV2UploadPart(ctx, &apis.ResumableUploadV2UploadPartRequest{ + BucketName: initialized.bucketName, + ObjectName: initialized.multiPartsObjectOptions.ObjectName, + UploadId: initialized.uploadID, + PartNumber: int64(part.PartNumber()), + Md5: hex.EncodeToString(md5[:]), + UpToken: upToken, + Body: internal_io.MakeReadSeekCloserFromReader(part), + }, &apisOptions) + if err != nil { + return nil, err + } + + if medium := initialized.medium; medium != nil { + medium.Write(&resumablerecorder.ResumableRecord{ + UploadID: initialized.uploadID, + PartID: response.Etag, + Offset: part.Offset(), + PartSize: part.Size(), + PartNumber: part.PartNumber(), + ExpiredAt: initialized.expiredAt, + MD5: md5, + }) + } + + return multiPartsUploaderV2UploadedPart{ + partNumber: part.PartNumber(), + offset: part.Offset(), + size: part.Size(), + etag: response.Etag, + md5: md5, + }, nil +} + +func (uploader *multiPartsUploaderV2) CompleteParts(ctx context.Context, initialized InitializedParts, parts []UploadedPart, returnValue interface{}) error { + initializedParts, ok := initialized.(*multiPartsUploaderV2InitializedParts) + if !ok { + return errors.New("unrecognized initialized parts") + } + options := apis.Options{ + OverwrittenRegion: initializedParts.multiPartsObjectOptions.RegionsProvider, + } + upToken, err := getUpToken(uploader.options.Credentials, &initializedParts.multiPartsObjectOptions.ObjectOptions, uploader.options.UpTokenProvider) + if err != nil { + return err + } + + completedParts := make(resumable_upload_v2_complete_multipart_upload.Parts, 0, len(parts)) + for _, part := range parts { + uploadedPart, ok := part.(multiPartsUploaderV2UploadedPart) + if !ok { + return errors.New("unrecognized uploaded part") + } + completedParts = append(completedParts, resumable_upload_v2_complete_multipart_upload.PartInfo{ + PartNumber: int64(uploadedPart.partNumber), + Etag: uploadedPart.etag, + }) + } + + metadata := make(map[string]string) + for k, v := range initializedParts.multiPartsObjectOptions.Metadata { + metadata[normalizeMetadataKey(k)] = v + } + + customVars := make(map[string]string) + for k, v := range 
initializedParts.multiPartsObjectOptions.CustomVars { + customVars[normalizeCustomVarKey(k)] = v + } + + _, err = uploader.storage.ResumableUploadV2CompleteMultipartUpload(ctx, &apis.ResumableUploadV2CompleteMultipartUploadRequest{ + BucketName: initializedParts.bucketName, + ObjectName: initializedParts.multiPartsObjectOptions.ObjectName, + UploadId: initializedParts.uploadID, + UpToken: upToken, + Parts: completedParts, + FileName: initializedParts.multiPartsObjectOptions.FileName, + MimeType: initializedParts.multiPartsObjectOptions.ContentType, + Metadata: metadata, + CustomVars: customVars, + ResponseBody: returnValue, + }, &options) + if err == nil || !retrier.IsErrorRetryable(err) { + if medium := initializedParts.medium; medium != nil { + medium.Close() + } + initializedParts.medium = nil + tryToDeleteResumableRecorderMedium(ctx, initializedParts.src, initializedParts.multiPartsObjectOptions, uploader.options) + } + return err +} + +func (uploader *multiPartsUploaderV2) MultiPartsUploaderOptions() *MultiPartsUploaderOptions { + return uploader.options +} + +func (initialized *multiPartsUploaderV2InitializedParts) Close() error { + if initialized.medium != nil { + return initialized.medium.Close() + } + return nil +} + +func (uploadedPart multiPartsUploaderV2UploadedPart) Offset() uint64 { + return uploadedPart.offset +} + +func (uploadedPart multiPartsUploaderV2UploadedPart) PartNumber() uint64 { + return uploadedPart.partNumber +} + +func (uploadedPart multiPartsUploaderV2UploadedPart) PartSize() uint64 { + return uploadedPart.size +} + +func (uploadedPart multiPartsUploaderV2UploadedPart) Etag() string { + return uploadedPart.etag +} + +func (uploadedPart multiPartsUploaderV2UploadedPart) MD5() [md5.Size]byte { + return uploadedPart.md5 +} + +func md5FromReadSeeker(r io.ReadSeeker) (md5Result [md5.Size]byte, err error) { + var offset int64 + + offset, err = r.Seek(0, io.SeekCurrent) + if err != nil { + return + } + hasher := md5.New() + if _, err = io.Copy(hasher, r); err != nil { + return + } + if _, err = r.Seek(offset, io.SeekStart); err != nil { + return + } + copy(md5Result[:], hasher.Sum(nil)) + return +} diff --git a/storagev2/uploader/multi_parts_uploader_v2_test.go b/storagev2/uploader/multi_parts_uploader_v2_test.go new file mode 100644 index 00000000..b156c68c --- /dev/null +++ b/storagev2/uploader/multi_parts_uploader_v2_test.go @@ -0,0 +1,471 @@ +//go:build unit +// +build unit + +package uploader_test + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + "time" + + "github.com/gorilla/mux" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" + "github.com/qiniu/go-sdk/v7/storagev2/uploader" + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/uploader/resumable_recorder" + "github.com/qiniu/go-sdk/v7/storagev2/uploader/source" +) + +func TestMultiPartsUploaderV2(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + if _, err = io.CopyN(tmpFile, r, 5*1024*1024); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, 
io.SeekStart); err != nil { + t.Fatal(err) + } + + serveMux := mux.NewRouter() + serveMux.HandleFunc("/buckets/{bucketName}/objects/{encodedObjectName}/uploads", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + vars := mux.Vars(r) + if vars["bucketName"] != "testbucket" { + t.Fatalf("unexpected bucket name") + } + objectBytes, err := base64.URLEncoding.DecodeString(vars["encodedObjectName"]) + if err != nil { + t.Fatal(err) + } else if string(objectBytes) != "testkey" { + t.Fatalf("unexpected object name") + } + jsonBytes, err := json.Marshal(&apis.ResumableUploadV2InitiateMultipartUploadResponse{ + UploadId: "testuploadID", + ExpiredAt: time.Now().Add(1 * time.Hour).Unix(), + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBytes) + }).Methods(http.MethodPost) + serveMux.HandleFunc("/buckets/{bucketName}/objects/{encodedObjectName}/uploads/{uploadID}/{partNumber}", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + vars := mux.Vars(r) + if vars["bucketName"] != "testbucket" { + t.Fatalf("unexpected bucket name") + } + objectBytes, err := base64.URLEncoding.DecodeString(vars["encodedObjectName"]) + if err != nil { + t.Fatal(err) + } else if string(objectBytes) != "testkey" { + t.Fatalf("unexpected object name") + } + if vars["uploadID"] != "testuploadID" { + t.Fatalf("unexpected upload id") + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + var expectedBody, jsonBody []byte + switch vars["partNumber"] { + case "1": + expectedBody, err = internal_io.ReadAll(io.NewSectionReader(tmpFile, 0, 4*1024*1024)) + if err != nil { + t.Fatal(err) + } + case "2": + expectedBody, err = internal_io.ReadAll(io.NewSectionReader(tmpFile, 4*1024*1024, 1024*1024)) + if err != nil { + t.Fatal(err) + } + default: + t.Fatalf("unexpected part number") + } + if !bytes.Equal(actualBody, expectedBody) { + t.Fatalf("unexpected body") + } + md5Sum := md5.Sum(actualBody) + if r.Header.Get("Content-MD5") != hex.EncodeToString(md5Sum[:]) { + t.Fatalf("unexpected content-md5") + } + switch vars["partNumber"] { + case "1": + jsonBody, err = json.Marshal(&apis.ResumableUploadV2UploadPartResponse{ + Etag: "testetag1", + Md5: r.Header.Get("Content-MD5"), + }) + case "2": + jsonBody, err = json.Marshal(&apis.ResumableUploadV2UploadPartResponse{ + Etag: "testetag2", + Md5: r.Header.Get("Content-MD5"), + }) + } + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBody) + }).Methods(http.MethodPut) + serveMux.HandleFunc("/buckets/{bucketName}/objects/{encodedObjectName}/uploads/{uploadID}", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + vars := mux.Vars(r) + if vars["bucketName"] != "testbucket" { + t.Fatalf("unexpected bucket name") + } + objectBytes, err := base64.URLEncoding.DecodeString(vars["encodedObjectName"]) + if err != nil { + t.Fatal(err) + } else if string(objectBytes) != "testkey" { + t.Fatalf("unexpected object name") + } + if vars["uploadID"] != "testuploadID" { + t.Fatalf("unexpected upload id") + } + requestBodyBytes, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + var body 
apis.ResumableUploadV2CompleteMultipartUploadRequest + if err = body.UnmarshalJSON(requestBodyBytes); err != nil { + t.Fatalf("unexpected request body") + } + if len(body.Parts) != 2 { + t.Fatalf("unexpected parts") + } else if body.Parts[0].PartNumber != 1 { + t.Fatalf("unexpected part number") + } else if body.Parts[0].Etag != "testetag1" { + t.Fatalf("unexpected etag") + } else if body.Parts[1].PartNumber != 2 { + t.Fatalf("unexpected part number") + } else if body.Parts[1].Etag != "testetag2" { + t.Fatalf("unexpected etag") + } + if body.FileName != "testfilename" { + t.Fatalf("unexpected fileName") + } + if body.MimeType != "application/json" { + t.Fatalf("unexpected mimeType") + } + if len(body.Metadata) != 2 { + t.Fatalf("unexpected metadata") + } else if body.Metadata["x-qn-meta-a"] != "b" { + t.Fatalf("unexpected x-qn-meta-a") + } else if body.Metadata["x-qn-meta-c"] != "d" { + t.Fatalf("unexpected x-qn-meta-c") + } else if body.CustomVars["x:a"] != "b" { + t.Fatalf("unexpected x:a") + } else if body.CustomVars["x:c"] != "d" { + t.Fatalf("unexpected x:c") + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }).Methods(http.MethodPost) + server := httptest.NewServer(serveMux) + defer server.Close() + + multiPartsUploaderV2 := uploader.NewMultiPartsUploaderV2(&uploader.MultiPartsUploaderOptions{ + Options: http_client.Options{ + Regions: &region.Region{Up: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }) + + src, err := source.NewFileSource(tmpFile.Name()) + if err != nil { + t.Fatal(err) + } + key := "testkey" + initializedPart, err := multiPartsUploaderV2.InitializeParts(context.Background(), src, &uploader.MultiPartsObjectOptions{ + uploader.ObjectOptions{ + BucketName: "testbucket", + ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": "b", "c": "d"}, + }, + 4 * 1024 * 1024, + }) + if err != nil { + t.Fatal(err) + } + defer initializedPart.Close() + + part, err := src.Slice(4 * 1024 * 1024) + if err != nil { + t.Fatal(err) + } + lastUploaded := uint64(0) + uploadedPart_1, err := multiPartsUploaderV2.UploadPart(context.Background(), initializedPart, part, &uploader.UploadPartOptions{ + OnUploadingProgress: func(progress *uploader.UploadingPartProgress) { + if progress.PartSize != 4*1024*1024 { + t.Fatalf("unexpected partSize") + } + if progress.Uploaded < lastUploaded || progress.Uploaded > progress.PartSize { + t.Fatalf("unexpected uploaded") + } + lastUploaded = progress.Uploaded + }, + }) + if err != nil { + t.Fatal(err) + } + + part, err = src.Slice(4 * 1024 * 1024) + if err != nil { + t.Fatal(err) + } + lastUploaded = 0 + uploadedPart_2, err := multiPartsUploaderV2.UploadPart(context.Background(), initializedPart, part, &uploader.UploadPartOptions{ + OnUploadingProgress: func(progress *uploader.UploadingPartProgress) { + if progress.PartSize != 1*1024*1024 { + t.Fatalf("unexpected partSize") + } + if progress.Uploaded < lastUploaded || progress.Uploaded > progress.PartSize { + t.Fatalf("unexpected uploaded") + } + lastUploaded = progress.Uploaded + }, + }) + if err != nil { + t.Fatal(err) + } + + var returnValue struct { + Ok bool `json:"ok"` + } + err = multiPartsUploaderV2.CompleteParts(context.Background(), initializedPart, []uploader.UploadedPart{uploadedPart_1, uploadedPart_2}, &returnValue) + if err != nil { + t.Fatal(err) + } else if
!returnValue.Ok { + t.Fatalf("unexpected response body") + } +} + +func TestMultiPartsUploaderV2Resuming(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + tmpFile, err := ioutil.TempFile("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + if _, err = io.CopyN(tmpFile, r, 5*1024*1024); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + serveMux := mux.NewRouter() + serveMux.HandleFunc("/buckets/{bucketName}/objects/{encodedObjectName}/uploads/{uploadID}", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + vars := mux.Vars(r) + if vars["bucketName"] != "testbucket" { + t.Fatalf("unexpected bucket name") + } + objectBytes, err := base64.URLEncoding.DecodeString(vars["encodedObjectName"]) + if err != nil { + t.Fatal(err) + } else if string(objectBytes) != "testkey" { + t.Fatalf("unexpected object name") + } + if vars["uploadID"] != "testuploadID" { + t.Fatalf("unexpected upload id") + } + requestBodyBytes, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + var body apis.ResumableUploadV2CompleteMultipartUploadRequest + if err = body.UnmarshalJSON(requestBodyBytes); err != nil { + t.Fatalf("unexpected request body") + } + if len(body.Parts) != 2 { + t.Fatalf("unexpected parts") + } else if body.Parts[0].PartNumber != 1 { + t.Fatalf("unexpected part number") + } else if body.Parts[0].Etag != "testetag1" { + t.Fatalf("unexpected etag") + } else if body.Parts[1].PartNumber != 2 { + t.Fatalf("unexpected part number") + } else if body.Parts[1].Etag != "testetag2" { + t.Fatalf("unexpected etag") + } + if body.FileName != "testfilename" { + t.Fatalf("unexpected fileName") + } + if body.MimeType != "application/json" { + t.Fatalf("unexpected mimeType") + } + if len(body.Metadata) != 2 { + t.Fatalf("unexpected metadata") + } else if body.Metadata["x-qn-meta-a"] != "b" { + t.Fatalf("unexpected x-qn-meta-a") + } else if body.Metadata["x-qn-meta-c"] != "d" { + t.Fatalf("unexpected x-qn-meta-c") + } else if body.CustomVars["x:a"] != "b" { + t.Fatalf("unexpected x:a") + } else if body.CustomVars["x:c"] != "d" { + t.Fatalf("unexpected x:c") + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }).Methods(http.MethodPost) + server := httptest.NewServer(serveMux) + defer server.Close() + + tmpFileStat, err := tmpFile.Stat() + if err != nil { + t.Fatal(err) + } + tmpFileSourceID := fmt.Sprintf("%d:%d:%s", tmpFileStat.Size(), tmpFileStat.ModTime().UnixNano(), tmpFile.Name()) + + resumableRecorder := resumablerecorder.NewJsonFileSystemResumableRecorder(tmpDir) + medium := resumableRecorder.OpenForCreatingNew(&resumablerecorder.ResumableRecorderOpenArgs{ + AccessKey: "testak", + BucketName: "testbucket", + ObjectName: "testkey", + SourceID: tmpFileSourceID, + PartSize: 4 * 1024 * 1024, + TotalSize: 5 * 1024 * 1024, + UpEndpoints: region.Endpoints{Preferred: []string{server.URL}}, + }) + if err = medium.Write(&resumablerecorder.ResumableRecord{ + UploadID: "testuploadID", + PartID: "testetag1", + Offset: 0, + PartSize: 4 * 1024 * 1024, + PartNumber: 1, + ExpiredAt: time.Now().Add(1 * time.Hour), + }); err != nil { + t.Fatal(err) + } + if err =
medium.Write(&resumablerecorder.ResumableRecord{ + UploadID: "testuploadID", + PartID: "testetag2", + Offset: 4 * 1024 * 1024, + PartSize: 1 * 1024 * 1024, + PartNumber: 2, + ExpiredAt: time.Now().Add(1 * time.Hour), + }); err != nil { + t.Fatal(err) + } + if err = medium.Close(); err != nil { + t.Fatal(err) + } + + multiPartsUploaderV2 := uploader.NewMultiPartsUploaderV2(&uploader.MultiPartsUploaderOptions{ + ResumableRecorder: resumableRecorder, + Options: http_client.Options{ + Regions: &region.Region{Up: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }) + + src, err := source.NewFileSource(tmpFile.Name()) + if err != nil { + t.Fatal(err) + } + key := "testkey" + initializedPart := multiPartsUploaderV2.TryToResume(context.Background(), src, &uploader.MultiPartsObjectOptions{ + uploader.ObjectOptions{ + BucketName: "testbucket", + ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": "b", "c": "d"}, + }, + 4 * 1024 * 1024, + }) + if initializedPart == nil { + t.Fatalf("initializedPart is nil") + } + defer initializedPart.Close() + + part, err := src.Slice(4 * 1024 * 1024) + if err != nil { + t.Fatal(err) + } + uploadedPart_1, err := multiPartsUploaderV2.UploadPart(context.Background(), initializedPart, part, &uploader.UploadPartOptions{ + OnUploadingProgress: func(progress *uploader.UploadingPartProgress) { + if progress.PartSize != 4*1024*1024 { + t.Fatalf("unexpected partSize") + } + if progress.Uploaded != 4*1024*1024 { + t.Fatalf("unexpected uploaded") + } + }, + }) + if err != nil { + t.Fatal(err) + } + + part, err = src.Slice(4 * 1024 * 1024) + if err != nil { + t.Fatal(err) + } + uploadedPart_2, err := multiPartsUploaderV2.UploadPart(context.Background(), initializedPart, part, &uploader.UploadPartOptions{ + OnUploadingProgress: func(progress *uploader.UploadingPartProgress) { + if progress.PartSize != 1024*1024 { + t.Fatalf("unexpected partSize") + } + if progress.Uploaded != 1024*1024 { + t.Fatalf("unexpected uploaded") + } + }, + }) + if err != nil { + t.Fatal(err) + } + + var returnValue struct { + Ok bool `json:"ok"` + } + err = multiPartsUploaderV2.CompleteParts(context.Background(), initializedPart, []uploader.UploadedPart{uploadedPart_1, uploadedPart_2}, &returnValue) + if err != nil { + t.Fatal(err) + } else if !returnValue.Ok { + t.Fatalf("unexpected response body") + } +} diff --git a/storagev2/uploader/params.go b/storagev2/uploader/params.go new file mode 100644 index 00000000..0ecc651f --- /dev/null +++ b/storagev2/uploader/params.go @@ -0,0 +1,114 @@ +package uploader + +import ( + "github.com/qiniu/go-sdk/v7/storagev2/region" + "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +type ( + // Object upload options + ObjectOptions struct { + // Regions provider, optional + RegionsProvider region.RegionsProvider + + // Up token provider, optional; + // if unset, BucketName must be given, and an up token is generated automatically with the Uploader's Credentials + UpToken uptoken.Provider + + // Bucket name, optional; if unset, UpToken must be given + BucketName string + + // Object name + ObjectName *string + + // File name + FileName string + + // File MIME type + ContentType string + + // Custom metadata + Metadata map[string]string + + // Custom variables + CustomVars map[string]string + + // Object uploading progress callback + OnUploadingProgress func(*UploadingProgress) + } + + // Options for uploading an object in multiple parts + MultiPartsObjectOptions struct { + // Object upload options + ObjectOptions + + // Part size; defaults to 4 MB if unset + PartSize uint64 + } + + // Part uploading progress + UploadingPartProgress struct { + Uploaded
uint64 // Amount of data uploaded, in bytes + PartSize uint64 // Part size, in bytes + } + + // Object uploading progress + UploadingProgress struct { + Uploaded uint64 // Amount of data uploaded, in bytes + TotalSize uint64 // Total size, in bytes + } + + // Options for uploading a list of parts + UploadPartsOptions struct { + // Part uploading progress callback + OnUploadingProgress func(partNumber uint64, progress *UploadingPartProgress) + // Callback invoked after each part is uploaded successfully + OnPartUploaded func(UploadedPart) error + } + + // Options for uploading a single part + UploadPartOptions struct { + // Part uploading progress callback + OnUploadingProgress func(*UploadingPartProgress) + } + + DirectoryOptions struct { + // Regions provider + RegionsProvider region.RegionsProvider + + // Up token provider + UpToken uptoken.Provider + + // Bucket name + BucketName string + + // Object upload concurrency + ObjectConcurrency int + + // Callback invoked before an object is uploaded + BeforeObjectUpload func(filePath string, objectOptions *ObjectOptions) + + // Uploading progress callback + OnUploadingProgress func(filePath string, progress *UploadingProgress) + + // Callback invoked after an object is uploaded successfully + OnObjectUploaded func(filePath string, info *UploadedObjectInfo) + + // Whether to create directories in the bucket + ShouldCreateDirectory bool + + // Whether to upload the given object + ShouldUploadObject func(filePath string, objectOptions *ObjectOptions) bool + + // Rewrites the object name + UpdateObjectName func(string) string + + // Path separator, defaults to / + PathSeparator string + } + + // Information about an uploaded object + UploadedObjectInfo struct { + Size uint64 // Object size + } +) diff --git a/storagev2/uploader/resumable_recorder.go b/storagev2/uploader/resumable_recorder.go new file mode 100644 index 00000000..40953434 --- /dev/null +++ b/storagev2/uploader/resumable_recorder.go @@ -0,0 +1,98 @@ +package uploader + +import ( + "context" + + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/uploader/resumable_recorder" + "github.com/qiniu/go-sdk/v7/storagev2/uploader/source" + "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +func tryToOpenResumableRecorderForReading(ctx context.Context, src source.Source, multiPartsObjectOptions *MultiPartsObjectOptions, multiPartsUploaderOptions *MultiPartsUploaderOptions) resumablerecorder.ReadableResumableRecorderMedium { + if options := makeResumableRecorderOpenArgs(ctx, src, multiPartsObjectOptions, multiPartsUploaderOptions); options != nil { + if resumableRecorder := multiPartsUploaderOptions.ResumableRecorder; resumableRecorder != nil { + return resumableRecorder.OpenForReading(options) + } + } + return nil +} + +func tryToOpenResumableRecorderForAppending(ctx context.Context, src source.Source, multiPartsObjectOptions *MultiPartsObjectOptions, multiPartsUploaderOptions *MultiPartsUploaderOptions) resumablerecorder.WriteableResumableRecorderMedium { + if options := makeResumableRecorderOpenArgs(ctx, src, multiPartsObjectOptions, multiPartsUploaderOptions); options != nil { + if resumableRecorder := multiPartsUploaderOptions.ResumableRecorder; resumableRecorder != nil { + medium := resumableRecorder.OpenForAppending(options) + if medium == nil { + medium = resumableRecorder.OpenForCreatingNew(options) + } + return medium + } + } + return nil +} + +func tryToDeleteResumableRecorderMedium(ctx context.Context, src source.Source, multiPartsObjectOptions *MultiPartsObjectOptions, multiPartsUploaderOptions *MultiPartsUploaderOptions) { + if options := makeResumableRecorderOpenArgs(ctx, src, multiPartsObjectOptions, multiPartsUploaderOptions); options != nil { + if resumableRecorder := multiPartsUploaderOptions.ResumableRecorder; resumableRecorder != nil { + resumableRecorder.Delete(options) + } + } +} + +func makeResumableRecorderOpenArgs(ctx context.Context, src source.Source, multiPartsObjectOptions *MultiPartsObjectOptions, multiPartsUploaderOptions *MultiPartsUploaderOptions)
*resumablerecorder.ResumableRecorderOpenArgs { + sourceID, err := src.SourceID() + if err != nil || sourceID == "" { + return nil + } + + upToken, err := getUpToken(multiPartsUploaderOptions.Credentials, &multiPartsObjectOptions.ObjectOptions, multiPartsUploaderOptions.UpTokenProvider) + if err != nil { + return nil + } + accessKey, err := upToken.GetAccessKey(ctx) + if err != nil { + return nil + } + + bucketName, err := guessBucketName(ctx, multiPartsObjectOptions.BucketName, upToken) + if err != nil { + return nil + } + + var objectName string + if multiPartsObjectOptions.ObjectName != nil { + objectName = *multiPartsObjectOptions.ObjectName + } + + var totalSize uint64 + if sizedSource, ok := src.(source.SizedSource); ok { + if ts, err := sizedSource.TotalSize(); err == nil { + totalSize = ts + } + } + + regions, err := getRegions(ctx, upToken, bucketName, &multiPartsUploaderOptions.Options) + if err != nil || len(regions) == 0 { + return nil + } + + return &resumablerecorder.ResumableRecorderOpenArgs{ + AccessKey: accessKey, + BucketName: bucketName, + ObjectName: objectName, + SourceID: sourceID, + PartSize: multiPartsObjectOptions.PartSize, + TotalSize: totalSize, + UpEndpoints: regions[0].Up, + } +} + +func guessBucketName(ctx context.Context, bucketName string, upTokenProvider uptoken.Provider) (string, error) { + if bucketName == "" { + if putPolicy, err := upTokenProvider.GetPutPolicy(ctx); err != nil { + return "", err + } else if bucketName, err = putPolicy.GetBucketName(); err != nil { + return "", err + } + } + return bucketName, nil +} diff --git a/storagev2/uploader/resumable_recorder/dummy.go b/storagev2/uploader/resumable_recorder/dummy.go new file mode 100644 index 00000000..4b458e69 --- /dev/null +++ b/storagev2/uploader/resumable_recorder/dummy.go @@ -0,0 +1,28 @@ +package resumablerecorder + +type dummyResumableRecorder struct{} + +// Creates a no-op dummy resumable recorder +func NewDummyResumableRecorder() ResumableRecorder { + return dummyResumableRecorder{} +} + +func (dummyResumableRecorder) OpenForReading(*ResumableRecorderOpenArgs) ReadableResumableRecorderMedium { + return nil +} + +func (dummyResumableRecorder) OpenForAppending(*ResumableRecorderOpenArgs) WriteableResumableRecorderMedium { + return nil +} + +func (dummyResumableRecorder) OpenForCreatingNew(*ResumableRecorderOpenArgs) WriteableResumableRecorderMedium { + return nil +} + +func (dummyResumableRecorder) Delete(*ResumableRecorderOpenArgs) error { + return nil +} + +func (dummyResumableRecorder) ClearExpired() error { + return nil +} diff --git a/storagev2/uploader/resumable_recorder/json_file_system.go b/storagev2/uploader/resumable_recorder/json_file_system.go new file mode 100644 index 00000000..c49707aa --- /dev/null +++ b/storagev2/uploader/resumable_recorder/json_file_system.go @@ -0,0 +1,302 @@ +package resumablerecorder + +import ( + "crypto/md5" + "crypto/sha1" + "encoding/binary" + "encoding/hex" + "encoding/json" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "time" + + "github.com/gofrs/flock" + "github.com/qiniu/go-sdk/v7/storagev2/region" + "modernc.org/fileutil" +) + +type ( + jsonFileSystemResumableRecorder struct { + dirPath string + } + jsonFileSystemResumableRecorderReadableMedium struct { + file *os.File + decoder *json.Decoder + } + jsonFileSystemResumableRecorderWritableMedium struct { + file *os.File + encoder *json.Encoder + } +) + +const jsonFileSystemResumableRecorderLock = "json_file_system_resumable_recorder_01.lock" + +// Creates a file-system based resumable recorder that stores records as JSON +func
NewJsonFileSystemResumableRecorder(dirPath string) ResumableRecorder { + return jsonFileSystemResumableRecorder{dirPath} +} + +func (frr jsonFileSystemResumableRecorder) OpenForReading(options *ResumableRecorderOpenArgs) ReadableResumableRecorderMedium { + if options == nil { + options = &ResumableRecorderOpenArgs{} + } + if options.SourceID == "" { + return nil + } + + err := os.MkdirAll(frr.dirPath, 0700) + if err != nil { + return nil + } + file, err := os.Open(frr.getFilePath(options)) + if err != nil { + return nil + } + _ = fileutil.Fadvise(file, 0, 0, fileutil.POSIX_FADV_SEQUENTIAL) + decoder := json.NewDecoder(file) + if verified, err := jsonFileSystemResumableRecorderVerifyHeaderLine(decoder, options); err != nil || !verified { + return nil + } + return jsonFileSystemResumableRecorderReadableMedium{file, decoder} +} + +func (frr jsonFileSystemResumableRecorder) OpenForAppending(options *ResumableRecorderOpenArgs) WriteableResumableRecorderMedium { + if options == nil { + options = &ResumableRecorderOpenArgs{} + } + if options.SourceID == "" { + return nil + } + + file, err := os.OpenFile(frr.getFilePath(options), os.O_WRONLY|os.O_APPEND, 0600) + if err != nil { + return nil + } + return jsonFileSystemResumableRecorderWritableMedium{file, json.NewEncoder(file)} +} + +func (frr jsonFileSystemResumableRecorder) OpenForCreatingNew(options *ResumableRecorderOpenArgs) WriteableResumableRecorderMedium { + if options == nil { + options = &ResumableRecorderOpenArgs{} + } + if options.SourceID == "" { + return nil + } + + file, err := os.OpenFile(frr.getFilePath(options), os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return nil + } + encoder := json.NewEncoder(file) + if err := jsonFileSystemResumableRecorderWriteHeaderLine(encoder, options); err != nil { + return nil + } + return jsonFileSystemResumableRecorderWritableMedium{file, encoder} +} + +func (frr jsonFileSystemResumableRecorder) Delete(options *ResumableRecorderOpenArgs) error { + return os.Remove(frr.getFilePath(options)) +} + +func (frr jsonFileSystemResumableRecorder) ClearExpired() error { + jsonFileSystemResumableRecorderLockFilePath := filepath.Join(frr.dirPath, jsonFileSystemResumableRecorderLock) + lock := flock.New(jsonFileSystemResumableRecorderLockFilePath) + locked, err := lock.TryLock() + if err != nil { + return err + } else if !locked { + return nil + } + defer lock.Unlock() + + fileInfos, err := ioutil.ReadDir(frr.dirPath) + if err != nil { + return err + } + for _, fileInfo := range fileInfos { + if !fileInfo.Mode().IsRegular() { + continue + } + if fileInfo.Name() == jsonFileSystemResumableRecorderLock { + continue + } + filePath := filepath.Join(frr.dirPath, fileInfo.Name()) + if err = frr.tryToClearPath(filePath); err != nil { + os.Remove(filePath) + } + } + return nil +} + +func (frr jsonFileSystemResumableRecorder) tryToClearPath(filePath string) error { + file, err := os.Open(filePath) + if err != nil { + return err + } + defer file.Close() + + _ = fileutil.Fadvise(file, 0, 0, fileutil.POSIX_FADV_SEQUENTIAL) + decoder := json.NewDecoder(file) + var ( + lineOptions jsonBasedResumableRecorderOpenArgs + jrr jsonBasedResumableRecord + ) + if err = decoder.Decode(&lineOptions); err != nil { + return nil + } + if lineOptions.Version != fileSystemResumableRecorderVersion { + return nil + } + for { + if err := decoder.Decode(&jrr); err != nil { + if err == io.EOF { + break + } + return err + } else if time.Now().Before(time.Unix(jrr.ExpiredAt, 0)) { + return nil + } + } + return 
errors.New("no valid resumable record") +} + +func (frr jsonFileSystemResumableRecorder) fileName(options *ResumableRecorderOpenArgs) string { + hasher := sha1.New() + hasher.Write([]byte(options.SourceID)) + hasher.Write([]byte{0}) + hasher.Write([]byte(options.AccessKey)) + hasher.Write([]byte{0}) + hasher.Write([]byte(options.BucketName)) + hasher.Write([]byte{0}) + hasher.Write([]byte(options.ObjectName)) + hasher.Write([]byte{0}) + for _, endpoint := range options.UpEndpoints.Preferred { + hasher.Write([]byte(endpoint)) + hasher.Write([]byte{1}) + } + hasher.Write([]byte{0}) + for _, endpoint := range options.UpEndpoints.Alternative { + hasher.Write([]byte(endpoint)) + hasher.Write([]byte{1}) + } + hasher.Write([]byte{0}) + binary.Write(hasher, binary.LittleEndian, options.PartSize) + binary.Write(hasher, binary.LittleEndian, options.TotalSize) + return hex.EncodeToString(hasher.Sum(nil)) +} + +func (frr jsonFileSystemResumableRecorder) getFilePath(options *ResumableRecorderOpenArgs) string { + return filepath.Join(frr.dirPath, frr.fileName(options)) +} + +type ( + jsonBasedResumableRecorderOpenArgs struct { + AccessKey string `json:"a,omitempty"` + BucketName string `json:"b,omitempty"` + ObjectName string `json:"o,omitempty"` + SourceID string `json:"s,omitempty"` + PartSize uint64 `json:"p,omitempty"` + TotalSize uint64 `json:"t,omitempty"` + UpEndpoints region.Endpoints `json:"u,omitempty"` + Version uint32 `json:"v,omitempty"` + } + + jsonBasedResumableRecord struct { + UploadID string `json:"u,omitempty"` + PartID string `json:"p,omitempty"` + Offset uint64 `json:"o,omitempty"` + PartNumber uint64 `json:"n,omitempty"` + PartSize uint64 `json:"s,omitempty"` + ExpiredAt int64 `json:"e,omitempty"` + CRC32 uint32 `json:"c,omitempty"` + MD5 string `json:"m,omitempty"` + } +) + +const fileSystemResumableRecorderVersion uint32 = 1 + +func jsonFileSystemResumableRecorderWriteHeaderLine(encoder *json.Encoder, options *ResumableRecorderOpenArgs) error { + return encoder.Encode(&jsonBasedResumableRecorderOpenArgs{ + AccessKey: options.AccessKey, + BucketName: options.BucketName, + ObjectName: options.ObjectName, + SourceID: options.SourceID, + PartSize: options.PartSize, + TotalSize: options.TotalSize, + UpEndpoints: options.UpEndpoints, + Version: fileSystemResumableRecorderVersion, + }) +} + +func jsonFileSystemResumableRecorderVerifyHeaderLine(decoder *json.Decoder, options *ResumableRecorderOpenArgs) (bool, error) { + var lineOptions jsonBasedResumableRecorderOpenArgs + err := decoder.Decode(&lineOptions) + if err != nil { + return false, err + } + return reflect.DeepEqual(lineOptions, jsonBasedResumableRecorderOpenArgs{ + AccessKey: options.AccessKey, + BucketName: options.BucketName, + ObjectName: options.ObjectName, + SourceID: options.SourceID, + PartSize: options.PartSize, + TotalSize: options.TotalSize, + UpEndpoints: options.UpEndpoints, + Version: fileSystemResumableRecorderVersion, + }), nil +} + +func (medium jsonFileSystemResumableRecorderReadableMedium) Next(rr *ResumableRecord) error { + var jrr jsonBasedResumableRecord + for { + if err := medium.decoder.Decode(&jrr); err != nil { + return err + } else if time.Now().Before(time.Unix(jrr.ExpiredAt, 0)) { + break + } + } + md5Bytes, err := hex.DecodeString(jrr.MD5) + if err != nil { + return err + } else if len(md5Bytes) != md5.Size { + return errors.New("invalid md5 bytes") + } + + *rr = ResumableRecord{ + UploadID: jrr.UploadID, + PartID: jrr.PartID, + Offset: jrr.Offset, + PartNumber: jrr.PartNumber, + PartSize: 
jrr.PartSize, + ExpiredAt: time.Unix(jrr.ExpiredAt, 0), + CRC32: jrr.CRC32, + } + copy(rr.MD5[:], md5Bytes) + return nil +} + +func (medium jsonFileSystemResumableRecorderReadableMedium) Close() error { + return medium.file.Close() +} + +func (medium jsonFileSystemResumableRecorderWritableMedium) Write(rr *ResumableRecord) error { + jrr := jsonBasedResumableRecord{ + UploadID: rr.UploadID, + PartID: rr.PartID, + Offset: rr.Offset, + PartNumber: rr.PartNumber, + PartSize: rr.PartSize, + ExpiredAt: rr.ExpiredAt.Unix(), + CRC32: rr.CRC32, + MD5: hex.EncodeToString(rr.MD5[:]), + } + return medium.encoder.Encode(&jrr) +} + +func (medium jsonFileSystemResumableRecorderWritableMedium) Close() error { + return medium.file.Close() +} diff --git a/storagev2/uploader/resumable_recorder/json_file_system_test.go b/storagev2/uploader/resumable_recorder/json_file_system_test.go new file mode 100644 index 00000000..c1320ebf --- /dev/null +++ b/storagev2/uploader/resumable_recorder/json_file_system_test.go @@ -0,0 +1,122 @@ +//go:build unit +// +build unit + +package resumablerecorder_test + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/qiniu/go-sdk/v7/storagev2/region" + resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/uploader/resumable_recorder" +) + +func TestJsonFileSystemResumableRecorder(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + options := resumablerecorder.ResumableRecorderOpenArgs{ + AccessKey: "testak", + BucketName: "test-bucket", + ObjectName: "test-object", + SourceID: "/tmp/fakeFile", + PartSize: 4 * 1024 * 1024, + TotalSize: 100 * 1024 * 1024, + UpEndpoints: region.Endpoints{ + Preferred: []string{"https://uc.qiniuapi.com", "https://kodo-config.qiniuapi.com"}, + Alternative: []string{"https://uc.qbox.me"}, + }, + } + fs := resumablerecorder.NewJsonFileSystemResumableRecorder(tmpDir) + writableMedium := fs.OpenForCreatingNew(&options) + for i := uint64(0); i < 3; i++ { + if err = writableMedium.Write(&resumablerecorder.ResumableRecord{ + UploadID: "test-upload-id", + PartID: fmt.Sprintf("test-part-%d", i+1), + Offset: i * 4 * 1024 * 1024, + PartNumber: i + 1, + ExpiredAt: time.Now().Add(10 * time.Second), + }); err != nil { + t.Fatal(err) + } + } + if err = writableMedium.Close(); err != nil { + t.Fatal(err) + } + writableMedium = fs.OpenForAppending(&options) + if err = writableMedium.Write(&resumablerecorder.ResumableRecord{ + UploadID: "test-upload-id", + PartID: fmt.Sprintf("test-part-%d", 3+1), + Offset: 3 * 4 * 1024 * 1024, + PartNumber: 3 + 1, + ExpiredAt: time.Now().Add(10 * time.Second), + }); err != nil { + t.Fatal(err) + } + if err = writableMedium.Close(); err != nil { + t.Fatal(err) + } + + options2 := options + options2.ObjectName = "test-object-2" + writableMedium = fs.OpenForCreatingNew(&options2) + for i := uint64(0); i < 4; i++ { + if err = writableMedium.Write(&resumablerecorder.ResumableRecord{ + UploadID: "test-upload-id-2", + PartID: fmt.Sprintf("test-part-%d", i+1), + Offset: i * 4 * 1024 * 1024, + PartNumber: i + 1, + ExpiredAt: time.Now().Add(10 * time.Second), + }); err != nil { + t.Fatal(err) + } + } + if err = writableMedium.Close(); err != nil { + t.Fatal(err) + } + + readableMedium := fs.OpenForReading(&options) + for i := uint64(0); i < 4; i++ { + var rr resumablerecorder.ResumableRecord + + if err = readableMedium.Next(&rr); err != nil { + t.Fatal(err) + } + + if rr.UploadID != "test-upload-id" { + t.Fatalf("unexpected uploadID: %s", 
rr.UploadID)
+		}
+		if rr.PartID != fmt.Sprintf("test-part-%d", i+1) {
+			t.Fatalf("unexpected partID: %s", rr.PartID)
+		}
+		if rr.Offset != i*4*1024*1024 {
+			t.Fatalf("unexpected offset: %d", rr.Offset)
+		}
+		if rr.PartNumber != i+1 {
+			t.Fatalf("unexpected partNumber: %d", rr.PartNumber)
+		}
+	}
+	if err = readableMedium.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	time.Sleep(11 * time.Second)
+	if err = fs.ClearExpired(); err != nil {
+		t.Fatal(err)
+	}
+
+	readableMedium = fs.OpenForReading(&options)
+	if readableMedium != nil {
+		t.Fatalf("unexpected readable medium")
+	}
+
+	readableMedium = fs.OpenForReading(&options2)
+	if readableMedium != nil {
+		t.Fatalf("unexpected readable medium")
+	}
+}
diff --git a/storagev2/uploader/resumable_recorder/resumable_recorder.go b/storagev2/uploader/resumable_recorder/resumable_recorder.go
new file mode 100644
index 00000000..f9a42429
--- /dev/null
+++ b/storagev2/uploader/resumable_recorder/resumable_recorder.go
@@ -0,0 +1,96 @@
+package resumablerecorder
+
+import (
+	"crypto/md5"
+	"io"
+	"time"
+
+	"github.com/qiniu/go-sdk/v7/storagev2/region"
+)
+
+type (
+	// Options for opening a resumable recorder medium
+	ResumableRecorderOpenArgs struct {
+		// AccessKey
+		AccessKey string
+
+		// Bucket name
+		BucketName string
+
+		// Object name
+		ObjectName string
+
+		// Data source ID
+		SourceID string
+
+		// Part size
+		PartSize uint64
+
+		// Total size of the data source
+		TotalSize uint64
+
+		// Upload service URLs
+		UpEndpoints region.Endpoints
+	}
+
+	// Resumable recorder interface
+	ResumableRecorder interface {
+		// Open a recorder medium for reading records
+		OpenForReading(*ResumableRecorderOpenArgs) ReadableResumableRecorderMedium
+
+		// Open a recorder medium for appending records
+		OpenForAppending(*ResumableRecorderOpenArgs) WriteableResumableRecorderMedium
+
+		// Create a new recorder medium for appending records
+		OpenForCreatingNew(*ResumableRecorderOpenArgs) WriteableResumableRecorderMedium
+
+		// Delete a recorder medium
+		Delete(*ResumableRecorderOpenArgs) error
+
+		// Clear expired recorder media
+		ClearExpired() error
+	}
+
+	// Read-only resumable recorder medium interface
+	ReadableResumableRecorderMedium interface {
+		io.Closer
+
+		// Read the next record
+		Next(*ResumableRecord) error
+	}
+
+	// Append-only resumable recorder medium interface
+	WriteableResumableRecorderMedium interface {
+		io.Closer
+
+		// Write the next record
+		Write(*ResumableRecord) error
+	}
+
+	// Resumable record
+	ResumableRecord struct {
+		// Upload ID
+		UploadID string
+
+		// Part ID
+		PartID string
+
+		// Offset of the part
+		Offset uint64
+
+		// Size of the part
+		PartSize uint64
+
+		// Number of the part
+		PartNumber uint64
+
+		// Expiration time of the part
+		ExpiredAt time.Time
+
+		// CRC32 checksum of the part content
+		CRC32 uint32
+
+		// MD5 checksum of the part content
+		MD5 [md5.Size]byte
+	}
+)
diff --git a/storagev2/uploader/schedulers.go b/storagev2/uploader/schedulers.go
new file mode 100644
index 00000000..5930463a
--- /dev/null
+++ b/storagev2/uploader/schedulers.go
@@ -0,0 +1,175 @@
+package uploader
+
+import (
+	"context"
+	"sort"
+	"sync"
+
+	"github.com/qiniu/go-sdk/v7/storagev2/uploader/source"
+	"golang.org/x/sync/errgroup"
+)
+
+type (
+	serialMultiPartsUploaderScheduler struct {
+		uploader MultiPartsUploader
+		partSize uint64
+	}
+
+	// Options for the serial multi-parts upload scheduler
+	serialMultiPartsUploaderSchedulerOptions struct {
+		PartSize uint64 // Part size
+	}
+
+	concurrentMultiPartsUploaderScheduler struct {
+		uploader    MultiPartsUploader
+		partSize    uint64
+		concurrency int
+	}
+
+	// Options for the concurrent multi-parts upload scheduler
+	concurrentMultiPartsUploaderSchedulerOptions struct {
+		PartSize    uint64 // Part size
+		Concurrency int    // Concurrency
+	}
+)
+
+// Create a serial multi-parts upload scheduler
+func newSerialMultiPartsUploaderScheduler(uploader MultiPartsUploader, options *serialMultiPartsUploaderSchedulerOptions) multiPartsUploaderScheduler {
+	if options == nil {
+		options = &serialMultiPartsUploaderSchedulerOptions{}
+	}
+	partSize := options.PartSize
+	if partSize == 0 {
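+		// The zero value falls back to 4 MiB, and out-of-range values are
+		// clamped to the [1 MiB, 1 GiB] range; the same rule recurs in
+		// newConcurrentMultiPartsUploaderScheduler and NewUploadManager below.
+		// Expressed as a standalone helper (a sketch only; clampPartSize is
+		// not part of this change):
+		//
+		//	func clampPartSize(partSize uint64) uint64 {
+		//		switch {
+		//		case partSize == 0:
+		//			return 1 << 22 // default: 4 MiB
+		//		case partSize < (1 << 20):
+		//			return 1 << 20 // floor: 1 MiB
+		//		case partSize > (1 << 30):
+		//			return 1 << 30 // ceiling: 1 GiB
+		//		default:
+		//			return partSize
+		//		}
+		//	}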
partSize = 1 << 22
+	} else if partSize < (1 << 20) {
+		partSize = 1 << 20
+	} else if partSize > (1 << 30) {
+		partSize = 1 << 30
+	}
+	return serialMultiPartsUploaderScheduler{uploader, partSize}
+}
+
+// Create a concurrent multi-parts upload scheduler
+func newConcurrentMultiPartsUploaderScheduler(uploader MultiPartsUploader, options *concurrentMultiPartsUploaderSchedulerOptions) multiPartsUploaderScheduler {
+	if options == nil {
+		options = &concurrentMultiPartsUploaderSchedulerOptions{}
+	}
+	partSize := options.PartSize
+	if partSize == 0 {
+		partSize = 1 << 22
+	} else if partSize < (1 << 20) {
+		partSize = 1 << 20
+	} else if partSize > (1 << 30) {
+		partSize = 1 << 30
+	}
+	concurrency := options.Concurrency
+	if concurrency <= 0 {
+		concurrency = 4
+	}
+
+	return concurrentMultiPartsUploaderScheduler{uploader, partSize, concurrency}
+}
+
+func (scheduler serialMultiPartsUploaderScheduler) UploadParts(ctx context.Context, initialized InitializedParts, src source.Source, options *UploadPartsOptions) ([]UploadedPart, error) {
+	parts := make([]UploadedPart, 0)
+	for {
+		part, err := src.Slice(scheduler.partSize)
+		if err != nil {
+			return nil, err
+		}
+		if part == nil {
+			break
+		}
+		var uploadPartParam UploadPartOptions
+		if options != nil && options.OnUploadingProgress != nil {
+			uploadPartParam.OnUploadingProgress = func(progress *UploadingPartProgress) {
+				options.OnUploadingProgress(part.PartNumber(), &UploadingPartProgress{Uploaded: progress.Uploaded, PartSize: part.Size()})
+			}
+		}
+		uploadedPart, err := scheduler.uploader.UploadPart(ctx, initialized, part, &uploadPartParam)
+		if err != nil {
+			return nil, err
+		}
+		if options != nil && options.OnPartUploaded != nil {
+			if err = options.OnPartUploaded(uploadedPart); err != nil {
+				return nil, err
+			}
+		}
+		parts = append(parts, uploadedPart)
+	}
+	return parts, nil
+}
+
+func (scheduler serialMultiPartsUploaderScheduler) MultiPartsUploader() MultiPartsUploader {
+	return scheduler.uploader
+}
+
+func (scheduler serialMultiPartsUploaderScheduler) PartSize() uint64 {
+	return scheduler.partSize
+}
+
+func (scheduler concurrentMultiPartsUploaderScheduler) UploadParts(ctx context.Context, initialized InitializedParts, src source.Source, options *UploadPartsOptions) ([]UploadedPart, error) {
+	var (
+		parts     []UploadedPart
+		partsLock sync.Mutex
+	)
+	if ss, ok := src.(source.SizedSource); ok {
+		totalSize, err := ss.TotalSize()
+		if err != nil {
+			return nil, err
+		}
+		partsCount := (totalSize + scheduler.partSize - 1) / scheduler.partSize
+		parts = make([]UploadedPart, 0, partsCount)
+	}
+	g, ctx := errgroup.WithContext(ctx)
+	g.SetLimit(scheduler.concurrency)
+	var onUploadingProgressMutex sync.Mutex
+	for {
+		part, err := src.Slice(scheduler.partSize)
+		if err != nil {
+			_ = g.Wait() // wait for in-flight part uploads before returning the slicing error
+			return nil, err
+		}
+		if part == nil {
+			break
+		}
+		g.Go(func() error {
+			var uploadPartParam UploadPartOptions
+			if options != nil && options.OnUploadingProgress != nil {
+				uploadPartParam.OnUploadingProgress = func(progress *UploadingPartProgress) {
+					onUploadingProgressMutex.Lock()
+					defer onUploadingProgressMutex.Unlock()
+					options.OnUploadingProgress(part.PartNumber(), progress)
+				}
+			}
+			uploadedPart, err := scheduler.uploader.UploadPart(ctx, initialized, part, &uploadPartParam)
+			if err != nil {
+				return err
+			}
+			if options != nil && options.OnPartUploaded != nil {
+				if err = options.OnPartUploaded(uploadedPart); err != nil {
+					return err
+				}
+			}
+
+			partsLock.Lock()
+			defer partsLock.Unlock()
+			parts = append(parts, uploadedPart)
+			return nil
+		})
+	}
+	if err := g.Wait(); err != nil {
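+		// Note on the pattern above: errgroup's SetLimit (added to
+		// golang.org/x/sync in 2022) bounds the fan-out, so Go blocks once
+		// `concurrency` part uploads are in flight and the derived ctx is
+		// cancelled on the first failure. In sketch form:
+		//
+		//	g, ctx := errgroup.WithContext(ctx)
+		//	g.SetLimit(concurrency)   // at most `concurrency` goroutines
+		//	g.Go(func() error { ... }) // blocks when the limit is reached
+		//	err := g.Wait()           // first non-nil error, after all return
+		//
+		// Because parts finish in arbitrary order, the slice is sorted by
+		// Offset below before being returned to the caller.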
+ return nil, err + } + sort.Slice(parts, func(i, j int) bool { + return parts[i].Offset() < parts[j].Offset() + }) + return parts, nil +} + +func (scheduler concurrentMultiPartsUploaderScheduler) MultiPartsUploader() MultiPartsUploader { + return scheduler.uploader +} + +func (scheduler concurrentMultiPartsUploaderScheduler) PartSize() uint64 { + return scheduler.partSize +} diff --git a/storagev2/uploader/schedulers_test.go b/storagev2/uploader/schedulers_test.go new file mode 100644 index 00000000..4596c28d --- /dev/null +++ b/storagev2/uploader/schedulers_test.go @@ -0,0 +1,273 @@ +//go:build unit +// +build unit + +package uploader + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "hash/crc32" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/http/httptest" + "os" + "strconv" + "strings" + "testing" + "time" + + "github.com/gorilla/mux" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" + "github.com/qiniu/go-sdk/v7/storagev2/uploader/source" +) + +func TestMultiPartsUploaderScheduler(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + if _, err = io.CopyN(tmpFile, r, 5*1024*1024); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + var server *httptest.Server + serveMux := mux.NewRouter() + serveMux.HandleFunc("/mkblk/4194304", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + expectedBody, err := internal_io.ReadAll(io.NewSectionReader(tmpFile, 0, 4*1024*1024)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(actualBody, expectedBody) { + t.Fatalf("unexpected body") + } + jsonBody, err := json.Marshal(&apis.ResumableUploadV1MakeBlockResponse{ + Ctx: "testctx1", + Checksum: "testchecksum1", + Crc32: int64(crc32.ChecksumIEEE(actualBody)), + Host: server.URL, + ExpiredAt: time.Now().Add(1 * time.Hour).Unix(), + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBody) + }).Methods(http.MethodPost) + serveMux.HandleFunc("/mkblk/1048576", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + expectedBody, err := internal_io.ReadAll(io.NewSectionReader(tmpFile, 4*1024*1024, 1*1024*1024)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(actualBody, expectedBody) { + t.Fatalf("unexpected body") + } + jsonBody, err := json.Marshal(&apis.ResumableUploadV1MakeBlockResponse{ + Ctx: "testctx2", + Checksum: "testchecksum2", + Crc32: int64(crc32.ChecksumIEEE(actualBody)), + Host: server.URL, + ExpiredAt: time.Now().Add(1 * time.Hour).Unix(), + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBody) + }).Methods(http.MethodPost) + serveMux.PathPrefix("/mkfile/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if 
!strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + components := strings.Split(strings.TrimPrefix(r.URL.Path, "/mkfile/"), "/") + if components[0] != strconv.FormatInt(5*1024*1024, 10) { + t.Fatalf("unexpected fileSize") + } + components = components[1:] + for len(components) > 0 { + switch components[0] { + case "key": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "testkey" { + t.Fatalf("unexpected key") + } + case "fname": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "testfilename" { + t.Fatalf("unexpected fname") + } + case "mimeType": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "application/json" { + t.Fatalf("unexpected mimeType") + } + case "x-qn-meta-a": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "b" { + t.Fatalf("unexpected x-qn-meta-a") + } + case "x-qn-meta-c": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "d" { + t.Fatalf("unexpected x-qn-meta-c") + } + case "x:a": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "b" { + t.Fatalf("unexpected x:a") + } + case "x:c": + value := components[1] + valueBytes, err := base64.URLEncoding.DecodeString(value) + if err != nil { + t.Fatal(err) + } + if string(valueBytes) != "d" { + t.Fatalf("unexpected x:c") + } + default: + t.Fatalf("unexpected component key: %s", components[0]) + } + components = components[2:] + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(actualBody) != "testctx1,testctx2" { + t.Fatalf("unexpected body") + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }).Methods(http.MethodPost) + server = httptest.NewServer(serveMux) + defer server.Close() + + schedulers := []multiPartsUploaderScheduler{ + newSerialMultiPartsUploaderScheduler(NewMultiPartsUploaderV1(&MultiPartsUploaderOptions{ + Options: http_client.Options{ + Regions: ®ion.Region{Up: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }), &serialMultiPartsUploaderSchedulerOptions{PartSize: 1 << 22}), + newConcurrentMultiPartsUploaderScheduler(NewMultiPartsUploaderV1(&MultiPartsUploaderOptions{ + Options: http_client.Options{ + Regions: ®ion.Region{Up: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }), &concurrentMultiPartsUploaderSchedulerOptions{PartSize: 1 << 22, Concurrency: 2}), + } + key := "testkey" + for _, scheduler := range schedulers { + src, err := source.NewFileSource(tmpFile.Name()) + if err != nil { + t.Fatal(err) + } + initializedPart, err := scheduler.MultiPartsUploader().InitializeParts(context.Background(), src, &MultiPartsObjectOptions{ + ObjectOptions{ + BucketName: "testbucket", + ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": 
"b", "c": "d"}, + }, + 4 * 1024 * 1024, + }) + if err != nil { + t.Fatal(err) + } + defer initializedPart.Close() + + var lastUploaded [2]uint64 + var uploadedPartSizes [2]uint64 + uploadedParts, err := scheduler.UploadParts(context.Background(), initializedPart, src, &UploadPartsOptions{ + OnUploadingProgress: func(partNumber uint64, progress *UploadingPartProgress) { + if partNumber == 1 && progress.PartSize != 4*1024*1024 { + t.Fatalf("unexpected partSize") + } else if partNumber == 2 && progress.PartSize != 1024*1024 { + t.Fatalf("unexpected partSize") + } else if progress.Uploaded < lastUploaded[partNumber-1] || progress.Uploaded > progress.PartSize { + t.Fatalf("unexpected uploaded") + } + lastUploaded[partNumber-1] = progress.Uploaded + }, + OnPartUploaded: func(part UploadedPart) error { + if uploadedPartSizes[part.PartNumber()-1] > 0 { + t.Fatalf("unexpected OnPartUploaded call") + } else { + uploadedPartSizes[part.PartNumber()-1] = part.PartSize() + } + return nil + }, + }) + if err != nil { + t.Fatal(err) + } + var returnValue struct { + Ok bool `json:"ok"` + } + err = scheduler.MultiPartsUploader().CompleteParts(context.Background(), initializedPart, uploadedParts, &returnValue) + if err != nil { + t.Fatal(err) + } else if !returnValue.Ok { + t.Fatalf("unexpected response body") + } else if lastUploaded[0] != 4*1024*1024 || lastUploaded[1] != 1024*1024 { + t.Fatalf("unexpected OnUploadingProgress call") + } else if uploadedPartSizes[0] != 4*1024*1024 || uploadedPartSizes[1] != 1024*1024 { + t.Fatalf("unexpected OnPartUploaded call") + } + } +} diff --git a/storagev2/uploader/source/source.go b/storagev2/uploader/source/source.go new file mode 100644 index 00000000..686e9af6 --- /dev/null +++ b/storagev2/uploader/source/source.go @@ -0,0 +1,353 @@ +package source + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" + + internal_io "github.com/qiniu/go-sdk/v7/internal/io" +) + +type ( + // 数据源 + Source interface { + io.Closer + + // 切片 + Slice(uint64) (Part, error) + + // 数据源 ID + SourceID() (string, error) + + // 获取文件,如果数据源不是文件,则返回 nil + GetFile() *os.File + } + + // 预知大小的数据源 + SizedSource interface { + Source + + // 获取数据源大小 + TotalSize() (uint64, error) + } + + // 可重置的数据源 + ResetableSource interface { + Source + + // 重置数据源 + Reset() error + } + + // 分片 + Part interface { + io.ReadSeeker + + // 分片偏移量 + Offset() uint64 + + // 分片大小 + Size() uint64 + + // 分片编号,从 1 开始 + PartNumber() uint64 + } + + seekablePart struct { + *io.SectionReader + partNumber, offset uint64 + } + + unseekablePart struct { + *bytes.Reader + partNumber, offset, size uint64 + } + + readSeekCloseSource struct { + rscra *readSeekCloseReaderAt + off uint64 + sourceID string + partNumber uint64 + m sync.Mutex + } + + readSeekCloseReaderAt struct { + r internal_io.ReadSeekCloser + off int64 + m sync.Mutex + } + + readCloseSource struct { + r io.ReadCloser + sourceID string + offset, partNumber uint64 + } + + ReadAtSeekCloser interface { + io.ReaderAt + io.Seeker + io.Closer + } + + readAtSeekCloseSource struct { + r ReadAtSeekCloser + off uint64 + sourceID string + partNumber uint64 + m sync.Mutex + } +) + +// 将 io.ReadSeekCloser 封装为数据源 +func NewReadSeekCloserSource(r internal_io.ReadSeekCloser, sourceID string) Source { + return &readSeekCloseSource{rscra: newReadSeekCloseReaderAt(r), sourceID: sourceID} +} + +func (rscs *readSeekCloseSource) Slice(n uint64) (Part, error) { + rscs.m.Lock() + defer rscs.m.Unlock() + + offset := rscs.off + if totalSize, err := rscs.TotalSize(); err 
!= nil { + return nil, err + } else if offset >= totalSize { + return nil, nil + } else if n > totalSize-offset { + n = totalSize - offset + } + rscs.off += n + rscs.partNumber += 1 + return seekablePart{ + io.NewSectionReader(rscs.rscra, int64(offset), int64(n)), + rscs.partNumber, + uint64(offset), + }, nil +} + +func (rscs *readSeekCloseSource) TotalSize() (uint64, error) { + return rscs.rscra.TotalSize() +} + +func (rscs *readSeekCloseSource) SourceID() (string, error) { + return rscs.sourceID, nil +} + +func (rscs *readSeekCloseSource) Close() error { + return rscs.rscra.Close() +} + +func (rscs *readSeekCloseSource) Reset() error { + rscs.m.Lock() + defer rscs.m.Unlock() + + rscs.off = 0 + rscs.partNumber = 0 + return nil +} + +func (rscs *readSeekCloseSource) GetFile() *os.File { + return rscs.rscra.GetFile() +} + +func newReadSeekCloseReaderAt(r internal_io.ReadSeekCloser) *readSeekCloseReaderAt { + return &readSeekCloseReaderAt{r: r, off: -1} +} + +func (rscra *readSeekCloseReaderAt) ReadAt(b []byte, off int64) (n int, err error) { + rscra.m.Lock() + defer rscra.m.Unlock() + + if rscra.off != off { + if rscra.off, err = rscra.r.Seek(off, io.SeekStart); err != nil { + return + } + } + n, err = rscra.r.Read(b) + rscra.off += int64(n) + return +} + +func (rscra *readSeekCloseReaderAt) TotalSize() (uint64, error) { + rscra.m.Lock() + defer rscra.m.Unlock() + + var err error + + if rscra.off < 0 { + if rscra.off, err = rscra.r.Seek(0, io.SeekCurrent); err != nil { + return 0, err + } + } + len, err := rscra.r.Seek(0, io.SeekEnd) + if err != nil { + return 0, err + } + _, err = rscra.r.Seek(rscra.off, io.SeekStart) + return uint64(len), err +} + +func (rscra *readSeekCloseReaderAt) Close() error { + return rscra.r.Close() +} + +func (rscra *readSeekCloseReaderAt) GetFile() *os.File { + if file, ok := rscra.r.(*os.File); ok { + return file + } else { + return nil + } +} + +// 将 io.ReadAt + io.Seek + io.Closer 封装为数据源 +func NewReadAtSeekCloserSource(r ReadAtSeekCloser, sourceID string) Source { + return &readAtSeekCloseSource{r: r, sourceID: sourceID} +} + +func (racs *readAtSeekCloseSource) Slice(n uint64) (Part, error) { + racs.m.Lock() + defer racs.m.Unlock() + + offset := racs.off + if totalSize, err := racs.TotalSize(); err != nil { + return nil, err + } else if offset >= totalSize { + return nil, nil + } else if n > totalSize-offset { + n = totalSize - offset + } + racs.off += n + racs.partNumber += 1 + return seekablePart{ + io.NewSectionReader(racs.r, int64(offset), int64(n)), + racs.partNumber, + uint64(offset), + }, nil +} + +func (racs *readAtSeekCloseSource) TotalSize() (uint64, error) { + curPos, err := racs.r.Seek(0, io.SeekCurrent) + if err != nil { + return 0, err + } + totalSize, err := racs.r.Seek(0, io.SeekEnd) + if err != nil { + return 0, err + } + if _, err = racs.r.Seek(curPos, io.SeekStart); err != nil { + return 0, err + } + return uint64(totalSize), nil +} + +func (racs *readAtSeekCloseSource) SourceID() (string, error) { + return racs.sourceID, nil +} + +func (racs *readAtSeekCloseSource) Close() error { + return racs.r.Close() +} + +func (racs *readAtSeekCloseSource) Reset() error { + racs.m.Lock() + defer racs.m.Unlock() + + racs.off = 0 + racs.partNumber = 0 + return nil +} + +func (racs *readAtSeekCloseSource) GetFile() *os.File { + if file, ok := racs.r.(*os.File); ok { + return file + } else { + return nil + } +} + +// 将 io.ReadCloser 封装为数据源 +func NewReadCloserSource(r io.ReadCloser, sourceID string) Source { + return &readCloseSource{r: r, sourceID: 
sourceID} +} + +func (rcs *readCloseSource) Slice(n uint64) (Part, error) { + buf := make([]byte, n) + haveRead, err := io.ReadFull(rcs.r, buf) + if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { + return nil, err + } else if haveRead == 0 { + return nil, nil + } + return &unseekablePart{ + bytes.NewReader(buf[:haveRead]), + atomic.AddUint64(&rcs.partNumber, 1), + atomic.AddUint64(&rcs.offset, uint64(haveRead)) - uint64(haveRead), + uint64(haveRead), + }, nil +} + +func (rcs *readCloseSource) SourceID() (string, error) { + return rcs.sourceID, nil +} + +func (rcs *readCloseSource) Close() error { + return rcs.r.Close() +} + +func (racs *readCloseSource) GetFile() *os.File { + if file, ok := racs.r.(*os.File); ok { + return file + } else { + return nil + } +} + +func (p seekablePart) PartNumber() uint64 { + return p.partNumber +} + +func (p seekablePart) Offset() uint64 { + return p.offset +} + +func (p seekablePart) Size() uint64 { + return uint64(p.SectionReader.Size()) +} + +func (p unseekablePart) PartNumber() uint64 { + return p.partNumber +} + +func (p unseekablePart) Offset() uint64 { + return p.offset +} + +func (p unseekablePart) Size() uint64 { + return p.size +} + +// 将文件封装为数据源 +func NewFileSource(filePath string) (Source, error) { + file, err := os.Open(filePath) + if err != nil { + return nil, err + } + if !canSeekReally(file) { + return NewReadCloserSource(file, ""), nil + } else if absFilePath, err := filepath.Abs(filePath); err != nil { + return nil, err + } else if fileInfo, err := file.Stat(); err != nil { + return nil, err + } else { + sourceID := fmt.Sprintf("%d:%d:%s", fileInfo.Size(), fileInfo.ModTime().UnixNano(), absFilePath) + return NewReadAtSeekCloserSource(file, sourceID), nil + } +} + +func canSeekReally(seeker io.Seeker) bool { + _, err := seeker.Seek(0, io.SeekCurrent) + return err == nil +} diff --git a/storagev2/uploader/source/source_test.go b/storagev2/uploader/source/source_test.go new file mode 100644 index 00000000..628dab1d --- /dev/null +++ b/storagev2/uploader/source/source_test.go @@ -0,0 +1,107 @@ +//go:build unit +// +build unit + +package source_test + +import ( + "bytes" + "io" + "io/ioutil" + "math/rand" + "os" + "sync" + "testing" + "time" + + internal_io "github.com/qiniu/go-sdk/v7/internal/io" + uploader "github.com/qiniu/go-sdk/v7/storagev2/uploader/source" +) + +func TestSeekableSource(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "test-seekable-source-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + if _, err = io.CopyN(tmpFile, rand.New(rand.NewSource(time.Now().UnixNano())), 4096); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + source := uploader.NewReadSeekCloserSource(tmpFile, tmpFile.Name()) + testSource(t, source, tmpFile) + source = uploader.NewReadAtSeekCloserSource(tmpFile, tmpFile.Name()) + testSource(t, source, tmpFile) +} + +func testSource(t *testing.T, source uploader.Source, originalFile *os.File) { + if ts, err := source.(uploader.SizedSource).TotalSize(); err != nil { + t.Fatal(err) + } else if ts != 4096 { + t.Fatalf("Unexpected file size: %d", ts) + } + + if sk, err := source.SourceID(); err != nil { + t.Fatal(err) + } else if sk != originalFile.Name() { + t.Fatalf("Unexpected source key: %#v", sk) + } + + parts := make([]uploader.Part, 0, 16) + for i := 0; i < 16; i++ { + part, err := source.Slice(256) + if err != nil { + t.Fatal(err) + } + parts = append(parts, part) + } + for i, 
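+	// The parts sliced above are read concurrently further below. That is
+	// safe here because every Part from a seekable source is an
+	// io.SectionReader over a shared reader: the *os.File-backed source
+	// serves overlapping reads via ReadAt, and the ReadSeekCloser adapter
+	// serializes its Seek+Read pairs behind a mutex. Once the source is
+	// exhausted, Slice reports the end by returning a nil Part:
+	//
+	//	if part, err := source.Slice(256); err == nil && part == nil {
+	//		// no more data
+	//	}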
+	for i, part := range parts {
+		if part.PartNumber() != uint64(i+1) {
+			t.Fatalf("Unexpected part number: %d", part.PartNumber())
+		}
+	}
+	var wg sync.WaitGroup
+	for i := 0; i < 16; i++ {
+		wg.Add(1)
+		go func(i int) {
+			defer wg.Done()
+			testPart(t, parts[i], int64(i*256), originalFile)
+		}(i)
+	}
+	wg.Wait()
+}
+
+func testPart(t *testing.T, part uploader.Part, offset int64, originalFile *os.File) {
+	partData, err := internal_io.ReadAll(part)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertReaderEqual(t, originalFile, offset, partData)
+
+	if _, err := part.Seek(0, io.SeekStart); err != nil {
+		t.Fatal(err)
+	}
+
+	partData, err = internal_io.ReadAll(part)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertReaderEqual(t, originalFile, offset, partData)
+}
+
+func assertReaderEqual(t *testing.T, file *os.File, offset int64, expectedData []byte) {
+	data := make([]byte, len(expectedData))
+	n, err := file.ReadAt(data, offset)
+	if err != nil {
+		t.Fatal(err)
+	} else if n != len(expectedData) {
+		t.Fatalf("Unexpected read data size %d", n)
+	}
+	if !bytes.Equal(data, expectedData) {
+		t.Fatalf("Range (%d-%d) of file (%s) is inequal", offset, offset+int64(len(expectedData)), file.Name())
+	}
+}
diff --git a/storagev2/uploader/upload_manager.go b/storagev2/uploader/upload_manager.go
new file mode 100644
index 00000000..0ec29e1a
--- /dev/null
+++ b/storagev2/uploader/upload_manager.go
@@ -0,0 +1,268 @@
+package uploader
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+
+	internal_io "github.com/qiniu/go-sdk/v7/internal/io"
+	httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client"
+	resumablerecorder "github.com/qiniu/go-sdk/v7/storagev2/uploader/resumable_recorder"
+	"github.com/qiniu/go-sdk/v7/storagev2/uptoken"
+	"golang.org/x/sync/errgroup"
+)
+
+type (
+	// Upload manager
+	UploadManager struct {
+		options                   httpclient.Options
+		upTokenProvider           uptoken.Provider
+		resumableRecorder         resumablerecorder.ResumableRecorder
+		partSize                  uint64
+		multiPartsThreshold       uint64
+		concurrency               int
+		multiPartsUploaderVersion MultiPartsUploaderVersion
+	}
+
+	// Options for the upload manager
+	UploadManagerOptions struct {
+		// HTTP client options
+		httpclient.Options
+
+		// Upload token provider
+		UpTokenProvider uptoken.Provider
+
+		// Resumable recorder; if unset, interrupted uploads cannot be resumed
+		ResumableRecorder resumablerecorder.ResumableRecorder
+
+		// Part size; defaults to 4 MB if unset
+		PartSize uint64
+
+		// Threshold for switching to multi-parts upload; defaults to 4 MB if unset
+		MultiPartsThreshold uint64
+
+		// Concurrency of multi-parts uploads; defaults to 4 if unset
+		Concurrency int
+
+		// Multi-parts uploader version; defaults to V2 if unset
+		MultiPartsUploaderVersion MultiPartsUploaderVersion
+	}
+
+	// Multi-parts uploader version
+	MultiPartsUploaderVersion uint8
+)
+
+const (
+	// Multi-parts upload V1
+	MultiPartsUploaderVersionV1 MultiPartsUploaderVersion = 1
+
+	// Multi-parts upload V2
+	MultiPartsUploaderVersionV2 MultiPartsUploaderVersion = 2
+)
+
+// Create an upload manager
+func NewUploadManager(options *UploadManagerOptions) *UploadManager {
+	if options == nil {
+		options = &UploadManagerOptions{}
+	}
+	partSize := options.PartSize
+	if partSize == 0 {
+		partSize = 1 << 22
+	} else if partSize < (1 << 20) {
+		partSize = 1 << 20
+	} else if partSize > (1 << 30) {
+		partSize = 1 << 30
+	}
+	multiPartsThreshold := options.MultiPartsThreshold
+	if multiPartsThreshold == 0 {
+		multiPartsThreshold = partSize
+	}
+	concurrency := options.Concurrency
+	if concurrency == 0 {
+		concurrency = 4
+	}
+	uploadManager := UploadManager{
+		options:             options.Options,
+		upTokenProvider:     options.UpTokenProvider,
+		resumableRecorder:   options.ResumableRecorder,
+		partSize:            partSize,
+		multiPartsThreshold: multiPartsThreshold,
+		concurrency:         concurrency,
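+		// The fields above have been normalized: PartSize defaults to 4 MiB
+		// and is clamped to [1 MiB, 1 GiB], MultiPartsThreshold defaults to
+		// the part size, and Concurrency defaults to 4. Typical construction
+		// (bucket name and credentials are placeholders):
+		//
+		//	mgr := NewUploadManager(&UploadManagerOptions{
+		//		Options: httpclient.Options{
+		//			Credentials: credentials.NewCredentials("ak", "sk"),
+		//		},
+		//	})
+		//	err := mgr.UploadFile(ctx, "/path/to/file", &ObjectOptions{
+		//		BucketName: "bucket",
+		//	}, nil)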
+		multiPartsUploaderVersion: options.MultiPartsUploaderVersion,
+	}
+	return &uploadManager
+}
+
+// Upload a directory
+func (uploadManager *UploadManager) UploadDirectory(ctx context.Context, directoryPath string, directoryOptions *DirectoryOptions) error {
+	if directoryOptions == nil {
+		directoryOptions = &DirectoryOptions{}
+	}
+	objectConcurrency := directoryOptions.ObjectConcurrency
+	if objectConcurrency == 0 {
+		objectConcurrency = 4
+	}
+	pathSeparator := directoryOptions.PathSeparator
+	if pathSeparator == "" {
+		pathSeparator = "/"
+	}
+
+	if !strings.HasSuffix(directoryPath, string(filepath.Separator)) {
+		directoryPath += string(filepath.Separator)
+	}
+
+	updateObjectName := directoryOptions.UpdateObjectName
+	if updateObjectName == nil {
+		updateObjectName = func(path string) string { return path }
+	}
+	generateObjectName := func(path string) string {
+		path = strings.TrimPrefix(path, directoryPath)
+		if pathSeparator != string(filepath.Separator) {
+			path = strings.Replace(path, string(filepath.Separator), pathSeparator, -1)
+		}
+		return updateObjectName(path)
+	}
+
+	g, ctx := errgroup.WithContext(ctx)
+	g.SetLimit(objectConcurrency)
+
+	err := filepath.Walk(directoryPath, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		g.Go(func() error {
+			objectName := generateObjectName(path)
+			if info.Mode().IsRegular() {
+				objectOptions := ObjectOptions{
+					RegionsProvider: directoryOptions.RegionsProvider,
+					UpToken:         directoryOptions.UpToken,
+					BucketName:      directoryOptions.BucketName,
+					ObjectName:      &objectName,
+					FileName:        filepath.Base(path),
+				}
+				if directoryOptions.ShouldUploadObject != nil && !directoryOptions.ShouldUploadObject(path, &objectOptions) {
+					return nil
+				}
+				if directoryOptions.BeforeObjectUpload != nil {
+					directoryOptions.BeforeObjectUpload(path, &objectOptions)
+				}
+				if directoryOptions.OnUploadingProgress != nil {
+					objectOptions.OnUploadingProgress = func(progress *UploadingProgress) {
+						directoryOptions.OnUploadingProgress(path, progress)
+					}
+				}
+				err = uploadManager.UploadFile(ctx, path, &objectOptions, nil)
+				if err == nil && directoryOptions.OnObjectUploaded != nil {
+					directoryOptions.OnObjectUploaded(path, &UploadedObjectInfo{Size: uint64(info.Size())})
+				}
+			} else if directoryOptions.ShouldCreateDirectory && info.IsDir() {
+				if !strings.HasSuffix(objectName, pathSeparator) {
+					objectName += pathSeparator
+				}
+				objectOptions := ObjectOptions{
+					RegionsProvider: directoryOptions.RegionsProvider,
+					UpToken:         directoryOptions.UpToken,
+					BucketName:      directoryOptions.BucketName,
+					ObjectName:      &objectName,
+					FileName:        filepath.Base(path),
+				}
+				err = uploadManager.UploadReader(ctx, http.NoBody, &objectOptions, nil)
+			}
+			return err
+		})
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	return g.Wait()
+
+}
+
+// Upload a file
+func (uploadManager *UploadManager) UploadFile(ctx context.Context, path string, objectOptions *ObjectOptions, returnValue interface{}) error {
+	if objectOptions == nil {
+		objectOptions = &ObjectOptions{}
+	}
+
+	fileInfo, err := os.Stat(path)
+	if err != nil {
+		return err
+	}
+
+	var uploader Uploader
+	if fileInfo.Size() > int64(uploadManager.multiPartsThreshold) {
+		uploader = newMultiPartsUploader(uploadManager.getScheduler())
+	} else {
+		uploader = uploadManager.getFormUploader()
+	}
+
+	return uploader.UploadFile(ctx, path, objectOptions, returnValue)
+}
+
+// Upload from an io.Reader
+func (uploadManager *UploadManager) UploadReader(ctx context.Context, reader io.Reader, objectOptions *ObjectOptions,
returnValue interface{}) error { + var uploader Uploader + + if objectOptions == nil { + objectOptions = &ObjectOptions{} + } + + if rscs, ok := reader.(io.ReadSeeker); ok && canSeekReally(rscs) { + size, err := getSeekerSize(rscs) + if err == nil && size > uploadManager.multiPartsThreshold { + uploader = newMultiPartsUploader(uploadManager.getScheduler()) + } + } + if uploader == nil { + firstPartBytes, err := internal_io.ReadAll(io.LimitReader(reader, int64(uploadManager.multiPartsThreshold+1))) + if err != nil { + return err + } + reader = io.MultiReader(bytes.NewReader(firstPartBytes), reader) + if len(firstPartBytes) > int(uploadManager.multiPartsThreshold) { + uploader = newMultiPartsUploader(uploadManager.getScheduler()) + } else { + uploader = uploadManager.getFormUploader() + } + } + + return uploader.UploadReader(ctx, reader, objectOptions, returnValue) +} + +func (uploadManager *UploadManager) getScheduler() multiPartsUploaderScheduler { + if uploadManager.concurrency > 1 { + return newConcurrentMultiPartsUploaderScheduler(uploadManager.getMultiPartsUploader(), &concurrentMultiPartsUploaderSchedulerOptions{ + PartSize: uploadManager.partSize, Concurrency: uploadManager.concurrency, + }) + } else { + return newSerialMultiPartsUploaderScheduler(uploadManager.getMultiPartsUploader(), &serialMultiPartsUploaderSchedulerOptions{ + PartSize: uploadManager.partSize, + }) + } +} + +func (uploadManager *UploadManager) getMultiPartsUploader() MultiPartsUploader { + multiPartsUploaderOptions := MultiPartsUploaderOptions{ + Options: uploadManager.options, + UpTokenProvider: uploadManager.upTokenProvider, + ResumableRecorder: uploadManager.resumableRecorder, + } + if uploadManager.multiPartsUploaderVersion == MultiPartsUploaderVersionV1 { + return NewMultiPartsUploaderV1(&multiPartsUploaderOptions) + } else { + return NewMultiPartsUploaderV2(&multiPartsUploaderOptions) + } +} + +func (uploadManager *UploadManager) getFormUploader() Uploader { + return NewFormUploader(&FormUploaderOptions{ + Options: uploadManager.options, + UpToken: uploadManager.upTokenProvider, + }) +} diff --git a/storagev2/uploader/upload_manager_test.go b/storagev2/uploader/upload_manager_test.go new file mode 100644 index 00000000..06434b7e --- /dev/null +++ b/storagev2/uploader/upload_manager_test.go @@ -0,0 +1,619 @@ +//go:build unit +// +build unit + +package uploader_test + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/json" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + "github.com/gorilla/mux" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" + "github.com/qiniu/go-sdk/v7/storagev2/uploader" +) + +func TestUploadManagerUploadFile(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + if _, err = io.CopyN(tmpFile, r, 5*1024*1024); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + serveMux := mux.NewRouter() + serveMux.HandleFunc("/buckets/{bucketName}/objects/{encodedObjectName}/uploads", func(w http.ResponseWriter, r 
*http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + vars := mux.Vars(r) + if vars["bucketName"] != "testbucket" { + t.Fatalf("unexpected bucket name") + } + objectBytes, err := base64.URLEncoding.DecodeString(vars["encodedObjectName"]) + if err != nil { + t.Fatal(err) + } else if string(objectBytes) != "testkey" { + t.Fatalf("unexpected object name") + } + jsonBytes, err := json.Marshal(&apis.ResumableUploadV2InitiateMultipartUploadResponse{ + UploadId: "testuploadID", + ExpiredAt: time.Now().Add(1 * time.Hour).Unix(), + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBytes) + }).Methods(http.MethodPost) + serveMux.HandleFunc("/buckets/{bucketName}/objects/{encodedObjectName}/uploads/{uploadID}/{partNumber}", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + vars := mux.Vars(r) + if vars["bucketName"] != "testbucket" { + t.Fatalf("unexpected bucket name") + } + objectBytes, err := base64.URLEncoding.DecodeString(vars["encodedObjectName"]) + if err != nil { + t.Fatal(err) + } else if string(objectBytes) != "testkey" { + t.Fatalf("unexpected object name") + } + if vars["uploadID"] != "testuploadID" { + t.Fatalf("unexpected upload id") + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + var expectedBody, jsonBody []byte + switch vars["partNumber"] { + case "1": + expectedBody, err = internal_io.ReadAll(io.NewSectionReader(tmpFile, 0, 4*1024*1024)) + if err != nil { + t.Fatal(err) + } + case "2": + expectedBody, err = internal_io.ReadAll(io.NewSectionReader(tmpFile, 4*1024*1024, 1024*1024)) + if err != nil { + t.Fatal(err) + } + default: + t.Fatalf("unexpected part number") + } + if !bytes.Equal(actualBody, expectedBody) { + t.Fatalf("unexpected body") + } + md5Sum := md5.Sum(actualBody) + if r.Header.Get("Content-MD5") != hex.EncodeToString(md5Sum[:]) { + t.Fatalf("unexpected content-md5") + } + switch vars["partNumber"] { + case "1": + jsonBody, err = json.Marshal(&apis.ResumableUploadV2UploadPartResponse{ + Etag: "testetag1", + Md5: r.Header.Get("Content-MD5"), + }) + case "2": + jsonBody, err = json.Marshal(&apis.ResumableUploadV2UploadPartResponse{ + Etag: "testetag2", + Md5: r.Header.Get("Content-MD5"), + }) + } + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBody) + }).Methods(http.MethodPut) + serveMux.HandleFunc("/buckets/{bucketName}/objects/{encodedObjectName}/uploads/{uploadID}", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + vars := mux.Vars(r) + if vars["bucketName"] != "testbucket" { + t.Fatalf("unexpected bucket name") + } + objectBytes, err := base64.URLEncoding.DecodeString(vars["encodedObjectName"]) + if err != nil { + t.Fatal(err) + } else if string(objectBytes) != "testkey" { + t.Fatalf("unexpected object name") + } + if vars["uploadID"] != "testuploadID" { + t.Fatalf("unexpected upload id") + } + requestBodyBytes, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + var body apis.ResumableUploadV2CompleteMultipartUploadRequest + if err = body.UnmarshalJSON(requestBodyBytes); err != nil { + t.Fatalf("unexpected request body") + } + if len(body.Parts) != 2 { + t.Fatalf("unexpected parts") + } else if 
body.Parts[0].PartNumber != 1 { + t.Fatalf("unexpected part number") + } else if body.Parts[0].Etag != "testetag1" { + t.Fatalf("unexpected part number") + } else if body.Parts[1].PartNumber != 2 { + t.Fatalf("unexpected part number") + } else if body.Parts[1].Etag != "testetag2" { + t.Fatalf("unexpected part number") + } + if body.FileName != "testfilename" { + t.Fatalf("unexpected fileName") + } + if body.MimeType != "application/json" { + t.Fatalf("unexpected mimeType") + } + if len(body.Metadata) != 2 { + t.Fatalf("unexpected metadata") + } else if body.Metadata["x-qn-meta-a"] != "b" { + t.Fatalf("unexpected x-qn-meta-a") + } else if body.Metadata["x-qn-meta-c"] != "d" { + t.Fatalf("unexpected x-qn-meta-c") + } else if body.CustomVars["x:a"] != "b" { + t.Fatalf("unexpected x:a") + } else if body.CustomVars["x:c"] != "d" { + t.Fatalf("unexpected x:c") + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }).Methods(http.MethodPost) + server := httptest.NewServer(serveMux) + defer server.Close() + + var ( + uploadManager = uploader.NewUploadManager(&uploader.UploadManagerOptions{ + Options: http_client.Options{ + Regions: ®ion.Region{Up: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + Concurrency: 2, + }) + returnValue struct { + Ok bool `json:"ok"` + } + key = "testkey" + ) + + err = uploadManager.UploadFile(context.Background(), tmpFile.Name(), &uploader.ObjectOptions{ + BucketName: "testbucket", + ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": "b", "c": "d"}, + }, &returnValue) + if err != nil { + t.Fatal(err) + } else if !returnValue.Ok { + t.Fatalf("unexpected response body") + } +} + +func TestUploadManagerUploadReader(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + if _, err = io.CopyN(tmpFile, r, 5*1024*1024); err != nil { + t.Fatal(err) + } + if _, err = tmpFile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + serveMux := mux.NewRouter() + serveMux.HandleFunc("/buckets/{bucketName}/objects/{encodedObjectName}/uploads", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + vars := mux.Vars(r) + if vars["bucketName"] != "testbucket" { + t.Fatalf("unexpected bucket name") + } + objectBytes, err := base64.URLEncoding.DecodeString(vars["encodedObjectName"]) + if err != nil { + t.Fatal(err) + } else if string(objectBytes) != "testkey" { + t.Fatalf("unexpected object name") + } + jsonBytes, err := json.Marshal(&apis.ResumableUploadV2InitiateMultipartUploadResponse{ + UploadId: "testuploadID", + ExpiredAt: time.Now().Add(1 * time.Hour).Unix(), + }) + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBytes) + }).Methods(http.MethodPost) + serveMux.HandleFunc("/buckets/{bucketName}/objects/{encodedObjectName}/uploads/{uploadID}/{partNumber}", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + vars := mux.Vars(r) + if vars["bucketName"] != "testbucket" { + t.Fatalf("unexpected bucket name") + } + objectBytes, 
err := base64.URLEncoding.DecodeString(vars["encodedObjectName"]) + if err != nil { + t.Fatal(err) + } else if string(objectBytes) != "testkey" { + t.Fatalf("unexpected object name") + } + if vars["uploadID"] != "testuploadID" { + t.Fatalf("unexpected upload id") + } + actualBody, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + var expectedBody, jsonBody []byte + switch vars["partNumber"] { + case "1": + expectedBody, err = internal_io.ReadAll(io.NewSectionReader(tmpFile, 0, 4*1024*1024)) + if err != nil { + t.Fatal(err) + } + case "2": + expectedBody, err = internal_io.ReadAll(io.NewSectionReader(tmpFile, 4*1024*1024, 1024*1024)) + if err != nil { + t.Fatal(err) + } + default: + t.Fatalf("unexpected part number") + } + if !bytes.Equal(actualBody, expectedBody) { + t.Fatalf("unexpected body") + } + md5Sum := md5.Sum(actualBody) + if r.Header.Get("Content-MD5") != hex.EncodeToString(md5Sum[:]) { + t.Fatalf("unexpected content-md5") + } + switch vars["partNumber"] { + case "1": + jsonBody, err = json.Marshal(&apis.ResumableUploadV2UploadPartResponse{ + Etag: "testetag1", + Md5: r.Header.Get("Content-MD5"), + }) + case "2": + jsonBody, err = json.Marshal(&apis.ResumableUploadV2UploadPartResponse{ + Etag: "testetag2", + Md5: r.Header.Get("Content-MD5"), + }) + } + if err != nil { + t.Fatal(err) + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write(jsonBody) + }).Methods(http.MethodPut) + serveMux.HandleFunc("/buckets/{bucketName}/objects/{encodedObjectName}/uploads/{uploadID}", func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("Authorization"), "UpToken testak:") { + t.Fatalf("unexpected authorization") + } + vars := mux.Vars(r) + if vars["bucketName"] != "testbucket" { + t.Fatalf("unexpected bucket name") + } + objectBytes, err := base64.URLEncoding.DecodeString(vars["encodedObjectName"]) + if err != nil { + t.Fatal(err) + } else if string(objectBytes) != "testkey" { + t.Fatalf("unexpected object name") + } + if vars["uploadID"] != "testuploadID" { + t.Fatalf("unexpected upload id") + } + requestBodyBytes, err := internal_io.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + var body apis.ResumableUploadV2CompleteMultipartUploadRequest + if err = body.UnmarshalJSON(requestBodyBytes); err != nil { + t.Fatalf("unexpected request body") + } + if len(body.Parts) != 2 { + t.Fatalf("unexpected parts") + } else if body.Parts[0].PartNumber != 1 { + t.Fatalf("unexpected part number") + } else if body.Parts[0].Etag != "testetag1" { + t.Fatalf("unexpected part number") + } else if body.Parts[1].PartNumber != 2 { + t.Fatalf("unexpected part number") + } else if body.Parts[1].Etag != "testetag2" { + t.Fatalf("unexpected part number") + } + if body.FileName != "testfilename" { + t.Fatalf("unexpected fileName") + } + if body.MimeType != "application/json" { + t.Fatalf("unexpected mimeType") + } + if len(body.Metadata) != 2 { + t.Fatalf("unexpected metadata") + } else if body.Metadata["x-qn-meta-a"] != "b" { + t.Fatalf("unexpected x-qn-meta-a") + } else if body.Metadata["x-qn-meta-c"] != "d" { + t.Fatalf("unexpected x-qn-meta-c") + } else if body.CustomVars["x:a"] != "b" { + t.Fatalf("unexpected x:a") + } else if body.CustomVars["x:c"] != "d" { + t.Fatalf("unexpected x:c") + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }).Methods(http.MethodPost) + server := httptest.NewServer(serveMux) + defer server.Close() + + var ( + uploadManager = uploader.NewUploadManager(&uploader.UploadManagerOptions{ + Options: 
http_client.Options{ + Regions: ®ion.Region{Up: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + Concurrency: 2, + }) + returnValue struct { + Ok bool `json:"ok"` + } + key = "testkey" + pipeR, pipeW = io.Pipe() + wg sync.WaitGroup + ) + wg.Add(1) + defer pipeR.Close() + + go func(t *testing.T, w io.WriteCloser) { + defer wg.Done() + defer w.Close() + if _, err = io.Copy(w, tmpFile); err != nil { + t.Error(err) + } + }(t, pipeW) + + err = uploadManager.UploadReader(context.Background(), pipeR, &uploader.ObjectOptions{ + BucketName: "testbucket", + ObjectName: &key, + FileName: "testfilename", + ContentType: "application/json", + Metadata: map[string]string{"a": "b", "c": "d"}, + CustomVars: map[string]string{"a": "b", "c": "d"}, + }, &returnValue) + if err != nil { + t.Fatal(err) + } else if !returnValue.Ok { + t.Fatalf("unexpected response body") + } + + wg.Wait() +} + +func TestUploadManagerUploadDirectory(t *testing.T) { + testUploadManagerUploadDirectory(t, true) + testUploadManagerUploadDirectory(t, false) +} + +func testUploadManagerUploadDirectory(t *testing.T, createDirectory bool) { + var localFiles, remoteObjects sync.Map + + tmpDir_1, err := ioutil.TempDir("", "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir_1) + + const objectPrefix = "remoteDirectory" + remoteObjects.Store(objectPrefix+"/", (*os.File)(nil)) + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + tmpFile_1, err := ioutil.TempFile(tmpDir_1, "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile_1.Name()) + defer tmpFile_1.Close() + + if _, err = io.CopyN(tmpFile_1, r, 1024*1024); err != nil { + t.Fatal(err) + } + if relativePath, err := filepath.Rel(tmpDir_1, tmpFile_1.Name()); err != nil { + t.Fatal(err) + } else { + remoteObjects.Store(strings.Replace(filepath.Join(objectPrefix, relativePath), string(filepath.Separator), "/", -1), tmpFile_1) + } + localFiles.Store(tmpFile_1.Name(), uint64(0)) + + tmpDir_2, err := ioutil.TempDir(tmpDir_1, "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + if relativeDir, err := filepath.Rel(tmpDir_1, tmpDir_2); err != nil { + t.Fatal(err) + } else { + remoteObjects.Store(strings.Replace(filepath.Join(objectPrefix, relativeDir)+string(filepath.Separator), string(filepath.Separator), "/", -1), (*os.File)(nil)) + } + + tmpFile_2, err := ioutil.TempFile(tmpDir_2, "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile_2.Name()) + defer tmpFile_2.Close() + + if _, err = io.CopyN(tmpFile_2, r, 1024*1024); err != nil { + t.Fatal(err) + } + if relativePath, err := filepath.Rel(tmpDir_1, tmpFile_2.Name()); err != nil { + t.Fatal(err) + } else { + remoteObjects.Store(strings.Replace(filepath.Join(objectPrefix, relativePath), string(filepath.Separator), "/", -1), tmpFile_2) + } + localFiles.Store(tmpFile_2.Name(), uint64(0)) + + tmpDir_3, err := ioutil.TempDir(tmpDir_2, "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + if relativeDir, err := filepath.Rel(tmpDir_1, tmpDir_3); err != nil { + t.Fatal(err) + } else { + remoteObjects.Store(strings.Replace(filepath.Join(objectPrefix, relativeDir)+string(filepath.Separator), string(filepath.Separator), "/", -1), (*os.File)(nil)) + } + + tmpFile_3, err := ioutil.TempFile(tmpDir_3, "multi-parts-uploader-test-*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile_3.Name()) + defer tmpFile_3.Close() + 
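+	// The fixture mirrors every local path into its expected object key by
+	// relativizing it against the tree root and normalizing the separator,
+	// i.e. for any created entry:
+	//
+	//	rel, _ := filepath.Rel(tmpDir_1, tmpFile_3.Name())
+	//	key := strings.Replace(filepath.Join(objectPrefix, rel), string(filepath.Separator), "/", -1)
+	//
+	// Directory entries are registered with a trailing "/" and a nil
+	// *os.File, which is how the handler below tells directory-creation
+	// requests apart from file uploads.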
+ if _, err = io.CopyN(tmpFile_3, r, 1024*1024); err != nil { + t.Fatal(err) + } + if relativePath, err := filepath.Rel(tmpDir_1, tmpFile_3.Name()); err != nil { + t.Fatal(err) + } else { + remoteObjects.Store(strings.Replace(filepath.Join(objectPrefix, relativePath), string(filepath.Separator), "/", -1), tmpFile_3) + } + localFiles.Store(tmpFile_3.Name(), uint64(0)) + + serveMux := http.NewServeMux() + serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + t.Fatalf("unexpected method: %s", r.Method) + } + if err := r.ParseMultipartForm(2 * 1024 * 1024); err != nil { + t.Fatal(err) + } + if values := r.MultipartForm.Value["token"]; len(values) != 1 || !strings.HasPrefix(values[0], "testak:") { + t.Fatalf("unexpected token") + } + + key := r.MultipartForm.Value["key"][0] + if expectedValue, ok := remoteObjects.Load(key); !ok { + t.Fatalf("unexpected key") + } else if expectedObject, ok := expectedValue.(*os.File); !ok { + t.Fatalf("unexpected key") + } else { + remoteObjects.Delete(key) + multiPartFile := r.MultipartForm.File["file"][0] + receivedFile, err := multiPartFile.Open() + if err != nil { + t.Fatal(err) + } + defer receivedFile.Close() + + receivedFileBytes, err := internal_io.ReadAll(receivedFile) + if err != nil { + t.Fatal(err) + } + + if expectedObject == nil { + if !createDirectory { + t.Fatalf("unexpected directory creation") + } + if len(receivedFileBytes) != 0 { + t.Fatalf("content of directory should be empty") + } + } else { + if _, err = expectedObject.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + expectedObjectBytes, err := internal_io.ReadAll(expectedObject) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(expectedObjectBytes, receivedFileBytes) { + t.Fatalf("unexpected content") + } + } + } + w.Header().Add("X-ReqId", "fakereqid") + w.Write([]byte(`{"ok":true}`)) + }) + server := httptest.NewServer(serveMux) + defer server.Close() + + var uploadManager = uploader.NewUploadManager(&uploader.UploadManagerOptions{ + Options: http_client.Options{ + Regions: ®ion.Region{Up: region.Endpoints{Preferred: []string{server.URL}}}, + Credentials: credentials.NewCredentials("testak", "testsk"), + }, + }) + + err = uploadManager.UploadDirectory(context.Background(), tmpDir_1, &uploader.DirectoryOptions{ + BucketName: "testbucket", + UpdateObjectName: func(path string) string { + return objectPrefix + "/" + path + }, + BeforeObjectUpload: func(filePath string, _ *uploader.ObjectOptions) { + if _, ok := localFiles.Load(filePath); !ok { + t.Fatalf("unexpected filePath") + } + }, + OnUploadingProgress: func(filePath string, progress *uploader.UploadingProgress) { + if progress.TotalSize != 1024*1024 { + t.Fatalf("unexpected totalSize") + } + if lastUploadedValue, ok := localFiles.Load(filePath); !ok { + t.Fatalf("unexpected filePath") + } else if lastUploaded, ok := lastUploadedValue.(uint64); !ok { + t.Fatalf("unexpected filePath") + } else if progress.Uploaded < lastUploaded { + t.Fatalf("unexpected uploaded") + } else { + localFiles.Store(filePath, progress.Uploaded) + } + }, + OnObjectUploaded: func(filePath string, info *uploader.UploadedObjectInfo) { + if info.Size != 1024*1024 { + t.Fatalf("unexpected size") + } + if _, ok := localFiles.Load(filePath); !ok { + t.Fatalf("unexpected filePath") + } + }, + ShouldCreateDirectory: createDirectory, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/storagev2/uploader/uploaders.go b/storagev2/uploader/uploaders.go new file mode 100644 index 00000000..5566f8e4 
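This file introduces the two concrete uploaders behind UploadManager: a form uploader for single-request uploads and a multi-parts uploader that first tries to resume from the resumable recorder and otherwise retries region by region. Both satisfy the package's Uploader interface; the interface itself is declared elsewhere in the package, but its shape can be inferred from the call sites above:

	type Uploader interface {
		UploadFile(ctx context.Context, path string, objectOptions *ObjectOptions, returnValue interface{}) error
		UploadReader(ctx context.Context, reader io.Reader, objectOptions *ObjectOptions, returnValue interface{}) error
	}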
--- /dev/null
+++ b/storagev2/uploader/uploaders.go
@@ -0,0 +1,492 @@
+package uploader
+
+import (
+	"bytes"
+	"context"
+	stderrors "errors"
+	"hash/crc32"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	internal_io "github.com/qiniu/go-sdk/v7/internal/io"
+	"github.com/qiniu/go-sdk/v7/storagev2/apis"
+	creds "github.com/qiniu/go-sdk/v7/storagev2/credentials"
+	"github.com/qiniu/go-sdk/v7/storagev2/errors"
+	httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client"
+	"github.com/qiniu/go-sdk/v7/storagev2/region"
+	"github.com/qiniu/go-sdk/v7/storagev2/retrier"
+	"github.com/qiniu/go-sdk/v7/storagev2/uploader/source"
+	"github.com/qiniu/go-sdk/v7/storagev2/uptoken"
+	"modernc.org/fileutil"
+)
+
+type (
+	// Options for the form uploader
+	FormUploaderOptions struct {
+		httpclient.Options
+
+		// Upload token provider
+		UpToken uptoken.Provider
+	}
+
+	formUploader struct {
+		storage *apis.Storage
+		options *FormUploaderOptions
+	}
+
+	multiPartsUploader struct {
+		scheduler multiPartsUploaderScheduler
+	}
+)
+
+// Create a form uploader
+func NewFormUploader(options *FormUploaderOptions) Uploader {
+	if options == nil {
+		options = &FormUploaderOptions{}
+	}
+	return formUploader{apis.NewStorage(&options.Options), options}
+}
+
+func (uploader formUploader) UploadFile(ctx context.Context, path string, objectOptions *ObjectOptions, returnValue interface{}) error {
+	if objectOptions == nil {
+		objectOptions = &ObjectOptions{}
+	}
+	upToken, err := getUpToken(uploader.options.Credentials, objectOptions, uploader.options.UpToken)
+	if err != nil {
+		return err
+	}
+	file, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	fileInfo, err := file.Stat()
+	if err != nil {
+		return err
+	}
+
+	fileSize := uint64(fileInfo.Size())
+	_ = fileutil.Fadvise(file, 0, 0, fileutil.POSIX_FADV_SEQUENTIAL)
+
+	crc32, err := crc32FromReadSeeker(file)
+	if err != nil {
+		return err
+	}
+	var onRequestProgress func(uploaded, totalSize uint64)
+	if onUploadingProgress := objectOptions.OnUploadingProgress; onUploadingProgress != nil {
+		onRequestProgress = func(uploaded, totalSize uint64) {
+			onUploadingProgress(&UploadingProgress{Uploaded: uploaded, TotalSize: totalSize})
+		}
+	}
+	return uploader.upload(ctx, file, fileSize, upToken, objectOptions.BucketName, objectOptions.ObjectName, objectOptions.FileName, objectOptions.ContentType,
+		crc32, mergeCustomVarsAndMetadata(objectOptions.Metadata, objectOptions.CustomVars), onRequestProgress, returnValue)
+}
+
+func (uploader formUploader) UploadReader(ctx context.Context, reader io.Reader, objectOptions *ObjectOptions, returnValue interface{}) error {
+	var (
+		rsc  io.ReadSeeker
+		size uint64
+		ok   bool
+	)
+	if objectOptions == nil {
+		objectOptions = &ObjectOptions{}
+	}
+	upToken, err := getUpToken(uploader.options.Credentials, objectOptions, uploader.options.UpToken)
+	if err != nil {
+		return err
+	}
+	if rsc, ok = reader.(io.ReadSeeker); ok && canSeekReally(rsc) {
+		if size, err = getSeekerSize(rsc); err != nil {
+			return err
+		}
+	} else {
+		dataBytes, err := internal_io.ReadAll(reader)
+		if err != nil {
+			return err
+		}
+		size = uint64(len(dataBytes))
+		rsc = bytes.NewReader(dataBytes)
+	}
+	crc32, err := crc32FromReadSeeker(rsc)
+	if err != nil {
+		return err
+	}
+	var onRequestProgress func(uploaded, totalSize uint64)
+	if onUploadingProgress := objectOptions.OnUploadingProgress; onUploadingProgress != nil {
+		onRequestProgress = func(uploaded, totalSize uint64) {
+			onUploadingProgress(&UploadingProgress{Uploaded: uploaded, TotalSize: totalSize})
+		}
+	}
+	return
+
+func (uploader formUploader) upload(
+	ctx context.Context, reader io.ReadSeeker, size uint64, upToken uptoken.Provider, bucketName string,
+	objectName *string, fileName, contentType string, crc32 uint32, customData map[string]string,
+	onRequestProgress func(uint64, uint64), returnValue interface{},
+) error {
+	return forEachRegion(ctx, upToken, bucketName, &uploader.options.Options, func(region *region.Region) (bool, error) {
+		err := uploader.uploadToRegion(ctx, region, reader, size, upToken, objectName, fileName, contentType,
+			crc32, customData, onRequestProgress, returnValue)
+		return true, err
+	})
+}
+
+func (uploader formUploader) uploadToRegion(
+	ctx context.Context, region *region.Region, reader io.ReadSeeker, size uint64, upToken uptoken.Provider,
+	objectName *string, fileName, contentType string, crc32 uint32, customData map[string]string,
+	onRequestProgress func(uint64, uint64), returnValue interface{},
+) error {
+	options := apis.Options{OverwrittenRegion: region}
+	request := apis.PostObjectRequest{
+		ObjectName:  objectName,
+		UploadToken: upToken,
+		Crc32:       int64(crc32),
+		File: httpclient.MultipartFormBinaryData{
+			Data:        internal_io.NewReadSeekableNopCloser(reader),
+			Name:        fileName,
+			ContentType: contentType,
+		},
+		CustomData:   customData,
+		ResponseBody: returnValue,
+	}
+	if onRequestProgress != nil {
+		options.OnRequestProgress = func(uploaded, _ uint64) {
+			if uploaded > size {
+				uploaded = size
+			}
+			onRequestProgress(uploaded, size)
+		}
+	}
+	_, err := uploader.storage.PostObject(ctx, &request, &options)
+	return err
+}
+
+func newMultiPartsUploader(scheduler multiPartsUploaderScheduler) Uploader {
+	return multiPartsUploader{scheduler}
+}
+
+func (uploader multiPartsUploader) UploadFile(ctx context.Context, path string, objectOptions *ObjectOptions, returnValue interface{}) error {
+	if objectOptions == nil {
+		objectOptions = &ObjectOptions{}
+	}
+	options := uploader.scheduler.MultiPartsUploader().MultiPartsUploaderOptions()
+	if options == nil {
+		options = &MultiPartsUploaderOptions{}
+	}
+
+	upToken, err := getUpToken(options.Credentials, objectOptions, options.UpTokenProvider)
+	if err != nil {
+		return err
+	}
+
+	src, err := source.NewFileSource(path)
+	if err != nil {
+		return err
+	}
+	defer src.Close()
+
+	if file := src.GetFile(); file != nil {
+		_ = fileutil.Fadvise(file, 0, 0, fileutil.POSIX_FADV_SEQUENTIAL)
+	}
+
+	return uploader.upload(ctx, src, upToken, &options.Options, objectOptions, returnValue)
+}
+
+func (uploader multiPartsUploader) UploadReader(ctx context.Context, reader io.Reader, objectOptions *ObjectOptions, returnValue interface{}) error {
+	if objectOptions == nil {
+		objectOptions = &ObjectOptions{}
+	}
+
+	options := uploader.scheduler.MultiPartsUploader().MultiPartsUploaderOptions()
+	if options == nil {
+		options = &MultiPartsUploaderOptions{}
+	}
+
+	upToken, err := getUpToken(options.Credentials, objectOptions, options.UpTokenProvider)
+	if err != nil {
+		return err
+	}
+
+	var src source.Source
+	if rss, ok := reader.(io.ReadSeeker); ok && canSeekReally(rss) {
+		if rasc, ok := rss.(source.ReadAtSeekCloser); ok {
+			src = source.NewReadAtSeekCloserSource(rasc, "")
+		} else if rscs, ok := rss.(internal_io.ReadSeekCloser); ok {
+			src = source.NewReadSeekCloserSource(rscs, "")
+		} else {
+			src = source.NewReadSeekCloserSource(internal_io.MakeReadSeekCloserFromReader(rss), "")
+		}
+	} else {
+		src = source.NewReadCloserSource(ioutil.NopCloser(reader), "")
+	}
+
+	return uploader.upload(ctx, src, upToken, &options.Options, objectOptions, returnValue)
+}
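UploadReader upgrades the caller's reader to the richest source it can: ReadAt plus Seek allows parts to be read independently, Seek alone still allows the size and checksum to be computed up front, and anything else is treated as a sequential stream. A standalone sketch of the same dispatch using only the standard library (describeReader and seekOnly are illustrative names, not part of the SDK):

package main

import (
	"fmt"
	"io"
	"os"
)

// seekOnly hides any ReaderAt implemented by the wrapped value, leaving
// only the Read and Seek methods visible on the concrete type.
type seekOnly struct{ io.ReadSeeker }

// describeReader mirrors the dispatch in UploadReader above: prefer the
// richest interface the concrete reader value supports.
func describeReader(r io.Reader) string {
	if rs, ok := r.(io.ReadSeeker); ok {
		if _, ok := rs.(io.ReaderAt); ok {
			return "ReadAt + Seek: parts can be read independently"
		}
		return "Seek only: size and CRC-32 can still be computed up front"
	}
	return "plain stream: buffered or read strictly sequentially"
}

func main() {
	f, err := os.Open(os.Args[0]) // any readable file works for this demo
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fmt.Println(describeReader(f))                    // *os.File implements ReaderAt
	fmt.Println(describeReader(seekOnly{f}))          // wrapper exposes only ReadSeeker
	fmt.Println(describeReader(io.LimitReader(f, 1))) // cannot seek at all
}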
+
+func (uploader multiPartsUploader) upload(ctx context.Context, src source.Source, upToken uptoken.Provider, httpClientOptions *httpclient.Options, objectOptions *ObjectOptions, returnValue interface{}) error {
+	resumed, err := uploader.uploadResumedParts(ctx, src, objectOptions, returnValue)
+	if err == nil && resumed {
+		return nil
+	} else if resumed {
+		if rsrc, ok := src.(source.ResetableSource); ok {
+			if resetErr := rsrc.Reset(); resetErr != nil {
+				return err
+			}
+		}
+	}
+	return uploader.tryToUploadToEachRegion(ctx, src, upToken, httpClientOptions, objectOptions, returnValue)
+}
+
+func (uploader multiPartsUploader) uploadResumedParts(ctx context.Context, src source.Source, objectOptions *ObjectOptions, returnValue interface{}) (bool, error) {
+	multiPartsObjectOptions := MultiPartsObjectOptions{*objectOptions, uploader.scheduler.PartSize()}
+	if initializedParts := uploader.scheduler.MultiPartsUploader().TryToResume(ctx, src, &multiPartsObjectOptions); initializedParts == nil {
+		return false, nil
+	} else {
+		defer initializedParts.Close()
+		var size uint64
+		if ssrc, ok := src.(source.SizedSource); ok {
+			if totalSize, sizeErr := ssrc.TotalSize(); sizeErr == nil {
+				size = totalSize
+			}
+		}
+		if err := uploader.uploadPartsAndComplete(ctx, src, size, initializedParts, objectOptions, returnValue); err != nil {
+			return true, err
+		} else {
+			return true, nil
+		}
+	}
+}
+
+func (uploader multiPartsUploader) tryToUploadToEachRegion(ctx context.Context, src source.Source, upToken uptoken.Provider, httpClientOptions *httpclient.Options, objectOptions *ObjectOptions, returnValue interface{}) error {
+	return forEachRegion(ctx, upToken, objectOptions.BucketName, httpClientOptions, func(region *region.Region) (bool, error) {
+		objectOptions.RegionsProvider = region
+		multiPartsObjectOptions := MultiPartsObjectOptions{*objectOptions, uploader.scheduler.PartSize()}
+		initializedParts, err := uploader.scheduler.MultiPartsUploader().InitializeParts(ctx, src, &multiPartsObjectOptions)
+		var size uint64
+		if ssrc, ok := src.(source.SizedSource); ok {
+			if totalSize, sizeErr := ssrc.TotalSize(); sizeErr == nil {
+				size = totalSize
+			}
+		}
+		if err == nil {
+			defer initializedParts.Close()
+			if err = uploader.uploadPartsAndComplete(ctx, src, size, initializedParts, objectOptions, returnValue); err == nil {
+				return true, nil
+			}
+		}
+		if rsrc, ok := src.(source.ResetableSource); ok {
+			if resetErr := rsrc.Reset(); resetErr == nil {
+				return true, err
+			}
+		}
+		return false, err
+	})
+}
+
+func (uploader multiPartsUploader) uploadPartsAndComplete(ctx context.Context, src source.Source, size uint64, initializedParts InitializedParts, objectOptions *ObjectOptions, returnValue interface{}) error {
+	var uploadPartsOptions UploadPartsOptions
+	if objectOptions.OnUploadingProgress != nil {
+		progress := newUploadingPartsProgress()
+		uploadPartsOptions.OnUploadingProgress = func(partNumber uint64, p *UploadingPartProgress) {
+			progress.setPartUploadingProgress(partNumber, p.Uploaded)
+			objectOptions.OnUploadingProgress(&UploadingProgress{Uploaded: progress.totalUploaded(), TotalSize: size})
+		}
+		uploadPartsOptions.OnPartUploaded = func(part UploadedPart) error {
+			progress.partUploaded(part.PartNumber(), part.PartSize())
+			objectOptions.OnUploadingProgress(&UploadingProgress{Uploaded: progress.totalUploaded(), TotalSize: size})
+			return nil
+		}
+	}
+	uploadParts, err := uploader.scheduler.UploadParts(ctx, initializedParts, src, &uploadPartsOptions)
+	if err != nil {
+		return err
+	}
+	return uploader.scheduler.MultiPartsUploader().CompleteParts(ctx, initializedParts, uploadParts, returnValue)
+}
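Aggregate progress reaches the caller through ObjectOptions.OnUploadingProgress, fed by the per-part accounting wired up in uploadPartsAndComplete above. A sketch of a consumer, assuming an Uploader value obtained elsewhere (for example from NewFormUploader earlier); bucket, key, and path are placeholders:

package example

import (
	"context"
	"fmt"
	"log"

	"github.com/qiniu/go-sdk/v7/storagev2/uploader"
)

// upload prints coarse progress while uploading; up can be any Uploader
// produced by this package.
func upload(up uploader.Uploader) {
	objectName := "example-key" // placeholder object key
	opts := &uploader.ObjectOptions{
		BucketName: "example-bucket", // placeholder bucket
		ObjectName: &objectName,
		OnUploadingProgress: func(p *uploader.UploadingProgress) {
			if p.TotalSize > 0 {
				fmt.Printf("\r%d/%d bytes (%.1f%%)", p.Uploaded, p.TotalSize,
					float64(p.Uploaded)*100/float64(p.TotalSize))
			}
		},
	}
	if err := up.UploadFile(context.Background(), "/tmp/example.bin", opts, nil); err != nil {
		log.Fatal(err)
	}
}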
+
+func getUpToken(c creds.CredentialsProvider, objectOptions *ObjectOptions, upTokenProvider uptoken.Provider) (uptoken.Provider, error) {
+	if objectOptions.UpToken != nil {
+		return objectOptions.UpToken, nil
+	} else if upTokenProvider != nil {
+		return upTokenProvider, nil
+	} else {
+		if c == nil {
+			c = creds.Default()
+		}
+		if c != nil && objectOptions.BucketName != "" {
+			return newCredentialsUpTokenSigner(c, objectOptions.BucketName, 1*time.Hour, 10*time.Minute), nil
+		} else {
+			return nil, errors.MissingRequiredFieldError{Name: "UpToken"}
+		}
+	}
+}
+
+func crc32FromReadSeeker(r io.ReadSeeker) (uint32, error) {
+	offset, err := r.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+	hasher := crc32.NewIEEE()
+	if _, err = io.Copy(hasher, r); err != nil {
+		return 0, err
+	}
+	if _, err = r.Seek(offset, io.SeekStart); err != nil {
+		return 0, err
+	}
+	return hasher.Sum32(), nil
+}
+
+func mergeCustomVarsAndMetadata(metadata, customVars map[string]string) map[string]string {
+	result := make(map[string]string, len(metadata)+len(customVars))
+	for k, v := range metadata {
+		result[normalizeMetadataKey(k)] = v
+	}
+	for k, v := range customVars {
+		result[normalizeCustomVarKey(k)] = v
+	}
+	return result
+}
+
+func normalizeMetadataKey(k string) string {
+	if !strings.HasPrefix(k, "x-qn-meta-") {
+		k = "x-qn-meta-" + k
+	}
+	return k
+}
+
+func normalizeCustomVarKey(k string) string {
+	if !strings.HasPrefix(k, "x:") {
+		k = "x:" + k
+	}
+	return k
+}
+
+func canSeekReally(seeker io.Seeker) bool {
+	_, err := seeker.Seek(0, io.SeekCurrent)
+	return err == nil
+}
+
+func getSeekerSize(seeker io.Seeker) (uint64, error) {
+	currentOffset, err := seeker.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+	endOffset, err := seeker.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, err
+	}
+	_, err = seeker.Seek(currentOffset, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+	return uint64(endOffset - currentOffset), nil
+}
+
+func getRegions(ctx context.Context, upToken uptoken.Provider, bucketName string, options *httpclient.Options) (regions []*region.Region, err error) {
+	regionsProvider := options.Regions
+	if regionsProvider == nil {
+		var (
+			accessKey string
+			putPolicy uptoken.PutPolicy
+		)
+		query := options.BucketQuery
+		if query == nil {
+			bucketHosts := httpclient.DefaultBucketHosts()
+			queryOptions := region.BucketRegionsQueryOptions{
+				UseInsecureProtocol: options.UseInsecureProtocol,
+				HostFreezeDuration:  options.HostFreezeDuration,
+				Client:              options.BasicHTTPClient,
+			}
+			if hostRetryConfig := options.HostRetryConfig; hostRetryConfig != nil {
+				queryOptions.RetryMax = hostRetryConfig.RetryMax
+			}
+			if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil {
+				return
+			}
+		}
+		if accessKey, err = upToken.GetAccessKey(ctx); err != nil {
+			return
+		}
+		if bucketName == "" {
+			if putPolicy, err = upToken.GetPutPolicy(ctx); err != nil {
+				return
+			} else if bucketName, err = putPolicy.GetBucketName(); err != nil {
+				return
+			}
+		}
+		regionsProvider = query.Query(accessKey, bucketName)
+	}
+	regions, err = regionsProvider.GetRegions(ctx)
+	return
+}
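getUpToken resolves a token provider in priority order: an explicit ObjectOptions.UpToken, then the uploader-level provider, and finally a signer derived from credentials (tokens valid for one hour here, refreshed ten minutes before expiry). A sketch of supplying an explicit token per object follows; it assumes uptoken.NewPutPolicy and uptoken.NewSigner exist with these shapes, so verify against the uptoken package before relying on it:

package example

import (
	"log"
	"time"

	"github.com/qiniu/go-sdk/v7/storagev2/credentials"
	"github.com/qiniu/go-sdk/v7/storagev2/uploader"
	"github.com/qiniu/go-sdk/v7/storagev2/uptoken"
)

// explicitTokenOptions builds ObjectOptions carrying a pre-built token
// provider, which getUpToken prefers over any uploader-level provider.
func explicitTokenOptions() *uploader.ObjectOptions {
	mac := credentials.NewCredentials("ACCESS_KEY", "SECRET_KEY") // placeholder key pair
	putPolicy, err := uptoken.NewPutPolicy("example-bucket", time.Now().Add(30*time.Minute))
	if err != nil {
		log.Fatal(err)
	}
	return &uploader.ObjectOptions{
		BucketName: "example-bucket", // placeholder bucket
		UpToken:    uptoken.NewSigner(putPolicy, mac),
	}
}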
+
+func forEachRegion(ctx context.Context, upToken uptoken.Provider, bucketName string, options *httpclient.Options, fn func(*region.Region) (bool, error)) (err error) {
+	var (
+		regions   []*region.Region
+		retryable bool
+	)
+
+	regions, err = getRegions(ctx, upToken, bucketName, options)
+	if err != nil {
+		return
+	}
+	if len(regions) == 0 {
+		err = stderrors.New("no regions available")
+		return
+	}
+	for _, region := range regions {
+		if retryable, err = fn(region); err != nil {
+			if !retryable || !retrier.IsErrorRetryable(err) {
+				break
+			}
+		} else {
+			break
+		}
+	}
+	return
+}
+
+type uploadingPartsProgress struct {
+	uploaded  uint64
+	uploading map[uint64]uint64
+	lock      sync.Mutex
+}
+
+func newUploadingPartsProgress() *uploadingPartsProgress {
+	return &uploadingPartsProgress{
+		uploading: make(map[uint64]uint64),
+	}
+}
+
+func (progress *uploadingPartsProgress) setPartUploadingProgress(partNumber, uploaded uint64) {
+	progress.lock.Lock()
+	defer progress.lock.Unlock()
+
+	progress.uploading[partNumber] = uploaded
+}
+
+func (progress *uploadingPartsProgress) partUploaded(partNumber, partSize uint64) {
+	progress.lock.Lock()
+	defer progress.lock.Unlock()
+
+	delete(progress.uploading, partNumber)
+	progress.uploaded += partSize
+}
+
+func (progress *uploadingPartsProgress) totalUploaded() uint64 {
+	progress.lock.Lock()
+	defer progress.lock.Unlock()
+
+	uploaded := progress.uploaded
+	for _, b := range progress.uploading {
+		uploaded += b
+	}
+	return uploaded
+}
diff --git a/storagev2/uplog/uplog.go b/storagev2/uplog/uplog.go
index 14d177fa..3ef94d97 100644
--- a/storagev2/uplog/uplog.go
+++ b/storagev2/uplog/uplog.go
@@ -51,6 +51,16 @@ func IsUplogEnabled() bool {
 	return uplog.IsUplogEnabled()
 }
 
+// GetUplogMaxStorageBytes returns the maximum storage size, in bytes, for buffered upload logs
+func GetUplogMaxStorageBytes() uint64 {
+	return uplog.GetUplogMaxStorageBytes()
+}
+
+// SetUplogMaxStorageBytes sets the maximum storage size, in bytes, for buffered upload logs
+func SetUplogMaxStorageBytes(max uint64) {
+	uplog.SetUplogMaxStorageBytes(max)
+}
+
 // SetUplogFileBufferDirPath sets the buffer directory for upload log files
 func SetUplogFileBufferDirPath(path string) {
 	uplog.SetUplogFileBufferDirPath(path)
diff --git a/storagev2/uptoken/uploadtoken.go b/storagev2/uptoken/uploadtoken.go
index 85632671..b3c32685 100644
--- a/storagev2/uptoken/uploadtoken.go
+++ b/storagev2/uptoken/uploadtoken.go
@@ -77,8 +77,8 @@ func (signer *signer) onceGetCredentials(ctx context.Context) (*credentials.Cred
 	signer.onceCredentials.Do(func() {
 		if signer.credentialsProvider != nil {
 			signer.credentials, err = signer.credentialsProvider.Get(ctx)
-		} else {
-			signer.credentials = credentials.Default()
+		} else if defaultCreds := credentials.Default(); defaultCreds != nil {
+			signer.credentials = defaultCreds
 		}
 	})
 	return signer.credentials, err
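The uplog hunk above exposes a cap on local upload-log storage through a getter/setter pair. A minimal sketch of using the new accessors; the 4 MiB figure is an arbitrary example, not a recommendation:

package example

import "github.com/qiniu/go-sdk/v7/storagev2/uplog"

func init() {
	uplog.SetUplogMaxStorageBytes(4 << 20) // cap buffered upload logs at 4 MiB
	if uplog.GetUplogMaxStorageBytes() != 4<<20 {
		panic("uplog storage cap was not applied")
	}
}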